diff options
Diffstat (limited to 'scripts')
26 files changed, 4397 insertions, 70 deletions
diff --git a/scripts/README b/scripts/README index 70c763923c..02faabe06b 100644 --- a/scripts/README +++ b/scripts/README @@ -56,3 +56,8 @@ for servers to choose from. codegen/get_mozilla_ciphers.py -- Generate a list of TLS ciphersuites for clients to use in order to look like Firefox. +Code transformation scripts +--------------------------- + +coccinelle/calloc.cocci -- Transform code to replace variants of +malloc(a*b) with calloc(a,b) diff --git a/scripts/coccinelle/calloc.cocci b/scripts/coccinelle/calloc.cocci new file mode 100644 index 0000000000..fbda88e538 --- /dev/null +++ b/scripts/coccinelle/calloc.cocci @@ -0,0 +1,23 @@ +// Use calloc or realloc as appropriate instead of multiply-and-alloc + +@malloc_to_calloc@ +identifier f =~ "(tor_malloc|tor_malloc_zero)"; +expression a; +constant b; +@@ +- f(a * b) ++ tor_calloc(a, b) + +@calloc_arg_order@ +expression a; +type t; +@@ +- tor_calloc(sizeof(t), a) ++ tor_calloc(a, sizeof(t)) + +@realloc_to_reallocarray@ +expression a, b; +expression p; +@@ +- tor_realloc(p, a * b) ++ tor_reallocarray(p, a, b) diff --git a/scripts/coccinelle/malloc_cast.cocci b/scripts/coccinelle/malloc_cast.cocci new file mode 100644 index 0000000000..20321d4fd0 --- /dev/null +++ b/scripts/coccinelle/malloc_cast.cocci @@ -0,0 +1,38 @@ +@cast_malloc@ +expression e; +type T; +@@ +- (T *)tor_malloc(e) ++ tor_malloc(e) + +@cast_malloc_zero@ +expression e; +type T; +identifier func; +@@ +- (T *)tor_malloc_zero(e) ++ tor_malloc_zero(e) + +@cast_calloc@ +expression a, b; +type T; +identifier func; +@@ +- (T *)tor_calloc(a, b) ++ tor_calloc(a, b) + +@cast_realloc@ +expression e; +expression p; +type T; +@@ +- (T *)tor_realloc(p, e) ++ tor_realloc(p, e) + +@cast_reallocarray@ +expression a,b; +expression p; +type T; +@@ +- (T *)tor_reallocarray(p, a, b) ++ tor_reallocarray(p, a, b) diff --git a/scripts/coccinelle/uncalloc.cocci b/scripts/coccinelle/uncalloc.cocci new file mode 100644 index 0000000000..bf3f74165a --- /dev/null +++ 
b/scripts/coccinelle/uncalloc.cocci @@ -0,0 +1,13 @@ + +@@ +expression a; +@@ +- tor_calloc(1, a) ++ tor_malloc_zero(a) + +@@ +expression a; +@@ +- tor_calloc(a, 1) ++ tor_malloc_zero(a) + diff --git a/scripts/codegen/gen_server_ciphers.py b/scripts/codegen/gen_server_ciphers.py index 97ed9d0469..0dca8a6734 100755 --- a/scripts/codegen/gen_server_ciphers.py +++ b/scripts/codegen/gen_server_ciphers.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright 2014, The Tor Project, Inc +# Copyright 2014-2015, The Tor Project, Inc # See LICENSE for licensing information # This script parses openssl headers to find ciphersuite names, determines diff --git a/scripts/codegen/get_mozilla_ciphers.py b/scripts/codegen/get_mozilla_ciphers.py index 0636eb3658..e673ec7dc6 100644 --- a/scripts/codegen/get_mozilla_ciphers.py +++ b/scripts/codegen/get_mozilla_ciphers.py @@ -1,6 +1,6 @@ #!/usr/bin/python # coding=utf-8 -# Copyright 2011, The Tor Project, Inc +# Copyright 2011-2015, The Tor Project, Inc # original version by Arturo Filastò # See LICENSE for licensing information @@ -29,7 +29,7 @@ def ossl(s): ##### # Read the cpp file to understand what Ciphers map to what name : # Make "ciphers" a map from name used in the javascript to a cipher macro name -fileA = open(ff('security/manager/ssl/src/nsNSSComponent.cpp'),'r') +fileA = open(ff('security/manager/ssl/nsNSSComponent.cpp'),'r') # The input format is a file containing exactly one section of the form: # static CipherPref CipherPrefs[] = { @@ -71,7 +71,7 @@ for line in cipherLines: assert not key_pending key_pending = m.group(1) continue - m = re.search(r'^\s*(\S+)(?:,\s*(true|false))?\s*}', line) + m = re.search(r'^\s*(\S+)(?:,\s*(true|false))+\s*}', line) if m: assert key_pending key = key_pending @@ -107,7 +107,7 @@ fileC.close() # Build a map enabled_ciphers from javascript name to "true" or "false", # and an (unordered!) list of the macro names for those ciphers that are # enabled. 
-fileB = open(ff('netwerk/base/public/security-prefs.js'), 'r') +fileB = open(ff('netwerk/base/security-prefs.js'), 'r') enabled_ciphers = {} for line in fileB: diff --git a/scripts/codegen/makedesc.py b/scripts/codegen/makedesc.py new file mode 100644 index 0000000000..d4ba21efae --- /dev/null +++ b/scripts/codegen/makedesc.py @@ -0,0 +1,351 @@ +#!/usr/bin/python +# Copyright 2014-2015, The Tor Project, Inc. +# See LICENSE for license information + +# This is a kludgey python script that uses ctypes and openssl to sign +# router descriptors and extrainfo documents and put all the keys in +# the right places. There are examples at the end of the file. + +# I've used this to make inputs for unit tests. I wouldn't suggest +# using it for anything else. + +import base64 +import binascii +import ctypes +import ctypes.util +import hashlib +import optparse +import os +import re +import struct +import time +import UserDict + +import slow_ed25519 +import slownacl_curve25519 +import ed25519_exts_ref + +# Pull in the openssl stuff we need. 
+ +crypt = ctypes.CDLL(ctypes.util.find_library('crypto')) +BIO_s_mem = crypt.BIO_s_mem +BIO_s_mem.argtypes = [] +BIO_s_mem.restype = ctypes.c_void_p + +BIO_new = crypt.BIO_new +BIO_new.argtypes = [ctypes.c_void_p] +BIO_new.restype = ctypes.c_void_p + +crypt.BIO_free.argtypes = [ctypes.c_void_p] +crypt.BIO_free.restype = ctypes.c_int + +crypt.BIO_ctrl.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_long, ctypes.c_void_p ] +crypt.BIO_ctrl.restype = ctypes.c_long + +crypt.PEM_write_bio_RSAPublicKey.argtypes = [ ctypes.c_void_p, ctypes.c_void_p ] +crypt.PEM_write_bio_RSAPublicKey.restype = ctypes.c_int + +RSA_generate_key = crypt.RSA_generate_key +RSA_generate_key.argtypes = [ctypes.c_int, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_void_p] +RSA_generate_key.restype = ctypes.c_void_p + +RSA_private_encrypt = crypt.RSA_private_encrypt +RSA_private_encrypt.argtypes = [ + ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int ] +RSA_private_encrypt.restype = ctypes.c_int + +i2d_RSAPublicKey = crypt.i2d_RSAPublicKey +i2d_RSAPublicKey.argtypes = [ + ctypes.c_void_p, ctypes.POINTER(ctypes.c_char_p) +] +i2d_RSAPublicKey.restype = ctypes.c_int + + +def rsa_sign(msg, rsa): + buf = ctypes.create_string_buffer(1024) + n = RSA_private_encrypt(len(msg), msg, buf, rsa, 1) + if n <= 0: + raise Exception() + return buf.raw[:n] + +def b64(x): + x = base64.b64encode(x) + res = [] + for i in xrange(0, len(x), 64): + res.append(x[i:i+64]+"\n") + return "".join(res) + +def bio_extract(bio): + buf = ctypes.c_char_p() + length = crypt.BIO_ctrl(bio, 3, 0, ctypes.byref(buf)) + return ctypes.string_at(buf, length) + +def make_rsa_key(e=65537): + rsa = crypt.RSA_generate_key(1024, e, None, None) + bio = BIO_new(BIO_s_mem()) + crypt.PEM_write_bio_RSAPublicKey(bio, rsa) + pem = bio_extract(bio).rstrip() + crypt.BIO_free(bio) + buf = ctypes.create_string_buffer(1024) + pBuf = ctypes.c_char_p(ctypes.addressof(buf)) + n = crypt.i2d_RSAPublicKey(rsa, ctypes.byref(pBuf)) + s = 
buf.raw[:n] + digest = hashlib.sha1(s).digest() + return (rsa,pem,digest) + +def makeEdSigningKeyCert(sk_master, pk_master, pk_signing, date, + includeSigning=False, certType=1): + assert len(pk_signing) == len(pk_master) == 32 + expiration = struct.pack("!L", date//3600) + if includeSigning: + extensions = "\x01\x00\x20\x04\x00%s"%(pk_master) + else: + extensions = "\x00" + signed = "\x01%s%s\x01%s%s" % ( + chr(certType), expiration, pk_signing, extensions) + signature = ed25519_exts_ref.signatureWithESK(signed, sk_master, pk_master) + assert len(signature) == 64 + return signed+signature + +def objwrap(identifier, body): + return ("-----BEGIN {0}-----\n" + "{1}" + "-----END {0}-----").format(identifier, body) + +MAGIC1 = "<<<<<<MAGIC>>>>>>" +MAGIC2 = "<<<<<!#!#!#XYZZY#!#!#!>>>>>" + +class OnDemandKeys(object): + def __init__(self, certDate=None): + if certDate is None: + certDate = time.time() + 86400 + self.certDate = certDate + self.rsa_id = None + self.rsa_onion_key = None + self.ed_id_sk = None + self.ntor_sk = None + self.ntor_crosscert = None + self.rsa_crosscert_ed = None + self.rsa_crosscert_noed = None + + @property + def RSA_IDENTITY(self): + if self.rsa_id is None: + self.rsa_id, self.rsa_ident_pem, self.rsa_id_digest = make_rsa_key() + + return self.rsa_ident_pem + + @property + def RSA_ID_DIGEST(self): + self.RSA_IDENTITY + return self.rsa_id_digest + + @property + def RSA_FINGERPRINT_NOSPACE(self): + return binascii.b2a_hex(self.RSA_ID_DIGEST).upper() + + @property + def RSA_ONION_KEY(self): + if self.rsa_onion_key is None: + self.rsa_onion_key, self.rsa_onion_pem, _ = make_rsa_key() + + return self.rsa_onion_pem + + @property + def RSA_FINGERPRINT(self): + hexdigest = self.RSA_FINGERPRINT_NOSPACEK + return " ".join(hexdigest[i:i+4] for i in range(0,len(hexdigest),4)) + + @property + def RSA_SIGNATURE(self): + return MAGIC1 + + @property + def ED_SIGNATURE(self): + return MAGIC2 + + @property + def NTOR_ONION_KEY(self): + if self.ntor_sk is None: + 
self.ntor_sk = slownacl_curve25519.Private() + self.ntor_pk = self.ntor_sk.get_public() + return base64.b64encode(self.ntor_pk.serialize()) + + @property + def ED_CERT(self): + if self.ed_id_sk is None: + self.ed_id_sk = ed25519_exts_ref.expandSK(os.urandom(32)) + self.ed_signing_sk = ed25519_exts_ref.expandSK(os.urandom(32)) + self.ed_id_pk = ed25519_exts_ref.publickeyFromESK(self.ed_id_sk) + self.ed_signing_pk = ed25519_exts_ref.publickeyFromESK(self.ed_signing_sk) + self.ed_cert = makeEdSigningKeyCert(self.ed_id_sk, self.ed_id_pk, self.ed_signing_pk, self.certDate, includeSigning=True, certType=4) + + return objwrap('ED25519 CERT', b64(self.ed_cert)) + + @property + def NTOR_CROSSCERT(self): + if self.ntor_crosscert is None: + self.ED_CERT + self.NTOR_ONION_KEY + + ed_privkey = self.ntor_sk.serialize() + os.urandom(32) + ed_pub0 = ed25519_exts_ref.publickeyFromESK(ed_privkey) + sign = (ord(ed_pub0[31]) & 255) >> 7 + + self.ntor_crosscert = makeEdSigningKeyCert(self.ntor_sk.serialize() + os.urandom(32), ed_pub0, self.ed_id_pk, self.certDate, certType=10) + self.ntor_crosscert_sign = sign + + return objwrap('ED25519 CERT', b64(self.ntor_crosscert)) + + @property + def NTOR_CROSSCERT_SIGN(self): + self.NTOR_CROSSCERT + return self.ntor_crosscert_sign + + @property + def RSA_CROSSCERT_NOED(self): + if self.rsa_crosscert_noed is None: + self.RSA_ONION_KEY + signed = self.RSA_ID_DIGEST + self.rsa_crosscert_noed = rsa_sign(signed, self.rsa_onion_key) + return objwrap("CROSSCERT",b64(self.rsa_crosscert_noed)) + + @property + def RSA_CROSSCERT_ED(self): + if self.rsa_crosscert_ed is None: + self.RSA_ONION_KEY + self.ED_CERT + signed = self.RSA_ID_DIGEST + self.ed_id_pk + self.rsa_crosscert_ed = rsa_sign(signed, self.rsa_onion_key) + return objwrap("CROSSCERT",b64(self.rsa_crosscert_ed)) + + def sign_desc(self, body): + idx = body.rfind("\nrouter-sig-ed25519 ") + if idx >= 0: + self.ED_CERT + signed_part = body[:idx+len("\nrouter-sig-ed25519 ")] + signed_part = "Tor 
router descriptor signature v1" + signed_part + digest = hashlib.sha256(signed_part).digest() + ed_sig = ed25519_exts_ref.signatureWithESK(digest, + self.ed_signing_sk, self.ed_signing_pk) + + body = body.replace(MAGIC2, base64.b64encode(ed_sig).replace("=","")) + + idx = body.rindex("\nrouter-signature") + end_of_sig = body.index("\n", idx+1) + + signed_part = body[:end_of_sig+1] + + digest = hashlib.sha1(signed_part).digest() + assert len(digest) == 20 + + rsasig = rsa_sign(digest, self.rsa_id) + + body = body.replace(MAGIC1, objwrap("SIGNATURE", b64(rsasig))) + + return body + + +def signdesc(body, args_out=None): + rsa, ident_pem, id_digest = make_key() + _, onion_pem, _ = make_key() + + need_ed = '{ED25519-CERT}' in body or '{ED25519-SIGNATURE}' in body + if need_ed: + sk_master = os.urandom(32) + sk_signing = os.urandom(32) + pk_master = slow_ed25519.pubkey(sk_master) + pk_signing = slow_ed25519.pubkey(sk_signing) + + hexdigest = binascii.b2a_hex(id_digest).upper() + fingerprint = " ".join(hexdigest[i:i+4] for i in range(0,len(hexdigest),4)) + + MAGIC = "<<<<<<MAGIC>>>>>>" + MORE_MAGIC = "<<<<<!#!#!#XYZZY#!#!#!>>>>>" + args = { + "RSA-IDENTITY" : ident_pem, + "ONION-KEY" : onion_pem, + "FINGERPRINT" : fingerprint, + "FINGERPRINT-NOSPACE" : hexdigest, + "RSA-SIGNATURE" : MAGIC + } + if need_ed: + args['ED25519-CERT'] = makeEdSigningKeyCert( + sk_master, pk_master, pk_signing) + args['ED25519-SIGNATURE'] = MORE_MAGIC + + if args_out: + args_out.update(args) + body = body.format(**args) + + idx = body.rindex("\nrouter-signature") + end_of_sig = body.index("\n", idx+1) + + signed_part = body[:end_of_sig+1] + + digest = hashlib.sha1(signed_part).digest() + assert len(digest) == 20 + + buf = ctypes.create_string_buffer(1024) + n = RSA_private_encrypt(20, digest, buf, rsa, 1) + sig = buf.raw[:n] + + sig = """-----BEGIN SIGNATURE----- +%s +-----END SIGNATURE-----""" % b64(sig).rstrip() + body = body.replace(MAGIC, sig) + + return body.rstrip() + +def 
print_c_string(ident, body): + print "static const char %s[] =" % ident + for line in body.split("\n"): + print ' "%s\\n"' %(line) + print " ;" + +def emit_ri(name, body): + info = OnDemandKeys() + body = body.format(d=info) + body = info.sign_desc(body) + print_c_string("EX_RI_%s"%name.upper(), body) + +def emit_ei(name, body): + info = OnDemandKeys() + body = body.format(d=info) + body = info.sign_desc(body) + print_c_string("EX_EI_%s"%name.upper(), body) + + print 'const char EX_EI_{NAME}_FP[] = "{d.RSA_FINGERPRINT_NOSPACE}";'.format( + d=info, NAME=name.upper()) + print_c_string("EX_EI_%s_KEY"%name.upper(), info.RSA_IDENTITY) + +def analyze(s): + fields = {} + while s.startswith(":::"): + first,s=s.split("\n", 1) + m = re.match(r'^:::(\w+)=(.*)',first) + if not m: + raise ValueError(first) + k,v = m.groups() + fields[k] = v + return fields, s + +def process_file(s): + fields, s = analyze(s) + try: + name = fields['name'] + tp = fields['type'] + except KeyError: + raise ValueError("missing required field") + + if tp == 'ei': + emit_ei(name, s) + elif tp == 'ri': + emit_ri(name, s) + else: + raise ValueError("unrecognized type") + +if __name__ == '__main__': + import sys + for fn in sys.argv[1:]: + process_file(open(fn).read()) diff --git a/scripts/codegen/run_trunnel.sh b/scripts/codegen/run_trunnel.sh new file mode 100755 index 0000000000..d2669931e9 --- /dev/null +++ b/scripts/codegen/run_trunnel.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +if test "x$TRUNNEL_PATH" != "x"; then + PYTHONPATH="${TRUNNEL_PATH}:${PYTHONPATH}" + export PYTHONPATH +fi + +python -m trunnel --require-version=1.4 ./src/trunnel/*.trunnel + +python -m trunnel --require-version=1.4 --write-c-files --target-dir=./src/ext/trunnel/ + diff --git a/scripts/maint/analyze_callgraph.py b/scripts/maint/analyze_callgraph.py new file mode 100755 index 0000000000..8ce5827f07 --- /dev/null +++ b/scripts/maint/analyze_callgraph.py @@ -0,0 +1,259 @@ +#!/usr/bin/python + +import re +import sys +import copy +import 
cPickle +import os + +class Parser: + def __init__(self): + self.calls = {} + self.definedIn = {} + + def enter_func(self, name): + if self.infunc and not self.extern and self.calledfns: + if self.infunc in self.definedIn: + #print "{}: {} or {}?".format( + # self.infunc, self.definedIn[self.infunc], self.module) + self.definedIn[self.infunc] = 'nil' + else: + self.definedIn[self.infunc] = self.module + self.calls.setdefault(self.infunc, set()).update( self.calledfns ) + + self.calledfns = set() + self.infunc = name + self.extern = False + + def parse_callgraph_file(self, inp, module): + self.infunc = None + self.extern = False + self.calledfns = set() + self.module = module + + for line in inp: + m = re.match(r"Call graph node for function: '([^']+)'", line) + if m: + self.enter_func(m.group(1)) + continue + m = re.match(r" CS<[^>]+> calls external node", line) + if m: + self.extern = True + m = re.match(r" CS<[^>]+> calls function '([^']+)'", line) + if m: + self.calledfns.add(m.group(1)) + self.enter_func(None) + + def extract_callgraph(self): + c = self.calls + self.calls = {} + return c + + +def transitive_closure(g): + passno = 0 + changed = True + g = copy.deepcopy(g) + import random + while changed: + passno += 1 + changed = False + keys = g.keys() + idx = 0 + for k in keys: + idx += 1 + print "Pass %d/?: %d/%d\r" %(passno, idx, len(keys)), + sys.stdout.flush() + newset = g[k].copy() + for fn in g[k]: + newset.update(g.get(fn, set())) + if len(newset) != len(g[k]): + g[k].update( newset ) + changed = True + + print + + return g + +def strongly_connected_components(g): + # From https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm, done stupidly. 
+ index_of = {} + index = [ 0 ] + lowlink = {} + S = [] + onStack = set() + + all_sccs = [] + + def strongconnect(fn): + index_of[fn] = index[0] + lowlink[fn] = index[0] + index[0] += 1 + S.append(fn) + onStack.add(fn) + + for w in g.get(fn, []): + if w not in index_of: + strongconnect(w) + lowlink[fn] = min(lowlink[fn], lowlink[w]) + elif w in onStack: + lowlink[fn] = min(lowlink[fn], index_of[w]) + + if lowlink[fn] == index_of[fn]: + this_scc = [] + all_sccs.append(this_scc) + while True: + w = S.pop() + onStack.remove(w) + this_scc.append(w) + if w == fn: + break + + for v in g.keys(): + if v not in index_of: + strongconnect(v) + + return all_sccs + +def biggest_component(sccs): + return max(len(c) for c in sccs) + +def connection_bottlenecks(callgraph): + + callers = {} + for fn in callgraph: + for fn2 in callgraph[fn]: + callers.setdefault(fn2, set()).add(fn) + + components = strongly_connected_components(callgraph) + components.sort(key=len) + big_component_fns = components[-1] + size = len(big_component_fns) + + function_bottlenecks = fn_results = [] + + total = len(big_component_fns) + idx = 0 + for fn in big_component_fns: + idx += 1 + print "Pass 1/3: %d/%d\r"%(idx, total), + sys.stdout.flush() + cg2 = copy.deepcopy(callgraph) + del cg2[fn] + + fn_results.append( (size - biggest_component(strongly_connected_components(cg2)), fn) ) + + print + bcf_set = set(big_component_fns) + + call_bottlenecks = fn_results = [] + result_set = set() + total = len(big_component_fns) + idx = 0 + for fn in big_component_fns: + fn_callers = callers[fn].intersection(bcf_set) + idx += 1 + if len(fn_callers) != 1: + continue + + print "Pass 2/3: %d/%d\r"%(idx, total), + sys.stdout.flush() + + caller = fn_callers.pop() + assert len(fn_callers) == 0 + cg2 = copy.deepcopy(callgraph) + cg2[caller].remove(fn) + + fn_results.append( (size - biggest_component(strongly_connected_components(cg2)), fn, "called by", caller) ) + result_set.add( (caller, fn) ) + + print + + total = 
len(big_component_fns) + idx = 0 + for fn in big_component_fns: + fn_calls = callgraph[fn].intersection(bcf_set) + idx += 1 + if len(fn_calls) != 1: + continue + + print "Pass 3/3: %d/%d\r"%(idx, total), + sys.stdout.flush() + + callee = fn_calls.pop() + if (fn, callee) in result_set: + continue + + assert len(fn_calls) == 0 + cg2 = copy.deepcopy(callgraph) + cg2[fn].remove(callee) + + fn_results.append( (size - biggest_component(strongly_connected_components(cg2)), callee, "called by", fn) ) + + print + + + return (function_bottlenecks, call_bottlenecks) + +if __name__ == '__main__': + p = Parser() + for fname in sys.argv[1:]: + modname = re.sub(r'.*/', '', fname).replace('.callgraph', '.c') + with open(fname, 'r') as f: + p.parse_callgraph_file(f, modname) + + sys.stdout.flush() + + print "Building callgraph" + callgraph = p.extract_callgraph() + inModule = p.definedIn + + print "Deriving module callgraph" + modCallgraph = {} + for fn in callgraph: + fnMod = inModule[fn] + for called in callgraph[fn]: + try: + calledMod = inModule[called] + except KeyError: + continue + modCallgraph.setdefault(fnMod, set()).add(calledMod) + del modCallgraph['nil'] + + print "Finding strongly connected components" + sccs = strongly_connected_components(callgraph) + + print "Finding the transitive closure of the callgraph.." + closure = transitive_closure(callgraph) + + print "Finding bottlenecks..." 
+ bottlenecks = connection_bottlenecks(callgraph) + + print "Finding module SCCs" + modSCCS = strongly_connected_components(modCallgraph) + + print "Finding module TC" + modTC = transitive_closure(modCallgraph) + + print "Finding module bottlenecks" + modB = connection_bottlenecks(modCallgraph) + + data = { + 'callgraph' : callgraph, + 'sccs' : sccs, + 'closure' : closure, + 'bottlenecks' : bottlenecks, + 'modules' : p.definedIn, + 'modItems' : { + 'callgraph' : modCallgraph, + 'sccs' : modSCCS, + 'closure' : modTC, + 'bottlenecks' : modB, + } + } + + with open('callgraph.pkl', 'w') as f: + cPickle.dump(data, f) + + + diff --git a/scripts/maint/checkOptionDocs.pl b/scripts/maint/checkOptionDocs.pl.in index 94307c6cef..1f53adf099 100755..100644 --- a/scripts/maint/checkOptionDocs.pl +++ b/scripts/maint/checkOptionDocs.pl.in @@ -7,7 +7,7 @@ my %torrcSampleOptions = (); my %manPageOptions = (); # Load the canonical list as actually accepted by Tor. -open(F, "./src/or/tor --list-torrc-options |") or die; +open(F, "@abs_top_builddir@/src/or/tor --list-torrc-options |") or die; while (<F>) { next if m!\[notice\] Tor v0\.!; if (m!^([A-Za-z0-9_]+)!) { @@ -34,12 +34,12 @@ sub loadTorrc { 0; } -loadTorrc("./src/config/torrc.sample.in", \%torrcSampleOptions); +loadTorrc("@abs_top_srcdir@/src/config/torrc.sample.in", \%torrcSampleOptions); # Try to figure out what's in the man page. my $considerNextLine = 0; -open(F, "./doc/tor.1.txt") or die; +open(F, "@abs_top_srcdir@/doc/tor.1.txt") or die; while (<F>) { if (m!^(?:\[\[([A-za-z0-9_]+)\]\] *)?\*\*([A-Za-z0-9_]+)\*\*!) 
{ $manPageOptions{$2} = 1; @@ -67,5 +67,3 @@ subtractHashes("Orphaned in torrc.sample.in", \%torrcSampleOptions, \%options); subtractHashes("Not in man page", \%options, \%manPageOptions); subtractHashes("Orphaned in man page", \%manPageOptions, \%options); - - diff --git a/scripts/maint/checkSpace.pl b/scripts/maint/checkSpace.pl index 682dbced00..906281112d 100755 --- a/scripts/maint/checkSpace.pl +++ b/scripts/maint/checkSpace.pl @@ -13,30 +13,45 @@ for $fn (@ARGV) { $incomment = 0; while (<F>) { ## Warn about windows-style newlines. + # (We insist on lines that end with a single LF character, not + # CR LF.) if (/\r/) { print " CR:$fn:$.\n"; } ## Warn about tabs. + # (We only use spaces) if (/\t/) { print " TAB:$fn:$.\n"; } - ## Warn about markers that don't have a space in front of them + ## Warn about labels that don't have a space in front of them + # (We indent every label at least one space) if (/^[a-zA-Z_][a-zA-Z_0-9]*:/) { print "nosplabel:$fn:$.\n"; } ## Warn about trailing whitespace. + # (We don't allow whitespace at the end of the line; make your + # editor highlight it for you so you can stop adding it in.) if (/ +$/) { print "Space\@EOL:$fn:$.\n"; } ## Warn about control keywords without following space. + # (We put a space after every 'if', 'while', 'for', 'switch', etc) if ($C && /\s(?:if|while|for|switch)\(/) { print " KW(:$fn:$.\n"; } ## Warn about #else #if instead of #elif. + # (We only allow #elif) if (($lastline =~ /^\# *else/) and ($_ =~ /^\# *if/)) { print " #else#if:$fn:$.\n"; } ## Warn about some K&R violations + # (We use K&R-style C, where open braces go on the same line as + # the statement that introduces them. In other words: + # if (a) { + # stuff; + # } else { + # other stuff; + # } if (/^\s+\{/ and $lastline =~ /^\s*(if|while|for|else if)/ and $lastline !~ /\{$/) { print "non-K&R {:$fn:$.\n"; @@ -46,10 +61,13 @@ for $fn (@ARGV) { } $lastline = $_; ## Warn about unnecessary empty lines. 
+ # (Don't put an empty line before a line that contains nothing + # but a closing brace.) if ($lastnil && /^\s*}\n/) { print " UnnecNL:$fn:$.\n"; } ## Warn about multiple empty lines. + # (At most one blank line in a row.) if ($lastnil && /^$/) { print " DoubleNL:$fn:$.\n"; } elsif (/^$/) { @@ -59,6 +77,7 @@ for $fn (@ARGV) { } ## Terminals are still 80 columns wide in my world. I refuse to ## accept double-line lines. + # (Don't make lines wider than 80 characters, including newline.) if (/^.{80}/) { print " Wide:$fn:$.\n"; } @@ -83,11 +102,13 @@ for $fn (@ARGV) { s!"(?:[^\"]+|\\.)*"!"X"!g; next if /^\#/; ## Warn about C++-style comments. + # (Use C style comments only.) if (m!//!) { # print " //:$fn:$.\n"; s!//.*!!; } ## Warn about unquoted braces preceded by non-space. + # (No character except a space should come before a {) if (/([^\s'])\{/) { print " $1\{:$fn:$.\n"; } @@ -101,15 +122,22 @@ for $fn (@ARGV) { # print " {X:$fn:$.\n"; #} ## Warn about function calls with space before parens. + # (Don't put a space between the name of a function and its + # arguments.) if (/(\w+)\s\(([A-Z]*)/) { if ($1 ne "if" and $1 ne "while" and $1 ne "for" and $1 ne "switch" and $1 ne "return" and $1 ne "int" and $1 ne "elsif" and $1 ne "WINAPI" and $2 ne "WINAPI" and - $1 ne "void" and $1 ne "__attribute__" and $1 ne "op") { + $1 ne "void" and $1 ne "__attribute__" and $1 ne "op" and + $1 ne "size_t" and $1 ne "double" and + $1 ne "workqueue_reply_t") { print " fn ():$fn:$.\n"; } } ## Warn about functions not declared at start of line. + # (When you're declaring functions, put "static" and "const" + # and the return type on one line, and the function name at + # the start of a new line.) if ($in_func_head || ($fn !~ /\.h$/ && /^[a-zA-Z0-9_]/ && ! /^(?:const |static )*(?:typedef|struct|union)[^\(]*$/ && @@ -130,6 +158,8 @@ for $fn (@ARGV) { } } } + ## Warn if the file doesn't end with a blank line. + # (End each file with a single blank line.) if (! 
$lastnil) { print " EOL\@EOF:$fn:$.\n"; } diff --git a/scripts/maint/display_callgraph.py b/scripts/maint/display_callgraph.py new file mode 100755 index 0000000000..c9001c6d96 --- /dev/null +++ b/scripts/maint/display_callgraph.py @@ -0,0 +1,41 @@ +#!/usr/bin/python + +import cPickle + +data = cPickle.load(open("callgraph.pkl")) + +# data = data['modItems'] + +callgraph = data['callgraph'] +closure = data['closure'] +sccs = data['sccs'] +fn_bottle, call_bottle = data['bottlenecks'] + +for n_reachable, fn in sorted(list((len(r), fn) for fn, r in closure.iteritems())): + print "%s can reach %s other functions." %(fn, n_reachable) + + +c = [ (len(component), component) for component in sccs ] +c.sort() + +print "\n================================" + +for n, component in c: + if n < 2: + continue + print "Strongly connected component of size %d:"%n + print component + + +print "\n================================" + +print "====== Number of functions pulled into blob, by function in blob." +fn_bottle.sort() +for n, fn in fn_bottle[-30:]: + print "%3d: %s"%(n, fn) + +print "====== Number of functions pulled into blob, by call in blob." +call_bottle.sort() +for n, fn1, _, fn2 in call_bottle[-30:]: + print "%3d: %s -> %s "%(n, fn2, fn1) + diff --git a/scripts/maint/fallback.blacklist b/scripts/maint/fallback.blacklist new file mode 100644 index 0000000000..c9fd8a9236 --- /dev/null +++ b/scripts/maint/fallback.blacklist @@ -0,0 +1,229 @@ +# updateFallbackDirs.py directory mirror blacklist +# +# Format: +# [ IPv4[:DirPort] ] [ orport=<ORPort> ] [ id=<ID> ] ... +# [ ipv6=<IPv6>[:<IPv6 ORPort>] ] +# +# If a sufficiently specific group of attributes matches, the directory mirror +# will be excluded: (each group is listed on its own line) +# <IPv4>, <DirPort> +# <IPv4>, <ORPort> +# <ID> +# <IPv6>, <DirPort> +# <IPv6>, <IPv6 ORPort> +# If DirPort and ORPort are not present, the entire IP address is blacklisted. +# (The blacklist overrides the whitelist.) 
+ +# If a relay operator doesn't want their relay to be a FallbackDir, +# enter the following information here: +# <IPv4>:<DirPort> orport=<ORPort> id=<ID> ipv6=<IPv6>:<IPv6 ORPort> + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008364.html +87.181.248.227:9030 orport=443 id=8827944C4BDCBDAC9079803F47823403C11A9B7A + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008368.html +149.18.2.82:9030 orport=9001 id=953DB709F2A2DECC8D7560661F934E64411444F7 + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008384.html +80.82.215.199:80 orport=443 id=3BEFAB76461B6B99DCF34C285E933562F5712AE4 ipv6=[2001:4ba0:cafe:a18::1]:443 + +# https://lists.torproject.org/pipermail/tor-relays/2016-January/008515.html +# later opt-out in +# https://lists.torproject.org/pipermail/tor-relays/2016-January/008521.html +5.9.158.75:80 orport=443 id=F1BE15429B3CE696D6807F4D4A58B1BFEC45C822 ipv6=[2a01:4f8:190:514a::2]:443 + +# Email sent directly to teor, verified using relay contact info +5.34.183.168:80 orport=443 id=601C92108A568742A7A6D9473FE3A414F7149070 +217.12.199.208:8080 orport=22 id=BCFB0933367D626715DA32A147F417194A5D48D6 + +# https://lists.torproject.org/pipermail/tor-relays/2016-January/008555.html +62.210.207.124:9030 orport=9001 id=58938B1A5C4029B4415D38A4F36B7724273F4755 ipv6=[2001:bc8:31eb:100::1]:9001 +62.210.207.124:9130 orport=9101 id=338D0AB6DBAB7B529B9C91B2FD770658000693C4 ipv6=[2001:bc8:31eb:100::1]:9101 + +# these fallback candidates fail the consensus download test in a way that +# causes stem to hang (and not respond to ^C, at least on OS X) +# (Is something sending weird responses to DirPort traffic?) 
+#217.23.14.190:1194 +#151.80.164.147:80 +#148.251.255.92:80 +#78.142.19.59:80 + +# Email sent directly to teor, verified using relay contact info +216.17.99.183:80 orport=443 id=D52CD431CEF28E01B11F545A84347EE45524BCA7 +216.17.99.183:8080 orport=9001 id=EE21F83AB6F76E3B3FFCBA5C2496F789CB84E7C6 +65.19.167.130:80 orport=443 id=890E2EA65455FBF0FAAB4159FAC4412BDCB24295 +65.19.167.131:80 orport=443 id=0DA9BD201766EDB19F57F49F1A013A8A5432C008 +65.19.167.132:80 orport=443 id=12B80ABF019354A9D25EE8BE85EB3C0AD8F7DFC1 +65.19.167.133:80 orport=443 id=C170AE5A886C5A09D6D1CF5CF284653632EEF25D + +# Email sent directly to teor, verified using relay contact info +213.136.83.225:80 orport=443 id=B411027C926A9BFFCF7DA91E3CAF1856A321EFFD +195.154.126.78:80 orport=443 id=F6556156E2B3837248E03FDB770441CF64DBBFBE + +# Email sent directly to teor, verified using relay contact info +178.63.198.113:80 orport=443 id=872B18761953254914F77C71846E8A2623C52591 + +# Email sent directly to teor, verified using relay contact info +63.141.226.34:80 orport=9001 id=5EF131C0C82270F40B756987FDB5D54D9C966238 +185.75.56.103:80 orport=9001 id=3763CE5C3F574670D4296573744F821C0FFFB98E + +# Email sent directly to teor, verified using relay contact info +81.7.14.227:9030 orport=9001 id=BCA197C43A44B7B9D14509637F96A45B13C233D0 + +# Email sent directly to teor, verified using relay contact info +84.245.32.195:9030 orport=9001 id=4CD4DFFEF3971C902A22100D911CAC639BE2EF5C + +# Email sent directly to teor, verified using relay contact info +185.21.217.10:9030 orport=9001 id=41537E1D3DD3CAE86F5A3F0882F1C647FE8FC0A0 + +# Email sent directly to teor, verified using relay contact info +185.21.216.140:9030 orport=9001 id=921DA852C95141F8964B359F774B35502E489869 + +# Email sent directly to teor, verified using relay contact info +62.210.82.44:143 orport=21 id=1C90D3AEADFF3BCD079810632C8B85637924A58E ipv6=[2001:bc8:3d7c::]:21 + +# Email sent directly to teor, verified using relay contact info +46.101.220.161:80 
orport=443 id=7DDFE5B2C306B19A79832FBE581EAA245BAE90C6 ipv6=[2a03:b0c0:3:d0::8b:3001]:443 + +# Email sent directly to teor, verified using relay contact info +195.154.107.23:80 orport=443 id=A1F89F26E82209169E4037B035AE7B6C94A49AEB ipv6=[2001:bc8:3829:300::1]:443 +195.154.92.70:80 orport=443 id=E7FF4ECEEFCFE3A40A6D3594898A4A3DE018BBF5 ipv6=[2001:bc8:3829:500::1]:443 +195.154.113.200:80 orport=443 id=D1A4763FA0BD71978901B1951FEE1DC29777F95A ipv6=[2001:bc8:3829:600::1]:443 +195.154.92.155:110 orport=993 id=4477D3466FE136B7FE6F7FF8EBD0D6E2FFE3288B ipv6=[2001:bc8:3829:100::1]:993 +195.154.117.182:110 orport=993 id=B1A0F1143789466AADD5FAE5948C8138548EECEC ipv6=[2001:bc8:3829:400::1]:993 +195.154.97.163:80 orport=443 id=8A2994A63B20813B7724817A8FB8C444D10BA2E2 + +# Email sent directly to teor, verified using relay contact info +5.135.154.206:9030 orport=9001 id=7D67B342DC1158F4CFFEE8BC530A2448848026E3 + +# Email sent directly to teor, verified using relay contact info +85.24.215.117:9030 orport=9001 id=5989521A85C94EE101E88B8DB2E68321673F9405 ipv6=[2001:9b0:20:2106:21a:4aff:fea5:ad05]:9001 + +# Email sent directly to teor, verified using relay contact info +62.210.137.230:8888 orport=8843 id=CD6B850159CFF4C068A8D0F1BA5296AE4EDCAB39 ipv6=[2001:bc8:31d3:100::1]:3443 +62.210.137.230:8080 orport=8443 id=F596E1B1EF98E1DDBBDC934DB722AF54069868F6 ipv6=[2001:bc8:31d3:100::1]:8443 + +# Email sent directly to teor, verified using relay contact info +195.154.99.80:80 orport=443 id=6E7CB6E783C1B67B79D0EBBE7D48BC09BD441201 +195.154.127.60:80 orport=443 id=D74ABE34845190E242EC74BA28B8C89B0A480D4B + +# Email sent directly to teor, verified using relay contact info +212.51.143.20:80 orport=443 id=62DA0256BBC28992D41CBAFB549FFD7C9B846A99 + +# Email sent directly to teor, verified using relay contact info +195.154.90.122:80 orport=443 id=3A0D88024A30152E6F6372CFDF8F9B725F984362 + +# Email sent directly to teor, verified using relay contact info +188.166.118.215:9030 orport=443 
id=FB5FF60F5EBA010F8A45AC6ED31A4393718A2C31 ipv6=[2a03:b0c0:2:d0::72:9001]:443 + +# Email sent directly to teor, verified using relay contact info +185.87.185.245:40001 orport=40000 id=2A499AEEA95FB10F07544383D562368E49BE32CA + +# Email sent directly to teor, verified using relay contact info +82.161.109.71:9030 orport=9001 id=BD9CE352648B940E788A8E45393C5400CC3E87E7 + +# Email sent directly to teor, verified using relay contact info +212.83.40.239:9030 orport=9001 id=6DC5616BD3FC463329DCE87DD7AAAEA112C264B5 + +# Email sent directly to teor, verified using relay contact info +178.32.53.53:80 orport=443 id=10582C360E972EE76B0DB1C246F4E892A6BF5465 + +# Email sent directly to teor, verified using relay contact info +85.114.135.20:9030 orport=9001 id=ED8A9291A3139E34BBD35037B082081EC6C26C80 ipv6=[2001:4ba0:fff5:2d::8]:9001 +148.251.128.156:9030 orport=9001 id=E382042E06A0A68AFC533E5AD5FB6867A12DF9FF ipv6=[2a01:4f8:210:238a::8]:9001 +62.210.115.147:9030 orport=9001 id=7F1D94E2C36F8CC595C2AB00022A5AE38171D50B ipv6=[2001:bc8:3182:101::8]:9001 +212.47.250.24:9030 orport=9001 id=33DA0CAB7C27812EFF2E22C9705630A54D101FEB + +# Email sent directly to teor, verified using relay contact info +74.208.220.222:60000 orport=59999 id=4AA22235F0E9B3795A33930343CBB3EDAC60C5B0 + +# Email sent directly to teor, verified using relay contact info +89.163.140.168:9030 orport=9001 id=839C1212DB15723263BE96C83DA7E1B24FA395E8 + +# Email sent directly to teor, verified using relay contact info +212.47.246.211:9030 orport=9001 id=AA34219475E41282095DD3C088009EE562AF14E5 + +# Email sent directly to teor, verified using relay contact info +85.195.235.148:9030 orport=9001 id=103336165A0D2EFCAD3605339843A0A7710B8B92 +85.195.235.148:19030 orport=19001 id=713235638AB6C64715EAFD1B4772685E38AFD52A + +# Email sent directly to teor, verified using relay contact info +163.172.7.30:9030 orport=9001 id=E2EACD4752B2583202F374A34CACC844A3AECAC4 + +# Email sent directly to teor, verified using relay contact info 
+178.62.90.111:22 orport=25 id=3254D1DC1F1531D9C07C535E4991F38EE99B99E1 + +# Email sent directly to teor, verified using relay contact info +213.200.106.131:9030 orport=4443 id=B07CE79FD215129C381F6645B16E76DCA0845CAB + +# Email sent directly to teor, verified using relay contact info +198.51.75.165:80 orport=9001 id=DABCB84A524A22FDDD3AFCB090E3090CC12D9770 + +# Email sent directly to teor, verified using relay contact info +204.194.29.4:80 orport=9001 id=78C7C299DB4C4BD119A22B87B57D5AF5F3741A79 + +# Email sent directly to teor, verified using relay contact info +104.207.132.109:9030 orport=9001 id=12D5737383C23E756A7AA1A90BB24413BA428DA7 ipv6=[2001:19f0:300:2261::1]:9001 + +# Email sent directly to teor, verified using relay contact info +46.252.25.249:9030 orport=443 id=80DCBB6EF4E86A7CD4FBCBDEE64979645509A610 + +# Email sent directly to teor, verified using relay contact info +176.10.99.200:8080 orport=443 id=2B44FD1742D26E4F28D4CACF1F0CF8A686270E45 +176.10.99.200:8000 orport=22 id=EB79F07792A065D3C534063773E83268E069F5EB +176.10.99.201:667 orport=666 id=3EAAAB35932610411E24FA4317603CB5780B80BC +176.10.99.201:990 orport=989 id=7C3A4CFF09C1981D41173CDE2A2ADD4A5CA109FD +176.10.99.202:992 orport=991 id=615EBC4B48F03858FA50A3E23E5AF569D0D2308A +176.10.99.202:994 orport=993 id=E34E25D958D46DDE5092385B14117C9B301DC0E9 +176.10.99.203:1194 orport=995 id=AD368442E9FF33C08C7407DF2DA7DB958F406CE2 +176.10.99.203:43 orport=53 id=79CF377F0ACEC5F0002D85335E4192B34202A269 +176.10.99.204:1755 orport=1723 id=69DF3CDA1CDA460C17ECAD9D6F0C117A42384FA0 +176.10.99.204:1293 orport=4321 id=3F061400B6FB1F55E7F19BB3C713884D677E55B7 +176.10.99.205:426 orport=425 id=C30B284784BF11D0D58C6A250240EE58D2084AD0 +176.10.99.205:109 orport=110 id=12D17D9F9E30FA901DE68806950A0EA278716CED +176.10.99.206:24 orport=23 id=2C804AAB0C02F971A4386B3A1F2AC00F9E080679 +176.10.99.206:20 orport=21 id=237588726AB6BEA37FF23CA00F5BD178586CA68E +176.10.99.207:3390 orport=3389 
id=A838D5B8890B10172429ECE92EB5677DF93DC4DD +176.10.99.207:1415 orport=1414 id=377E5E817A84FAE0F4DC3427805DB2E8A6CBBFC0 +176.10.99.208:390 orport=389 id=7C288587BA0D99CC6B8537CDC2C4639FA827B907 +176.10.99.208:3307 orport=3306 id=1F0D2A44C56F42816DED2022EFD631878C29905B +176.10.99.209:1434 orport=1433 id=BDA7A91FF3806DE5109FDAE74CFEFB3BABB9E10F +176.10.99.209:220 orport=219 id=B8C2030001D832066A648269CFBA94171951D34B + +# Email sent directly to teor, verified using relay contact info +78.193.40.205:8080 orport=8443 id=C91450840E75AC1B654A3096744338A573A239C6 + +# Email sent directly to teor, verified using relay contact info +37.187.22.172:9030 orport=9035 id=335E4117BD9A4966403C2AFA31CFDD1BC13BD46A + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008367.html +# Email sent directly to teor to opt-out +88.198.38.226:22 orport=443 id=4B9E2C56FB42B891794FE2CD2FCAD08A320CC3BB ipv6=[2a01:4f8:a0:1351::2]:80 +213.239.210.204:22 orport=443 id=5BFDECCE9B4A23AE14EC767C5A2C1E10558B00B9 ipv6=[2a01:4f8:a0:9474::2]:80 +213.239.220.25:22 orport=443 id=BEE2317AE127EB681C5AE1551C1EA0630580638A ipv6=[2a01:4f8:a0:710c::2]:80 +85.10.201.38:22 orport=443 id=F6279A203C1950ACF592322A235647A05BFBCF91 ipv6=[2a01:4f8:a0:43cc::2]:80 + +# Email sent directly to teor, verified using relay contact info +88.190.208.4:30555 orport=30556 id=030A6EB24725C05D8E0FCE21923CBA5223E75E0E + +# Fallback was on 0.2.8.2-alpha list, but changed fingerprint before 0.2.8.5 +46.101.102.71:80 orport=443 id=9504CB22EEB25D344DE63CB7A6F2C46F895C3686 ipv6=[2a03:b0c0:3:d0::2ed:7001]:9050 +# Also blacklist anything with the new fingerprint +id=9C8A123081EFBE022EF795630F447839DDFDDDEC + +# Fallbacks were on 0.2.8.2-alpha list, but downloads were slow before 0.2.8.5 +185.96.88.29:80 orport=443 id=86C281AD135058238D7A337D546C902BE8505DDE +178.62.36.64:9030 orport=9001 id=B87C84E38DAECFFFFDE98E5AEE5786AFDC748F2C + +# Fallback was on 0.2.8.2-alpha list, but changed address before 0.2.8.5 
+84.219.173.60:9030 orport=443 id=855BC2DABE24C861CD887DB9B2E950424B49FC34 +# Also blacklist anything with the new address +84.216.235.55:9030 orport=443 + +# Fallbacks were on 0.2.8.2-alpha list, but disappeared before 0.2.8.5 +81.7.17.171:80 orport=443 id=CFECDDCA990E3EF7B7EC958B22441386B6B8D820 ipv6=[2a02:180:1:1::517:11ab]:443 +51.254.215.121:80 orport=443 id=262B66AD25C79588AD1FC8ED0E966395B47E5C1D +185.100.85.138:80 orport=46356 id=5C4DF16A0029CC4F67D3E127356E68F219269859 + +# Fallback was on 0.2.8.2-alpha list, but opted-out before 0.2.8.6 +37.187.1.149:9030 orport=9001 id=08DC0F3C6E3D9C527C1FC8745D35DD1B0DE1875D ipv6=[2001:41d0:a:195::1]:9001 diff --git a/scripts/maint/fallback.whitelist b/scripts/maint/fallback.whitelist new file mode 100644 index 0000000000..c801e46b15 --- /dev/null +++ b/scripts/maint/fallback.whitelist @@ -0,0 +1,770 @@ +# updateFallbackDirs.py directory mirror whitelist +# +# Format: +# IPv4:DirPort orport=<ORPort> id=<ID> [ ipv6=<IPv6>:<IPv6 ORPort> ] +# +# All attributes must match for the directory mirror to be included. +# If the fallback has an ipv6 key, the whitelist line must also have +# it, and vice versa, otherwise they don't match. +# (The blacklist overrides the whitelist.) + +# To replace this list with the hard-coded fallback list (for testing), use +# a command similar to: +# cat src/or/fallback_dirs.inc | grep \" | grep -v weight | tr -d '\n' | \ +# sed 's/"" / /g' | sed 's/""/"/g' | tr \" '\n' | grep -v '^$' \ +# > scripts/maint/fallback.whitelist +# +# When testing before a release, exclusions due to changed details will result +# in a warning, unless the IPv4 address or port change happened recently. +# Then it is only logged at info level, as part of the eligibility check. +# Exclusions due to stability also are only shown at info level. +# +# Add the number of selected, slow, and excluded relays, and compare that to +# the number of hard-coded relays. 
If it's less, use info-level logs to find +# out why each of the missing relays was excluded. + +# If a relay operator wants their relay to be a FallbackDir, +# enter the following information here: +# <IPv4>:<DirPort> orport=<ORPort> id=<ID> [ ipv6=<IPv6>:<IPv6 ORPort> ] + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008362.html +78.47.18.110:443 orport=80 id=F8D27B163B9247B232A2EEE68DD8B698695C28DE +131.188.40.188:443 orport=80 id=EBE718E1A49EE229071702964F8DB1F318075FF8 + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008366.html +5.39.88.19:9030 orport=9001 id=7CB8C31432A796731EA7B6BF4025548DFEB25E0C ipv6=[2001:41d0:8:9a13::1]:9050 + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008370.html +# https://lists.torproject.org/pipermail/tor-relays/2016-January/008517.html +# https://lists.torproject.org/pipermail/tor-relays/2016-January/008555.html +62.210.124.124:9030 orport=9001 id=86E78DD3720C78DA8673182EF96C54B162CD660C ipv6=[2001:bc8:3f23:100::1]:9001 +62.210.124.124:9130 orport=9101 id=2EBD117806EE43C3CC885A8F1E4DC60F207E7D3E ipv6=[2001:bc8:3f23:100::1]:9101 +212.47.237.95:9030 orport=9001 id=3F5D8A879C58961BB45A3D26AC41B543B40236D6 +212.47.237.95:9130 orport=9101 id=6FB38EB22E57EF7ED5EF00238F6A48E553735D88 + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008372.html +# IPv6 tunnel available on request (is this a good idea?) 
+108.53.208.157:80 orport=443 id=4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2 + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008373.html +167.114.35.28:9030 orport=9001 id=E65D300F11E1DB12C534B0146BDAB6972F1A8A48 + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008374.html +170.130.1.7:9030 orport=9001 id=FA3415659444AE006E7E9E5375E82F29700CFDFD + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008378.html +144.76.14.145:110 orport=143 id=14419131033443AE6E21DA82B0D307F7CAE42BDB ipv6=[2a01:4f8:190:9490::dead]:443 + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008379.html +# Email sent directly to teor, verified using relay contact info +91.121.84.137:4951 orport=4051 id=6DE61A6F72C1E5418A66BFED80DFB63E4C77668F + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008380.html +5.175.233.86:80 orport=443 id=5525D0429BFE5DC4F1B0E9DE47A4CFA169661E33 + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008381.html +# Sent additional email to teor with more relays +178.254.44.135:9030 orport=9001 id=8FA37B93397015B2BC5A525C908485260BE9F422 +178.254.20.134:80 orport=443 id=9F5068310818ED7C70B0BC4087AB55CB12CB4377 +178.254.20.134:9030 orport=9001 id=2CE96A8A1DA032664C90F574AFFBECE18A6E8DFC +178.254.44.135:80 orport=443 id=AE6A8C18E7499B586CD36246AC4BCAFFBBF93AB2 +178.254.13.126:80 orport=443 id=F9246DEF2B653807236DA134F2AEAB103D58ABFE +178.254.13.126:9030 orport=9001 id=0C475BA4D3AA3C289B716F95954CAD616E50C4E5 + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008382.html +51.255.33.237:9091 orport=9001 id=A360C21FA87FFA2046D92C17086A6B47E5C68109 + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008383.html +81.7.14.246:80 orport=443 id=CE75BF0972ADD52AF8807602374E495C815DB304 ipv6=[2a02:180:a:51::dead]:443 + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008384.html +# Sent additional email to teor with 
fingerprint change +149.202.98.161:80 orport=443 id=FC64CD763F8C1A319BFBBF62551684F4E1E42332 ipv6=[2001:41d0:8:4528::161]:443 +193.111.136.162:80 orport=443 id=C79552275DFCD486B942510EF663ED36ACA1A84B ipv6=[2001:4ba0:cafe:10d0::1]:443 + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008416.html +185.100.84.212:80 orport=443 id=330CD3DB6AD266DC70CDB512B036957D03D9BC59 ipv6=[2a06:1700:0:7::1]:443 + +# https://lists.torproject.org/pipermail/tor-relays/2015-December/008417.html +178.16.208.56:80 orport=443 id=2CDCFED0142B28B002E89D305CBA2E26063FADE2 ipv6=[2a00:1c20:4089:1234:cd49:b58a:9ebe:67ec]:443 +178.16.208.57:80 orport=443 id=92CFD9565B24646CAC2D172D3DB503D69E777B8A ipv6=[2a00:1c20:4089:1234:7825:2c5d:1ecd:c66f]:443 + +# https://lists.torproject.org/pipermail/tor-relays/2016-January/008513.html +178.62.173.203:9030 orport=9001 id=DD85503F2D1F52EF9EAD621E942298F46CD2FC10 ipv6=[2a03:b0c0:0:1010::a4:b001]:9001 + +# https://lists.torproject.org/pipermail/tor-relays/2016-January/008534.html +5.9.110.236:9030 orport=9001 id=0756B7CD4DFC8182BE23143FAC0642F515182CEB ipv6=[2a01:4f8:162:51e2::2]:9001 + +# https://lists.torproject.org/pipermail/tor-relays/2016-January/008542.html +178.62.199.226:80 orport=443 id=CBEFF7BA4A4062045133C053F2D70524D8BBE5BE ipv6=[2a03:b0c0:2:d0::b7:5001]:443 + +# Emails sent directly to teor, verified using relay contact info +217.12.199.208:80 orport=443 id=DF3AED4322B1824BF5539AE54B2D1B38E080FF05 + +# Email sent directly to teor, verified using relay contact info +94.23.204.175:9030 orport=9001 id=5665A3904C89E22E971305EE8C1997BCA4123C69 + +# https://twitter.com/binarytenshi/status/717952514327453697 +94.126.23.174:9030 orport=9001 id=6FC6F08270D565BE89B7C819DD8E2D487397C073 + +# Email sent directly to teor, verified using relay contact info +171.25.193.78:80 orport=443 id=A478E421F83194C114F41E94F95999672AED51FE ipv6=[2001:67c:289c:3::78]:443 +171.25.193.77:80 orport=443 id=A10C4F666D27364036B562823E5830BC448E046A 
ipv6=[2001:67c:289c:3::77]:443 +171.25.193.131:80 orport=443 id=79861CF8522FC637EF046F7688F5289E49D94576 +171.25.193.20:80 orport=443 id=DD8BD7307017407FCC36F8D04A688F74A0774C02 ipv6=[2001:67c:289c::20]:443 +# OK, but same machine as 79861CF8522FC637EF046F7688F5289E49D94576 +#171.25.193.132:80 orport=443 id=01C67E0CA8F97111E652C7564CB3204361FFFAB8 +# OK, but same machine as DD8BD7307017407FCC36F8D04A688F74A0774C02 +#171.25.193.25:80 orport=443 id=185663B7C12777F052B2C2D23D7A239D8DA88A0F ipv6=[2001:67c:289c::25]:443 + +# Email sent directly to teor, verified using relay contact info +212.47.229.2:9030 orport=9001 id=20462CBA5DA4C2D963567D17D0B7249718114A68 +93.115.97.242:9030 orport=9001 id=B5212DB685A2A0FCFBAE425738E478D12361710D +46.28.109.231:9030 orport=9001 id=F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610 ipv6=[2a02:2b88:2:1::4205:42]:9001 + +# Email sent directly to teor, verified using relay contact info +85.235.250.88:80 orport=443 id=72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE +185.96.180.29:80 orport=443 id=F93D8F37E35C390BCAD9F9069E13085B745EC216 + +# Email sent directly to teor, verified using relay contact info +185.11.180.67:80 orport=9001 id=794D8EA8343A4E820320265D05D4FA83AB6D1778 + +# Email sent directly to teor, verified using relay contact info +178.16.208.62:80 orport=443 id=5CF8AFA5E4B0BB88942A44A3F3AAE08C3BDFD60B ipv6=[2a00:1c20:4089:1234:a6a4:2926:d0af:dfee]:443 +46.165.221.166:80 orport=443 id=EE5F897C752D46BCFF531641B853FC6BC78DD4A7 +178.16.208.60:80 orport=443 id=B44FBE5366AD98B46D829754FA4AC599BAE41A6A ipv6=[2a00:1c20:4089:1234:67bc:79f3:61c0:6e49]:443 +178.16.208.55:80 orport=443 id=C4AEA05CF380BAD2230F193E083B8869B4A29937 ipv6=[2a00:1c20:4089:1234:7b2c:11c5:5221:903e]:443 +178.16.208.61:80 orport=443 id=3B52392E2256C35CDCF7801FF898FC88CE6D431A ipv6=[2a00:1c20:4089:1234:2712:a3d0:666b:88a6]:443 +81.89.96.88:80 orport=443 id=55ED4BB49F6D3F36D8D9499BE43500E017A5EF82 ipv6=[2a02:180:1:1:14c5:b0b7:2d7d:5f3a]:443 +209.222.8.196:80 orport=443 
id=C86D2F3DEFE287A0EEB28D4887AF14E35C172733 ipv6=[2001:19f0:1620:41c1:426c:5adf:2ed5:4e88]:443 +81.89.96.89:80 orport=443 id=28651F419F5A1CF74511BB500C58112192DD4943 ipv6=[2a02:180:1:1:2ced:24e:32ea:a03b]:443 +46.165.221.166:9030 orport=9001 id=8C7106C880FE8AA1319DD71B59623FCB8914C9F1 +178.16.208.62:80 orport=443 id=5CF8AFA5E4B0BB88942A44A3F3AAE08C3BDFD60B ipv6=[2a00:1c20:4089:1234:a6a4:2926:d0af:dfee]:443 +46.165.221.166:80 orport=443 id=EE5F897C752D46BCFF531641B853FC6BC78DD4A7 +178.16.208.60:80 orport=443 id=B44FBE5366AD98B46D829754FA4AC599BAE41A6A ipv6=[2a00:1c20:4089:1234:67bc:79f3:61c0:6e49]:443 +178.16.208.55:80 orport=443 id=C4AEA05CF380BAD2230F193E083B8869B4A29937 ipv6=[2a00:1c20:4089:1234:7b2c:11c5:5221:903e]:443 +178.16.208.61:80 orport=443 id=3B52392E2256C35CDCF7801FF898FC88CE6D431A ipv6=[2a00:1c20:4089:1234:2712:a3d0:666b:88a6]:443 +81.89.96.88:80 orport=443 id=55ED4BB49F6D3F36D8D9499BE43500E017A5EF82 ipv6=[2a02:180:1:1:14c5:b0b7:2d7d:5f3a]:443 +209.222.8.196:80 orport=443 id=C86D2F3DEFE287A0EEB28D4887AF14E35C172733 ipv6=[2001:19f0:1620:41c1:426c:5adf:2ed5:4e88]:443 +81.89.96.89:80 orport=443 id=28651F419F5A1CF74511BB500C58112192DD4943 ipv6=[2a02:180:1:1:2ced:24e:32ea:a03b]:443 +46.165.221.166:9030 orport=9001 id=8C7106C880FE8AA1319DD71B59623FCB8914C9F1 +178.16.208.56:80 orport=443 id=2CDCFED0142B28B002E89D305CBA2E26063FADE2 ipv6=[2a00:1c20:4089:1234:cd49:b58a:9ebe:67ec]:443 +178.16.208.58:80 orport=443 id=A4C98CEA3F34E05299417E9F885A642C88EF6029 ipv6=[2a00:1c20:4089:1234:cdae:1b3e:cc38:3d45]:443 +178.16.208.57:80 orport=443 id=92CFD9565B24646CAC2D172D3DB503D69E777B8A ipv6=[2a00:1c20:4089:1234:7825:2c5d:1ecd:c66f]:443 +178.16.208.59:80 orport=443 id=136F9299A5009A4E0E96494E723BDB556FB0A26B ipv6=[2a00:1c20:4089:1234:bff6:e1bb:1ce3:8dc6]:443 + +# Email sent directly to teor, verified using relay contact info +195.154.8.111:80 orport=443 id=FCB6695F8F2DC240E974510A4B3A0F2B12AB5B64 +51.255.235.246:80 orport=443 id=9B99C72B02AF8E3E5BE3596964F9CACD0090D132
+5.39.76.158:80 orport=443 id=C41F60F8B00E7FEF5CCC5BC6BB514CA1B8AAB651 + +# Email sent directly to teor, verified using relay contact info +109.163.234.5:80 orport=443 id=5C84C35936B7100B949AC75764EEF1352550550B +109.163.234.7:80 orport=443 id=C46524E586E1B997329703D356C07EE12B28C722 +109.163.234.9:80 orport=443 id=5714542DCBEE1DD9864824723638FD44B2122CEA +77.247.181.162:80 orport=443 id=7BB160A8F54BD74F3DA5F2CE701E8772B841859D +109.163.234.4:80 orport=443 id=6B1E001929AF4DDBB747D02EC28340792B7724A6 +77.247.181.164:80 orport=443 id=10E13E340651D0EF66B4DEBF610B3C0981168107 +109.163.234.8:80 orport=443 id=20B0038D7A2FD73C696922551B8344CB0893D1F8 +77.247.181.166:80 orport=443 id=06E123865C590189B3181114F23F0F13A7BC0E69 +109.163.234.2:80 orport=443 id=B4F883DB3D478C7AE569C9F6CB766FD58650DC6A +62.102.148.67:80 orport=443 id=4A0C3E177AF684581EF780981AEAF51A98A6B5CF +109.163.234.5:80 orport=443 id=5C84C35936B7100B949AC75764EEF1352550550B +109.163.234.7:80 orport=443 id=C46524E586E1B997329703D356C07EE12B28C722 +109.163.234.9:80 orport=443 id=5714542DCBEE1DD9864824723638FD44B2122CEA +77.247.181.162:80 orport=443 id=7BB160A8F54BD74F3DA5F2CE701E8772B841859D +109.163.234.4:80 orport=443 id=6B1E001929AF4DDBB747D02EC28340792B7724A6 +77.247.181.164:80 orport=443 id=10E13E340651D0EF66B4DEBF610B3C0981168107 +109.163.234.8:80 orport=443 id=20B0038D7A2FD73C696922551B8344CB0893D1F8 +77.247.181.166:80 orport=443 id=06E123865C590189B3181114F23F0F13A7BC0E69 +109.163.234.2:80 orport=443 id=B4F883DB3D478C7AE569C9F6CB766FD58650DC6A +62.102.148.67:80 orport=443 id=4A0C3E177AF684581EF780981AEAF51A98A6B5CF + +# https://twitter.com/biotimylated/status/718994247500718080 +212.47.252.149:9030 orport=9001 id=2CAC39BAA996791CEFAADC9D4754D65AF5EB77C0 + +# Email sent directly to teor, verified using relay contact info +46.165.230.5:80 orport=443 id=A0F06C2FADF88D3A39AA3072B406F09D7095AC9E + +# Email sent directly to teor, verified using relay contact info +94.242.246.24:23 orport=8080 
id=EC116BCB80565A408CE67F8EC3FE3B0B02C3A065 ipv6=[2a01:608:ffff:ff07::1:24]:9004 +176.126.252.11:443 orport=9001 id=B0279A521375F3CB2AE210BDBFC645FDD2E1973A ipv6=[2a02:59e0:0:7::11]:9003 +176.126.252.12:21 orport=8080 id=379FB450010D17078B3766C2273303C358C3A442 ipv6=[2a02:59e0:0:7::12]:81 +94.242.246.23:443 orport=9001 id=F65E0196C94DFFF48AFBF2F5F9E3E19AAE583FD0 ipv6=[2a01:608:ffff:ff07::1:23]:9003 +85.248.227.164:444 orport=9002 id=B84F248233FEA90CAD439F292556A3139F6E1B82 ipv6=[2a00:1298:8011:212::164]:9004 +85.248.227.163:443 orport=9001 id=C793AB88565DDD3C9E4C6F15CCB9D8C7EF964CE9 ipv6=[2a00:1298:8011:212::163]:9003 + +# Email sent directly to teor, verified using relay contact info +148.251.190.229:9030 orport=9010 id=BF0FB582E37F738CD33C3651125F2772705BB8E8 ipv6=[2a01:4f8:211:c68::2]:9010 + +# Email sent directly to teor, verified using relay contact info +5.79.68.161:81 orport=443 id=9030DCF419F6E2FBF84F63CBACBA0097B06F557E ipv6=[2001:1af8:4700:a012:1::1]:443 +5.79.68.161:9030 orport=9001 id=B7EC0C02D7D9F1E31B0C251A6B058880778A0CD1 ipv6=[2001:1af8:4700:a012:1::1]:9001 + +# Email sent directly to teor, verified using relay contact info +62.210.92.11:9030 orport=9001 id=0266B0660F3F20A7D1F3D8335931C95EF50F6C6B ipv6=[2001:bc8:338c::1]:9001 +62.210.92.11:9130 orport=9101 id=387B065A38E4DAA16D9D41C2964ECBC4B31D30FF ipv6=[2001:bc8:338c::1]:9101 + +# Email sent directly to teor, verified using relay contact info +188.165.194.195:9030 orport=9001 id=49E7AD01BB96F6FE3AB8C3B15BD2470B150354DF + +# Message sent directly to teor, verified using relay contact info +95.215.44.110:80 orport=443 id=D56AA4A1AA71961F5279FB70A6DCF7AD7B993EB5 +95.215.44.122:80 orport=443 id=998D8FE06B867AA3F8D257A7D28FFF16964D53E2 +95.215.44.111:80 orport=443 id=A7C7FD510B20BC8BE8F2A1D911364E1A23FBD09F + +# Email sent directly to teor, verified using relay contact info +86.59.119.88:80 orport=443 id=ACD889D86E02EDDAB1AFD81F598C0936238DC6D0 + +# Email sent directly to teor, verified using relay 
contact info +144.76.73.140:9030 orport=9001 id=6A640018EABF3DA9BAD9321AA37C2C87BBE1F907 + +# Email sent directly to teor, verified using relay contact info +193.11.164.243:9030 orport=9001 id=FFA72BD683BC2FCF988356E6BEC1E490F313FB07 ipv6=[2001:6b0:7:125::243]:9001 +109.105.109.162:52860 orport=60784 id=32EE911D968BE3E016ECA572BB1ED0A9EE43FC2F ipv6=[2001:948:7:2::163]:5001 + +# Email sent directly to teor, verified using relay contact info +146.0.32.144:9030 orport=9001 id=35E8B344F661F4F2E68B17648F35798B44672D7E + +# Email sent directly to teor, verified using relay contact info +46.252.26.2:45212 orport=49991 id=E589316576A399C511A9781A73DA4545640B479D + +# Email sent directly to teor, verified using relay contact info +89.187.142.208:80 orport=443 id=64186650FFE4469EBBE52B644AE543864D32F43C + +# Email sent directly to teor, verified using relay contact info +212.51.134.123:9030 orport=9001 id=50586E25BE067FD1F739998550EDDCB1A14CA5B2 ipv6=[2a02:168:6e00:0:3a60:77ff:fe9c:8bd1]:9001 + +# Email sent directly to teor, verified using relay contact info +46.101.143.173:80 orport=443 id=F960DF50F0FD4075AC9B505C1D4FFC8384C490FB + +# Email sent directly to teor, verified using relay contact info +217.79.190.25:9030 orport=9090 id=361D33C96D0F161275EE67E2C91EE10B276E778B + +# Email sent directly to teor, verified using relay contact info +193.171.202.146:9030 orport=9001 id=01A9258A46E97FF8B2CAC7910577862C14F2C524 + +# Email sent directly to teor, verified using relay contact info +197.231.221.211:9030 orport=9001 id=BC630CBBB518BE7E9F4E09712AB0269E9DC7D626 + +# Email sent directly to teor, verified using relay contact info +185.61.138.18:8080 orport=4443 id=2541759BEC04D37811C2209A88E863320271EC9C + +# Email sent directly to teor, verified using relay contact info +193.11.114.45:9031 orport=9002 id=80AAF8D5956A43C197104CEF2550CD42D165C6FB +193.11.114.43:9030 orport=9001 id=12AD30E5D25AA67F519780E2111E611A455FDC89 ipv6=[2001:6b0:30:1000::99]:9050 +193.11.114.46:9032 
orport=9003 id=B83DC1558F0D34353BB992EF93AFEAFDB226A73E + +# Email sent directly to teor, verified using relay contact info +144.76.26.175:9012 orport=9011 id=2BA2C8E96B2590E1072AECE2BDB5C48921BF8510 + +# Email sent directly to teor, verified using relay contact info +37.221.162.226:9030 orport=9001 id=D64366987CB39F61AD21DBCF8142FA0577B92811 + +# Email sent directly to teor, verified using relay contact info +91.219.237.244:80 orport=443 id=92ECC9E0E2AF81BB954719B189AC362E254AD4A5 + +# Email sent directly to teor, verified using relay contact info +185.21.100.50:9030 orport=9001 id=58ED9C9C35E433EE58764D62892B4FFD518A3CD0 ipv6=[2a00:1158:2:cd00:0:74:6f:72]:443 + +# Email sent directly to teor, verified using relay contact info +193.35.52.53:9030 orport=9001 id=DAA39FC00B196B353C2A271459C305C429AF09E4 + +# Email sent directly to teor, verified using relay contact info +134.119.3.164:9030 orport=9001 id=D1B8AAA98C65F3DF7D8BB3AF881CAEB84A33D8EE + +# Email sent directly to teor, verified using relay contact info +81.7.10.93:31336 orport=31337 id=99E246DB480B313A3012BC3363093CC26CD209C7 + +# Email sent directly to teor, verified using relay contact info +178.62.22.36:80 orport=443 id=A0766C0D3A667A3232C7D569DE94A28F9922FCB1 ipv6=[2a03:b0c0:1:d0::174:1]:9050 +188.166.23.127:80 orport=443 id=3771A8154DEA98D551607806C80A209CDAA74535 ipv6=[2a03:b0c0:2:d0::27b:7001]:9050 +198.199.64.217:80 orport=443 id=FAD306BAA59F6A02783F8606BDAA431F5FF7D1EA ipv6=[2604:a880:400:d0::1a9:b001]:9050 +159.203.32.149:80 orport=443 id=55C7554AFCEC1062DCBAC93E67B2E03C6F330EFC ipv6=[2604:a880:cad:d0::105:f001]:9050 + +# Email sent directly to teor, verified using relay contact info +5.196.31.80:9030 orport=9900 id=DFB2EB472643FAFCD5E73D2E37D51DB67203A695 ipv6=[2001:41d0:52:400::a65]:9900 + +# Email sent directly to teor, verified using relay contact info +188.138.112.60:1433 orport=1521 id=C414F28FD2BEC1553024299B31D4E726BEB8E788 + +# Email sent directly to teor, verified using relay contact info 
+213.61.66.118:9031 orport=9001 id=30648BC64CEDB3020F4A405E4AB2A6347FB8FA22 +213.61.66.117:9032 orport=9002 id=6E44A52E3D1FF7683FE5C399C3FB5E912DE1C6B4 +213.61.66.115:9034 orport=9004 id=480CCC94CEA04D2DEABC0D7373868E245D4C2AE2 +213.61.66.116:9033 orport=9003 id=A9DEB920B42B4EC1DE6249034039B06D61F38690 + +# Email sent directly to teor, verified using relay contact info +136.243.187.165:9030 orport=443 id=1AC65257D7BFDE7341046625470809693A8ED83E + +# Email sent directly to teor, verified using relay contact info +212.47.230.49:9030 orport=9001 id=3D6D0771E54056AEFC28BB1DE816951F11826E97 + +# Email sent directly to teor, verified using relay contact info +176.31.180.157:143 orport=22 id=E781F4EC69671B3F1864AE2753E0890351506329 ipv6=[2001:41d0:8:eb9d::1]:22 + +# Email sent directly to teor, verified using relay contact info +192.99.55.69:80 orport=443 id=0682DE15222A4A4A0D67DBA72A8132161992C023 +192.99.59.140:80 orport=443 id=3C9148DA49F20654730FAC83FFF693A4D49D0244 +51.254.215.13:80 orport=443 id=73C30C8ABDD6D9346C822966DE73B9F82CB6178A +51.254.215.129:80 orport=443 id=7B4491D05144B20AE8519AE784B94F0525A8BB79 +192.99.59.139:80 orport=443 id=82EC878ADA7C205146B9F5193A7310867FAA0D7B +51.254.215.124:80 orport=443 id=98999EBE89B5FA9AA0C58421F0B46C3D0AF51CBA +51.254.214.208:80 orport=443 id=C3F0D1417848EAFC41277A73DEB4A9F2AEC23DDF +192.99.59.141:80 orport=443 id=F45426551795B9DA78BEDB05CD5F2EACED8132E4 +192.99.59.14:80 orport=443 id=161A1B29A37EBF096D2F8A9B1E176D6487FE42AE + +# Email sent directly to teor, verified using relay contact info +151.80.42.103:9030 orport=9001 id=9007C1D8E4F03D506A4A011B907A9E8D04E3C605 ipv6=[2001:41d0:e:f67::114]:9001 + +# Email sent directly to teor, verified using relay contact info +5.39.92.199:80 orport=443 id=0BEA4A88D069753218EAAAD6D22EA87B9A1319D6 + +# Email sent directly to teor, verified using relay contact info +176.31.159.231:80 orport=443 id=D5DBCC0B4F029F80C7B8D33F20CF7D97F0423BB1 +176.31.159.230:80 orport=443 
id=631748AFB41104D77ADBB7E5CD4F8E8AE876E683 +195.154.79.128:80 orport=443 id=C697612CA5AED06B8D829FCC6065B9287212CB2F +195.154.9.161:80 orport=443 id=B6295A9960F89BD0C743EEBC5670450EA6A34685 +46.148.18.74:8080 orport=443 id=6CACF0B5F03C779672F3C5C295F37C8D234CA3F7 + +# Email sent directly to teor, verified using relay contact info +37.187.102.108:9090 orport=5550 id=F4263275CF54A6836EE7BD527B1328836A6F06E1 +212.47.241.21:80 orport=443 id=892F941915F6A0C6E0958E52E0A9685C190CF45C + +# Email sent directly to teor, verified using relay contact info +195.191.233.221:80 orport=443 id=DE134FC8E5CC4EC8A5DE66934E70AC9D70267197 + +# Email sent directly to teor, verified using relay contact info +62.210.238.33:9030 orport=9001 id=FDF845FC159C0020E2BDDA120C30C5C5038F74B4 + +# Email sent directly to teor, verified using relay contact info +37.157.195.87:8030 orport=443 id=12FD624EE73CEF37137C90D38B2406A66F68FAA2 + +# Email sent directly to teor, verified using relay contact info +37.187.7.74:80 orport=443 id=AEA43CB1E47BE5F8051711B2BF01683DB1568E05 ipv6=[2001:41d0:a:74a::1]:443 + +# Email sent directly to teor, verified using relay contact info +185.66.250.141:9030 orport=9001 id=B1726B94885CE3AC3910CA8B60622B97B98E2529 + +# Email sent directly to teor, verified using relay contact info +185.104.120.7:9030 orport=443 id=445F1C853966624FB3CF1E12442570DC553CC2EC ipv6=[2a06:3000::120:7]:443 +185.104.120.2:9030 orport=21 id=518FF8708698E1DA09C823C36D35DF89A2CAD956 +185.104.120.4:9030 orport=9001 id=F92B3CB9BBE0CB22409843FB1AE4DBCD5EFAC835 +185.104.120.3:9030 orport=21 id=707C1B61AC72227B34487B56D04BAA3BA1179CE8 ipv6=[2a06:3000::120:3]:21 + +# Email sent directly to teor, verified using relay contact info +37.187.102.186:9030 orport=9001 id=489D94333DF66D57FFE34D9D59CC2D97E2CB0053 ipv6=[2001:41d0:a:26ba::1]:9001 + +# Email sent directly to teor, verified using relay contact info +5.35.251.247:9030 orport=9001 id=9B1F5187DFBA89DC24B37EA7BF896C12B43A27AE + +# Email sent directly to 
teor, verified using relay contact info +198.96.155.3:8080 orport=5001 id=BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E + +# Email sent directly to teor, verified using relay contact info +212.83.154.33:8888 orport=443 id=3C79699D4FBC37DE1A212D5033B56DAE079AC0EF +212.83.154.33:8080 orport=8443 id=322C6E3A973BC10FC36DE3037AD27BC89F14723B + +# Email sent directly to teor, verified using relay contact info +51.255.41.65:9030 orport=9001 id=9231DF741915AA1630031A93026D88726877E93A + +# Email sent directly to teor, verified using relay contact info +78.142.142.246:80 orport=443 id=5A5E03355C1908EBF424CAF1F3ED70782C0D2F74 + +# Email sent directly to teor, verified using relay contact info +195.154.97.91:80 orport=443 id=BD33C50D50DCA2A46AAED54CA319A1EFEBF5D714 + +# Email sent directly to teor, verified using relay contact info +62.210.129.246:80 orport=443 id=79E169B25E4C7CE99584F6ED06F379478F23E2B8 + +# Email sent directly to teor, verified using relay contact info +5.196.74.215:9030 orport=9001 id=5818055DFBAF0FA7F67E8125FD63E3E7F88E28F6 + +# Email sent directly to teor, verified using relay contact info +212.47.233.86:9030 orport=9001 id=B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20 + +# Email sent directly to teor, verified using relay contact info +85.214.206.219:9030 orport=9001 id=98F8D5F359949E41DE8DF3DBB1975A86E96A84A0 + +# Email sent directly to teor, verified using relay contact info +46.166.170.4:80 orport=443 id=19F42DB047B72C7507F939F5AEA5CD1FA4656205 +46.166.170.5:80 orport=443 id=DA705AD4591E7B4708FA2CAC3D53E81962F3E6F6 + +# Email sent directly to teor, verified using relay contact info +5.189.157.56:80 orport=443 id=77F6D6A6B6EAFB8F5DADDC07A918BBF378ED6725 + +# Email sent directly to teor, verified using relay contact info +46.28.110.244:80 orport=443 id=9F7D6E6420183C2B76D3CE99624EBC98A21A967E +185.13.39.197:80 orport=443 id=001524DD403D729F08F7E5D77813EF12756CFA8D +95.130.12.119:80 orport=443 id=587E0A9552E4274B251F29B5B2673D38442EE4BF + +# Email sent directly 
to teor, verified using relay contact info +212.129.62.232:80 orport=443 id=B143D439B72D239A419F8DCE07B8A8EB1B486FA7 + +# Email sent directly to teor, verified using relay contact info +91.219.237.229:80 orport=443 id=1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7 + +# Email sent directly to teor, verified using relay contact info +# Suitable, check with operator before adding +#212.47.240.10:82 orport=443 id=2A4C448784F5A83AFE6C78DA357D5E31F7989DEB +212.47.240.10:81 orport=993 id=72527E3242CB15AADE28374AE0D35833FC083F60 +163.172.131.88:80 orport=443 id=AD253B49E303C6AB1E048B014392AC569E8A7DAE ipv6=[2001:bc8:4400:2100::2:1009]:443 +# Suitable, check with operator before adding +# NOTE(review): the original entry's ipv6= value was the relay fingerprint, not a +# valid <IPv6>:<IPv6 ORPort> — obtain the real IPv6 address before adding +#163.172.131.88:81 orport=993 id=D5F3FB17504744FB7ECEF46F4B1D155258A6D942 + +# Email sent directly to teor, verified using relay contact info +46.101.151.222:80 orport=443 id=1DBAED235E3957DE1ABD25B4206BE71406FB61F8 +178.62.60.37:80 orport=443 id=175921396C7C426309AB03775A9930B6F611F794 + +# Email sent directly to teor, verified using relay contact info +178.62.197.82:80 orport=443 id=0D3EBA17E1C78F1E9900BABDB23861D46FCAF163 + +# Email sent directly to teor, verified using relay contact info +82.223.21.74:9030 orport=9001 id=7A32C9519D80CA458FC8B034A28F5F6815649A98 ipv6=[2001:470:53e0::cafe]:9050 + +# Email sent directly to teor, verified using relay contact info +146.185.177.103:80 orport=9030 id=9EC5E097663862DF861A18C32B37C5F82284B27D + +# Email sent directly to teor, verified using relay contact info +37.187.22.87:9030 orport=9001 id=36B9E7AC1E36B62A9D6F330ABEB6012BA7F0D400 ipv6=[2001:41d0:a:1657::1]:9001 + +# Email sent directly to teor, verified using relay contact info +37.59.46.159:9030 orport=9001 id=CBD0D1BD110EC52963082D839AC6A89D0AE243E7 + +# Email sent directly to teor, verified using relay contact info +212.47.250.243:9030 orport=9001 id=5B33EDBAEA92F446768B3753549F3B813836D477 +# Confirm with operator before adding these
+#163.172.133.36:9030 orport=9001 id=D8C2BD36F01FA86F4401848A0928C4CB7E5FDFF9 +#158.69.216.70:9030 orport=9001 id=0ACE25A978D4422C742D6BC6345896719BF6A7EB + +# Email sent directly to teor, verified using relay contact info +5.199.142.236:9030 orport=9001 id=F4C0EDAA0BF0F7EC138746F8FEF1CE26C7860265 + +# Email sent directly to teor, verified using relay contact info +188.166.133.133:9030 orport=9001 id=774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7 ipv6=[2a03:b0c0:2:d0::5:f001]:9001 + +# Email sent directly to teor, verified using relay contact info +5.196.88.122:9030 orport=9001 id=0C2C599AFCB26F5CFC2C7592435924C1D63D9484 + +# Email sent directly to teor, verified using relay contact info +46.8.249.10:80 orport=443 id=31670150090A7C3513CB7914B9610E786391A95D + +# Email sent directly to teor, verified using relay contact info +144.76.163.93:9030 orport=9001 id=22F08CF09764C4E8982640D77F71ED72FF26A9AC + +# Email sent directly to teor, verified using relay contact info +46.4.24.161:9030 orport=9001 id=DB4C76A3AD7E234DA0F00D6F1405D8AFDF4D8DED +46.4.24.161:9031 orport=9002 id=7460F3D12EBE861E4EE073F6233047AACFE46AB4 +46.38.51.132:9030 orport=9001 id=810DEFA7E90B6C6C383C063028EC397A71D7214A +163.172.194.53:9030 orport=9001 id=8C00FA7369A7A308F6A137600F0FA07990D9D451 + +# Email sent directly to teor, verified using relay contact info +176.10.107.180:9030 orport=9001 id=3D7E274A87D9A89AF064C13D1EE4CA1F184F2600 +195.154.75.84:9030 orport=9001 id=F80FDE27EFCB3F6A7B4E2CC517133DBFFA78BA2D +195.154.127.246:9030 orport=9001 id=4FEE77AFFD157BBCF2D896AE417FBF647860466C + +# Email sent directly to teor, verified using relay contact info +46.28.207.19:80 orport=443 id=5B92FA5C8A49D46D235735504C72DBB3472BA321 +46.28.207.141:80 orport=443 id=F69BED36177ED727706512BA6A97755025EEA0FB +46.28.205.170:80 orport=443 id=AF322D83A4D2048B22F7F1AF5F38AFF4D09D0B76 +95.183.48.12:80 orport=443 id=7187CED1A3871F837D0E60AC98F374AC541CB0DA + +# Email sent directly to teor, verified using relay contact info 
+93.180.156.84:9030 orport=9001 id=8844D87E9B038BE3270938F05AF797E1D3C74C0F + +# Email sent directly to teor, verified using relay contact info +37.187.115.157:9030 orport=9001 id=D5039E1EBFD96D9A3F9846BF99EC9F75EDDE902A + +# Email sent directly to teor, verified using relay contact info +5.34.183.205:80 orport=443 id=DDD7871C1B7FA32CB55061E08869A236E61BDDF8 + +# Email sent directly to teor, verified using relay contact info +51.254.246.203:9030 orport=9001 id=47B596B81C9E6277B98623A84B7629798A16E8D5 + +# Email sent directly to teor, verified using relay contact info +5.9.146.203:80 orport=443 id=1F45542A24A61BF9408F1C05E0DCE4E29F2CBA11 + +# Email sent directly to teor, verified using relay contact info +167.114.152.100:9030 orport=443 id=0EF5E5FFC5D1EABCBDA1AFF6F6D6325C5756B0B2 ipv6=[2607:5300:100:200::1608]:443 + +# Email sent directly to teor, verified using relay contact info +192.99.168.102:80 orport=443 id=230A8B2A8BA861210D9B4BA97745AEC217A94207 +167.114.153.21:80 orport=443 id=0B85617241252517E8ECF2CFC7F4C1A32DCD153F + +# Email sent directly to teor, verified using relay contact info +204.11.50.131:9030 orport=9001 id=185F2A57B0C4620582602761097D17DB81654F70 + +# Email sent directly to teor, verified using relay contact info +151.236.222.217:44607 orport=9001 id=94D58704C2589C130C9C39ED148BD8EA468DBA54 + +# Email sent directly to teor, verified using relay contact info +194.150.168.79:11112 orport=11111 id=29F1020B94BE25E6BE1AD13E93CE19D2131B487C + +# Email sent directly to teor, verified using relay contact info +185.35.202.221:9030 orport=9001 id=C13B91384CDD52A871E3ECECE4EF74A7AC7DCB08 ipv6=[2a02:ed06::221]:9001 + +# Email sent directly to teor, verified using relay contact info +5.9.151.241:9030 orport=4223 id=9BF04559224F0F1C3C953D641F1744AF0192543A + +# Email sent directly to teor, verified using relay contact info +89.40.71.149:8081 orport=8080 id=EC639EDAA5121B47DBDF3D6B01A22E48A8CB6CC7 + +# Email sent directly to teor, verified using relay contact 
info +92.222.20.130:80 orport=443 id=0639612FF149AA19DF3BCEA147E5B8FED6F3C87C + +# Email sent directly to teor, verified using relay contact info +80.112.155.100:9030 orport=9001 id=1163378F239C36CA1BDC730AC50BF4F2976141F5 ipv6=[2001:470:7b02::38]:9001 + +# Email sent directly to teor, verified using relay contact info +83.212.99.68:80 orport=443 id=DDBB2A38252ADDA53E4492DDF982CA6CC6E10EC0 ipv6=[2001:648:2ffc:1225:a800:bff:fe3d:67b5]:443 + +# Email sent directly to teor, verified using relay contact info +95.130.11.147:9030 orport=443 id=6B697F3FF04C26123466A5C0E5D1F8D91925967A + +# Email sent directly to teor, verified using relay contact info +176.31.191.26:9030 orport=9001 id=7350AB9ED7568F22745198359373C04AC783C37C + +# Email sent directly to teor, verified using relay contact info +128.199.55.207:9030 orport=9001 id=BCEF908195805E03E92CCFE669C48738E556B9C5 ipv6=[2a03:b0c0:2:d0::158:3001]:9001 + +# Email sent directly to teor, verified using relay contact info +178.32.216.146:9030 orport=9001 id=17898F9A2EBC7D69DAF87C00A1BD2FABF3C9E1D2 + +# Email sent directly to teor, verified using relay contact info +212.83.40.238:9030 orport=9001 id=F409FA7902FD89270E8DE0D7977EA23BC38E5887 + +# Email sent directly to teor, verified using relay contact info +204.8.156.142:80 orport=443 id=94C4B7B8C50C86A92B6A20107539EE2678CF9A28 + +# Email sent directly to teor, verified using relay contact info +80.240.139.111:80 orport=443 id=DD3BE7382C221F31723C7B294310EF9282B9111B + +# Email sent directly to teor, verified using relay contact info +185.97.32.18:9030 orport=9001 id=3BAB316CAAEC47E71905EB6C65584636D5689A8A + +# Email sent directly to teor, verified using relay contact info +149.56.45.200:9030 orport=9001 id=FE296180018833AF03A8EACD5894A614623D3F76 + +# Email sent directly to teor, verified using relay contact info +81.2.209.10:443 orport=80 id=B6904ADD4C0D10CDA7179E051962350A69A63243 + +# Email sent directly to teor, verified using relay contact info +195.154.164.243:80 
orport=443 id=AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C ipv6=[2001:bc8:399f:f000::1]:993 +138.201.26.2:80 orport=443 id=6D3A3ED5671E4E3F58D4951438B10AE552A5FA0F +81.7.16.182:80 orport=443 id=51E1CF613FD6F9F11FE24743C91D6F9981807D82 ipv6=[2a02:180:1:1::517:10b6]:993 +134.119.36.135:80 orport=443 id=763C9556602BD6207771A7A3D958091D44C43228 ipv6=[2a00:1158:3::2a8]:993 +46.228.199.19:80 orport=443 id=E26AFC5F718E21AC502899B20C653AEFF688B0D2 ipv6=[2001:4ba0:cafe:4a::1]:993 +37.200.98.5:80 orport=443 id=231C2B9C8C31C295C472D031E06964834B745996 ipv6=[2a00:1158:3::11a]:993 +46.23.70.195:80 orport=443 id=C9933B3725239B6FAB5227BA33B30BE7B48BB485 +185.15.244.124:80 orport=443 id=935BABE2564F82016C19AEF63C0C40B5753BA3D2 ipv6=[2001:4ba0:cafe:e35::1]:993 +195.154.116.232:80 orport=443 id=B35C5739C8C5AB72094EB2B05738FD1F8EEF6EBD ipv6=[2001:bc8:399f:200::1]:993 +195.154.121.198:80 orport=443 id=0C77421C890D16B6D201283A2244F43DF5BC89DD ipv6=[2001:bc8:399f:100::1]:993 +37.187.20.59:80 orport=443 id=91D23D8A539B83D2FB56AA67ECD4D75CC093AC55 ipv6=[2001:41d0:a:143b::1]:993 +217.12.208.117:80 orport=443 id=E6E18151300F90C235D3809F90B31330737CEB43 ipv6=[2a00:1ca8:a7::1bb]:993 +81.7.10.251:80 orport=443 id=8073670F8F852971298F8AF2C5B23AE012645901 ipv6=[2a02:180:1:1::517:afb]:993 +46.36.39.50:80 orport=443 id=ED4B0DBA79AEF5521564FA0231455DCFDDE73BB6 ipv6=[2a02:25b0:aaaa:aaaa:8d49:b692:4852:0]:995 +91.194.90.103:80 orport=443 id=75C4495F4D80522CA6F6A3FB349F1B009563F4B7 ipv6=[2a02:c200:0:10:3:0:5449:1]:993 +163.172.25.118:80 orport=22 id=0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F +188.138.88.42:80 orport=443 id=70C55A114C0EF3DC5784A4FAEE64388434A3398F +81.7.13.84:80 orport=443 id=0C1E7DD9ED0676C788933F68A9985ED853CA5812 ipv6=[2a02:180:1:1::5b8f:538c]:993 +213.246.56.95:80 orport=443 id=27E6E8E19C46751E7312420723C6162FF3356A4C ipv6=[2a00:c70:1:213:246:56:95:1]:993 +94.198.100.18:80 orport=443 id=BAACCB29197DB833F107E410E2BFAE5009EE7583 +217.12.203.46:80 orport=443 
id=6A29FD8C00D573E6C1D47852345B0E5275BA3307 +212.117.180.107:80 orport=443 id=0B454C7EBA58657B91133A587C1BDAEDC6E23142 +217.12.199.190:80 orport=443 id=A37C47B03FF31CA6937D3D68366B157997FE7BCD ipv6=[2a02:27a8:0:2::486]:993 +216.230.230.247:80 orport=443 id=4C7BF55B1BFF47993DFF995A2926C89C81E4F04A +69.30.215.42:80 orport=443 id=510176C07005D47B23E6796F02C93241A29AA0E9 ipv6=[2604:4300:a:2e::2]:993 +89.46.100.162:80 orport=443 id=6B7191639E179965FD694612C9B2C8FB4267B27D +107.181.174.22:80 orport=443 id=5A551BF2E46BF26CC50A983F7435CB749C752553 ipv6=[2607:f7a0:3:4::4e]:993 + +# Email sent directly to teor, verified using relay contact info +212.238.208.48:9030 orport=9001 id=F406219CDD339026D160E53FCA0EF6857C70F109 ipv6=[2001:984:a8fb:1:ba27:ebff:feac:c109]:9001 + +# Email sent directly to teor, verified using relay contact info +176.158.132.12:9030 orport=9001 id=DC163DDEF4B6F0C6BC226F9F6656A5A30C5C5686 + +# Email sent directly to teor, verified using relay contact info +91.229.20.27:9030 orport=9001 id=9A0D54D3A6D2E0767596BF1515E6162A75B3293F + +# Email sent directly to teor, verified using relay contact info +# Awaiting confirmation of new ORPort from relay operator +80.127.137.19:80 orport=443 id=6EF897645B79B6CB35E853B32506375014DE3621 ipv6=[2001:981:47c1:1::6]:443 + +# Email sent directly to teor, verified using relay contact info +163.172.138.22:80 orport=443 id=8664DC892540F3C789DB37008236C096C871734D + +# Email sent directly to teor, verified using relay contact info +97.74.237.196:9030 orport=9001 id=2F0F32AB1E5B943CA7D062C03F18960C86E70D94 + +# Email sent directly to teor, verified using relay contact info +192.187.124.98:9030 orport=9001 id=FD1871854BFC06D7B02F10742073069F0528B5CC + +# Email sent directly to teor, verified using relay contact info +178.62.98.160:9030 orport=9001 id=8B92044763E880996A988831B15B2B0E5AD1544A + +# Email sent directly to teor, verified using relay contact info +195.154.15.227:9030 orport=9001 
id=6C3E3AB2F5F03CD71B637D433BAD924A1ECC5796 + +# Email sent directly to teor, verified using relay contact info +185.100.86.100:80 orport=443 id=0E8C0C8315B66DB5F703804B3889A1DD66C67CE0 + +# Email sent directly to teor, verified using relay contact info +164.132.77.175:9030 orport=9001 id=3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B +78.24.75.53:9030 orport=9001 id=DEB73705B2929AE9BE87091607388939332EF123 + +# Email sent directly to teor, verified using relay contact info +46.101.237.246:9030 orport=9001 id=75F1992FD3F403E9C082A5815EB5D12934CDF46C ipv6=[2a03:b0c0:3:d0::208:5001]:9050 +178.62.86.96:9030 orport=9001 id=439D0447772CB107B886F7782DBC201FA26B92D1 ipv6=[2a03:b0c0:1:d0::3cf:7001]:9050 + +# Email sent directly to teor, verified using relay contact info +91.233.106.121:80 orport=443 id=896364B7996F5DFBA0E15D1A2E06D0B98B555DD6 + +# Email sent directly to teor, verified using relay contact info +167.114.113.48:9030 orport=443 id=2EC0C66EA700C44670444280AABAB1EC78B722A0 + +# Email sent directly to teor, verified using relay contact info +79.120.16.42:9030 orport=9001 id=BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6 + +# Email sent directly to teor, verified using relay contact info +95.128.43.164:80 orport=443 id=616081EC829593AF4232550DE6FFAA1D75B37A90 ipv6=[2a02:ec0:209:10::4]:443 + +# Email sent directly to teor, verified using relay contact info +166.82.21.200:9030 orport=9029 id=D5C33F3E203728EDF8361EA868B2939CCC43FAFB + +# Email sent directly to teor, verified using relay contact info +91.121.54.8:9030 orport=9001 id=CBEE0F3303C8C50462A12107CA2AE061831931BC + +# Email sent directly to teor, verified using relay contact info +178.217.184.32:8080 orport=443 id=8B7F47AE1A5D954A3E58ACDE0865D09DBA5B738D + +# Email sent directly to teor, verified using relay contact info +85.10.201.47:9030 orport=9001 id=D8B7A3A6542AA54D0946B9DC0257C53B6C376679 ipv6=[2a01:4f8:a0:43eb::beef]:9001 + +# Email sent directly to teor, verified using relay contact info +120.29.217.46:80 
orport=443 id=5E853C94AB1F655E9C908924370A0A6707508C62 + +# Email sent directly to teor, verified using relay contact info +37.153.1.10:9030 orport=9001 id=9772EFB535397C942C3AB8804FB35CFFAD012438 + +# Email sent directly to teor, verified using relay contact info +92.222.4.102:9030 orport=9001 id=1A6B8B8272632D8AD38442027F822A367128405C + +# Email sent directly to teor, verified using relay contact info +31.31.78.49:80 orport=443 id=46791D156C9B6C255C2665D4D8393EC7DBAA7798 + +# Email sent directly to teor, verified using relay contact info +96.47.231.214:9030 orport=8080 id=F843CB5729575D76FF1FFBB2179BDCF52C0C6387 +192.99.246.48:9030 orport=9001 id=CD6B149BED1BB254EF6DFF9D75DDB11E7F8A38A4 ipv6=[2607:5300:100:200::de3]:9002 +192.160.102.164:80 orport=9001 id=823AA81E277F366505545522CEDC2F529CE4DC3F ipv6=[2605:e200:d00c:c01d::1111]:9002 + +# Email sent directly to teor, verified using relay contact info +136.243.214.137:80 orport=443 id=B291D30517D23299AD7CEE3E60DFE60D0E3A4664 + +# Email sent directly to teor, verified using relay contact info +192.87.28.28:9030 orport=9001 id=ED2338CAC2711B3E331392E1ED2831219B794024 +192.87.28.82:9030 orport=9001 id=844AE9CAD04325E955E2BE1521563B79FE7094B7 + +# Email sent directly to teor, verified using relay contact info +192.87.28.28:9030 orport=9001 id=ED2338CAC2711B3E331392E1ED2831219B794024 +# OK, but same machine as ED2338CAC2711B3E331392E1ED2831219B794024 +#192.87.28.82:9030 orport=9001 id=844AE9CAD04325E955E2BE1521563B79FE7094B7 + +# https://twitter.com/kosjoli/status/719507270904758272 +85.10.202.87:9030 orport=9001 id=971AFB23C168DCD8EDA17473C1C452B359DE3A5A +176.9.5.116:9030 orport=9001 id=A1EB8D8F1EE28DB98BBB1EAA3B4BEDD303BAB911 +46.4.111.124:9030 orport=9001 id=D9065F9E57899B3D272AA212317AF61A9B14D204 + +# Email sent directly to teor, verified using relay contact info +78.46.164.129:9030 orport=9001 id=52AEA31188331F421B2EDB494DB65CD181E5B257 + +# Email sent directly to teor, verified using relay contact info 
+185.100.85.61:80 orport=443 id=025B66CEBC070FCB0519D206CF0CF4965C20C96E + +# Email sent directly to teor, verified using relay contact info +108.166.168.158:80 orport=443 id=CDAB3AE06A8C9C6BF817B3B0F1877A4B91465699 + +# Email sent directly to teor, verified using relay contact info +91.219.236.222:80 orport=443 id=EC413181CEB1C8EDC17608BBB177CD5FD8535E99 + +# Email sent directly to teor, verified using relay contact info +185.14.185.240:9030 orport=443 id=D62FB817B0288085FAC38A6DC8B36DCD85B70260 +192.34.63.137:9030 orport=443 id=ABCB4965F1FEE193602B50A365425105C889D3F8 + +# Email sent directly to teor, verified using relay contact info +185.13.38.75:9030 orport=9001 id=D2A1703758A0FBBA026988B92C2F88BAB59F9361 + +# Email sent directly to teor, verified using relay contact info +128.204.39.106:9030 orport=9001 id=6F0F3C09AF9580F7606B34A7678238B3AF7A57B7 + +# Email sent directly to teor, verified using relay contact info +198.50.191.95:80 orport=443 id=39F096961ED2576975C866D450373A9913AFDC92 + +# Email sent directly to teor, verified using relay contact info +167.114.66.61:9696 orport=443 id=DE6CD5F09DF26076F26321B0BDFBE78ACD935C65 ipv6=[2607:5300:100::78d]:443 + +# Email sent directly to teor, verified using relay contact info +66.111.2.20:9030 orport=9001 id=9A68B85A02318F4E7E87F2828039FBD5D75B0142 +66.111.2.16:9030 orport=9001 id=3F092986E9B87D3FDA09B71FA3A602378285C77A + +# Email sent directly to teor, verified using relay contact info +92.222.38.67:80 orport=443 id=DED6892FF89DBD737BA689698A171B2392EB3E82 + +# Email sent directly to teor, verified using relay contact info +212.47.228.115:9030 orport=443 id=BCA017ACDA48330D02BB70716639ED565493E36E + +# Email sent directly to teor, verified using relay contact info +185.100.84.175:80 orport=443 id=39B59AF4FE54FAD8C5085FA9C15FDF23087250DB + +# Email sent directly to teor, verified using relay contact info +166.70.207.2:9030 orport=9001 id=E3DB2E354B883B59E8DC56B3E7A353DDFD457812 + +# Emails sent directly to teor, 
verified using relay contact info +#69.162.139.9:9030 orport=9001 id=4791FC0692EAB60DF2BCCAFF940B95B74E7654F6 ipv6=[2607:f128:40:1212::45a2:8b09]:9001 + +# Email sent directly to teor, verified using relay contact info +213.239.217.18:1338 orport=1337 id=C37BC191AC389179674578C3E6944E925FE186C2 ipv6=[2a01:4f8:a0:746a:101:1:1:1]:1337 + +# Email sent directly to teor, verified using relay contact info +188.40.128.246:9030 orport=9001 id=AD19490C7DBB26D3A68EFC824F67E69B0A96E601 + +# Email sent directly to teor, verified using relay contact info +88.198.253.13:9030 orport=9001 id=DF924196D69AAE3C00C115A9CCDF7BB62A175310 ipv6=[2a01:4f8:11a:b1f::2]:9001 + +# Email sent directly to teor, verified using relay contact info +185.100.86.128:9030 orport=9001 id=9B31F1F1C1554F9FFB3455911F82E818EF7C7883 +46.36.36.127:9030 orport=9001 id=C80DF89B21FF932DEC0D7821F679B6C79E1449C3 + +# Email sent directly to teor, verified using relay contact info +176.10.104.240:80 orport=443 id=0111BA9B604669E636FFD5B503F382A4B7AD6E80 +176.10.104.240:8080 orport=8443 id=AD86CD1A49573D52A7B6F4A35750F161AAD89C88 +176.10.104.243:80 orport=443 id=88487BDD980BF6E72092EE690E8C51C0AA4A538C +176.10.104.243:8080 orport=8443 id=95DA61AEF23A6C851028C1AA88AD8593F659E60F + +# Email sent directly to teor, verified using relay contact info +107.170.101.39:9030 orport=443 id=30973217E70AF00EBE51797FF6D9AA720A902EAA + +# Email sent directly to teor, verified using relay contact info +192.99.212.139:80 orport=443 id=F10BDE279AE71515DDCCCC61DC19AC8765F8A3CC + +# Email sent directly to teor, verified using relay contact info +163.172.35.249:80 orport=443 id=C08DE49658E5B3CFC6F2A952B453C4B608C9A16A +163.172.35.247:80 orport=443 id=71AB4726D830FAE776D74AEF790CF04D8E0151B4 +163.172.13.124:80 orport=443 id=B771AA877687F88E6F1CA5354756DF6C8A7B6B24 + +# Email sent directly to teor, verified using relay contact info +64.113.32.29:9030 orport=9001 id=30C19B81981F450C402306E2E7CFB6C3F79CB6B2 + +# Email sent directly to teor, 
verified using relay contact info +212.51.156.193:995 orport=110 id=32E7AAF1F602814D699BEF6761AD03E387758D49 ipv6=[2a02:168:4a01::49]:110 + +# Emails sent directly to teor, verified using relay contact info +51.254.101.242:9002 orport=9001 id=4CC9CC9195EC38645B699A33307058624F660CCF + +# Emails sent directly to teor, verified using relay contact info +85.214.62.48:80 orport=443 id=6A7551EEE18F78A9813096E82BF84F740D32B911 + +# Email sent directly to teor, verified using relay contact info +173.255.245.116:9030 orport=9001 id=91E4015E1F82DAF0121D62267E54A1F661AB6DC7 + +# Email sent directly to teor, verified using relay contact info +62.216.5.120:9030 orport=9001 id=D032D4D617140D6B828FC7C4334860E45E414FBE diff --git a/scripts/maint/format_changelog.py b/scripts/maint/format_changelog.py index f67e89b602..5e4c8cac9a 100755 --- a/scripts/maint/format_changelog.py +++ b/scripts/maint/format_changelog.py @@ -1,5 +1,5 @@ #!/usr/bin/python -# Copyright (c) 2014, The Tor Project, Inc. +# Copyright (c) 2014-2015, The Tor Project, Inc. # See LICENSE for licensing information # # This script reformats a section of the changelog to wrap everything to @@ -12,6 +12,7 @@ import os import re import sys +import optparse # ============================== # Oh, look! 
It's a cruddy approximation to Knuth's elegant text wrapping @@ -33,6 +34,8 @@ import sys NO_HYPHENATE=set(""" pf-divert +tor-resolve +tor-gencert """.split()) LASTLINE_UNDERFLOW_EXPONENT = 1 @@ -55,7 +58,7 @@ def generate_wrapping(words, divisions): w = words[last:i] last = i line = " ".join(w).replace("\xff ","-").replace("\xff","-") - lines.append(line) + lines.append(line.strip()) return lines def wrapping_quality(words, divisions, width1, width2): @@ -115,7 +118,10 @@ def wrap_graf(words, prefix_len1=0, prefix_len2=0, width=72): return lines -def hyphenateable(word): +def hyphenatable(word): + if "--" in word: + return False + if re.match(r'^[^\d\-]\D*-', word): stripped = re.sub(r'^\W+','',word) stripped = re.sub(r'\W+$','',word) @@ -128,7 +134,7 @@ def split_paragraph(s): r = [] for word in s.split(): - if hyphenateable(word): + if hyphenatable(word): while "-" in word: a,word = word.split("-",1) r.append(a+"\xff") @@ -156,10 +162,13 @@ TP_SECHEAD = 3 TP_ITEMFIRST = 4 TP_ITEMBODY = 5 TP_END = 6 +TP_PREHEAD = 7 def head_parser(line): - if re.match(r'^[A-Z]', line): + if re.match(r'^Changes in', line): return TP_MAINHEAD + elif re.match(r'^[A-Za-z]', line): + return TP_PREHEAD elif re.match(r'^ o ', line): return TP_SECHEAD elif re.match(r'^\s*$', line): @@ -178,17 +187,67 @@ def body_parser(line): return TP_BLANK elif re.match(r'^Changes in', line): return TP_END + elif re.match(r'^\s+\S', line): + return TP_HEADTEXT else: print "Weird line %r"%line +def clean_head(head): + return head + +def head_score(s): + m = re.match(r'^ +o (.*)', s) + if not m: + print >>sys.stderr, "Can't score %r"%s + return 99999 + lw = m.group(1).lower() + if lw.startswith("security") and "feature" not in lw: + score = -300 + elif lw.startswith("deprecated version"): + score = -200 + elif (('new' in lw and 'requirement' in lw) or + ('new' in lw and 'dependenc' in lw) or + ('build' in lw and 'requirement' in lw) or + ('removed' in lw and 'platform' in lw)): + score = -100 + elif 
lw.startswith("major feature"): + score = 00 + elif lw.startswith("major bug"): + score = 50 + elif lw.startswith("major"): + score = 70 + elif lw.startswith("minor feature"): + score = 200 + elif lw.startswith("minor bug"): + score = 250 + elif lw.startswith("minor"): + score = 270 + else: + score = 1000 + + if 'secur' in lw: + score -= 2 + + if "(other)" in lw: + score += 2 + + if '(' not in lw: + score -= 1 + + return score + class ChangeLog(object): - def __init__(self): + def __init__(self, wrapText=True, blogOrder=True, drupalBreak=False): + self.prehead = [] self.mainhead = None self.headtext = [] self.curgraf = None self.sections = [] self.cursection = None self.lineno = 0 + self.wrapText = wrapText + self.blogOrder = blogOrder + self.drupalBreak = drupalBreak def addLine(self, tp, line): self.lineno += 1 @@ -197,6 +256,9 @@ class ChangeLog(object): assert not self.mainhead self.mainhead = line + elif tp == TP_PREHEAD: + self.prehead.append(line) + elif tp == TP_HEADTEXT: if self.curgraf is None: self.curgraf = [] @@ -240,6 +302,11 @@ class ChangeLog(object): self.lint_item(item_line, grafs, head_type) def dumpGraf(self,par,indent1,indent2=-1): + if not self.wrapText: + for line in par: + print line + return + if indent2 == -1: indent2 = indent1 text = " ".join(re.sub(r'\s+', ' ', line.strip()) for line in par) @@ -249,38 +316,210 @@ class ChangeLog(object): initial_indent=" "*indent1, subsequent_indent=" "*indent2)) + def dumpPreheader(self, graf): + self.dumpGraf(graf, 0) + print + + def dumpMainhead(self, head): + print head + + def dumpHeadGraf(self, graf): + self.dumpGraf(graf, 2) + print + + def dumpSectionHeader(self, header): + print header + + def dumpStartOfSections(self): + pass + + def dumpEndOfSections(self): + pass + + def dumpEndOfSection(self): + print + + def dumpEndOfChangelog(self): + print + + def dumpDrupalBreak(self): + pass + + def dumpItem(self, grafs): + self.dumpGraf(grafs[0],4,6) + for par in grafs[1:]: + print + 
self.dumpGraf(par,6,6) + + def collateAndSortSections(self): + heads = [] + sectionsByHead = { } + for _, head, items in self.sections: + head = clean_head(head) + try: + s = sectionsByHead[head] + except KeyError: + s = sectionsByHead[head] = [] + heads.append( (head_score(head), head.lower(), head, s) ) + + s.extend(items) + + heads.sort() + self.sections = [ (0, head, items) for _1,_2,head,items in heads ] + def dump(self): - print self.mainhead + if self.prehead: + self.dumpPreheader(self.prehead) + + if not self.blogOrder: + self.dumpMainhead(self.mainhead) + for par in self.headtext: - self.dumpGraf(par, 2) - print + self.dumpHeadGraf(par) + + if self.blogOrder: + self.dumpMainhead(self.mainhead) + + drupalBreakAfter = None + if self.drupalBreak and len(self.sections) > 4: + drupalBreakAfter = self.sections[1][2] + + self.dumpStartOfSections() for _,head,items in self.sections: if not head.endswith(':'): print >>sys.stderr, "adding : to %r"%head head = head + ":" - print head + self.dumpSectionHeader(head) for _,grafs in items: - self.dumpGraf(grafs[0],4,6) - for par in grafs[1:]: - print - self.dumpGraf(par,6,6) - print - print + self.dumpItem(grafs) + self.dumpEndOfSection() + if items is drupalBreakAfter: + self.dumpDrupalBreak() + self.dumpEndOfSections() + self.dumpEndOfChangelog() + +class HTMLChangeLog(ChangeLog): + def __init__(self, *args, **kwargs): + ChangeLog.__init__(self, *args, **kwargs) + + def htmlText(self, graf): + for line in graf: + line = line.rstrip().replace("&","&") + line = line.rstrip().replace("<","<").replace(">",">") + sys.stdout.write(line.strip()) + sys.stdout.write(" ") + + def htmlPar(self, graf): + sys.stdout.write("<p>") + self.htmlText(graf) + sys.stdout.write("</p>\n") + + def dumpPreheader(self, graf): + self.htmlPar(graf) + + def dumpMainhead(self, head): + sys.stdout.write("<h2>%s</h2>"%head) + + def dumpHeadGraf(self, graf): + self.htmlPar(graf) + + def dumpSectionHeader(self, header): + header = header.replace(" o ", 
"", 1).lstrip() + sys.stdout.write(" <li>%s\n"%header) + sys.stdout.write(" <ul>\n") + + def dumpEndOfSection(self): + sys.stdout.write(" </ul>\n\n") + + def dumpEndOfChangelog(self): + pass -CL = ChangeLog() -parser = head_parser + def dumpStartOfSections(self): + print "<ul>\n" + + def dumpEndOfSections(self): + print "</ul>\n" -if len(sys.argv) == 1: + def dumpDrupalBreak(self): + print "\n</ul>\n" + print "<p> </p>" + print "\n<!--break-->\n\n" + print "<ul>" + + def dumpItem(self, grafs): + grafs[0][0] = grafs[0][0].replace(" - ", "", 1).lstrip() + sys.stdout.write(" <li>") + if len(grafs) > 1: + for par in grafs: + self.htmlPar(par) + else: + self.htmlText(grafs[0]) + print + +op = optparse.OptionParser(usage="usage: %prog [options] [filename]") +op.add_option('-W', '--no-wrap', action='store_false', + dest='wrapText', default=True, + help='Do not re-wrap paragraphs') +op.add_option('-S', '--no-sort', action='store_false', + dest='sort', default=True, + help='Do not sort or collate sections') +op.add_option('-o', '--output', dest='output', + default='-', metavar='FILE', help="write output to FILE") +op.add_option('-H', '--html', action='store_true', + dest='html', default=False, + help="generate an HTML fragment") +op.add_option('-1', '--first', action='store_true', + dest='firstOnly', default=False, + help="write only the first section") +op.add_option('-b', '--blog-header', action='store_true', + dest='blogOrder', default=False, + help="Write the header in blog order") +op.add_option('-B', '--blog', action='store_true', + dest='blogFormat', default=False, + help="Set all other options as appropriate for a blog post") +op.add_option('--inplace', action='store_true', + dest='inplace', default=False, + help="Alter the ChangeLog in place") +op.add_option('--drupal-break', action='store_true', + dest='drupalBreak', default=False, + help='Insert a drupal-friendly <!--break--> as needed') + +options,args = op.parse_args() + +if options.blogFormat: + 
options.blogOrder = True + options.html = True + options.sort = False + options.wrapText = False + options.firstOnly = True + options.drupalBreak = True + +if len(args) > 1: + op.error("Too many arguments") +elif len(args) == 0: fname = 'ChangeLog' else: - fname = sys.argv[1] + fname = args[0] -fname_new = fname+".new" +if options.inplace: + assert options.output == '-' + options.output = fname -sys.stdin = open(fname, 'r') +if fname != '-': + sys.stdin = open(fname, 'r') nextline = None +if options.html: + ChangeLogClass = HTMLChangeLog +else: + ChangeLogClass = ChangeLog + +CL = ChangeLogClass(wrapText=options.wrapText, + blogOrder=options.blogOrder, + drupalBreak=options.drupalBreak) +parser = head_parser + for line in sys.stdin: line = line.rstrip() tp = parser(line) @@ -295,14 +534,26 @@ for line in sys.stdin: CL.lint() -sys.stdout = open(fname_new, 'w') +if options.output != '-': + fname_new = options.output+".new" + fname_out = options.output + sys.stdout = open(fname_new, 'w') +else: + fname_new = fname_out = None + +if options.sort: + CL.collateAndSortSections() CL.dump() +if options.firstOnly: + sys.exit(0) + if nextline is not None: print nextline for line in sys.stdin: sys.stdout.write(line) -os.rename(fname_new, fname) +if fname_new is not None: + os.rename(fname_new, fname_out) diff --git a/scripts/maint/generate_callgraph.sh b/scripts/maint/generate_callgraph.sh new file mode 100755 index 0000000000..c6b33c0aea --- /dev/null +++ b/scripts/maint/generate_callgraph.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +C_FILES=`echo src/common/*.c src/or/*.c src/tools/*.c` +CFLAGS="-Isrc/ext/trunnel -Isrc/trunnel -I. 
-Isrc/ext -Isrc/common -DLOCALSTATEDIR=\"\" -DSHARE_DATADIR=\"\" -Dinline=" + +mkdir -p callgraph/src/common +mkdir -p callgraph/src/or +mkdir -p callgraph/src/tools + +for fn in $C_FILES; do + echo $fn + clang $CFLAGS -S -emit-llvm -fno-inline -o - $fn | \ + opt -analyze -print-callgraph >/dev/null 2> "callgraph/${fn}allgraph" +done diff --git a/scripts/maint/lintChanges.py b/scripts/maint/lintChanges.py new file mode 100755 index 0000000000..48edd06fde --- /dev/null +++ b/scripts/maint/lintChanges.py @@ -0,0 +1,82 @@ +#!/usr/bin/python + +from __future__ import print_function +from __future__ import with_statement +import sys +import re +import os + + +KNOWN_GROUPS=set([ + "Minor bugfix", + "Minor bugfixes", + "Major bugfix", + "Major bugfixes", + "Minor feature", + "Minor features", + "Major feature", + "Major features", + "New system requirements", + "Testing", + "Documentation", + "Code simplification and refactoring", + "Removed features"]) + +def lintfile(fname): + have_warned = [] + + def warn(s): + if not have_warned: + have_warned.append(1) + print("{}:".format(fname)) + print("\t{}".format(s)) + + m = re.search(r'(\d{3,})', os.path.basename(fname)) + if m: + bugnum = m.group(1) + else: + bugnum = None + + with open(fname) as f: + contents = f.read() + + if bugnum and bugnum not in contents: + warn("bug number {} does not appear".format(bugnum)) + + lines = contents.split("\n") + isBug = ("bug" in lines[0] or "fix" in lines[0]) + + m = re.match(r'^[ ]{2}o ([^\(:]*)([^:]*):', contents) + if not m: + warn("header not in format expected") + elif m.group(1).strip() not in KNOWN_GROUPS: + warn("Weird header: %r"%m.group(1)) + elif ( ("bugfix" in m.group(1) or "feature" in m.group(1)) and + ("Removed" not in m.group(1)) and + '(' not in m.group(2)): + warn("Missing subcategory on %s"%m.group(1)) + + + contents = " ".join(contents.split()) + + if re.search(r'\#\d{2,}', contents): + warn("don't use a # before ticket numbers") + + if isBug and not 
re.search(r'(\d+)', contents): + warn("bugfix does not mention a number") + elif isBug and not re.search(r'Fixes ([a-z ]*)bug (\d+)', contents): + warn("bugfix does not say 'Fixes bug XXX'") + + if re.search(r'[bB]ug (\d+)', contents): + if not re.search(r'[Bb]ugfix on ', contents): + warn("bugfix does not say 'bugfix on X.Y.Z'") + elif not re.search('[fF]ixes ([a-z ]*)bug (\d+); bugfix on ', + contents): + warn("bugfix incant is not semicoloned") + + +if __name__ == '__main__': + for fname in sys.argv[1:]: + if fname.endswith("~"): + continue + lintfile(fname) diff --git a/scripts/maint/locatemissingdoxygen.py b/scripts/maint/locatemissingdoxygen.py new file mode 100755 index 0000000000..797bf8176f --- /dev/null +++ b/scripts/maint/locatemissingdoxygen.py @@ -0,0 +1,74 @@ +#!/usr/bin/python + +""" + This script parses the stderr output of doxygen and looks for undocumented + stuff. By default, it just counts the undocumented things per file. But with + the -A option, it rewrites the files to stick in /*DOCDOC*/ comments + to highlight the undocumented stuff. 
+""" + +import os +import re +import shutil +import sys + +warning_pattern = re.compile(r'^([^:]+):(\d+): warning: (.*) is not documented') + +def readDoxygenOutput(f): + " yields (cfilename, lineno, thingname) " + for line in f: + m = warning_pattern.match(line) + if m: + yield m.groups() + +warnings = {} + +def buildWarnings(): + for fn, lineno, what in list(readDoxygenOutput(sys.stdin)): + warnings.setdefault(fn, []).append( (int(lineno), what) ) + +def count(fn): + if os.path.abspath(fn) not in warnings: + print "0\t%s"%fn + else: + n = len(warnings[os.path.abspath(fn)]) + print "%d\t%s"%(n,fn) + +def getIndentation(line): + s = line.lstrip() + return line[:len(line)-len(s)] + +def annotate(filename): + if os.path.abspath(filename) not in warnings: + return + with open(filename) as f: + lines = f.readlines() + w = warnings[os.path.abspath(filename)][:] + w.sort() + w.reverse() + + for lineno, what in w: + lineno -= 1 # list is 0-indexed. + if 'DOCDOC' in lines[lineno]: + continue + ind = getIndentation(lines[lineno]) + lines.insert(lineno, "%s/* DOCDOC %s */\n"%(ind,what)) + + shutil.copy(filename, filename+".orig") + with open(filename, 'w') as f: + for l in lines: + f.write(l) + + +if __name__ == '__main__': + if len(sys.argv) == 1: + print "Usage: locatemissingdoxygen.py [-A] filename... <doxygen_log" + sys.exit(1) + buildWarnings() + if sys.argv[1] == '-A': + del sys.argv[1] + func = annotate + else: + func = count + for fname in sys.argv[1:]: + func(fname) diff --git a/scripts/maint/redox.py b/scripts/maint/redox.py index fa816a7267..5933d49773 100755 --- a/scripts/maint/redox.py +++ b/scripts/maint/redox.py @@ -1,6 +1,6 @@ #!/usr/bin/python # -# Copyright (c) 2008-2013, The Tor Project, Inc. +# Copyright (c) 2008-2015, The Tor Project, Inc. # See LICENSE for licensing information. # # Hi! 
diff --git a/scripts/maint/sortChanges.py b/scripts/maint/sortChanges.py index 726a723f93..d6ec0e269d 100755 --- a/scripts/maint/sortChanges.py +++ b/scripts/maint/sortChanges.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# Copyright (c) 2014, The Tor Project, Inc. +# Copyright (c) 2014-2015, The Tor Project, Inc. # See LICENSE for licensing information """This script sorts a bunch of changes files listed on its command line into roughly the order in which they should appear in the changelog. - - TODO: collation support. """ import re @@ -18,10 +16,36 @@ def fetch(fn): s = "%s\n" % s.rstrip() return s -def score(s): - m = re.match(r'^ +o (.*)', s) +CSR='Code simplification and refactoring' + +REPLACEMENTS = { + # plurals + 'Minor bugfix' : 'Minor bugfixes', + 'Major bugfix' : 'Major bugfixes', + 'Minor feature' : 'Minor features', + 'Major feature' : 'Major features', + 'Removed feature' : 'Removed features', + 'Code simplification and refactorings' : CSR, + 'Code simplifications and refactoring' : CSR, + 'Code simplifications and refactorings' : CSR, + + # wrong words + 'Minor fix' : 'Minor bugfixes', + 'Major fix' : 'Major bugfixes', + 'Minor fixes' : 'Minor bugfixes', + 'Major fixes' : 'Major bugfixes', + 'Minor enhancement' : 'Minor features', + 'Minor enhancements' : 'Minor features', + 'Major enhancement' : 'Major features', + 'Major enhancements' : 'Major features', +} + +def score(s,fname=None): + m = re.match(r'^ +o ([^\n]*)\n(.*)', s, re.M|re.S) if not m: - print >>sys.stderr, "Can't score %r"%s + print >>sys.stderr, "Can't score %r from %s"%(s,fname) + heading = m.group(1) + heading = REPLACEMENTS.get(heading, heading) lw = m.group(1).lower() if lw.startswith("major feature"): score = 0 @@ -38,12 +62,47 @@ def score(s): else: score = 100 - return (score, lw, s) + return (score, lw, heading, m.group(2)) + +def splitChanges(s): + this_entry = [] + for line in s.split("\n"): + if line.strip() == "": + continue + if re.match(r" +o ", line): + if len(this_entry) > 
2: + yield "".join(this_entry) + curHeader = line + this_entry = [ curHeader, "\n" ] + continue + elif re.match(r" +- ", line): + if len(this_entry) > 2: + yield "".join(this_entry) + this_entry = [ curHeader, "\n" ] + this_entry.append(line) + this_entry.append("\n") -changes = [ score(fetch(fn)) for fn in sys.argv[1:] if not fn.endswith('~') ] + if len(this_entry) > 2: + yield "".join(this_entry) + + +changes = [] + +for fn in sys.argv[1:]: + if fn.endswith('~'): + continue + for change in splitChanges(fetch(fn)): + changes.append(score(change,fn)) changes.sort() -for _, _, s in changes: - print s +last_lw = "this is not a header" +for _, lw, header, rest in changes: + if lw == last_lw: + print rest, + else: + print + print " o",header + print rest, + last_lw = lw diff --git a/scripts/maint/updateCopyright.pl b/scripts/maint/updateCopyright.pl new file mode 100755 index 0000000000..8bd6a18210 --- /dev/null +++ b/scripts/maint/updateCopyright.pl @@ -0,0 +1,7 @@ +#!/usr/bin/perl -i -w -p + +$NEWYEAR=2016; + +s/Copyright(.*) (201[^6]), The Tor Project/Copyright$1 $2-${NEWYEAR}, The Tor Project/; + +s/Copyright(.*)-(20..), The Tor Project/Copyright$1-${NEWYEAR}, The Tor Project/; diff --git a/scripts/maint/updateFallbackDirs.py b/scripts/maint/updateFallbackDirs.py new file mode 100755 index 0000000000..110ecda64c --- /dev/null +++ b/scripts/maint/updateFallbackDirs.py @@ -0,0 +1,1999 @@ +#!/usr/bin/python + +# Usage: scripts/maint/updateFallbackDirs.py > src/or/fallback_dirs.inc +# +# This script should be run from a stable, reliable network connection, +# with no other network activity (and not over tor). +# If this is not possible, please disable: +# PERFORM_IPV4_DIRPORT_CHECKS and PERFORM_IPV6_DIRPORT_CHECKS +# +# Needs dateutil (and potentially other python packages) +# Needs stem available in your PYTHONPATH, or just ln -s ../stem/stem . 
+# Optionally uses ipaddress (python 3 builtin) or py2-ipaddress (package) +# for netblock analysis, in PYTHONPATH, or just +# ln -s ../py2-ipaddress-3.4.1/ipaddress.py . +# +# Then read the logs to make sure the fallbacks aren't dominated by a single +# netblock or port + +# Script by weasel, April 2015 +# Portions by gsathya & karsten, 2013 +# https://trac.torproject.org/projects/tor/attachment/ticket/8374/dir_list.2.py +# Modifications by teor, 2015 + +import StringIO +import string +import re +import datetime +import gzip +import os.path +import json +import math +import sys +import urllib +import urllib2 +import hashlib +import dateutil.parser +# bson_lazy provides bson +#from bson import json_util +import copy + +from stem.descriptor.remote import DescriptorDownloader + +import logging +# INFO tells you why each relay was included or excluded +# WARN tells you about potential misconfigurations and relay detail changes +logging.basicConfig(level=logging.WARNING) +logging.root.name = '' +# INFO tells you about each consensus download attempt +logging.getLogger('stem').setLevel(logging.WARNING) + +HAVE_IPADDRESS = False +try: + # python 3 builtin, or install package py2-ipaddress + # there are several ipaddress implementations for python 2 + # with slightly different semantics with str typed text + # fortunately, all our IP addresses are in unicode + import ipaddress + HAVE_IPADDRESS = True +except ImportError: + # if this happens, we avoid doing netblock analysis + logging.warning('Unable to import ipaddress, please install py2-ipaddress.' + + ' A fallback list will be created, but optional netblock' + + ' analysis will not be performed.') + +## Top-Level Configuration + +# Output all candidate fallbacks, or only output selected fallbacks? +OUTPUT_CANDIDATES = False + +# Perform DirPort checks over IPv4? 
+# Change this to False if IPv4 doesn't work for you, or if you don't want to +# download a consensus for each fallback +# Don't check ~1000 candidates when OUTPUT_CANDIDATES is True +PERFORM_IPV4_DIRPORT_CHECKS = False if OUTPUT_CANDIDATES else True + +# Perform DirPort checks over IPv6? +# If you know IPv6 works for you, set this to True +# This will exclude IPv6 relays without an IPv6 DirPort configured +# So it's best left at False until #18394 is implemented +# Don't check ~1000 candidates when OUTPUT_CANDIDATES is True +PERFORM_IPV6_DIRPORT_CHECKS = False if OUTPUT_CANDIDATES else False + +# Output fallback name, flags, and ContactInfo in a C comment? +OUTPUT_COMMENTS = True if OUTPUT_CANDIDATES else False + +# Output matching ContactInfo in fallbacks list or the blacklist? +# Useful if you're trying to contact operators +CONTACT_COUNT = True if OUTPUT_CANDIDATES else False +CONTACT_BLACKLIST_COUNT = True if OUTPUT_CANDIDATES else False + +## OnionOO Settings + +ONIONOO = 'https://onionoo.torproject.org/' +#ONIONOO = 'https://onionoo.thecthulhu.com/' + +# Don't bother going out to the Internet, just use the files available locally, +# even if they're very old +LOCAL_FILES_ONLY = False + +## Whitelist / Blacklist Filter Settings + +# The whitelist contains entries that are included if all attributes match +# (IPv4, dirport, orport, id, and optionally IPv6 and IPv6 orport) +# The blacklist contains (partial) entries that are excluded if any +# sufficiently specific group of attributes matches: +# IPv4 & DirPort +# IPv4 & ORPort +# ID +# IPv6 & DirPort +# IPv6 & IPv6 ORPort +# If neither port is included in the blacklist, the entire IP address is +# blacklisted. + +# What happens to entries in neither list? +# When True, they are included, when False, they are excluded +INCLUDE_UNLISTED_ENTRIES = True if OUTPUT_CANDIDATES else False + +# If an entry is in both lists, what happens? 
+# When True, it is excluded, when False, it is included +BLACKLIST_EXCLUDES_WHITELIST_ENTRIES = True + +WHITELIST_FILE_NAME = 'scripts/maint/fallback.whitelist' +BLACKLIST_FILE_NAME = 'scripts/maint/fallback.blacklist' + +# The number of bytes we'll read from a filter file before giving up +MAX_LIST_FILE_SIZE = 1024 * 1024 + +## Eligibility Settings + +# Reduced due to a bug in tor where a relay submits a 0 DirPort when restarted +# This causes OnionOO to (correctly) reset its stability timer +# This issue will be fixed in 0.2.7.7 and 0.2.8.2 +# Until then, the CUTOFFs below ensure a decent level of stability. +ADDRESS_AND_PORT_STABLE_DAYS = 7 +# What time-weighted-fraction of these flags must FallbackDirs +# Equal or Exceed? +CUTOFF_RUNNING = .95 +CUTOFF_V2DIR = .95 +CUTOFF_GUARD = .95 +# What time-weighted-fraction of these flags must FallbackDirs +# Equal or Fall Under? +# .00 means no bad exits +PERMITTED_BADEXIT = .00 + +# older entries' weights are adjusted with ALPHA^(age in days) +AGE_ALPHA = 0.99 + +# this factor is used to scale OnionOO entries to [0,1] +ONIONOO_SCALE_ONE = 999. + +## Fallback Count Limits + +# The target for these parameters is 20% of the guards in the network +# This is around 200 as of October 2015 +_FB_POG = 0.2 +FALLBACK_PROPORTION_OF_GUARDS = None if OUTPUT_CANDIDATES else _FB_POG + +# We want exactly 100 fallbacks for the initial release +# This gives us scope to add extra fallbacks to the list as needed +# Limit the number of fallbacks (eliminating lowest by advertised bandwidth) +MAX_FALLBACK_COUNT = None if OUTPUT_CANDIDATES else 100 +# Emit a C #error if the number of fallbacks is below +MIN_FALLBACK_COUNT = 100 + +## Fallback Bandwidth Requirements + +# Any fallback with the Exit flag has its bandwidth multipled by this fraction +# to make sure we aren't further overloading exits +# (Set to 1.0, because we asked that only lightly loaded exits opt-in, +# and the extra load really isn't that much for large relays.) 
+EXIT_BANDWIDTH_FRACTION = 1.0 + +# If a single fallback's bandwidth is too low, it's pointless adding it +# We expect fallbacks to handle an extra 30 kilobytes per second of traffic +# Make sure they can support a hundred times the expected extra load +# (Use 102.4 to make it come out nicely in MB/s) +# We convert this to a consensus weight before applying the filter, +# because all the bandwidth amounts are specified by the relay +MIN_BANDWIDTH = 102.4 * 30.0 * 1024.0 + +# Clients will time out after 30 seconds trying to download a consensus +# So allow fallback directories half that to deliver a consensus +# The exact download times might change based on the network connection +# running this script, but only by a few seconds +# There is also about a second of python overhead +CONSENSUS_DOWNLOAD_SPEED_MAX = 15.0 +# If the relay fails a consensus check, retry the download +# This avoids delisting a relay due to transient network conditions +CONSENSUS_DOWNLOAD_RETRY = True + +## Fallback Weights for Client Selection + +# All fallback weights are equal, and set to the value below +# Authorities are weighted 1.0 by default +# Clients use these weights to select fallbacks and authorities at random +# If there are 100 fallbacks and 9 authorities: +# - each fallback is chosen with probability 10.0/(10.0*100 + 1.0*9) ~= 0.99% +# - each authority is chosen with probability 1.0/(10.0*100 + 1.0*9) ~= 0.09% +# A client choosing a bootstrap directory server will choose a fallback for +# 10.0/(10.0*100 + 1.0*9) * 100 = 99.1% of attempts, and an authority for +# 1.0/(10.0*100 + 1.0*9) * 9 = 0.9% of attempts. +# (This disregards the bootstrap schedules, where clients start by choosing +# from fallbacks & authoritites, then later choose from only authorities.) 
+FALLBACK_OUTPUT_WEIGHT = 10.0 + +## Parsing Functions + +def parse_ts(t): + return datetime.datetime.strptime(t, "%Y-%m-%d %H:%M:%S") + +def remove_bad_chars(raw_string, bad_char_list): + # Remove each character in the bad_char_list + cleansed_string = raw_string + for c in bad_char_list: + cleansed_string = cleansed_string.replace(c, '') + return cleansed_string + +def cleanse_unprintable(raw_string): + # Remove all unprintable characters + cleansed_string = '' + for c in raw_string: + if c in string.printable: + cleansed_string += c + return cleansed_string + +def cleanse_whitespace(raw_string): + # Replace all whitespace characters with a space + cleansed_string = raw_string + for c in string.whitespace: + cleansed_string = cleansed_string.replace(c, ' ') + return cleansed_string + +def cleanse_c_multiline_comment(raw_string): + cleansed_string = raw_string + # Embedded newlines should be removed by tor/onionoo, but let's be paranoid + cleansed_string = cleanse_whitespace(cleansed_string) + # ContactInfo and Version can be arbitrary binary data + cleansed_string = cleanse_unprintable(cleansed_string) + # Prevent a malicious / unanticipated string from breaking out + # of a C-style multiline comment + # This removes '/*' and '*/' and '//' + bad_char_list = '*/' + # Prevent a malicious string from using C nulls + bad_char_list += '\0' + # Be safer by removing bad characters entirely + cleansed_string = remove_bad_chars(cleansed_string, bad_char_list) + # Some compilers may further process the content of comments + # There isn't much we can do to cover every possible case + # But comment-based directives are typically only advisory + return cleansed_string + +def cleanse_c_string(raw_string): + cleansed_string = raw_string + # Embedded newlines should be removed by tor/onionoo, but let's be paranoid + cleansed_string = cleanse_whitespace(cleansed_string) + # ContactInfo and Version can be arbitrary binary data + cleansed_string = 
cleanse_unprintable(cleansed_string) + # Prevent a malicious address/fingerprint string from breaking out + # of a C-style string + bad_char_list = '"' + # Prevent a malicious string from using escapes + bad_char_list += '\\' + # Prevent a malicious string from using C nulls + bad_char_list += '\0' + # Be safer by removing bad characters entirely + cleansed_string = remove_bad_chars(cleansed_string, bad_char_list) + # Some compilers may further process the content of strings + # There isn't much we can do to cover every possible case + # But this typically only results in changes to the string data + return cleansed_string + +## OnionOO Source Functions + +# a dictionary of source metadata for each onionoo query we've made +fetch_source = {} + +# register source metadata for 'what' +# assumes we only retrieve one document for each 'what' +def register_fetch_source(what, url, relays_published, version): + fetch_source[what] = {} + fetch_source[what]['url'] = url + fetch_source[what]['relays_published'] = relays_published + fetch_source[what]['version'] = version + +# list each registered source's 'what' +def fetch_source_list(): + return sorted(fetch_source.keys()) + +# given 'what', provide a multiline C comment describing the source +def describe_fetch_source(what): + desc = '/*' + desc += '\n' + desc += 'Onionoo Source: ' + desc += cleanse_c_multiline_comment(what) + desc += ' Date: ' + desc += cleanse_c_multiline_comment(fetch_source[what]['relays_published']) + desc += ' Version: ' + desc += cleanse_c_multiline_comment(fetch_source[what]['version']) + desc += '\n' + desc += 'URL: ' + desc += cleanse_c_multiline_comment(fetch_source[what]['url']) + desc += '\n' + desc += '*/' + return desc + +## File Processing Functions + +def write_to_file(str, file_name, max_len): + try: + with open(file_name, 'w') as f: + f.write(str[0:max_len]) + except EnvironmentError, error: + logging.error('Writing file %s failed: %d: %s'% + (file_name, + error.errno, + error.strerror) 
+ ) + +def read_from_file(file_name, max_len): + try: + if os.path.isfile(file_name): + with open(file_name, 'r') as f: + return f.read(max_len) + except EnvironmentError, error: + logging.info('Loading file %s failed: %d: %s'% + (file_name, + error.errno, + error.strerror) + ) + return None + +def load_possibly_compressed_response_json(response): + if response.info().get('Content-Encoding') == 'gzip': + buf = StringIO.StringIO( response.read() ) + f = gzip.GzipFile(fileobj=buf) + return json.load(f) + else: + return json.load(response) + +def load_json_from_file(json_file_name): + # An exception here may be resolved by deleting the .last_modified + # and .json files, and re-running the script + try: + with open(json_file_name, 'r') as f: + return json.load(f) + except EnvironmentError, error: + raise Exception('Reading not-modified json file %s failed: %d: %s'% + (json_file_name, + error.errno, + error.strerror) + ) + +## OnionOO Functions + +def datestr_to_datetime(datestr): + # Parse datetimes like: Fri, 02 Oct 2015 13:34:14 GMT + if datestr is not None: + dt = dateutil.parser.parse(datestr) + else: + # Never modified - use start of epoch + dt = datetime.datetime.utcfromtimestamp(0) + # strip any timezone out (in case they're supported in future) + dt = dt.replace(tzinfo=None) + return dt + +def onionoo_fetch(what, **kwargs): + params = kwargs + params['type'] = 'relay' + #params['limit'] = 10 + params['first_seen_days'] = '%d-'%(ADDRESS_AND_PORT_STABLE_DAYS,) + params['last_seen_days'] = '-7' + params['flag'] = 'V2Dir' + url = ONIONOO + what + '?' 
+ urllib.urlencode(params) + + # Unfortunately, the URL is too long for some OS filenames, + # but we still don't want to get files from different URLs mixed up + base_file_name = what + '-' + hashlib.sha1(url).hexdigest() + + full_url_file_name = base_file_name + '.full_url' + MAX_FULL_URL_LENGTH = 1024 + + last_modified_file_name = base_file_name + '.last_modified' + MAX_LAST_MODIFIED_LENGTH = 64 + + json_file_name = base_file_name + '.json' + + if LOCAL_FILES_ONLY: + # Read from the local file, don't write to anything + response_json = load_json_from_file(json_file_name) + else: + # store the full URL to a file for debugging + # no need to compare as long as you trust SHA-1 + write_to_file(url, full_url_file_name, MAX_FULL_URL_LENGTH) + + request = urllib2.Request(url) + request.add_header('Accept-encoding', 'gzip') + + # load the last modified date from the file, if it exists + last_mod_date = read_from_file(last_modified_file_name, + MAX_LAST_MODIFIED_LENGTH) + if last_mod_date is not None: + request.add_header('If-modified-since', last_mod_date) + + # Parse last modified date + last_mod = datestr_to_datetime(last_mod_date) + + # Not Modified and still recent enough to be useful + # Onionoo / Globe used to use 6 hours, but we can afford a day + required_freshness = datetime.datetime.utcnow() + # strip any timezone out (to match dateutil.parser) + required_freshness = required_freshness.replace(tzinfo=None) + required_freshness -= datetime.timedelta(hours=24) + + # Make the OnionOO request + response_code = 0 + try: + response = urllib2.urlopen(request) + response_code = response.getcode() + except urllib2.HTTPError, error: + response_code = error.code + if response_code == 304: # not modified + pass + else: + raise Exception("Could not get " + url + ": " + + str(error.code) + ": " + error.reason) + + if response_code == 200: # OK + last_mod = datestr_to_datetime(response.info().get('Last-Modified')) + + # Check for freshness + if last_mod < required_freshness: 
+ if last_mod_date is not None: + # This check sometimes fails transiently, retry the script if it does + date_message = "Outdated data: last updated " + last_mod_date + else: + date_message = "No data: never downloaded " + raise Exception(date_message + " from " + url) + + # Process the data + if response_code == 200: # OK + + response_json = load_possibly_compressed_response_json(response) + + with open(json_file_name, 'w') as f: + # use the most compact json representation to save space + json.dump(response_json, f, separators=(',',':')) + + # store the last modified date in its own file + if response.info().get('Last-modified') is not None: + write_to_file(response.info().get('Last-Modified'), + last_modified_file_name, + MAX_LAST_MODIFIED_LENGTH) + + elif response_code == 304: # Not Modified + + response_json = load_json_from_file(json_file_name) + + else: # Unexpected HTTP response code not covered in the HTTPError above + raise Exception("Unexpected HTTP response code to " + url + ": " + + str(response_code)) + + register_fetch_source(what, + url, + response_json['relays_published'], + response_json['version']) + + return response_json + +def fetch(what, **kwargs): + #x = onionoo_fetch(what, **kwargs) + # don't use sort_keys, as the order of or_addresses is significant + #print json.dumps(x, indent=4, separators=(',', ': ')) + #sys.exit(0) + + return onionoo_fetch(what, **kwargs) + +## Fallback Candidate Class + +class Candidate(object): + CUTOFF_ADDRESS_AND_PORT_STABLE = (datetime.datetime.utcnow() + - datetime.timedelta(ADDRESS_AND_PORT_STABLE_DAYS)) + + def __init__(self, details): + for f in ['fingerprint', 'nickname', 'last_changed_address_or_port', + 'consensus_weight', 'or_addresses', 'dir_address']: + if not f in details: raise Exception("Document has no %s field."%(f,)) + + if not 'contact' in details: + details['contact'] = None + if not 'flags' in details or details['flags'] is None: + details['flags'] = [] + if (not 'advertised_bandwidth' in 
details + or details['advertised_bandwidth'] is None): + # relays without advertised bandwdith have it calculated from their + # consensus weight + details['advertised_bandwidth'] = 0 + if (not 'effective_family' in details + or details['effective_family'] is None): + details['effective_family'] = [] + details['last_changed_address_or_port'] = parse_ts( + details['last_changed_address_or_port']) + self._data = details + self._stable_sort_or_addresses() + + self._fpr = self._data['fingerprint'] + self._running = self._guard = self._v2dir = 0. + self._split_dirport() + self._compute_orport() + if self.orport is None: + raise Exception("Failed to get an orport for %s."%(self._fpr,)) + self._compute_ipv6addr() + if not self.has_ipv6(): + logging.debug("Failed to get an ipv6 address for %s."%(self._fpr,)) + + def _stable_sort_or_addresses(self): + # replace self._data['or_addresses'] with a stable ordering, + # sorting the secondary addresses in string order + # leave the received order in self._data['or_addresses_raw'] + self._data['or_addresses_raw'] = self._data['or_addresses'] + or_address_primary = self._data['or_addresses'][:1] + # subsequent entries in the or_addresses array are in an arbitrary order + # so we stabilise the addresses by sorting them in string order + or_addresses_secondaries_stable = sorted(self._data['or_addresses'][1:]) + or_addresses_stable = or_address_primary + or_addresses_secondaries_stable + self._data['or_addresses'] = or_addresses_stable + + def get_fingerprint(self): + return self._fpr + + # is_valid_ipv[46]_address by gsathya, karsten, 2013 + @staticmethod + def is_valid_ipv4_address(address): + if not isinstance(address, (str, unicode)): + return False + + # check if there are four period separated values + if address.count(".") != 3: + return False + + # checks that each value in the octet are decimal values between 0-255 + for entry in address.split("."): + if not entry.isdigit() or int(entry) < 0 or int(entry) > 255: + return 
False + elif entry[0] == "0" and len(entry) > 1: + return False # leading zeros, for instance in "1.2.3.001" + + return True + + @staticmethod + def is_valid_ipv6_address(address): + if not isinstance(address, (str, unicode)): + return False + + # remove brackets + address = address[1:-1] + + # addresses are made up of eight colon separated groups of four hex digits + # with leading zeros being optional + # https://en.wikipedia.org/wiki/IPv6#Address_format + + colon_count = address.count(":") + + if colon_count > 7: + return False # too many groups + elif colon_count != 7 and not "::" in address: + return False # not enough groups and none are collapsed + elif address.count("::") > 1 or ":::" in address: + return False # multiple groupings of zeros can't be collapsed + + found_ipv4_on_previous_entry = False + for entry in address.split(":"): + # If an IPv6 address has an embedded IPv4 address, + # it must be the last entry + if found_ipv4_on_previous_entry: + return False + if not re.match("^[0-9a-fA-f]{0,4}$", entry): + if not Candidate.is_valid_ipv4_address(entry): + return False + else: + found_ipv4_on_previous_entry = True + + return True + + def _split_dirport(self): + # Split the dir_address into dirip and dirport + (self.dirip, _dirport) = self._data['dir_address'].split(':', 2) + self.dirport = int(_dirport) + + def _compute_orport(self): + # Choose the first ORPort that's on the same IPv4 address as the DirPort. + # In rare circumstances, this might not be the primary ORPort address. + # However, _stable_sort_or_addresses() ensures we choose the same one + # every time, even if onionoo changes the order of the secondaries. 
+ self._split_dirport() + self.orport = None + for i in self._data['or_addresses']: + if i != self._data['or_addresses'][0]: + logging.debug('Secondary IPv4 Address Used for %s: %s'%(self._fpr, i)) + (ipaddr, port) = i.rsplit(':', 1) + if (ipaddr == self.dirip) and Candidate.is_valid_ipv4_address(ipaddr): + self.orport = int(port) + return + + def _compute_ipv6addr(self): + # Choose the first IPv6 address that uses the same port as the ORPort + # Or, choose the first IPv6 address in the list + # _stable_sort_or_addresses() ensures we choose the same IPv6 address + # every time, even if onionoo changes the order of the secondaries. + self.ipv6addr = None + self.ipv6orport = None + # Choose the first IPv6 address that uses the same port as the ORPort + for i in self._data['or_addresses']: + (ipaddr, port) = i.rsplit(':', 1) + if (port == self.orport) and Candidate.is_valid_ipv6_address(ipaddr): + self.ipv6addr = ipaddr + self.ipv6orport = int(port) + return + # Choose the first IPv6 address in the list + for i in self._data['or_addresses']: + (ipaddr, port) = i.rsplit(':', 1) + if Candidate.is_valid_ipv6_address(ipaddr): + self.ipv6addr = ipaddr + self.ipv6orport = int(port) + return + + @staticmethod + def _extract_generic_history(history, which='unknown'): + # given a tree like this: + # { + # "1_month": { + # "count": 187, + # "factor": 0.001001001001001001, + # "first": "2015-02-27 06:00:00", + # "interval": 14400, + # "last": "2015-03-30 06:00:00", + # "values": [ + # 999, + # 999 + # ] + # }, + # "1_week": { + # "count": 169, + # "factor": 0.001001001001001001, + # "first": "2015-03-23 07:30:00", + # "interval": 3600, + # "last": "2015-03-30 07:30:00", + # "values": [ ...] + # }, + # "1_year": { + # "count": 177, + # "factor": 0.001001001001001001, + # "first": "2014-04-11 00:00:00", + # "interval": 172800, + # "last": "2015-03-29 00:00:00", + # "values": [ ...] 
+ # }, + # "3_months": { + # "count": 185, + # "factor": 0.001001001001001001, + # "first": "2014-12-28 06:00:00", + # "interval": 43200, + # "last": "2015-03-30 06:00:00", + # "values": [ ...] + # } + # }, + # extract exactly one piece of data per time interval, + # using smaller intervals where available. + # + # returns list of (age, length, value) dictionaries. + + generic_history = [] + + periods = history.keys() + periods.sort(key = lambda x: history[x]['interval']) + now = datetime.datetime.utcnow() + newest = now + for p in periods: + h = history[p] + interval = datetime.timedelta(seconds = h['interval']) + this_ts = parse_ts(h['last']) + + if (len(h['values']) != h['count']): + logging.warning('Inconsistent value count in %s document for %s' + %(p, which)) + for v in reversed(h['values']): + if (this_ts <= newest): + agt1 = now - this_ts + agt2 = interval + agetmp1 = (agt1.microseconds + (agt1.seconds + agt1.days * 24 * 3600) + * 10**6) / 10**6 + agetmp2 = (agt2.microseconds + (agt2.seconds + agt2.days * 24 * 3600) + * 10**6) / 10**6 + generic_history.append( + { 'age': agetmp1, + 'length': agetmp2, + 'value': v + }) + newest = this_ts + this_ts -= interval + + if (this_ts + interval != parse_ts(h['first'])): + logging.warning('Inconsistent time information in %s document for %s' + %(p, which)) + + #print json.dumps(generic_history, sort_keys=True, + # indent=4, separators=(',', ': ')) + return generic_history + + @staticmethod + def _avg_generic_history(generic_history): + a = [] + for i in generic_history: + if i['age'] > (ADDRESS_AND_PORT_STABLE_DAYS * 24 * 3600): + continue + if (i['length'] is not None + and i['age'] is not None + and i['value'] is not None): + w = i['length'] * math.pow(AGE_ALPHA, i['age']/(3600*24)) + a.append( (i['value'] * w, w) ) + + sv = math.fsum(map(lambda x: x[0], a)) + sw = math.fsum(map(lambda x: x[1], a)) + + if sw == 0.0: + svw = 0.0 + else: + svw = sv/sw + return svw + + def _add_generic_history(self, history): + periods 
= r['read_history'].keys() + periods.sort(key = lambda x: r['read_history'][x]['interval'] ) + + print periods + + def add_running_history(self, history): + pass + + def add_uptime(self, uptime): + logging.debug('Adding uptime %s.'%(self._fpr,)) + + # flags we care about: Running, V2Dir, Guard + if not 'flags' in uptime: + logging.debug('No flags in document for %s.'%(self._fpr,)) + return + + for f in ['Running', 'Guard', 'V2Dir']: + if not f in uptime['flags']: + logging.debug('No %s in flags for %s.'%(f, self._fpr,)) + return + + running = self._extract_generic_history(uptime['flags']['Running'], + '%s-Running'%(self._fpr)) + guard = self._extract_generic_history(uptime['flags']['Guard'], + '%s-Guard'%(self._fpr)) + v2dir = self._extract_generic_history(uptime['flags']['V2Dir'], + '%s-V2Dir'%(self._fpr)) + if 'BadExit' in uptime['flags']: + badexit = self._extract_generic_history(uptime['flags']['BadExit'], + '%s-BadExit'%(self._fpr)) + + self._running = self._avg_generic_history(running) / ONIONOO_SCALE_ONE + self._guard = self._avg_generic_history(guard) / ONIONOO_SCALE_ONE + self._v2dir = self._avg_generic_history(v2dir) / ONIONOO_SCALE_ONE + self._badexit = None + if 'BadExit' in uptime['flags']: + self._badexit = self._avg_generic_history(badexit) / ONIONOO_SCALE_ONE + + def is_candidate(self): + must_be_running_now = (PERFORM_IPV4_DIRPORT_CHECKS + or PERFORM_IPV6_DIRPORT_CHECKS) + if (must_be_running_now and not self.is_running()): + logging.info('%s not a candidate: not running now, unable to check ' + + 'DirPort consensus download', self._fpr) + return False + if (self._data['last_changed_address_or_port'] > + self.CUTOFF_ADDRESS_AND_PORT_STABLE): + logging.info('%s not a candidate: changed address/port recently (%s)', + self._fpr, self._data['last_changed_address_or_port']) + return False + if self._running < CUTOFF_RUNNING: + logging.info('%s not a candidate: running avg too low (%lf)', + self._fpr, self._running) + return False + if self._v2dir < 
CUTOFF_V2DIR: + logging.info('%s not a candidate: v2dir avg too low (%lf)', + self._fpr, self._v2dir) + return False + if self._badexit is not None and self._badexit > PERMITTED_BADEXIT: + logging.info('%s not a candidate: badexit avg too high (%lf)', + self._fpr, self._badexit) + return False + # if the relay doesn't report a version, also exclude the relay + if (not self._data.has_key('recommended_version') + or not self._data['recommended_version']): + logging.info('%s not a candidate: version not recommended', self._fpr) + return False + if self._guard < CUTOFF_GUARD: + logging.info('%s not a candidate: guard avg too low (%lf)', + self._fpr, self._guard) + return False + if (not self._data.has_key('consensus_weight') + or self._data['consensus_weight'] < 1): + logging.info('%s not a candidate: consensus weight invalid', self._fpr) + return False + return True + + def is_in_whitelist(self, relaylist): + """ A fallback matches if each key in the whitelist line matches: + ipv4 + dirport + orport + id + ipv6 address and port (if present) + If the fallback has an ipv6 key, the whitelist line must also have + it, and vice versa, otherwise they don't match. 
""" + ipv6 = None + if self.has_ipv6(): + ipv6 = '%s:%d'%(self.ipv6addr, self.ipv6orport) + for entry in relaylist: + if entry['id'] != self._fpr: + # can't log here unless we match an IP and port, because every relay's + # fingerprint is compared to every entry's fingerprint + if entry['ipv4'] == self.dirip and int(entry['orport']) == self.orport: + logging.warning('%s excluded: has OR %s:%d changed fingerprint to ' + + '%s?', entry['id'], self.dirip, self.orport, + self._fpr) + if self.has_ipv6() and entry.has_key('ipv6') and entry['ipv6'] == ipv6: + logging.warning('%s excluded: has OR %s changed fingerprint to ' + + '%s?', entry['id'], ipv6, self._fpr) + continue + if entry['ipv4'] != self.dirip: + logging.warning('%s excluded: has it changed IPv4 from %s to %s?', + self._fpr, entry['ipv4'], self.dirip) + continue + if int(entry['dirport']) != self.dirport: + logging.warning('%s excluded: has it changed DirPort from %s:%d to ' + + '%s:%d?', self._fpr, self.dirip, int(entry['dirport']), + self.dirip, self.dirport) + continue + if int(entry['orport']) != self.orport: + logging.warning('%s excluded: has it changed ORPort from %s:%d to ' + + '%s:%d?', self._fpr, self.dirip, int(entry['orport']), + self.dirip, self.orport) + continue + if entry.has_key('ipv6') and self.has_ipv6(): + # if both entry and fallback have an ipv6 address, compare them + if entry['ipv6'] != ipv6: + logging.warning('%s excluded: has it changed IPv6 ORPort from %s ' + + 'to %s?', self._fpr, entry['ipv6'], ipv6) + continue + # if the fallback has an IPv6 address but the whitelist entry + # doesn't, or vice versa, the whitelist entry doesn't match + elif entry.has_key('ipv6') and not self.has_ipv6(): + logging.warning('%s excluded: has it lost its former IPv6 address %s?', + self._fpr, entry['ipv6']) + continue + elif not entry.has_key('ipv6') and self.has_ipv6(): + logging.warning('%s excluded: has it gained an IPv6 address %s?', + self._fpr, ipv6) + continue + return True + return False + + 
def is_in_blacklist(self, relaylist): + """ A fallback matches a blacklist line if a sufficiently specific group + of attributes matches: + ipv4 & dirport + ipv4 & orport + id + ipv6 & dirport + ipv6 & ipv6 orport + If the fallback and the blacklist line both have an ipv6 key, + their values will be compared, otherwise, they will be ignored. + If there is no dirport and no orport, the entry matches all relays on + that ip. """ + for entry in relaylist: + for key in entry: + value = entry[key] + if key == 'id' and value == self._fpr: + logging.info('%s is in the blacklist: fingerprint matches', + self._fpr) + return True + if key == 'ipv4' and value == self.dirip: + # if the dirport is present, check it too + if entry.has_key('dirport'): + if int(entry['dirport']) == self.dirport: + logging.info('%s is in the blacklist: IPv4 (%s) and ' + + 'DirPort (%d) match', self._fpr, self.dirip, + self.dirport) + return True + # if the orport is present, check it too + elif entry.has_key('orport'): + if int(entry['orport']) == self.orport: + logging.info('%s is in the blacklist: IPv4 (%s) and ' + + 'ORPort (%d) match', self._fpr, self.dirip, + self.orport) + return True + else: + logging.info('%s is in the blacklist: IPv4 (%s) matches, and ' + + 'entry has no DirPort or ORPort', self._fpr, + self.dirip) + return True + ipv6 = None + if self.has_ipv6(): + ipv6 = '%s:%d'%(self.ipv6addr, self.ipv6orport) + if (key == 'ipv6' and self.has_ipv6()): + # if both entry and fallback have an ipv6 address, compare them, + # otherwise, disregard ipv6 addresses + if value == ipv6: + # if the dirport is present, check it too + if entry.has_key('dirport'): + if int(entry['dirport']) == self.dirport: + logging.info('%s is in the blacklist: IPv6 (%s) and ' + + 'DirPort (%d) match', self._fpr, ipv6, + self.dirport) + return True + # we've already checked the ORPort, it's part of entry['ipv6'] + else: + logging.info('%s is in the blacklist: IPv6 (%s) matches, and' + + 'entry has no DirPort', 
self._fpr, ipv6) + return True + elif (key == 'ipv6' or self.has_ipv6()): + # only log if the fingerprint matches but the IPv6 doesn't + if entry.has_key('id') and entry['id'] == self._fpr: + logging.info('%s skipping IPv6 blacklist comparison: relay ' + + 'has%s IPv6%s, but entry has%s IPv6%s', self._fpr, + '' if self.has_ipv6() else ' no', + (' (' + ipv6 + ')') if self.has_ipv6() else '', + '' if key == 'ipv6' else ' no', + (' (' + value + ')') if key == 'ipv6' else '') + logging.warning('Has %s %s IPv6 address %s?', self._fpr, + 'gained an' if self.has_ipv6() else 'lost its former', + ipv6 if self.has_ipv6() else value) + return False + + def cw_to_bw_factor(self): + # any relays with a missing or zero consensus weight are not candidates + # any relays with a missing advertised bandwidth have it set to zero + return self._data['advertised_bandwidth'] / self._data['consensus_weight'] + + # since advertised_bandwidth is reported by the relay, it can be gamed + # to avoid this, use the median consensus weight to bandwidth factor to + # estimate this relay's measured bandwidth, and make that the upper limit + def measured_bandwidth(self, median_cw_to_bw_factor): + cw_to_bw= median_cw_to_bw_factor + # Reduce exit bandwidth to make sure we're not overloading them + if self.is_exit(): + cw_to_bw *= EXIT_BANDWIDTH_FRACTION + measured_bandwidth = self._data['consensus_weight'] * cw_to_bw + if self._data['advertised_bandwidth'] != 0: + # limit advertised bandwidth (if available) to measured bandwidth + return min(measured_bandwidth, self._data['advertised_bandwidth']) + else: + return measured_bandwidth + + def set_measured_bandwidth(self, median_cw_to_bw_factor): + self._data['measured_bandwidth'] = self.measured_bandwidth( + median_cw_to_bw_factor) + + def is_exit(self): + return 'Exit' in self._data['flags'] + + def is_guard(self): + return 'Guard' in self._data['flags'] + + def is_running(self): + return 'Running' in self._data['flags'] + + # does this fallback have 
an IPv6 address and orport? + def has_ipv6(self): + return self.ipv6addr is not None and self.ipv6orport is not None + + # strip leading and trailing brackets from an IPv6 address + # safe to use on non-bracketed IPv6 and on IPv4 addresses + # also convert to unicode, and make None appear as '' + @staticmethod + def strip_ipv6_brackets(ip): + if ip is None: + return unicode('') + if len(ip) < 2: + return unicode(ip) + if ip[0] == '[' and ip[-1] == ']': + return unicode(ip[1:-1]) + return unicode(ip) + + # are ip_a and ip_b in the same netblock? + # mask_bits is the size of the netblock + # takes both IPv4 and IPv6 addresses + # the versions of ip_a and ip_b must be the same + # the mask must be valid for the IP version + @staticmethod + def netblocks_equal(ip_a, ip_b, mask_bits): + if ip_a is None or ip_b is None: + return False + ip_a = Candidate.strip_ipv6_brackets(ip_a) + ip_b = Candidate.strip_ipv6_brackets(ip_b) + a = ipaddress.ip_address(ip_a) + b = ipaddress.ip_address(ip_b) + if a.version != b.version: + raise Exception('Mismatching IP versions in %s and %s'%(ip_a, ip_b)) + if mask_bits > a.max_prefixlen: + logging.error('Bad IP mask %d for %s and %s'%(mask_bits, ip_a, ip_b)) + mask_bits = a.max_prefixlen + if mask_bits < 0: + logging.error('Bad IP mask %d for %s and %s'%(mask_bits, ip_a, ip_b)) + mask_bits = 0 + a_net = ipaddress.ip_network('%s/%d'%(ip_a, mask_bits), strict=False) + return b in a_net + + # is this fallback's IPv4 address (dirip) in the same netblock as other's + # IPv4 address? + # mask_bits is the size of the netblock + def ipv4_netblocks_equal(self, other, mask_bits): + return Candidate.netblocks_equal(self.dirip, other.dirip, mask_bits) + + # is this fallback's IPv6 address (ipv6addr) in the same netblock as + # other's IPv6 address? 
+ # Returns False if either fallback has no IPv6 address + # mask_bits is the size of the netblock + def ipv6_netblocks_equal(self, other, mask_bits): + if not self.has_ipv6() or not other.has_ipv6(): + return False + return Candidate.netblocks_equal(self.ipv6addr, other.ipv6addr, mask_bits) + + # is this fallback's IPv4 DirPort the same as other's IPv4 DirPort? + def dirport_equal(self, other): + return self.dirport == other.dirport + + # is this fallback's IPv4 ORPort the same as other's IPv4 ORPort? + def ipv4_orport_equal(self, other): + return self.orport == other.orport + + # is this fallback's IPv6 ORPort the same as other's IPv6 ORPort? + # Returns False if either fallback has no IPv6 address + def ipv6_orport_equal(self, other): + if not self.has_ipv6() or not other.has_ipv6(): + return False + return self.ipv6orport == other.ipv6orport + + # does this fallback have the same DirPort, IPv4 ORPort, or + # IPv6 ORPort as other? + # Ignores IPv6 ORPort if either fallback has no IPv6 address + def port_equal(self, other): + return (self.dirport_equal(other) or self.ipv4_orport_equal(other) + or self.ipv6_orport_equal(other)) + + # return a list containing IPv4 ORPort, DirPort, and IPv6 ORPort (if present) + def port_list(self): + ports = [self.dirport, self.orport] + if self.has_ipv6() and not self.ipv6orport in ports: + ports.append(self.ipv6orport) + return ports + + # does this fallback share a port with other, regardless of whether the + # port types match? 
+ # For example, if self's IPv4 ORPort is 80 and other's DirPort is 80, + # return True + def port_shared(self, other): + for p in self.port_list(): + if p in other.port_list(): + return True + return False + + # report how long it takes to download a consensus from dirip:dirport + @staticmethod + def fallback_consensus_download_speed(dirip, dirport, nickname, max_time): + download_failed = False + downloader = DescriptorDownloader() + start = datetime.datetime.utcnow() + # some directory mirrors respond to requests in ways that hang python + # sockets, which is why we log this line here + logging.info('Initiating consensus download from %s (%s:%d).', nickname, + dirip, dirport) + # there appears to be about 1 second of overhead when comparing stem's + # internal trace time and the elapsed time calculated here + TIMEOUT_SLOP = 1.0 + try: + downloader.get_consensus(endpoints = [(dirip, dirport)], + timeout = (max_time + TIMEOUT_SLOP), + validate = True, + retries = 0, + fall_back_to_authority = False).run() + except Exception, stem_error: + logging.info('Unable to retrieve a consensus from %s: %s', nickname, + stem_error) + status = 'error: "%s"' % (stem_error) + level = logging.WARNING + download_failed = True + elapsed = (datetime.datetime.utcnow() - start).total_seconds() + if elapsed > max_time: + status = 'too slow' + level = logging.WARNING + download_failed = True + else: + status = 'ok' + level = logging.DEBUG + logging.log(level, 'Consensus download: %0.1fs %s from %s (%s:%d), ' + + 'max download time %0.1fs.', elapsed, status, nickname, + dirip, dirport, max_time) + return download_failed + + # does this fallback download the consensus fast enough? 
+ def check_fallback_download_consensus(self): + # include the relay if we're not doing a check, or we can't check (IPv6) + ipv4_failed = False + ipv6_failed = False + if PERFORM_IPV4_DIRPORT_CHECKS: + ipv4_failed = Candidate.fallback_consensus_download_speed(self.dirip, + self.dirport, + self._data['nickname'], + CONSENSUS_DOWNLOAD_SPEED_MAX) + if self.has_ipv6() and PERFORM_IPV6_DIRPORT_CHECKS: + # Clients assume the IPv6 DirPort is the same as the IPv4 DirPort + ipv6_failed = Candidate.fallback_consensus_download_speed(self.ipv6addr, + self.dirport, + self._data['nickname'], + CONSENSUS_DOWNLOAD_SPEED_MAX) + return ((not ipv4_failed) and (not ipv6_failed)) + + # if this fallback has not passed a download check, try it again, + # and record the result, available in get_fallback_download_consensus + def try_fallback_download_consensus(self): + if not self.get_fallback_download_consensus(): + self._data['download_check'] = self.check_fallback_download_consensus() + + # did this fallback pass the download check? 
+ def get_fallback_download_consensus(self): + # if we're not performing checks, return True + if not PERFORM_IPV4_DIRPORT_CHECKS and not PERFORM_IPV6_DIRPORT_CHECKS: + return True + # if we are performing checks, but haven't done one, return False + if not self._data.has_key('download_check'): + return False + return self._data['download_check'] + + # output an optional header comment and info for this fallback + # try_fallback_download_consensus before calling this + def fallbackdir_line(self, fallbacks, prefilter_fallbacks): + s = '' + if OUTPUT_COMMENTS: + s += self.fallbackdir_comment(fallbacks, prefilter_fallbacks) + # if the download speed is ok, output a C string + # if it's not, but we OUTPUT_COMMENTS, output a commented-out C string + if self.get_fallback_download_consensus() or OUTPUT_COMMENTS: + s += self.fallbackdir_info(self.get_fallback_download_consensus()) + return s + + # output a header comment for this fallback + def fallbackdir_comment(self, fallbacks, prefilter_fallbacks): + # /* + # nickname + # flags + # [contact] + # [identical contact counts] + # */ + # Multiline C comment + s = '/*' + s += '\n' + s += cleanse_c_multiline_comment(self._data['nickname']) + s += '\n' + s += 'Flags: ' + s += cleanse_c_multiline_comment(' '.join(sorted(self._data['flags']))) + s += '\n' + if self._data['contact'] is not None: + s += cleanse_c_multiline_comment(self._data['contact']) + if CONTACT_COUNT or CONTACT_BLACKLIST_COUNT: + fallback_count = len([f for f in fallbacks + if f._data['contact'] == self._data['contact']]) + if fallback_count > 1: + s += '\n' + s += '%d identical contacts listed' % (fallback_count) + if CONTACT_BLACKLIST_COUNT: + prefilter_count = len([f for f in prefilter_fallbacks + if f._data['contact'] == self._data['contact']]) + filter_count = prefilter_count - fallback_count + if filter_count > 0: + if fallback_count > 1: + s += ' ' + else: + s += '\n' + s += '%d blacklisted' % (filter_count) + s += '\n' + s += '*/' + s += '\n' + + # 
output the fallback info C string for this fallback + # this is the text that would go after FallbackDir in a torrc + # if this relay failed the download test and we OUTPUT_COMMENTS, + # comment-out the returned string + def fallbackdir_info(self, dl_speed_ok): + # "address:dirport orport=port id=fingerprint" + # "[ipv6=addr:orport]" + # "weight=FALLBACK_OUTPUT_WEIGHT", + # + # Do we want a C string, or a commented-out string? + c_string = dl_speed_ok + comment_string = not dl_speed_ok and OUTPUT_COMMENTS + # If we don't want either kind of string, bail + if not c_string and not comment_string: + return '' + s = '' + # Comment out the fallback directory entry if it's too slow + # See the debug output for which address and port is failing + if comment_string: + s += '/* Consensus download failed or was too slow:\n' + # Multi-Line C string with trailing comma (part of a string list) + # This makes it easier to diff the file, and remove IPv6 lines using grep + # Integers don't need escaping + s += '"%s orport=%d id=%s"'%( + cleanse_c_string(self._data['dir_address']), + self.orport, + cleanse_c_string(self._fpr)) + s += '\n' + if self.has_ipv6(): + s += '" ipv6=%s:%d"'%(cleanse_c_string(self.ipv6addr), self.ipv6orport) + s += '\n' + s += '" weight=%d",'%(FALLBACK_OUTPUT_WEIGHT) + if comment_string: + s += '\n' + s += '*/' + return s + +## Fallback Candidate List Class + +class CandidateList(dict): + def __init__(self): + pass + + def _add_relay(self, details): + if not 'dir_address' in details: return + c = Candidate(details) + self[ c.get_fingerprint() ] = c + + def _add_uptime(self, uptime): + try: + fpr = uptime['fingerprint'] + except KeyError: + raise Exception("Document has no fingerprint field.") + + try: + c = self[fpr] + except KeyError: + logging.debug('Got unknown relay %s in uptime document.'%(fpr,)) + return + + c.add_uptime(uptime) + + def _add_details(self): + logging.debug('Loading details document.') + d = fetch('details', + 
fields=('fingerprint,nickname,contact,last_changed_address_or_port,' + + 'consensus_weight,advertised_bandwidth,or_addresses,' + + 'dir_address,recommended_version,flags,effective_family')) + logging.debug('Loading details document done.') + + if not 'relays' in d: raise Exception("No relays found in document.") + + for r in d['relays']: self._add_relay(r) + + def _add_uptimes(self): + logging.debug('Loading uptime document.') + d = fetch('uptime') + logging.debug('Loading uptime document done.') + + if not 'relays' in d: raise Exception("No relays found in document.") + for r in d['relays']: self._add_uptime(r) + + def add_relays(self): + self._add_details() + self._add_uptimes() + + def count_guards(self): + guard_count = 0 + for fpr in self.keys(): + if self[fpr].is_guard(): + guard_count += 1 + return guard_count + + # Find fallbacks that fit the uptime, stability, and flags criteria, + # and make an array of them in self.fallbacks + def compute_fallbacks(self): + self.fallbacks = map(lambda x: self[x], + filter(lambda x: self[x].is_candidate(), + self.keys())) + + # sort fallbacks by their consensus weight to advertised bandwidth factor, + # lowest to highest + # used to find the median cw_to_bw_factor() + def sort_fallbacks_by_cw_to_bw_factor(self): + self.fallbacks.sort(key=lambda f: f.cw_to_bw_factor()) + + # sort fallbacks by their measured bandwidth, highest to lowest + # calculate_measured_bandwidth before calling this + # this is useful for reviewing candidates in priority order + def sort_fallbacks_by_measured_bandwidth(self): + self.fallbacks.sort(key=lambda f: f._data['measured_bandwidth'], + reverse=True) + + # sort fallbacks by their fingerprint, lowest to highest + # this is useful for stable diffs of fallback lists + def sort_fallbacks_by_fingerprint(self): + self.fallbacks.sort(key=lambda f: f._fpr) + + @staticmethod + def load_relaylist(file_name): + """ Read each line in the file, and parse it like a FallbackDir line: + an IPv4 address and 
optional port: + <IPv4 address>:<port> + which are parsed into dictionary entries: + ipv4=<IPv4 address> + dirport=<port> + followed by a series of key=value entries: + orport=<port> + id=<fingerprint> + ipv6=<IPv6 address>:<IPv6 orport> + each line's key/value pairs are placed in a dictonary, + (of string -> string key/value pairs), + and these dictionaries are placed in an array. + comments start with # and are ignored """ + relaylist = [] + file_data = read_from_file(file_name, MAX_LIST_FILE_SIZE) + if file_data is None: + return relaylist + for line in file_data.split('\n'): + relay_entry = {} + # ignore comments + line_comment_split = line.split('#') + line = line_comment_split[0] + # cleanup whitespace + line = cleanse_whitespace(line) + line = line.strip() + if len(line) == 0: + continue + for item in line.split(' '): + item = item.strip() + if len(item) == 0: + continue + key_value_split = item.split('=') + kvl = len(key_value_split) + if kvl < 1 or kvl > 2: + print '#error Bad %s item: %s, format is key=value.'%( + file_name, item) + if kvl == 1: + # assume that entries without a key are the ipv4 address, + # perhaps with a dirport + ipv4_maybe_dirport = key_value_split[0] + ipv4_maybe_dirport_split = ipv4_maybe_dirport.split(':') + dirl = len(ipv4_maybe_dirport_split) + if dirl < 1 or dirl > 2: + print '#error Bad %s IPv4 item: %s, format is ipv4:port.'%( + file_name, item) + if dirl >= 1: + relay_entry['ipv4'] = ipv4_maybe_dirport_split[0] + if dirl == 2: + relay_entry['dirport'] = ipv4_maybe_dirport_split[1] + elif kvl == 2: + relay_entry[key_value_split[0]] = key_value_split[1] + relaylist.append(relay_entry) + return relaylist + + # apply the fallback whitelist and blacklist + def apply_filter_lists(self): + excluded_count = 0 + logging.debug('Applying whitelist and blacklist.') + # parse the whitelist and blacklist + whitelist = self.load_relaylist(WHITELIST_FILE_NAME) + blacklist = self.load_relaylist(BLACKLIST_FILE_NAME) + filtered_fallbacks = [] + 
for f in self.fallbacks: + in_whitelist = f.is_in_whitelist(whitelist) + in_blacklist = f.is_in_blacklist(blacklist) + if in_whitelist and in_blacklist: + if BLACKLIST_EXCLUDES_WHITELIST_ENTRIES: + # exclude + excluded_count += 1 + logging.warning('Excluding %s: in both blacklist and whitelist.', + f._fpr) + else: + # include + filtered_fallbacks.append(f) + elif in_whitelist: + # include + filtered_fallbacks.append(f) + elif in_blacklist: + # exclude + excluded_count += 1 + logging.info('Excluding %s: in blacklist.', f._fpr) + else: + if INCLUDE_UNLISTED_ENTRIES: + # include + filtered_fallbacks.append(f) + else: + # exclude + excluded_count += 1 + logging.info('Excluding %s: in neither blacklist nor whitelist.', + f._fpr) + self.fallbacks = filtered_fallbacks + return excluded_count + + @staticmethod + def summarise_filters(initial_count, excluded_count): + return '/* Whitelist & blacklist excluded %d of %d candidates. */'%( + excluded_count, initial_count) + + # calculate each fallback's measured bandwidth based on the median + # consensus weight to advertised bandwdith ratio + def calculate_measured_bandwidth(self): + self.sort_fallbacks_by_cw_to_bw_factor() + median_fallback = self.fallback_median(True) + if median_fallback is not None: + median_cw_to_bw_factor = median_fallback.cw_to_bw_factor() + else: + # this will never be used, because there are no fallbacks + median_cw_to_bw_factor = None + for f in self.fallbacks: + f.set_measured_bandwidth(median_cw_to_bw_factor) + + # remove relays with low measured bandwidth from the fallback list + # calculate_measured_bandwidth for each relay before calling this + def remove_low_bandwidth_relays(self): + if MIN_BANDWIDTH is None: + return + above_min_bw_fallbacks = [] + for f in self.fallbacks: + if f._data['measured_bandwidth'] >= MIN_BANDWIDTH: + above_min_bw_fallbacks.append(f) + else: + # the bandwidth we log here is limited by the relay's consensus weight + # as well as its adverttised bandwidth. 
See set_measured_bandwidth + # for details + logging.info('%s not a candidate: bandwidth %.1fMB/s too low, must ' + + 'be at least %.1fMB/s', f._fpr, + f._data['measured_bandwidth']/(1024.0*1024.0), + MIN_BANDWIDTH/(1024.0*1024.0)) + self.fallbacks = above_min_bw_fallbacks + + # the minimum fallback in the list + # call one of the sort_fallbacks_* functions before calling this + def fallback_min(self): + if len(self.fallbacks) > 0: + return self.fallbacks[-1] + else: + return None + + # the median fallback in the list + # call one of the sort_fallbacks_* functions before calling this + def fallback_median(self, require_advertised_bandwidth): + # use the low-median when there are an evan number of fallbacks, + # for consistency with the bandwidth authorities + if len(self.fallbacks) > 0: + median_position = (len(self.fallbacks) - 1) / 2 + if not require_advertised_bandwidth: + return self.fallbacks[median_position] + # if we need advertised_bandwidth but this relay doesn't have it, + # move to a fallback with greater consensus weight until we find one + while not self.fallbacks[median_position]._data['advertised_bandwidth']: + median_position += 1 + if median_position >= len(self.fallbacks): + return None + return self.fallbacks[median_position] + else: + return None + + # the maximum fallback in the list + # call one of the sort_fallbacks_* functions before calling this + def fallback_max(self): + if len(self.fallbacks) > 0: + return self.fallbacks[0] + else: + return None + + # does exclusion_list contain attribute? 
+ # if so, return False + # if not, return True + # if attribute is None or the empty string, always return True + @staticmethod + def allow(attribute, exclusion_list): + if attribute is None or attribute == '': + return True + elif attribute in exclusion_list: + return False + else: + return True + + # make sure there is only one fallback per IPv4 address, and per IPv6 address + # there is only one IPv4 address on each fallback: the IPv4 DirPort address + # (we choose the IPv4 ORPort which is on the same IPv4 as the DirPort) + # there is at most one IPv6 address on each fallback: the IPv6 ORPort address + # we try to match the IPv4 ORPort, but will use any IPv6 address if needed + # (clients assume the IPv6 DirPort is the same as the IPv4 DirPort, but + # typically only use the IPv6 ORPort) + # if there is no IPv6 address, only the IPv4 address is checked + # return the number of candidates we excluded + def limit_fallbacks_same_ip(self): + ip_limit_fallbacks = [] + ip_list = [] + for f in self.fallbacks: + if (CandidateList.allow(f.dirip, ip_list) + and CandidateList.allow(f.ipv6addr, ip_list)): + ip_limit_fallbacks.append(f) + ip_list.append(f.dirip) + if f.has_ipv6(): + ip_list.append(f.ipv6addr) + elif not CandidateList.allow(f.dirip, ip_list): + logging.info('Eliminated %s: already have fallback on IPv4 %s'%( + f._fpr, f.dirip)) + elif f.has_ipv6() and not CandidateList.allow(f.ipv6addr, ip_list): + logging.info('Eliminated %s: already have fallback on IPv6 %s'%( + f._fpr, f.ipv6addr)) + original_count = len(self.fallbacks) + self.fallbacks = ip_limit_fallbacks + return original_count - len(self.fallbacks) + + # make sure there is only one fallback per ContactInfo + # if there is no ContactInfo, allow the fallback + # this check can be gamed by providing no ContactInfo, or by setting the + # ContactInfo to match another fallback + # However, given the likelihood that relays with the same ContactInfo will + # go down at similar times, its usefulness outweighs 
the risk + def limit_fallbacks_same_contact(self): + contact_limit_fallbacks = [] + contact_list = [] + for f in self.fallbacks: + if CandidateList.allow(f._data['contact'], contact_list): + contact_limit_fallbacks.append(f) + contact_list.append(f._data['contact']) + else: + logging.info(('Eliminated %s: already have fallback on ' + + 'ContactInfo %s')%(f._fpr, f._data['contact'])) + original_count = len(self.fallbacks) + self.fallbacks = contact_limit_fallbacks + return original_count - len(self.fallbacks) + + # make sure there is only one fallback per effective family + # if there is no family, allow the fallback + # this check can't be gamed, because we use effective family, which ensures + # mutual family declarations + # if any indirect families exist, the result depends on the order in which + # fallbacks are sorted in the list + def limit_fallbacks_same_family(self): + family_limit_fallbacks = [] + fingerprint_list = [] + for f in self.fallbacks: + if CandidateList.allow(f._fpr, fingerprint_list): + family_limit_fallbacks.append(f) + fingerprint_list.append(f._fpr) + fingerprint_list.extend(f._data['effective_family']) + else: + # technically, we already have a fallback with this fallback in its + # effective family + logging.info('Eliminated %s: already have fallback in effective ' + + 'family'%(f._fpr)) + original_count = len(self.fallbacks) + self.fallbacks = family_limit_fallbacks + return original_count - len(self.fallbacks) + + # try a download check on each fallback candidate in order + # stop after max_count successful downloads + # but don't remove any candidates from the array + def try_download_consensus_checks(self, max_count): + dl_ok_count = 0 + for f in self.fallbacks: + f.try_fallback_download_consensus() + if f.get_fallback_download_consensus(): + # this fallback downloaded a consensus ok + dl_ok_count += 1 + if dl_ok_count >= max_count: + # we have enough fallbacks + return + + # put max_count successful candidates in the fallbacks array: 
+ # - perform download checks on each fallback candidate + # - retry failed candidates if CONSENSUS_DOWNLOAD_RETRY is set + # - eliminate failed candidates + # - if there are more than max_count candidates, eliminate lowest bandwidth + # - if there are fewer than max_count candidates, leave only successful + # Return the number of fallbacks that failed the consensus check + def perform_download_consensus_checks(self, max_count): + self.sort_fallbacks_by_measured_bandwidth() + self.try_download_consensus_checks(max_count) + if CONSENSUS_DOWNLOAD_RETRY: + # try unsuccessful candidates again + # we could end up with more than max_count successful candidates here + self.try_download_consensus_checks(max_count) + # now we have at least max_count successful candidates, + # or we've tried them all + original_count = len(self.fallbacks) + self.fallbacks = filter(lambda x: x.get_fallback_download_consensus(), + self.fallbacks) + # some of these failed the check, others skipped the check, + # if we already had enough successful downloads + failed_count = original_count - len(self.fallbacks) + self.fallbacks = self.fallbacks[:max_count] + return failed_count + + # return a string that describes a/b as a percentage + @staticmethod + def describe_percentage(a, b): + if b != 0: + return '%d/%d = %.0f%%'%(a, b, (a*100.0)/b) + else: + # technically, 0/0 is undefined, but 0.0% is a sensible result + return '%d/%d = %.0f%%'%(a, b, 0.0) + + # return a dictionary of lists of fallbacks by IPv4 netblock + # the dictionary is keyed by the fingerprint of an arbitrary fallback + # in each netblock + # mask_bits is the size of the netblock + def fallbacks_by_ipv4_netblock(self, mask_bits): + netblocks = {} + for f in self.fallbacks: + found_netblock = False + for b in netblocks.keys(): + # we found an existing netblock containing this fallback + if f.ipv4_netblocks_equal(self[b], mask_bits): + # add it to the list + netblocks[b].append(f) + found_netblock = True + break + # make a new 
netblock based on this fallback's fingerprint + if not found_netblock: + netblocks[f._fpr] = [f] + return netblocks + + # return a dictionary of lists of fallbacks by IPv6 netblock + # where mask_bits is the size of the netblock + def fallbacks_by_ipv6_netblock(self, mask_bits): + netblocks = {} + for f in self.fallbacks: + # skip fallbacks without IPv6 addresses + if not f.has_ipv6(): + continue + found_netblock = False + for b in netblocks.keys(): + # we found an existing netblock containing this fallback + if f.ipv6_netblocks_equal(self[b], mask_bits): + # add it to the list + netblocks[b].append(f) + found_netblock = True + break + # make a new netblock based on this fallback's fingerprint + if not found_netblock: + netblocks[f._fpr] = [f] + return netblocks + + # log a message about the proportion of fallbacks in each IPv4 netblock, + # where mask_bits is the size of the netblock + def describe_fallback_ipv4_netblock_mask(self, mask_bits): + fallback_count = len(self.fallbacks) + shared_netblock_fallback_count = 0 + most_frequent_netblock = None + netblocks = self.fallbacks_by_ipv4_netblock(mask_bits) + for b in netblocks.keys(): + if len(netblocks[b]) > 1: + # how many fallbacks are in a netblock with other fallbacks? + shared_netblock_fallback_count += len(netblocks[b]) + # what's the netblock with the most fallbacks? 
+ if (most_frequent_netblock is None + or len(netblocks[b]) > len(netblocks[most_frequent_netblock])): + most_frequent_netblock = b + logging.debug('Fallback IPv4 addresses in the same /%d:'%(mask_bits)) + for f in netblocks[b]: + logging.debug('%s - %s', f.dirip, f._fpr) + if most_frequent_netblock is not None: + logging.warning('There are %s fallbacks in the IPv4 /%d containing %s'%( + CandidateList.describe_percentage( + len(netblocks[most_frequent_netblock]), + fallback_count), + mask_bits, + self[most_frequent_netblock].dirip)) + if shared_netblock_fallback_count > 0: + logging.warning(('%s of fallbacks are in an IPv4 /%d with other ' + + 'fallbacks')%(CandidateList.describe_percentage( + shared_netblock_fallback_count, + fallback_count), + mask_bits)) + + # log a message about the proportion of fallbacks in each IPv6 netblock, + # where mask_bits is the size of the netblock + def describe_fallback_ipv6_netblock_mask(self, mask_bits): + fallback_count = len(self.fallbacks_with_ipv6()) + shared_netblock_fallback_count = 0 + most_frequent_netblock = None + netblocks = self.fallbacks_by_ipv6_netblock(mask_bits) + for b in netblocks.keys(): + if len(netblocks[b]) > 1: + # how many fallbacks are in a netblock with other fallbacks? + shared_netblock_fallback_count += len(netblocks[b]) + # what's the netblock with the most fallbacks? 
+ if (most_frequent_netblock is None + or len(netblocks[b]) > len(netblocks[most_frequent_netblock])): + most_frequent_netblock = b + logging.debug('Fallback IPv6 addresses in the same /%d:'%(mask_bits)) + for f in netblocks[b]: + logging.debug('%s - %s', f.ipv6addr, f._fpr) + if most_frequent_netblock is not None: + logging.warning('There are %s fallbacks in the IPv6 /%d containing %s'%( + CandidateList.describe_percentage( + len(netblocks[most_frequent_netblock]), + fallback_count), + mask_bits, + self[most_frequent_netblock].ipv6addr)) + if shared_netblock_fallback_count > 0: + logging.warning(('%s of fallbacks are in an IPv6 /%d with other ' + + 'fallbacks')%(CandidateList.describe_percentage( + shared_netblock_fallback_count, + fallback_count), + mask_bits)) + + # log a message about the proportion of fallbacks in each IPv4 /8, /16, + # and /24 + def describe_fallback_ipv4_netblocks(self): + # this doesn't actually tell us anything useful + #self.describe_fallback_ipv4_netblock_mask(8) + self.describe_fallback_ipv4_netblock_mask(16) + self.describe_fallback_ipv4_netblock_mask(24) + + # log a message about the proportion of fallbacks in each IPv6 /12 (RIR), + # /23 (smaller RIR blocks), /32 (LIR), /48 (Customer), and /64 (Host) + # https://www.iana.org/assignments/ipv6-unicast-address-assignments/ + def describe_fallback_ipv6_netblocks(self): + # these don't actually tell us anything useful + #self.describe_fallback_ipv6_netblock_mask(12) + #self.describe_fallback_ipv6_netblock_mask(23) + self.describe_fallback_ipv6_netblock_mask(32) + self.describe_fallback_ipv6_netblock_mask(48) + self.describe_fallback_ipv6_netblock_mask(64) + + # log a message about the proportion of fallbacks in each IPv4 and IPv6 + # netblock + def describe_fallback_netblocks(self): + self.describe_fallback_ipv4_netblocks() + self.describe_fallback_ipv6_netblocks() + + # return a list of fallbacks which are on the IPv4 ORPort port + def fallbacks_on_ipv4_orport(self, port): + return 
filter(lambda x: x.orport == port, self.fallbacks) + + # return a list of fallbacks which are on the IPv6 ORPort port + def fallbacks_on_ipv6_orport(self, port): + return filter(lambda x: x.ipv6orport == port, self.fallbacks_with_ipv6()) + + # return a list of fallbacks which are on the DirPort port + def fallbacks_on_dirport(self, port): + return filter(lambda x: x.dirport == port, self.fallbacks) + + # log a message about the proportion of fallbacks on IPv4 ORPort port + # and return that count + def describe_fallback_ipv4_orport(self, port): + port_count = len(self.fallbacks_on_ipv4_orport(port)) + fallback_count = len(self.fallbacks) + logging.warning('%s of fallbacks are on IPv4 ORPort %d'%( + CandidateList.describe_percentage(port_count, + fallback_count), + port)) + return port_count + + # log a message about the proportion of IPv6 fallbacks on IPv6 ORPort port + # and return that count + def describe_fallback_ipv6_orport(self, port): + port_count = len(self.fallbacks_on_ipv6_orport(port)) + fallback_count = len(self.fallbacks_with_ipv6()) + logging.warning('%s of IPv6 fallbacks are on IPv6 ORPort %d'%( + CandidateList.describe_percentage(port_count, + fallback_count), + port)) + return port_count + + # log a message about the proportion of fallbacks on DirPort port + # and return that count + def describe_fallback_dirport(self, port): + port_count = len(self.fallbacks_on_dirport(port)) + fallback_count = len(self.fallbacks) + logging.warning('%s of fallbacks are on DirPort %d'%( + CandidateList.describe_percentage(port_count, + fallback_count), + port)) + return port_count + + # log a message about the proportion of fallbacks on each dirport, + # each IPv4 orport, and each IPv6 orport + def describe_fallback_ports(self): + fallback_count = len(self.fallbacks) + ipv4_or_count = fallback_count + ipv4_or_count -= self.describe_fallback_ipv4_orport(443) + ipv4_or_count -= self.describe_fallback_ipv4_orport(9001) + logging.warning('%s of fallbacks are on other 
IPv4 ORPorts'%( + CandidateList.describe_percentage(ipv4_or_count, + fallback_count))) + ipv6_fallback_count = len(self.fallbacks_with_ipv6()) + ipv6_or_count = ipv6_fallback_count + ipv6_or_count -= self.describe_fallback_ipv6_orport(443) + ipv6_or_count -= self.describe_fallback_ipv6_orport(9001) + logging.warning('%s of IPv6 fallbacks are on other IPv6 ORPorts'%( + CandidateList.describe_percentage(ipv6_or_count, + ipv6_fallback_count))) + dir_count = fallback_count + dir_count -= self.describe_fallback_dirport(80) + dir_count -= self.describe_fallback_dirport(9030) + logging.warning('%s of fallbacks are on other DirPorts'%( + CandidateList.describe_percentage(dir_count, + fallback_count))) + + # return a list of fallbacks which have the Exit flag + def fallbacks_with_exit(self): + return filter(lambda x: x.is_exit(), self.fallbacks) + + # log a message about the proportion of fallbacks with an Exit flag + def describe_fallback_exit_flag(self): + exit_falback_count = len(self.fallbacks_with_exit()) + fallback_count = len(self.fallbacks) + logging.warning('%s of fallbacks have the Exit flag'%( + CandidateList.describe_percentage(exit_falback_count, + fallback_count))) + + # return a list of fallbacks which have an IPv6 address + def fallbacks_with_ipv6(self): + return filter(lambda x: x.has_ipv6(), self.fallbacks) + + # log a message about the proportion of fallbacks on IPv6 + def describe_fallback_ip_family(self): + ipv6_falback_count = len(self.fallbacks_with_ipv6()) + fallback_count = len(self.fallbacks) + logging.warning('%s of fallbacks are on IPv6'%( + CandidateList.describe_percentage(ipv6_falback_count, + fallback_count))) + + def summarise_fallbacks(self, eligible_count, operator_count, failed_count, + guard_count, target_count): + s = '' + s += '/* To comment-out entries in this file, use C comments, and add *' + s += ' to the start of each line. (stem finds fallback entries using "' + s += ' at the start of a line.) 
*/' + s += '\n' + # Report: + # whether we checked consensus download times + # the number of fallback directories (and limits/exclusions, if relevant) + # min & max fallback bandwidths + # #error if below minimum count + if PERFORM_IPV4_DIRPORT_CHECKS or PERFORM_IPV6_DIRPORT_CHECKS: + s += '/* Checked %s%s%s DirPorts served a consensus within %.1fs. */'%( + 'IPv4' if PERFORM_IPV4_DIRPORT_CHECKS else '', + ' and ' if (PERFORM_IPV4_DIRPORT_CHECKS + and PERFORM_IPV6_DIRPORT_CHECKS) else '', + 'IPv6' if PERFORM_IPV6_DIRPORT_CHECKS else '', + CONSENSUS_DOWNLOAD_SPEED_MAX) + else: + s += '/* Did not check IPv4 or IPv6 DirPort consensus downloads. */' + s += '\n' + # Multiline C comment with #error if things go bad + s += '/*' + s += '\n' + # Integers don't need escaping in C comments + fallback_count = len(self.fallbacks) + if FALLBACK_PROPORTION_OF_GUARDS is None: + fallback_proportion = '' + else: + fallback_proportion = ', Target %d (%d * %.2f)'%(target_count, + guard_count, + FALLBACK_PROPORTION_OF_GUARDS) + s += 'Final Count: %d (Eligible %d%s'%(fallback_count, eligible_count, + fallback_proportion) + if MAX_FALLBACK_COUNT is not None: + s += ', Max %d'%(MAX_FALLBACK_COUNT) + s += ')\n' + if eligible_count != fallback_count: + removed_count = eligible_count - fallback_count + excess_to_target_or_max = (eligible_count - operator_count - failed_count + - fallback_count) + # some 'Failed' failed the check, others 'Skipped' the check, + # if we already had enough successful downloads + s += ('Excluded: %d (Same Operator %d, Failed/Skipped Download %d, ' + + 'Excess %d)')%(removed_count, operator_count, failed_count, + excess_to_target_or_max) + s += '\n' + min_fb = self.fallback_min() + min_bw = min_fb._data['measured_bandwidth'] + max_fb = self.fallback_max() + max_bw = max_fb._data['measured_bandwidth'] + s += 'Bandwidth Range: %.1f - %.1f MB/s'%(min_bw/(1024.0*1024.0), + max_bw/(1024.0*1024.0)) + s += '\n' + s += '*/' + if fallback_count < MIN_FALLBACK_COUNT: + # We 
must have a minimum number of fallbacks so they are always + # reachable, and are in diverse locations + s += '\n' + s += '#error Fallback Count %d is too low. '%(fallback_count) + s += 'Must be at least %d for diversity. '%(MIN_FALLBACK_COUNT) + s += 'Try adding entries to the whitelist, ' + s += 'or setting INCLUDE_UNLISTED_ENTRIES = True.' + return s + +## Main Function + +def list_fallbacks(): + """ Fetches required onionoo documents and evaluates the + fallback directory criteria for each of the relays """ + + logging.warning('Downloading and parsing Onionoo data. ' + + 'This may take some time.') + # find relays that could be fallbacks + candidates = CandidateList() + candidates.add_relays() + + # work out how many fallbacks we want + guard_count = candidates.count_guards() + if FALLBACK_PROPORTION_OF_GUARDS is None: + target_count = guard_count + else: + target_count = int(guard_count * FALLBACK_PROPORTION_OF_GUARDS) + # the maximum number of fallbacks is the least of: + # - the target fallback count (FALLBACK_PROPORTION_OF_GUARDS * guard count) + # - the maximum fallback count (MAX_FALLBACK_COUNT) + if MAX_FALLBACK_COUNT is None: + max_count = target_count + else: + max_count = min(target_count, MAX_FALLBACK_COUNT) + + candidates.compute_fallbacks() + prefilter_fallbacks = copy.copy(candidates.fallbacks) + + # filter with the whitelist and blacklist + # if a relay has changed IPv4 address or ports recently, it will be excluded + # as ineligible before we call apply_filter_lists, and so there will be no + # warning that the details have changed from those in the whitelist. + # instead, there will be an info-level log during the eligibility check. 
+ initial_count = len(candidates.fallbacks) + excluded_count = candidates.apply_filter_lists() + print candidates.summarise_filters(initial_count, excluded_count) + eligible_count = len(candidates.fallbacks) + + # calculate the measured bandwidth of each relay, + # then remove low-bandwidth relays + candidates.calculate_measured_bandwidth() + candidates.remove_low_bandwidth_relays() + + # print the raw fallback list + #for x in candidates.fallbacks: + # print x.fallbackdir_line(True) + # print json.dumps(candidates[x]._data, sort_keys=True, indent=4, + # separators=(',', ': '), default=json_util.default) + + # impose mandatory conditions here, like one per contact, family, IP + # in measured bandwidth order + candidates.sort_fallbacks_by_measured_bandwidth() + operator_count = 0 + # only impose these limits on the final list - operators can nominate + # multiple candidate fallbacks, and then we choose the best set + if not OUTPUT_CANDIDATES: + operator_count += candidates.limit_fallbacks_same_ip() + operator_count += candidates.limit_fallbacks_same_contact() + operator_count += candidates.limit_fallbacks_same_family() + + # check if each candidate can serve a consensus + # there's a small risk we've eliminated relays from the same operator that + # can serve a consensus, in favour of one that can't + # but given it takes up to 15 seconds to check each consensus download, + # the risk is worth it + if PERFORM_IPV4_DIRPORT_CHECKS or PERFORM_IPV6_DIRPORT_CHECKS: + logging.warning('Checking consensus download speeds. 
' + + 'This may take some time.') + failed_count = candidates.perform_download_consensus_checks(max_count) + + # analyse and log interesting diversity metrics + # like netblock, ports, exit, IPv4-only + # (we can't easily analyse AS, and it's hard to accurately analyse country) + candidates.describe_fallback_ip_family() + # if we can't import the ipaddress module, we can't do netblock analysis + if HAVE_IPADDRESS: + candidates.describe_fallback_netblocks() + candidates.describe_fallback_ports() + candidates.describe_fallback_exit_flag() + + # output C comments summarising the fallback selection process + if len(candidates.fallbacks) > 0: + print candidates.summarise_fallbacks(eligible_count, operator_count, + failed_count, guard_count, + target_count) + else: + print '/* No Fallbacks met criteria */' + + # output C comments specifying the OnionOO data used to create the list + for s in fetch_source_list(): + print describe_fetch_source(s) + + # if we're outputting the final fallback list, sort by fingerprint + # this makes diffs much more stable + # otherwise, leave sorted by bandwidth, which allows operators to be + # contacted in priority order + if not OUTPUT_CANDIDATES: + candidates.sort_fallbacks_by_fingerprint() + + for x in candidates.fallbacks: + print x.fallbackdir_line(candidates.fallbacks, prefilter_fallbacks) + +if __name__ == "__main__": + list_fallbacks() diff --git a/scripts/maint/updateVersions.pl b/scripts/maint/updateVersions.pl.in index 15c83b80a7..65c51a1f2d 100755 --- a/scripts/maint/updateVersions.pl +++ b/scripts/maint/updateVersions.pl.in @@ -1,8 +1,8 @@ #!/usr/bin/perl -w -$CONFIGURE_IN = './configure.ac'; -$ORCONFIG_H = './src/win32/orconfig.h'; -$TOR_NSI = './contrib/win32build/tor-mingw.nsi.in'; +$CONFIGURE_IN = '@abs_top_srcdir@/configure.ac'; +$ORCONFIG_H = '@abs_top_srcdir@/src/win32/orconfig.h'; +$TOR_NSI = '@abs_top_srcdir@/contrib/win32build/tor-mingw.nsi.in'; $quiet = 1; diff --git a/scripts/test/cov-diff b/scripts/test/cov-diff 
index 33a54802b6..48dbec9d54 100755 --- a/scripts/test/cov-diff +++ b/scripts/test/cov-diff @@ -9,8 +9,8 @@ DIRB="$2" for A in $DIRA/*; do B=$DIRB/`basename $A` - perl -pe 's/^\s*\d+:/ 1:/; s/^([^:]+:)[\d\s]+:/$1/;' "$A" > "$A.tmp" - perl -pe 's/^\s*\d+:/ 1:/; s/^([^:]+:)[\d\s]+:/$1/;' "$B" > "$B.tmp" + perl -pe 's/^\s*\d+:/ 1:/; s/^([^:]+:)[\d\s]+:/$1/; s/^ *-:(Runs|Programs):.*//;' "$A" > "$A.tmp" + perl -pe 's/^\s*\d+:/ 1:/; s/^([^:]+:)[\d\s]+:/$1/; s/^ *-:(Runs|Programs):.*//;' "$B" > "$B.tmp" diff -u "$A.tmp" "$B.tmp" rm "$A.tmp" "$B.tmp" done diff --git a/scripts/test/cov-display b/scripts/test/cov-display new file mode 100755 index 0000000000..4628cd589b --- /dev/null +++ b/scripts/test/cov-display @@ -0,0 +1,81 @@ +#!/usr/bin/python +import sys, re, os + +none0, some0 = 0,0 +branchTaken0, branchNot0 = 0,0 + +BRANCH = False +FUNC = False + +if sys.argv[1] == '-b': + BRANCH = True + del sys.argv[1] + +if sys.argv[1] == '-f': + FUNC = True + del sys.argv[1] + +def show(name, none, some): + if some+none == 0: + none = 1 + print name, none, some, "%.02f"%(100*(float(some)/(some+none))) + + +file_args = sys.argv[1:] +files = [] +for fn in file_args: + if os.path.isdir(fn): + files.extend(os.path.join(fn, f) for f in os.listdir(fn)) + else: + files.append(fn) + +for fn in files: + none = some = branchTaken = branchNot = 0 + inFunc = "" + for line in open(fn, 'r'): + m = re.match(r'^[^:]*:([^:]*):(.*)', line) + if m: + body = m.group(2).rstrip() + lineno = m.group(1).strip() + else: + body = "" + lineno = "?" 
+ m = re.match(r'^([A-Za-z_][A-Za-z0-9_]*)(?:, *)?\(', body) + if m: + inFunc = "%s:%s %s" %(fn,lineno,m.group(1)) + elif body == "}": + if FUNC and inFunc: + show(inFunc, none, some) + none = some = 0 + inFunc = None + if re.match(r'^ *###', line): + none += 1 + elif re.match(r'^ *\d', line): + some += 1 + else: + m = re.match(r'^branch.*taken (\d+)%', line) + if m: + if int(m.group(1)) == 0: + branchNot += 1 + else: + branchTaken += 1 + + none0 += none + some0 += some + branchTaken0 += branchTaken + branchNot0 += branchNot + if FUNC: + pass + elif BRANCH: + if branchTaken or branchNot: + show(fn, branchNot, branchTaken) + else: + if some or none: + show(fn, none, some) + +if BRANCH: + if branchTaken0 or branchNot0: + show("TOTAL", branchNot0, branchTaken0) +else: + if some0 or none0: + show("TOTAL", none0, some0) diff --git a/scripts/test/scan-build.sh b/scripts/test/scan-build.sh index 623b227fe4..36e69e6d00 100644 --- a/scripts/test/scan-build.sh +++ b/scripts/test/scan-build.sh @@ -3,12 +3,9 @@ # See LICENSE for licensing information # # This script is used for running a bunch of clang scan-build checkers -# on Tor. -# -# It has hardwired paths for Nick's desktop at the moment. +# on Tor. 
CHECKERS="\ - --use-analyzer=/opt/clang-3.4/bin/clang \ -disable-checker deadcode.DeadStores \ -enable-checker alpha.core.CastSize \ -enable-checker alpha.core.CastToStruct \ @@ -22,28 +19,23 @@ CHECKERS="\ -enable-checker alpha.unix.cstring.NotNullTerminated \ -enable-checker alpha.unix.cstring.OutOfBounds \ -enable-checker alpha.core.FixedAddr \ - -enable-checker security.insecureAPI.strcpy + -enable-checker security.insecureAPI.strcpy \ + -enable-checker alpha.unix.PthreadLock \ + -enable-checker alpha.core.PointerArithm \ + -enable-checker alpha.core.TestAfterDivZero \ " -/opt/clang-3.4/bin/scan-build/scan-build \ +scan-build \ $CHECKERS \ - --use-analyzer=/opt/clang-3.4/bin/clang \ ./configure -/opt/clang-3.4/bin/scan-build/scan-build \ +scan-build \ $CHECKERS \ - --use-analyzer=/opt/clang-3.4/bin/clang \ - make -j2 - + make -j2 -k -# Haven't tried this yet. -# -enable-checker alpha.unix.PthreadLock # This one gives a false positive on every strcmp. # -enable-checker alpha.core.PointerSub -# This one hates it when we stick a nonzero const in a pointer. -# -enable-checker alpha.core.FixedAddr - -# This one crashes sometimes for me. -# -enable-checker alpha.deadcode.IdempotentOperations +# Needs work +# alpha.unix.MallocWithAnnotations ?? |