Diffstat (limited to 'scripts')
-rwxr-xr-x  scripts/coccinelle/apply.sh | 9
-rwxr-xr-x  scripts/coccinelle/check_cocci_parse.sh | 98
-rw-r--r--  scripts/coccinelle/ctrl-reply-cleanup.cocci | 43
-rw-r--r--  scripts/coccinelle/ctrl-reply.cocci | 87
-rw-r--r--  scripts/coccinelle/debugmm.cocci | 29
-rw-r--r--  scripts/coccinelle/exceptions.txt | 24
-rwxr-xr-x  scripts/coccinelle/test-operator-cleanup | 13
-rw-r--r--  scripts/coccinelle/tor-coccinelle.h | 63
-rwxr-xr-x  scripts/coccinelle/try_parse.sh | 46
-rwxr-xr-x  scripts/codegen/fuzzing_include_am.py | 12
-rwxr-xr-x  scripts/codegen/gen_server_ciphers.py | 21
-rwxr-xr-x  scripts/codegen/get_mozilla_ciphers.py | 15
-rw-r--r--  scripts/codegen/makedesc.py | 27
-rwxr-xr-x  scripts/codegen/run_trunnel.sh | 4
-rwxr-xr-x  scripts/git/git-install-tools.sh | 189
-rwxr-xr-x  scripts/git/git-list-tor-branches.sh | 153
-rwxr-xr-x  scripts/git/git-merge-forward.sh | 373
-rwxr-xr-x  scripts/git/git-pull-all.sh | 208
-rwxr-xr-x  scripts/git/git-push-all.sh | 309
-rwxr-xr-x  scripts/git/git-setup-dirs.sh | 508
-rwxr-xr-x  scripts/git/post-merge.git-hook | 51
-rwxr-xr-x  scripts/git/pre-commit.git-hook | 99
-rwxr-xr-x  scripts/git/pre-push.git-hook | 129
-rwxr-xr-x  scripts/maint/add_c_file.py | 333
-rwxr-xr-x  scripts/maint/annotate_ifdef_directives | 74
-rwxr-xr-x  scripts/maint/annotate_ifdef_directives.py | 322
-rwxr-xr-x  scripts/maint/checkIncludes.py | 120
-rwxr-xr-x  scripts/maint/checkManpageAlpha.py | 72
-rwxr-xr-x  scripts/maint/checkShellScripts.sh | 64
-rwxr-xr-x  scripts/maint/checkSpace.pl | 125
-rwxr-xr-x  scripts/maint/checkSpaceTest.sh | 36
-rw-r--r--  scripts/maint/checkspace_tests/dubious.c | 83
-rw-r--r--  scripts/maint/checkspace_tests/dubious.h | 4
-rw-r--r--  scripts/maint/checkspace_tests/expected.txt | 30
-rw-r--r--  scripts/maint/checkspace_tests/good_guard.h | 6
-rw-r--r--  scripts/maint/checkspace_tests/same_guard.h | 6
-rw-r--r--  scripts/maint/checkspace_tests/subdir/dubious.c | 1
-rwxr-xr-x  scripts/maint/clang-format.sh | 41
-rwxr-xr-x  scripts/maint/codetool.py | 182
-rw-r--r--  scripts/maint/fallback.whitelist | 997
-rwxr-xr-x  scripts/maint/format_changelog.py | 74
-rwxr-xr-x  scripts/maint/generateFallbackDirLine.py | 38
-rwxr-xr-x  scripts/maint/lintChanges.py | 70
-rwxr-xr-x  scripts/maint/locatemissingdoxygen.py | 13
-rwxr-xr-x  scripts/maint/lookupFallbackDirContact.py | 28
-rw-r--r--  scripts/maint/practracker/README | 21
-rw-r--r--  scripts/maint/practracker/exceptions.txt | 325
-rwxr-xr-x  scripts/maint/practracker/includes.py | 381
-rw-r--r--  scripts/maint/practracker/metrics.py | 62
-rwxr-xr-x  scripts/maint/practracker/practracker.py | 320
-rwxr-xr-x  scripts/maint/practracker/practracker_tests.py | 72
-rw-r--r--  scripts/maint/practracker/problem.py | 266
-rwxr-xr-x  scripts/maint/practracker/test_practracker.sh | 96
-rw-r--r--  scripts/maint/practracker/testdata/.may_include | 4
-rw-r--r--  scripts/maint/practracker/testdata/a.c | 41
-rw-r--r--  scripts/maint/practracker/testdata/b.c | 15
-rw-r--r--  scripts/maint/practracker/testdata/ex.txt | 0
-rw-r--r--  scripts/maint/practracker/testdata/ex0-expected.txt | 13
-rw-r--r--  scripts/maint/practracker/testdata/ex0.txt | 0
-rw-r--r--  scripts/maint/practracker/testdata/ex1-expected.txt | 5
-rw-r--r--  scripts/maint/practracker/testdata/ex1-overbroad-expected.txt | 4
-rw-r--r--  scripts/maint/practracker/testdata/ex1-regen-expected.txt | 46
-rw-r--r--  scripts/maint/practracker/testdata/ex1-regen-overbroad-expected.txt | 45
-rw-r--r--  scripts/maint/practracker/testdata/ex1.txt | 18
-rw-r--r--  scripts/maint/practracker/testdata/header.h | 8
-rw-r--r--  scripts/maint/practracker/testdata/not_c_file | 2
-rw-r--r--  scripts/maint/practracker/util.py | 61
-rwxr-xr-x  scripts/maint/rectify_include_paths.py | 32
-rwxr-xr-x  scripts/maint/redox.py | 30
-rwxr-xr-x  scripts/maint/rename_c_identifier.py | 267
-rwxr-xr-x  scripts/maint/run_calltool.sh | 4
-rwxr-xr-x  scripts/maint/run_check_subsystem_order.sh | 18
-rwxr-xr-x  scripts/maint/sortChanges.py | 17
-rwxr-xr-x  scripts/maint/updateCopyright.pl | 6
-rwxr-xr-x  scripts/maint/updateFallbackDirs.py | 2216
-rwxr-xr-x  scripts/maint/updateRustDependencies.sh | 18
-rwxr-xr-x  scripts/maint/updateVersions.pl.in | 59
-rwxr-xr-x  scripts/maint/update_versions.py | 136
-rw-r--r--  scripts/test/appveyor-irc-notify.py | 4
-rwxr-xr-x  scripts/test/chutney-git-bisect.sh | 14
-rwxr-xr-x  scripts/test/cov-diff | 9
-rwxr-xr-x  scripts/test/cov-test-determinism.sh | 51
-rwxr-xr-x  scripts/test/coverage | 18
-rwxr-xr-x  scripts/test/scan-build.sh | 5
84 files changed, 6268 insertions, 3670 deletions
diff --git a/scripts/coccinelle/apply.sh b/scripts/coccinelle/apply.sh
new file mode 100755
index 0000000000..f531d7fa32
--- /dev/null
+++ b/scripts/coccinelle/apply.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+# apply.sh:
+# run spatch with appropriate includes and builtins for the Tor source code
+
+top="$(dirname "$0")/../.."
+
+spatch -macro_file_builtins "$top"/scripts/coccinelle/tor-coccinelle.h \
+ -I "$top" -I "$top"/src -I "$top"/ext --defined COCCI "$@"
diff --git a/scripts/coccinelle/check_cocci_parse.sh b/scripts/coccinelle/check_cocci_parse.sh
new file mode 100755
index 0000000000..aaa586c093
--- /dev/null
+++ b/scripts/coccinelle/check_cocci_parse.sh
@@ -0,0 +1,98 @@
+#!/bin/sh
+
+# If we have coccinelle installed, run try_parse.sh on every filename passed
+# as an argument. If no filenames are supplied, scan a standard Tor 0.3.5 or
+# later directory layout.
+#
+# Uses the default coccinelle exceptions file, or $TOR_COCCI_EXCEPTIONS_FILE,
+# if it is set.
+#
+# Use TOR_COCCI_EXCEPTIONS_FILE=/dev/null check_cocci_parse.sh to disable
+# the default exception file.
+#
+# If spatch is not installed, remind the user to install it, but exit with
+# a success status.
+
+scripts_cocci="$(dirname "$0")"
+top="$scripts_cocci/../.."
+try_parse="$scripts_cocci/try_parse.sh"
+
+exitcode=0
+
+export TOR_COCCI_EXCEPTIONS_FILE="${TOR_COCCI_EXCEPTIONS_FILE:-$scripts_cocci/exceptions.txt}"
+
+PURPOSE="cocci C parsing"
+
+echo "Checking spatch:"
+
+if ! command -v spatch ; then
+ echo "Install coccinelle's spatch to check $PURPOSE."
+ exit "$exitcode"
+fi
+
+# Returns true if $1 is greater than or equal to $2
+version_ge()
+{
+ if test "$1" = "$2" ; then
+ # return true
+ return 0
+ fi
+ LOWER_VERSION="$(printf '%s\n' "$1" "$2" | $SORT_V | head -n 1)"
+ # implicit return
+ test "$LOWER_VERSION" != "$1"
+}
+
+# 'sort -V' is a GNU extension
+SORT_V="sort -V"
+# Use 'sort -n' if 'sort -V' doesn't work
+if ! version_ge "1" "0" ; then
+ echo "Your 'sort -V' command appears broken. Falling back to 'sort -n'."
+ echo "Some spatch version checks may give the wrong result."
+ SORT_V="sort -n"
+fi
+
+# Print the full spatch version, for diagnostics
+spatch --version
+
+MIN_SPATCH_V="1.0.4"
+# This pattern needs to handle version strings like:
+# spatch version 1.0.0-rc19
+# spatch version 1.0.6 compiled with OCaml version 4.05.0
+SPATCH_V=$(spatch --version | head -1 | \
+ sed 's/spatch version \([0-9][^ ]*\).*/\1/')
+
+if ! version_ge "$SPATCH_V" "$MIN_SPATCH_V" ; then
+ echo "Tor requires coccinelle spatch >= $MIN_SPATCH_V to check $PURPOSE."
+ echo "But you have $SPATCH_V. Please install a newer version."
+ exit "$exitcode"
+fi
+
+if test $# -ge 1 ; then
+ "$try_parse" "$@"
+ exitcode=$?
+else
+ cd "$top" || exit 1
+ # This is the layout in 0.3.5
+ # Keep these lists consistent:
+ # - OWNED_TOR_C_FILES in Makefile.am
+ # - CHECK_FILES in pre-commit.git-hook and pre-push.git-hook
+ # - try_parse in check_cocci_parse.sh
+ "$try_parse" \
+ src/lib/*/*.[ch] \
+ src/core/*/*.[ch] \
+ src/feature/*/*.[ch] \
+ src/app/*/*.[ch] \
+ src/test/*.[ch] \
+ src/test/*/*.[ch] \
+ src/tools/*.[ch]
+ exitcode=$?
+fi
+
+if test "$exitcode" != 0 ; then
+ echo "Please fix these $PURPOSE errors in the above files"
+ echo "Set VERBOSE=1 for more details"
+ echo "Try running test-operator-cleanup or 'make autostyle-operators'"
+ echo "As a last resort, you can modify scripts/coccinelle/exceptions.txt"
+fi
+
+exit "$exitcode"
diff --git a/scripts/coccinelle/ctrl-reply-cleanup.cocci b/scripts/coccinelle/ctrl-reply-cleanup.cocci
new file mode 100644
index 0000000000..f085cd4684
--- /dev/null
+++ b/scripts/coccinelle/ctrl-reply-cleanup.cocci
@@ -0,0 +1,43 @@
+// Script to clean up after ctrl-reply.cocci -- run as a separate step
+// because cleanup_write2 (even when disabled) somehow prevents the
+// match rule in ctrl-reply.cocci from matching.
+
+// If it doesn't have to be a printf, turn it into a write
+
+@ cleanup_write @
+expression E;
+constant code, s;
+@@
+-control_printf_endreply(E, code, s)
++control_write_endreply(E, code, s)
+
+// Use send_control_done() instead of explicitly writing it out
+@ cleanup_send_done @
+type T;
+identifier f != send_control_done;
+expression E;
+@@
+ T f(...) {
+<...
+-control_write_endreply(E, 250, "OK")
++send_control_done(E)
+ ...>
+ }
+
+// Clean up more printfs that could be writes
+//
+// For some reason, including this rule, even disabled, causes the
+// match rule in ctrl-reply.cocci to fail to match some code that has
+// %s in its format strings
+
+@ cleanup_write2 @
+expression E1, E2;
+constant code;
+@@
+(
+-control_printf_endreply(E1, code, "%s", E2)
++control_write_endreply(E1, code, E2)
+|
+-control_printf_midreply(E1, code, "%s", E2)
++control_write_midreply(E1, code, E2)
+)
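
A quick way to preview what the cleanup_send_done rule would target, before
applying it (grep pattern and path are illustrative):

    grep -rn 'control_write_endreply(.*250, "OK")' src/feature/control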
diff --git a/scripts/coccinelle/ctrl-reply.cocci b/scripts/coccinelle/ctrl-reply.cocci
new file mode 100644
index 0000000000..d6e9aeedd7
--- /dev/null
+++ b/scripts/coccinelle/ctrl-reply.cocci
@@ -0,0 +1,87 @@
+// Script to edit control_*.c for refactored control reply output functions
+
+@ initialize:python @
+@@
+import re
+from coccilib.report import *
+
+# reply strings "NNN-foo", "NNN+foo", "NNN foo", etc.
+r = re.compile(r'^"(\d+)([ +-])(.*)\\r\\n"$')
+
+# Generate name of function to call based on which separator character
+# comes between the numeric code and the text
+def idname(sep, base):
+ if sep == '+':
+ return base + "datareply"
+ elif sep == '-':
+ return base + "midreply"
+ else:
+ return base + "endreply"
+
+# Generate the actual replacements used by the rules
+def gen(s, base, p):
+ pos = p[0]
+ print_report(pos, "%s %s" % (base, s))
+ m = r.match(s)
+ if m is None:
+ # String not correct format, so fail match
+ cocci.include_match(False)
+ print_report(pos, "BAD STRING %s" % s)
+ return
+
+ code, sep, s1 = m.groups()
+
+ if r'\r\n' in s1:
+ # Extra CRLF in string, so fail match
+ cocci.include_match(False)
+ print_report(pos, "extra CRLF in string %s" % s)
+ return
+
+ coccinelle.code = code
+ # Need a string that is a single C token, because Coccinelle only allows
+ # "identifiers" to be output from Python scripts?
+ coccinelle.body = '"%s"' % s1
+ coccinelle.id = idname(sep, base)
+ return
+
+@ match @
+identifier f;
+position p;
+expression E;
+constant s;
+@@
+(
+ connection_printf_to_buf@f@p(E, s, ...)
+|
+ connection_write_str_to_buf@f@p(s, E)
+)
+
+@ script:python sc1 @
+s << match.s;
+p << match.p;
+f << match.f;
+id;
+body;
+code;
+@@
+if f == 'connection_printf_to_buf':
+ gen(s, 'control_printf_', p)
+elif f == 'connection_write_str_to_buf':
+ gen(s, 'control_write_', p)
+else:
+ raise(ValueError("%s: %s" % (f, s)))
+
+@ replace @
+constant match.s;
+expression match.E;
+identifier match.f;
+identifier sc1.body, sc1.id, sc1.code;
+@@
+(
+-connection_write_str_to_buf@f(s, E)
++id(E, code, body)
+|
+-connection_printf_to_buf@f(E, s
++id(E, code, body
+ , ...)
+)
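
Since ctrl-reply-cleanup.cocci must run as a separate step after this script,
a plausible sequence is (paths illustrative, assuming in-place edits are
wanted):

    scripts/coccinelle/apply.sh --sp-file scripts/coccinelle/ctrl-reply.cocci \
        --dir src/feature/control --in-place
    scripts/coccinelle/apply.sh --sp-file scripts/coccinelle/ctrl-reply-cleanup.cocci \
        --dir src/feature/control --in-place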
diff --git a/scripts/coccinelle/debugmm.cocci b/scripts/coccinelle/debugmm.cocci
new file mode 100644
index 0000000000..dbd308df33
--- /dev/null
+++ b/scripts/coccinelle/debugmm.cocci
@@ -0,0 +1,29 @@
+// Look for use of expressions with side-effects inside of debug logs.
+//
+// This script detects expressions like ++E, --E, E++, and E-- inside of
+// calls to log_debug().
+//
+// The log_debug() macro exits early if debug logging is not enabled,
+// potentially causing problems if its arguments have side-effects.
+
+@@
+expression E;
+@@
+*log_debug(... , <+... --E ...+>, ... );
+
+
+@@
+expression E;
+@@
+*log_debug(... , <+... ++E ...+>, ... );
+
+@@
+expression E;
+@@
+*log_debug(... , <+... E-- ...+>, ... );
+
+
+@@
+expression E;
+@@
+*log_debug(... , <+... E++ ...+>, ... );
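
The '*' rules above are match-only, so spatch reports the offending lines
rather than producing a patch. A hypothetical report run over the tree:

    scripts/coccinelle/apply.sh --sp-file scripts/coccinelle/debugmm.cocci --dir src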
diff --git a/scripts/coccinelle/exceptions.txt b/scripts/coccinelle/exceptions.txt
new file mode 100644
index 0000000000..473f4b22c5
--- /dev/null
+++ b/scripts/coccinelle/exceptions.txt
@@ -0,0 +1,24 @@
+# A list of exception patterns for check_cocci_parse.sh
+# Passed to 'grep -f'
+src/lib/cc/compat_compiler.h
+src/lib/container/handles.h
+src/lib/container/map.c
+src/lib/container/map.h
+src/lib/container/order.c
+src/lib/crypt_ops/crypto_rand.c
+src/lib/fs/files.h
+src/lib/log/util_bug.c
+src/lib/pubsub/pubsub_macros.h
+src/lib/smartlist_core/smartlist_foreach.h
+src/lib/testsupport/testsupport.h
+src/lib/tls/tortls.h
+src/lib/tls/tortls_openssl.c
+src/lib/tls/x509.h
+src/lib/version/version.c
+src/core/mainloop/connection.c
+src/core/or/reasons.c
+src/feature/dirclient/dirclient.c
+src/feature/nodelist/networkstatus.c
+src/test/test_address.c
+src/test/test_hs_cache.c
+src/test/test_hs_descriptor.c
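
Each line above is a 'grep -f' pattern, so a filename is skipped when any
pattern matches it; for example (illustrative):

    echo src/lib/container/map.c | grep -f scripts/coccinelle/exceptions.txt
    # prints the path, meaning try_parse.sh would skip this file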
diff --git a/scripts/coccinelle/test-operator-cleanup b/scripts/coccinelle/test-operator-cleanup
index e7822542a4..28b4d4f588 100755
--- a/scripts/coccinelle/test-operator-cleanup
+++ b/scripts/coccinelle/test-operator-cleanup
@@ -1,4 +1,17 @@
#!/usr/bin/perl -w -p -i
+#
+# Copyright (c) 2001 Matej Pfajfar.
+# Copyright (c) 2001-2004, Roger Dingledine.
+# Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+# Copyright (c) 2007-2019, The Tor Project, Inc.
+# See LICENSE for licensing information
+
+# This script looks for instances of C comparison operators as macro arguments,
+# and replaces them with our OP_* equivalents.
+#
+# Among the macros that take operators are our tt_int_op() testing macro and
+# the standard timercmp() macro. Coccinelle can't handle their syntax,
+# however, unless we give it their operators as macros too.
next if m#^ */\*# or m#^ *\* #;
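
Because of the -p -i shebang, the script rewrites each named file in place. A
hypothetical invocation on one test file:

    scripts/coccinelle/test-operator-cleanup src/test/test_util.c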
diff --git a/scripts/coccinelle/tor-coccinelle.h b/scripts/coccinelle/tor-coccinelle.h
new file mode 100644
index 0000000000..44d79325eb
--- /dev/null
+++ b/scripts/coccinelle/tor-coccinelle.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2019, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/*
+ * This file looks like a C header, but its purpose is a bit different.
+ *
+ * We never include it from our real C files; we only tell Coccinelle
+ * about it in apply.sh.
+ *
+ * It tells the Coccinelle semantic patching tool how to understand
+ * things that would otherwise not be good C syntax, or which would
+ * otherwise not make sense to it as C. It doesn't need to produce
+ * semantically equivalent C, or even correct C: it only has to produce
+ * syntactically valid C.
+ */
+
+#define MOCK_DECL(a, b, c) a b c
+#define MOCK_IMPL(a, b, c) a b c
+#define CHECK_PRINTF(a, b)
+#define CHECK_SCANF(a, b)
+#define STATIC static
+#define EXTERN(a,b) extern a b;
+
+#define STMT_BEGIN do {
+#define STMT_END } while (0)
+
+#define BUG(x) (x)
+#define IF_BUG_ONCE(x) if (x)
+
+#define ATTR_NORETURN
+#define ATTR_UNUSED
+#define ATTR_CONST
+#define ATTR_MALLOC
+#define ATTR_WUR
+#define DISABLE_GCC_WARNING(x)
+#define ENABLE_GCC_WARNING(x)
+
+#define HANDLE_DECL(a,b,c)
+#define HANDLE_IMPL(a,b,c)
+#define HT_ENTRY(x) void *
+#define HT_HEAD(a,b) struct ht_head
+#define HT_INITIALIZER() { }
+#define X509 struct x509_st
+#define STACK_OF(x) struct foo_stack_t
+#define TOR_TAILQ_HEAD(a,b) struct tailq_head
+#define TOR_TAILQ_ENTRY(a) struct tailq_entry
+#define TOR_SIMPLEQ_HEAD(a,b) struct simpleq_entry
+#define TOR_SIMPLEQ_ENTRY(a) struct simpleq_entry
+#define TOR_LIST_HEAD(a,b) struct list_head
+#define TOR_LIST_ENTRY(a) struct list_entry
+#define TOR_SLIST_HEAD(a,b) struct slist_head
+#define TOR_SLIST_ENTRY(a) struct slist_entry
+
+#define NS_DECL(a, b, c) a b c
+#define NS(a) a
+
+#define CONF_TEST_MEMBERS(a,b,c)
+#define DUMMY_CONF_TEST_MEMBERS
+
+#define EAT_SEMICOLON extern int dummy__;
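
This header is consumed via spatch's --macro-file-builtins option, as
apply.sh and try_parse.sh do. A minimal parse check of one file (path
illustrative):

    spatch --macro-file-builtins scripts/coccinelle/tor-coccinelle.h \
        --defined COCCI --parse-c src/core/or/channel.c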
diff --git a/scripts/coccinelle/try_parse.sh b/scripts/coccinelle/try_parse.sh
new file mode 100755
index 0000000000..a90e51b4aa
--- /dev/null
+++ b/scripts/coccinelle/try_parse.sh
@@ -0,0 +1,46 @@
+#!/bin/sh
+
+# Echo the name of every argument of this script that is not "perfect"
+# according to coccinelle's --parse-c.
+#
+# If $TOR_COCCI_EXCEPTIONS_FILE is non-empty, skip any files that match the
+# patterns in the exception file, according to "grep -f"
+#
+# If VERBOSE is non-empty, log spatch errors and skipped files.
+
+top="$(dirname "$0")/../.."
+
+exitcode=0
+
+for fn in "$@"; do
+
+ if test "${TOR_COCCI_EXCEPTIONS_FILE}" ; then
+ skip_fn=$(echo "$fn" | grep -f "${TOR_COCCI_EXCEPTIONS_FILE}")
+ if test "${skip_fn}" ; then
+ if test "${VERBOSE}" != ""; then
+ echo "Skipping '${skip_fn}'"
+ fi
+ continue
+ fi
+ fi
+
+ if spatch --macro-file-builtins \
+ "$top"/scripts/coccinelle/tor-coccinelle.h \
+ --defined COCCI \
+ --parse-c "$fn" \
+ 2>/dev/null | grep "perfect = 1" > /dev/null; then
+ : # it's perfect
+ else
+ echo "$fn"
+ if test "${VERBOSE}" != ""; then
+ spatch --macro-file-builtins \
+ "$top"/scripts/coccinelle/tor-coccinelle.h \
+ --defined COCCI \
+ --parse-c "$fn"
+ fi
+ exitcode=1
+ fi
+
+done
+
+exit "$exitcode"
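
A hypothetical verbose run over one subtree; failing files are echoed, and
VERBOSE=1 additionally replays the spatch parse output for each:

    VERBOSE=1 scripts/coccinelle/try_parse.sh src/lib/log/*.[ch]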
diff --git a/scripts/codegen/fuzzing_include_am.py b/scripts/codegen/fuzzing_include_am.py
index 3c948d87cf..ae50563074 100755
--- a/scripts/codegen/fuzzing_include_am.py
+++ b/scripts/codegen/fuzzing_include_am.py
@@ -1,4 +1,9 @@
-#!/usr/bin/python
+#!/usr/bin/env python
+
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
FUZZERS = """
consensus
@@ -13,6 +18,7 @@ FUZZERS = """
iptsv2
microdesc
socks
+ strops
vrs
"""
@@ -23,12 +29,12 @@ FUZZING_CPPFLAGS = \
FUZZING_CFLAGS = \
$(AM_CFLAGS) $(TEST_CFLAGS)
FUZZING_LDFLAG = \
- @TOR_LDFLAGS_zlib@ @TOR_LDFLAGS_openssl@ @TOR_LDFLAGS_libevent@
+ @TOR_LDFLAGS_zlib@ $(TOR_LDFLAGS_CRYPTLIB) @TOR_LDFLAGS_libevent@
FUZZING_LIBS = \
$(TOR_INTERNAL_TESTING_LIBS) \
$(rust_ldadd) \
@TOR_ZLIB_LIBS@ @TOR_LIB_MATH@ \
- @TOR_LIBEVENT_LIBS@ @TOR_OPENSSL_LIBS@ \
+ @TOR_LIBEVENT_LIBS@ $(TOR_LIBS_CRYPTLIB) \
@TOR_LIB_WS32@ @TOR_LIB_IPHLPAPI@ @TOR_LIB_GDI@ @TOR_LIB_USERENV@ @CURVE25519_LIBS@ \
@TOR_SYSTEMD_LIBS@ \
@TOR_LZMA_LIBS@ \
diff --git a/scripts/codegen/gen_server_ciphers.py b/scripts/codegen/gen_server_ciphers.py
index 5d326f8b9e..8c88e54a13 100755
--- a/scripts/codegen/gen_server_ciphers.py
+++ b/scripts/codegen/gen_server_ciphers.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
# Copyright 2014-2019, The Tor Project, Inc
# See LICENSE for licensing information
@@ -8,6 +8,11 @@
#
# Run it on all the files in your openssl include directory.
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
import re
import sys
@@ -96,7 +101,7 @@ def parse_cipher(ciph):
fwsec, = m.groups()
return Ciphersuite(ciph, fwsec, "CHACHA20", "256", "POLY1305", "n/a")
- print "/* Couldn't parse %s ! */"%ciph
+ print("/* Couldn't parse %s ! */"%ciph)
return None
@@ -120,12 +125,12 @@ for c in ALL_CIPHERS:
colon = ' ":"'
if c.name in MANDATORY:
- print "%s/* Required */"%indent
- print '%s%s%s'%(indent,c.name,colon)
+ print("%s/* Required */"%indent)
+ print('%s%s%s'%(indent,c.name,colon))
else:
- print "#ifdef %s"%c.name
- print '%s%s%s'%(indent,c.name,colon)
- print "#endif"
+ print("#ifdef %s"%c.name)
+ print('%s%s%s'%(indent,c.name,colon))
+ print("#endif")
-print '%s;'%indent
+print('%s;'%indent)
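
Per the script's own header, it is run on the files in an OpenSSL include
directory; a hypothetical invocation (path illustrative):

    python scripts/codegen/gen_server_ciphers.py /usr/include/openssl/*.h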
diff --git a/scripts/codegen/get_mozilla_ciphers.py b/scripts/codegen/get_mozilla_ciphers.py
index f23f2f1e6f..ff01dd8719 100755
--- a/scripts/codegen/get_mozilla_ciphers.py
+++ b/scripts/codegen/get_mozilla_ciphers.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
# coding=utf-8
# Copyright 2011-2019, The Tor Project, Inc
# original version by Arturo Filastò
@@ -10,12 +10,17 @@
# It takes two arguments: the location of a firefox source directory, and the
# location of an openssl source directory.
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
import os
import re
import sys
if len(sys.argv) != 3:
- print >>sys.stderr, "Syntax: get_mozilla_ciphers.py <firefox-source-dir> <openssl-source-dir>"
+ print("Syntax: get_mozilla_ciphers.py <firefox-source-dir> <openssl-source-dir>", file=sys.stderr)
sys.exit(1)
ff_root = sys.argv[1]
@@ -171,13 +176,13 @@ for fl in oSSLinclude:
fp.close()
# Now generate the output.
-print """\
+print("""\
/* This is an include file used to define the list of ciphers clients should
* advertise. Before including it, you should define the CIPHER and XCIPHER
* macros.
*
* This file was automatically generated by get_mozilla_ciphers.py.
- */"""
+ */""")
# Go in order by the order in CipherPrefs
for firefox_macro in firefox_ciphers:
@@ -210,4 +215,4 @@ for firefox_macro in firefox_ciphers:
#else
XCIPHER(%(hex)s, %(macro)s)
#endif""" % format
- print res
+ print(res)
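
Per its usage message, the script takes a firefox source directory and an
openssl source directory; a hypothetical invocation:

    python scripts/codegen/get_mozilla_ciphers.py ~/src/firefox ~/src/openssl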
diff --git a/scripts/codegen/makedesc.py b/scripts/codegen/makedesc.py
index efca4dda9a..48d1d31a02 100644
--- a/scripts/codegen/makedesc.py
+++ b/scripts/codegen/makedesc.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
# Copyright 2014-2019, The Tor Project, Inc.
# See LICENSE for license information
@@ -9,6 +9,11 @@
# I've used this to make inputs for unit tests. I wouldn't suggest
# using it for anything else.
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
import base64
import binascii
import ctypes
@@ -19,12 +24,16 @@ import os
import re
import struct
import time
-import UserDict
import slow_ed25519
import slownacl_curve25519
import ed25519_exts_ref
+try:
+ xrange # Python 2
+except NameError:
+ xrange = range # Python 3
+
# Pull in the openssl stuff we need.
crypt = ctypes.CDLL(ctypes.util.find_library('crypto'))
@@ -247,8 +256,8 @@ class OnDemandKeys(object):
def signdesc(body, args_out=None):
- rsa, ident_pem, id_digest = make_key()
- _, onion_pem, _ = make_key()
+ rsa, ident_pem, id_digest = make_rsa_key()
+ _, onion_pem, _ = make_rsa_key()
need_ed = '{ED25519-CERT}' in body or '{ED25519-SIGNATURE}' in body
if need_ed:
@@ -298,10 +307,10 @@ def signdesc(body, args_out=None):
return body.rstrip()
def print_c_string(ident, body):
- print "static const char %s[] =" % ident
+ print("static const char %s[] =" % ident)
for line in body.split("\n"):
- print ' "%s\\n"' %(line)
- print " ;"
+ print(' "%s\\n"' %(line))
+ print(" ;")
def emit_ri(name, body):
info = OnDemandKeys()
@@ -315,8 +324,8 @@ def emit_ei(name, body):
body = info.sign_desc(body)
print_c_string("EX_EI_%s"%name.upper(), body)
- print 'const char EX_EI_{NAME}_FP[] = "{d.RSA_FINGERPRINT_NOSPACE}";'.format(
- d=info, NAME=name.upper())
+ print('const char EX_EI_{NAME}_FP[] = "{d.RSA_FINGERPRINT_NOSPACE}";'.format(
+ d=info, NAME=name.upper()))
print_c_string("EX_EI_%s_KEY"%name.upper(), info.RSA_IDENTITY)
def analyze(s):
diff --git a/scripts/codegen/run_trunnel.sh b/scripts/codegen/run_trunnel.sh
index 428804342a..645b3c2158 100755
--- a/scripts/codegen/run_trunnel.sh
+++ b/scripts/codegen/run_trunnel.sh
@@ -9,9 +9,7 @@ OPTIONS="--require-version=1.5.1"
# Get all .trunnel files recursively from that directory so we can support
# multiple sub-directories.
-for file in `find ./src/trunnel/ -name '*.trunnel'`; do
- python -m trunnel ${OPTIONS} $file
-done
+find ./src/trunnel/ -name '*.trunnel' -exec python -m trunnel ${OPTIONS} {} \;
python -m trunnel ${OPTIONS} --write-c-files --target-dir=./src/ext/trunnel/
diff --git a/scripts/git/git-install-tools.sh b/scripts/git/git-install-tools.sh
new file mode 100755
index 0000000000..ef8623a018
--- /dev/null
+++ b/scripts/git/git-install-tools.sh
@@ -0,0 +1,189 @@
+#!/usr/bin/env bash
+
+SCRIPT_NAME=$(basename "$0")
+SCRIPTS_DIR=$(dirname "$0")
+
+TOOL_NAMES=(push-all pull-all merge-forward list-tor-branches)
+
+function usage()
+{
+ echo "$SCRIPT_NAME [-h] [-n] [-v] [-f] <all|hooks|tools|aliases>"
+ echo
+ echo " flags:"
+ echo " -h: show this help text"
+ echo " -n: dry-run"
+ echo " -v: verbose mode"
+ echo " -f: force-install even if \$TOR_DEVTOOLS_DIR looks fishy"
+ echo
+ echo " modes:"
+ echo " hooks: install git hooks in this repository."
+ echo " tools: install scripts in \$TOR_DEVTOOLS_DIR"
+ echo " aliases: set up global git aliases for git tools in \$TOR_DEVTOOLS_DIR"
+ echo " all: all of the above."
+}
+
+INSTALL_HOOKS=0
+INSTALL_TOOLS=0
+INSTALL_ALIASES=0
+
+DRY_RUN=0
+VERBOSE=0
+FORCE=0
+
+while getopts "hnfv" opt; do
+ case "$opt" in
+ h) usage
+ exit 0
+ ;;
+ n) DRY_RUN=1
+ ;;
+ v) VERBOSE=1
+ ;;
+ f) FORCE=1
+ ;;
+ *) echo
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+for item in "${@:$OPTIND}"; do
+ case "$item" in
+ hooks) INSTALL_HOOKS=1
+ ;;
+ tools) INSTALL_TOOLS=1
+ ;;
+ aliases) INSTALL_ALIASES=1
+ ;;
+ all) INSTALL_HOOKS=1
+ INSTALL_TOOLS=1
+ INSTALL_ALIASES=1
+ ;;
+ *) echo "Unrecognized mode '$item'"
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+if [[ $VERBOSE = 1 ]]; then
+ function note()
+ {
+ echo "$@"
+ }
+else
+ function note()
+ {
+ true
+ }
+fi
+
+function fail()
+{
+ echo "$@" 1>&2
+ exit 1
+}
+
+if [[ $INSTALL_HOOKS = 0 && $INSTALL_TOOLS = 0 && $INSTALL_ALIASES = 0 ]]; then
+ echo "Nothing to do. Try $SCRIPT_NAME -h for a list of commands."
+ exit 0
+fi
+
+if [[ $INSTALL_TOOLS = 1 || $INSTALL_ALIASES = 1 ]]; then
+ if [[ -z "$TOR_DEVTOOLS_DIR" ]] ; then
+ fail "\$TOR_DEVTOOLS_DIR was not set."
+ fi
+ note "Checking whether \$TOR_DEVTOOLS_DIR ($TOR_DEVTOOLS_DIR) is a git repo..."
+ GITDIR=$(cd "$TOR_DEVTOOLS_DIR" && git rev-parse --git-dir 2>/dev/null)
+ note "GITDIR is $GITDIR"
+ if [[ -n "$GITDIR" ]] ; then
+ cat <<EOF
+You have asked me to install to \$TOR_DEVTOOLS_DIR ($TOR_DEVTOOLS_DIR).
+That is inside a git repository, so you might not want to install there:
+depending on what you pull or push, you might find yourself giving somebody
+else write access to your scripts. I think you should just use ~/bin or
+something.
+EOF
+
+ echo
+ if [[ "$FORCE" = 1 ]] ; then
+ echo "I will install anyway, since you said '-f'."
+ else
+ echo "I will not install. You can tell me -f if you are really sure."
+ exit 1
+ fi
+ else
+ note "It was not."
+ fi
+fi
+
+if [[ ! -d "$SCRIPTS_DIR" || ! -e "$SCRIPTS_DIR/git-push-all.sh" ]]; then
+ fail "Couldn't find scripts in '$SCRIPTS_DIR'"
+fi
+
+if [[ $DRY_RUN = 1 ]]; then
+ echo "** DRY RUN **"
+ RUN="echo >>"
+else
+ RUN=
+fi
+
+set -e
+
+# ======================================================================
+if [[ $INSTALL_HOOKS = 1 ]]; then
+ HOOKS_DIR=$(git rev-parse --git-path hooks)
+
+ note "Looking for hooks directory"
+
+ if [[ -z "$HOOKS_DIR" || ! -d "$HOOKS_DIR" ]]; then
+ fail "Couldn't find git hooks directory."
+ fi
+
+ note "Found hooks directory in $HOOKS_DIR"
+
+ note "Installing hooks"
+ for fn in "$SCRIPTS_DIR"/*.git-hook; do
+ name=$(basename "$fn")
+ $RUN install --backup "$fn" "${HOOKS_DIR}/${name%.git-hook}"
+ done
+fi
+
+
+# ======================================================================
+if [[ $INSTALL_TOOLS = 1 ]]; then
+ note "Installing tools."
+ note "Looking for \$TOR_DEVTOOLS_DIR ($TOR_DEVTOOLS_DIR)"
+
+ if [[ ! -d "$TOR_DEVTOOLS_DIR" ]]; then
+ note "Creating directory"
+ $RUN mkdir -p "$TOR_DEVTOOLS_DIR"
+ fi
+
+ note "Copying scripts"
+ for tool in "${TOOL_NAMES[@]}"; do
+ $RUN install --backup "${SCRIPTS_DIR}/git-${tool}.sh" "${TOR_DEVTOOLS_DIR}/"
+ done
+fi
+
+# ======================================================================
+if [[ $INSTALL_ALIASES = 1 ]]; then
+ note "Installing aliases."
+ note "Looking for \$TOR_DEVTOOLS_DIR ($TOR_DEVTOOLS_DIR)"
+
+ note "Checking for ${TOR_DEVTOOLS_DIR}/git-push-all.sh"
+ if [[ ! -x "${TOR_DEVTOOLS_DIR}/git-push-all.sh" ]]; then
+ if [[ $DRY_RUN = 0 ]]; then
+ fail "Could not find scripts in \$TOR_DEVTOOLS_DIR"
+ fi
+ fi
+
+ note "Setting aliases"
+ for tool in "${TOOL_NAMES[@]}"; do
+ $RUN git config --global "alias.$tool" \!"${TOR_DEVTOOLS_DIR}/git-${tool}.sh"
+ done
+
+fi
+
+note Done.
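
A hypothetical session, dry-running first via -n as the usage text describes
(the TOR_DEVTOOLS_DIR value is illustrative):

    TOR_DEVTOOLS_DIR=$HOME/bin scripts/git/git-install-tools.sh -n all
    TOR_DEVTOOLS_DIR=$HOME/bin scripts/git/git-install-tools.sh all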
diff --git a/scripts/git/git-list-tor-branches.sh b/scripts/git/git-list-tor-branches.sh
new file mode 100755
index 0000000000..d6b30f064f
--- /dev/null
+++ b/scripts/git/git-list-tor-branches.sh
@@ -0,0 +1,153 @@
+#!/usr/bin/env bash
+
+# Script to be used by other git scripts; it provides a single place
+# that lists our supported branches. To change which branches are
+# supported, look at the end of the file that says 'edit here'.
+
+SCRIPT_NAME=$(basename "$0")
+
+function usage()
+{
+ echo "$SCRIPT_NAME [-h] [-l|-s|-b|-m] [-R]"
+ echo
+ echo " arguments:"
+ echo " -h: show this help text"
+ echo
+ echo " -l: list the active tor branches (default)"
+ echo " -s: list the suffixes to be used with the active tor branches"
+ echo " -b: write bash code setting WORKTREE to an array of ( branch path ) arrays"
+ echo " -m: write bash code setting WORKTREE to an array of"
+ echo " ( branch parent path suffix parent_suffix ) arrays"
+ echo
+ echo " -R: omit release branches."
+}
+
+# list : just a list of branch names.
+# branch_path : For git-setup-dirs.sh and git-pull-all.sh
+# suffix: write a list of suffixes.
+# merge: branch, upstream, path, suffix, upstream suffix.
+mode="list"
+skip_release_branches="no"
+
+while getopts "hblmsR" opt ; do
+ case "$opt" in
+ h) usage
+ exit 0
+ ;;
+ b) mode="branch_path"
+ ;;
+ l) mode="list"
+ ;;
+ s) mode="suffix"
+ ;;
+ m) mode="merge"
+ ;;
+ R) skip_release_branches="yes"
+ ;;
+ *) echo "Unknown option"
+ exit 1
+ ;;
+ esac
+done
+
+all_branch_vars=()
+
+prev_maint_branch=""
+prev_maint_suffix=""
+
+branch() {
+ # The name of the branch. (Supplied by caller) Ex: maint-0.4.3
+ brname="$1"
+
+ # The name of the branch with no dots. Ex: maint-043
+ brname_nodots="${brname//./}"
+ # The name of the branch with no dots, and _ instead of -. Ex: maint_043
+ brname_nodots_uscore="${brname_nodots//-/_}"
+ # Name to use for a variable to represent the branch. Ex: MAINT_043
+ varname="${brname_nodots_uscore^^}"
+
+ is_maint="no"
+
+ # suffix: a suffix to place at the end of branches we generate with respect
+ # to this branch. Ex: _043
+
+ # location: where the branch can be found.
+
+ if [[ "$brname" == "master" ]]; then
+ suffix="_master"
+ location="\$GIT_PATH/\$TOR_MASTER_NAME"
+ elif [[ "$brname" =~ ^maint- ]]; then
+ suffix="_${brname_nodots#maint-}"
+ location="\$GIT_PATH/\$TOR_WKT_NAME/$brname"
+ is_maint="yes"
+ elif [[ "$brname" =~ ^release- ]]; then
+ suffix="_r${brname_nodots#release-}"
+ location="\$GIT_PATH/\$TOR_WKT_NAME/$brname"
+
+ if [[ "$skip_release_branches" = "yes" ]]; then
+ return
+ fi
+ else
+ echo "Unrecognized branch type '${brname}'" >&2
+ exit 1
+ fi
+
+ all_branch_vars+=("$varname")
+
+ # Now emit the per-branch information
+ if [[ "$mode" == "branch_path" ]]; then
+ echo "${varname}=( \"$brname\" \"$location\" )"
+ elif [[ "$mode" == "merge" ]]; then
+ echo "${varname}=( \"$brname\" \"$prev_maint_branch\" \"$location\" \"$suffix\" \"$prev_maint_suffix\" )"
+ elif [[ "$mode" == "list" ]]; then
+ echo "$brname"
+ elif [[ "$mode" == "suffix" ]]; then
+ echo "$suffix"
+ else
+ echo "unknown mode $mode" >&2
+ exit 1
+ fi
+
+ if [[ "$is_maint" == "yes" ]]; then
+ prev_maint_branch="$brname"
+ prev_maint_suffix="$suffix"
+ fi
+}
+
+finish() {
+ if [[ "$mode" == branch_path ]] || [[ "$mode" == merge ]]; then
+ echo "WORKTREE=("
+ for v in "${all_branch_vars[@]}"; do
+ echo " ${v}[@]"
+ done
+ echo ")"
+ elif [[ "$mode" == list ]] || [[ "$mode" == suffix ]]; then
+ # nothing to do
+ :
+ else
+ echo "unknown mode $mode" >&2
+ exit 1
+ fi
+}
+
+# ==============================
+# EDIT HERE
+# ==============================
+# List of all branches. These must be in order, from oldest to newest, with
+# maint before release.
+
+branch maint-0.3.5
+branch release-0.3.5
+
+branch maint-0.4.1
+branch release-0.4.1
+
+branch maint-0.4.2
+branch release-0.4.2
+
+branch maint-0.4.3
+branch release-0.4.3
+
+branch master
+
+finish
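
A sketch of how callers consume this script (mirroring what
git-merge-forward.sh and git-pull-all.sh do below; paths illustrative):

    # Default -l mode: one branch name per line
    scripts/git/git-list-tor-branches.sh -l

    # -m mode emits bash that defines WORKTREE; eval it to import the data
    eval "$(scripts/git/git-list-tor-branches.sh -m -R)"
    echo "${#WORKTREE[@]} branch records defined"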
diff --git a/scripts/git/git-merge-forward.sh b/scripts/git/git-merge-forward.sh
new file mode 100755
index 0000000000..7c72f8478d
--- /dev/null
+++ b/scripts/git/git-merge-forward.sh
@@ -0,0 +1,373 @@
+#!/usr/bin/env bash
+
+SCRIPT_NAME=$(basename "$0")
+
+function usage()
+{
+ echo "$SCRIPT_NAME [-h] [-n] [-t <test-branch-prefix> [-u]]"
+ echo
+ echo " arguments:"
+ echo " -h: show this help text"
+ echo " -n: dry run mode"
+ echo " (default: run commands)"
+ echo " -t: test branch mode: create new branches from the commits checked"
+ echo " out in each maint directory. Call these branches prefix_035,"
+ echo " prefix_040, ... , prefix_master."
+ echo " (default: merge forward maint-*, release-*, and master)"
+ echo " -u: in test branch mode, if a prefix_* branch already exists,"
+ echo " skip creating that branch. Use after a merge error, to"
+ echo " restart the merge forward at the first unmerged branch."
+ echo " (default: if a prefix_* branch already exists, fail and exit)"
+ echo
+ echo " env vars:"
+ echo " required:"
+ echo " TOR_FULL_GIT_PATH: where the git repository directories reside."
+ echo " You must set this env var, we recommend \$HOME/git/"
+ echo " (default: fail if this env var is not set;"
+ echo " current: $GIT_PATH)"
+ echo
+ echo " optional:"
+ echo " TOR_MASTER: the name of the directory containing the tor.git clone"
+ echo " The tor master git directory is \$GIT_PATH/\$TOR_MASTER"
+ echo " (default: tor; current: $TOR_MASTER_NAME)"
+ echo " TOR_WKT_NAME: the name of the directory containing the tor"
+ echo " worktrees. The tor worktrees are:"
+ echo " \$GIT_PATH/\$TOR_WKT_NAME/{maint-*,release-*}"
+ echo " (default: tor-wkt; current: $TOR_WKT_NAME)"
+ echo " we recommend that you set these env vars in your ~/.profile"
+}
+
+#################
+# Configuration #
+#################
+
+# Don't change this configuration - set the env vars in your .profile
+
+# Where are all those git repositories?
+GIT_PATH=${TOR_FULL_GIT_PATH:-"FULL_PATH_TO_GIT_REPOSITORY_DIRECTORY"}
+# The tor master git repository directory from which all the worktrees have
+# been created.
+TOR_MASTER_NAME=${TOR_MASTER_NAME:-"tor"}
+# The worktrees location (directory).
+TOR_WKT_NAME=${TOR_WKT_NAME:-"tor-wkt"}
+
+##########################
+# Git branches to manage #
+##########################
+
+# The branches and worktrees need to be modified when there is a new branch,
+# and when an old branch is no longer supported.
+
+# Configuration of the branches that need merging. The values are in order:
+# (0) current maint/release branch name
+# (1) previous maint/release name to merge into (0)
+# (only used in merge forward mode)
+# (2) Full path of the git worktree
+# (3) current branch suffix
+# (maint branches only, only used in test branch mode)
+# (4) previous test branch suffix to merge into (3)
+# (maint branches only, only used in test branch mode)
+#
+# Merge forward example:
+# $ cd <PATH/TO/WORKTREE> (2)
+# $ git checkout maint-0.3.5 (0)
+# $ git pull
+# $ git merge maint-0.3.4 (1)
+#
+# Test branch example:
+# $ cd <PATH/TO/WORKTREE> (2)
+# $ git checkout -b ticket99999_035 (3)
+# $ git checkout maint-0.3.5 (0)
+# $ git pull
+# $ git checkout ticket99999_035
+# $ git merge maint-0.3.5
+# $ git merge ticket99999_034 (4)
+#
+# For each version, the maint-* array comes first, then the release-* array.
+# New arrays must be added to the WORKTREE= array, or they are not considered.
+#
+# Only used in test branch mode
+# We create a test branch for the earliest maint branch.
+# But it's the earliest maint branch, so we don't merge forward into it.
+# Since we don't merge forward into it, the second and fifth items must be
+# blank ("").
+
+# The path of the main repository: it contains the origin that will be used
+# to fetch the updates. All the worktrees are created from that repository.
+ORIGIN_PATH="$GIT_PATH/$TOR_MASTER_NAME"
+
+#######################
+# Argument processing #
+#######################
+
+# Controlled by the -n option. The dry run option will just output the command
+# that would have been executed for each worktree.
+DRY_RUN=0
+
+# Controlled by the -t <test-branch-prefix> option. The test branch base
+# name option makes git-merge-forward.sh create new test branches:
+# <tbbn>_035, <tbbn>_040, ... , <tbbn>_master, and merge forward.
+TEST_BRANCH_PREFIX=
+
+# Controlled by the -u option. The use existing option checks for existing
+# branches with the <test-branch-prefix>, and checks them out, rather than
+# creating a new branch.
+USE_EXISTING=0
+
+while getopts "hnt:u" opt; do
+ case "$opt" in
+ h) usage
+ exit 0
+ ;;
+ n) DRY_RUN=1
+ echo " *** DRY RUN MODE ***"
+ ;;
+ t) TEST_BRANCH_PREFIX="$OPTARG"
+ echo " *** CREATING TEST BRANCHES: ${TEST_BRANCH_PREFIX}_nnn ***"
+ ;;
+ u) USE_EXISTING=1
+ echo " *** USE EXISTING TEST BRANCHES MODE ***"
+ ;;
+ *)
+ echo
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+###########################
+# Git worktrees to manage #
+###########################
+
+set -e
+if [ -z "$TEST_BRANCH_PREFIX" ]; then
+ # maint/release merge mode
+ eval "$(git-list-tor-branches.sh -m)"
+ # Remove first element: we don't merge forward into it.
+ WORKTREE=( "${WORKTREE[@]:1}" )
+else
+ eval "$(git-list-tor-branches.sh -m -R)"
+fi
+set +e
+
+COUNT=${#WORKTREE[@]}
+
+#############
+# Constants #
+#############
+
+# Control characters
+CNRM=$'\x1b[0;0m' # Clear color
+
+# Bright color
+BGRN=$'\x1b[1;32m'
+BBLU=$'\x1b[1;34m'
+BRED=$'\x1b[1;31m'
+BYEL=$'\x1b[1;33m'
+IWTH=$'\x1b[3;37m'
+
+# Strings for the pretty print.
+MARKER="${BBLU}[${BGRN}+${BBLU}]${CNRM}"
+SUCCESS="${BGRN}success${CNRM}"
+FAILED="${BRED}failed${CNRM}"
+
+####################
+# Helper functions #
+####################
+
+# Validate the given returned value (error code), print success or failed. The
+# second argument is the error output in case of failure, it is printed out.
+# On failure, this function exits.
+function validate_ret
+{
+ if [ "$1" -eq 0 ]; then
+ printf "%s\\n" "$SUCCESS"
+ else
+ printf "%s\\n" "$FAILED"
+ printf " %s" "$2"
+ exit 1
+ fi
+}
+
+# Switch to the given branch name.
+function switch_branch
+{
+ local cmd="git checkout '$1'"
+ printf " %s Switching branch to %s..." "$MARKER" "$1"
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Checkout a new branch with the given branch name.
+function new_branch
+{
+ local cmd="git checkout -b '$1'"
+ printf " %s Creating new branch %s..." "$MARKER" "$1"
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Switch to an existing branch, or checkout a new branch with the given
+# branch name.
+function switch_or_new_branch
+{
+ local cmd="git rev-parse --verify '$1'"
+ if [ $DRY_RUN -eq 0 ]; then
+ # Call switch_branch if there is a branch, or new_branch if there is not
+ msg=$( eval "$cmd" 2>&1 )
+ RET=$?
+ if [ $RET -eq 0 ]; then
+ # Branch: (commit id)
+ switch_branch "$1"
+ elif [ $RET -eq 128 ]; then
+ # Not a branch: "fatal: Needed a single revision"
+ new_branch "$1"
+ else
+ # Unexpected return value
+ validate_ret $RET "$msg"
+ fi
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}, then depending on the result:"
+ switch_branch "$1"
+ new_branch "$1"
+ fi
+}
+
+# Pull the given branch name.
+function pull_branch
+{
+ local cmd="git pull"
+ printf " %s Pulling branch %s..." "$MARKER" "$1"
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Merge the given branch name ($1) into the current branch ($2).
+function merge_branch
+{
+ local cmd="git merge --no-edit '$1'"
+ printf " %s Merging branch %s into %s..." "$MARKER" "$1" "$2"
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Merge origin/(branch name) into the current branch.
+function merge_branch_origin
+{
+ local cmd="git merge --ff-only 'origin/$1'"
+ printf " %s Merging branch origin/%s..." "$MARKER" "$1"
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Go into the worktree repository.
+function goto_repo
+{
+ if [ ! -d "$1" ]; then
+ echo " $1: Not found. Stopping."
+ exit 1
+ fi
+ cd "$1" || exit
+}
+
+# Fetch the origin. No arguments.
+function fetch_origin
+{
+ local cmd="git fetch origin"
+ printf " %s Fetching origin..." "$MARKER"
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+###############
+# Entry point #
+###############
+
+# First, fetch the origin.
+goto_repo "$ORIGIN_PATH"
+fetch_origin
+
+# Go over all configured worktrees.
+for ((i=0; i<COUNT; i++)); do
+ current=${!WORKTREE[$i]:0:1}
+ previous=${!WORKTREE[$i]:1:1}
+ repo_path=${!WORKTREE[$i]:2:1}
+ # default to merge forward mode
+ test_current=
+ test_previous=
+ target_current="$current"
+ target_previous="$previous"
+ if [ "$TEST_BRANCH_PREFIX" ]; then
+ test_current_suffix=${!WORKTREE[$i]:3:1}
+ test_current=${TEST_BRANCH_PREFIX}${test_current_suffix}
+ # the current test branch, if present, or maint/release branch, if not
+ target_current="$test_current"
+ test_previous_suffix=${!WORKTREE[$i]:4:1}
+ if [ "$test_previous_suffix" ]; then
+ test_previous=${TEST_BRANCH_PREFIX}${test_previous_suffix}
+ # the previous test branch, if present, or maint/release branch, if not
+ target_previous="$test_previous"
+ fi
+ fi
+
+ printf "%s Handling branch \\n" "$MARKER" "${BYEL}$target_current${CNRM}"
+
+ # Go into the worktree to start merging.
+ goto_repo "$repo_path"
+ if [ "$test_current" ]; then
+ if [ $USE_EXISTING -eq 0 ]; then
+ # Create a test branch from the currently checked-out branch/commit
+ # Fail if it already exists
+ new_branch "$test_current"
+ else
+ # Switch if it exists, or create if it does not
+ switch_or_new_branch "$test_current"
+ fi
+ fi
+ # Checkout the current maint/release branch
+ switch_branch "$current"
+ # Update the current maint/release branch with an origin merge to get the
+ # latest updates
+ merge_branch_origin "$current"
+ if [ "$test_current" ]; then
+ # Checkout the test branch
+ switch_branch "$test_current"
+ # Merge the updated maint branch into the test branch
+ merge_branch "$current" "$test_current"
+ fi
+ # Merge the previous branch into the target branch
+ # Merge Forward Example:
+ # merge maint-0.3.5 into maint-0.4.0.
+ # Test Branch Example:
+ # merge bug99999_035 into bug99999_040.
+ # Skip the merge if the previous branch does not exist
+ # (there's nothing to merge forward into the oldest test branch)
+ if [ "$target_previous" ]; then
+ merge_branch "$target_previous" "$target_current"
+ fi
+done
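
Hypothetical invocations, per the usage text (the ticket99999 prefix is the
example used in the script's own comments):

    # Preview a plain merge-forward without running any git commands
    TOR_FULL_GIT_PATH=$HOME/git scripts/git/git-merge-forward.sh -n

    # Create and merge forward ticket99999_035 ... ticket99999_master
    TOR_FULL_GIT_PATH=$HOME/git scripts/git/git-merge-forward.sh -t ticket99999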
diff --git a/scripts/git/git-pull-all.sh b/scripts/git/git-pull-all.sh
new file mode 100755
index 0000000000..7f82eda296
--- /dev/null
+++ b/scripts/git/git-pull-all.sh
@@ -0,0 +1,208 @@
+#!/usr/bin/env bash
+
+SCRIPT_NAME=$(basename "$0")
+
+usage()
+{
+ echo "$SCRIPT_NAME [-h] [-n]"
+ echo
+ echo " arguments:"
+ echo " -h: show this help text"
+ echo " -n: dry run mode"
+ echo " (default: run commands)"
+ echo
+ echo " env vars:"
+ echo " required:"
+ echo " TOR_FULL_GIT_PATH: where the git repository directories reside."
+ echo " You must set this env var, we recommend \$HOME/git/"
+ echo " (default: fail if this env var is not set;"
+ echo " current: $GIT_PATH)"
+ echo
+ echo " optional:"
+ echo " TOR_MASTER: the name of the directory containing the tor.git clone"
+ echo " The tor master git directory is \$GIT_PATH/\$TOR_MASTER"
+ echo " (default: tor; current: $TOR_MASTER_NAME)"
+ echo " TOR_WKT_NAME: the name of the directory containing the tor"
+ echo " worktrees. The tor worktrees are:"
+ echo " \$GIT_PATH/\$TOR_WKT_NAME/{maint-*,release-*}"
+ echo " (default: tor-wkt; current: $TOR_WKT_NAME)"
+ echo " we recommend that you set these env vars in your ~/.profile"
+}
+
+#################
+# Configuration #
+#################
+
+# Don't change this configuration - set the env vars in your .profile
+
+# Where are all those git repositories?
+GIT_PATH=${TOR_FULL_GIT_PATH:-"FULL_PATH_TO_GIT_REPOSITORY_DIRECTORY"}
+# The tor master git repository directory from which all the worktrees have
+# been created.
+TOR_MASTER_NAME=${TOR_MASTER_NAME:-"tor"}
+# The worktrees location (directory).
+TOR_WKT_NAME=${TOR_WKT_NAME:-"tor-wkt"}
+
+##########################
+# Git branches to manage #
+##########################
+
+set -e
+eval "$(git-list-tor-branches.sh -b)"
+set +e
+
+# The master branch path has to be the main repository, and thus contains the
+# origin that will be used to fetch the updates. All the worktrees are created
+# from that repository.
+ORIGIN_PATH="$GIT_PATH/$TOR_MASTER_NAME"
+
+COUNT=${#WORKTREE[@]}
+
+#######################
+# Argument processing #
+#######################
+
+# Controlled by the -n option. The dry run option will just output the command
+# that would have been executed for each worktree.
+DRY_RUN=0
+
+while getopts "hn" opt; do
+ case "$opt" in
+ h) usage
+ exit 0
+ ;;
+ n) DRY_RUN=1
+ echo " *** DRY DRUN MODE ***"
+ ;;
+ *)
+ echo
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+#############
+# Constants #
+#############
+
+# Control characters
+CNRM=$'\x1b[0;0m' # Clear color
+
+# Bright color
+BGRN=$'\x1b[1;32m'
+BBLU=$'\x1b[1;34m'
+BRED=$'\x1b[1;31m'
+BYEL=$'\x1b[1;33m'
+IWTH=$'\x1b[3;37m'
+
+# Strings for the pretty print.
+MARKER="${BBLU}[${BGRN}+${BBLU}]${CNRM}"
+SUCCESS="${BGRN}ok${CNRM}"
+FAILED="${BRED}failed${CNRM}"
+
+####################
+# Helper functions #
+####################
+
+# Validate the given returned value (error code), print success or failed. The
+# second argument is the error output in case of failure, it is printed out.
+# On failure, this function exits.
+function validate_ret
+{
+ if [ "$1" -eq 0 ]; then
+ printf "%s\\n" "$SUCCESS"
+ else
+ printf "%s\\n" "$FAILED"
+ printf " %s" "$2"
+ exit 1
+ fi
+}
+
+# Switch to the given branch name.
+function switch_branch
+{
+ local cmd="git checkout $1"
+ printf " %s Switching branch to %s..." "$MARKER" "$1"
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Update the given branch by merging origin/<branch>, fast-forward only.
+function merge_branch
+{
+ local cmd="git merge --ff-only origin/$1"
+ printf " %s Merging branch origin/%s..." "$MARKER" "$1"
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Go into the worktree repository.
+function goto_repo
+{
+ if [ ! -d "$1" ]; then
+ echo " $1: Not found. Stopping."
+ exit 1
+ fi
+ cd "$1" || exit
+}
+
+# Fetch the origin. No arguments.
+function fetch_origin
+{
+ local cmd="git fetch origin"
+ printf " %s Fetching origin..." "$MARKER"
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Fetch tor-github pull requests. No arguments.
+function fetch_tor_github
+{
+ local cmd="git fetch tor-github"
+ printf " %s Fetching tor-github..." "$MARKER"
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+###############
+# Entry point #
+###############
+
+# First, fetch tor-github.
+goto_repo "$ORIGIN_PATH"
+fetch_tor_github
+
+# Then, fetch the origin.
+fetch_origin
+
+# Go over all configured worktrees.
+for ((i=0; i<COUNT; i++)); do
+ current=${!WORKTREE[$i]:0:1}
+ repo_path=${!WORKTREE[$i]:1:1}
+
+ printf "%s Handling branch %s\\n" "$MARKER" "${BYEL}$current${CNRM}"
+
+ # Go into the worktree to start merging.
+ goto_repo "$repo_path"
+ # Checkout the current branch
+ switch_branch "$current"
+ # Update the current branch by merging the origin to get the latest.
+ merge_branch "$current"
+done
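
A hypothetical dry run, per the usage text:

    TOR_FULL_GIT_PATH=$HOME/git scripts/git/git-pull-all.sh -n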
diff --git a/scripts/git/git-push-all.sh b/scripts/git/git-push-all.sh
new file mode 100755
index 0000000000..558ea8d01c
--- /dev/null
+++ b/scripts/git/git-push-all.sh
@@ -0,0 +1,309 @@
+#!/usr/bin/env bash
+
+SCRIPT_NAME=$(basename "$0")
+
+function usage()
+{
+ if [ "$TOR_PUSH_SAME" ]; then
+ CURRENT_PUSH_SAME="push"
+ else
+ CURRENT_PUSH_SAME="skip"
+ fi
+
+ echo "$SCRIPT_NAME [-h] [-r <remote-name> [-t <test-branch-prefix>]] [-s]"
+ # The next line looks misaligned, but it lines up in the output
+ echo " [-- [-n] [--no-atomic] <git push options>]"
+ echo
+ echo " arguments:"
+ echo " -h: show this help text"
+ echo " -n: dry run mode"
+ echo " (default: run commands)"
+ echo " -r: push to remote-name, rather than the default upstream remote."
+ echo " (default: $DEFAULT_UPSTREAM_REMOTE, current: $UPSTREAM_REMOTE)"
+ echo " -t: test branch mode: push test branches to remote-name. Pushes"
+ echo " branches prefix_035, prefix_040, ... , prefix_master."
+ echo " (default: push maint-*, release-*, and master)"
+ echo " -s: push branches whose tips match upstream maint, release, or"
+ echo " master branches. The default is to skip these branches,"
+ echo " because they do not contain any new code. Use -s to test for"
+ echo " CI environment failures, using code that previously passed CI."
+ echo " (default: skip; current: $CURRENT_PUSH_SAME matching branches)"
+ echo " --: pass further arguments to git push."
+ echo " All unrecognised arguments are passed to git push, but complex"
+ echo " arguments before -- may be mangled by getopt."
+ echo " (default: git push --atomic, current: $GIT_PUSH)"
+ echo
+ echo " env vars:"
+ echo " optional:"
+ echo " TOR_GIT_PUSH_PATH: change to this directory before pushing."
+ echo " (default: if \$TOR_FULL_GIT_PATH is set,"
+ echo " use \$TOR_FULL_GIT_PATH/\$TOR_MASTER;"
+ echo " Otherwise, use the current directory for pushes;"
+ echo " current: $TOR_GIT_PUSH_PATH)"
+ echo " TOR_FULL_GIT_PATH: where the git repository directories reside."
+ echo " We recommend using \$HOME/git/."
+ echo " (default: use the current directory for pushes;"
+ echo " current: $TOR_FULL_GIT_PATH)"
+ echo " TOR_MASTER: the name of the directory containing the tor.git clone"
+ echo " The tor master git directory is \$GIT_PATH/\$TOR_MASTER"
+ echo " (default: tor; current: $TOR_MASTER_NAME)"
+ echo
+ echo " TOR_UPSTREAM_REMOTE_NAME: the default upstream remote."
+ echo " Overridden by -r."
+ echo " (default: upstream; current: $UPSTREAM_REMOTE)"
+ echo " TOR_GIT_PUSH: the git push command and default arguments."
+ echo " Overridden by <git push options> after --."
+ echo " (default: git push --atomic; current: $GIT_PUSH)"
+ echo " TOR_PUSH_SAME: push branches whose tips match upstream maint,"
+ echo " release, or master branches. Inverted by -s."
+ echo " (default: skip; current: $CURRENT_PUSH_SAME matching branches)"
+ echo " TOR_PUSH_DELAY: pushes the master and maint branches separately,"
+ echo " so that CI runs in a sensible order."
+ echo " (default: push all branches immediately; current: $PUSH_DELAY)"
+ echo " we recommend that you set these env vars in your ~/.profile"
+}
+
+set -e
+
+#################
+# Configuration #
+#################
+
+# Don't change this configuration - set the env vars in your .profile
+#
+# The tor master git repository directory from which all the worktrees have
+# been created.
+TOR_MASTER_NAME=${TOR_MASTER_NAME:-"tor"}
+# Which directory do we push from?
+if [ "$TOR_FULL_GIT_PATH" ]; then
+ TOR_GIT_PUSH_PATH=${TOR_GIT_PUSH_PATH:-"$TOR_FULL_GIT_PATH/$TOR_MASTER_NAME"}
+fi
+# git push command and default arguments
+GIT_PUSH=${TOR_GIT_PUSH:-"git push --atomic"}
+# The upstream remote which git.torproject.org/tor.git points to.
+DEFAULT_UPSTREAM_REMOTE=${TOR_UPSTREAM_REMOTE_NAME:-"upstream"}
+# Push to a different upstream remote using -r <remote-name>
+UPSTREAM_REMOTE=${DEFAULT_UPSTREAM_REMOTE}
+# Add a delay between pushes, so CI runs on the most important branches first
+PUSH_DELAY=${TOR_PUSH_DELAY:-0}
+# Push (1) or skip (0) test branches that are the same as an upstream
+# maint/master branch. Push if you are testing that the CI environment still
+# works on old code, skip if you are testing new code in the branch.
+# Default: skip unchanged branches.
+# Inverted by the -s option.
+PUSH_SAME=${TOR_PUSH_SAME:-0}
+
+#######################
+# Argument processing #
+#######################
+
+# Controlled by the -t <test-branch-prefix> option. The test branch prefix
+# option makes git-merge-forward.sh create new test branches:
+# <tbp>_035, <tbp>_040, ... , <tbp>_master, and merge each branch forward into
+# the next one.
+TEST_BRANCH_PREFIX=
+
+while getopts ":hr:st:" opt; do
+ case "$opt" in
+ h) usage
+ exit 0
+ ;;
+ r) UPSTREAM_REMOTE="$OPTARG"
+ echo " *** PUSHING TO REMOTE: ${UPSTREAM_REMOTE} ***"
+ shift
+ shift
+ OPTIND=$((OPTIND - 2))
+ ;;
+ s) PUSH_SAME=$((! PUSH_SAME))
+ if [ "$PUSH_SAME" -eq 0 ]; then
+ echo " *** SKIPPING UNCHANGED TEST BRANCHES ***"
+ else
+ echo " *** PUSHING UNCHANGED TEST BRANCHES ***"
+ fi
+ shift
+ OPTIND=$((OPTIND - 1))
+ ;;
+ t) TEST_BRANCH_PREFIX="$OPTARG"
+ echo " *** PUSHING TEST BRANCHES: ${TEST_BRANCH_PREFIX}_nnn ***"
+ shift
+ shift
+ OPTIND=$((OPTIND - 2))
+ ;;
+ *)
+ # Make git push handle the option
+ # This might mangle options with spaces, use -- for complex options
+ GIT_PUSH="$GIT_PUSH $1"
+ shift
+ OPTIND=$((OPTIND - 1))
+ ;;
+ esac
+done
+
+# getopts doesn't allow "-" as an option character,
+# so we have to handle -- manually
+if [ "$1" = "--" ]; then
+ shift
+fi
+
+if [ "$TEST_BRANCH_PREFIX" ]; then
+ if [ "$UPSTREAM_REMOTE" = "$DEFAULT_UPSTREAM_REMOTE" ]; then
+ echo "Pushing test branches ${TEST_BRANCH_PREFIX}_nnn to " \
+ "the default remote $DEFAULT_UPSTREAM_REMOTE is not allowed."
+ echo
+ usage
+ exit 1
+ fi
+fi
+
+if [ "$TOR_GIT_PUSH_PATH" ]; then
+ echo "Changing to $TOR_GIT_PUSH_PATH before pushing"
+ cd "$TOR_GIT_PUSH_PATH"
+else
+ echo "Pushing from the current directory"
+fi
+
+echo "Calling $GIT_PUSH" "$@" "<branches>"
+
+################################
+# Git upstream remote branches #
+################################
+
+set -e
+DEFAULT_UPSTREAM_BRANCHES=
+if [ "$DEFAULT_UPSTREAM_REMOTE" != "$UPSTREAM_REMOTE" ]; then
+ for br in $(git-list-tor-branches.sh -l); do
+ DEFAULT_UPSTREAM_BRANCHES="${DEFAULT_UPSTREAM_BRANCHES} ${DEFAULT_UPSTREAM_REMOTE}/${br}"
+ done
+fi
+
+UPSTREAM_BRANCHES=
+for br in $(git-list-tor-branches.sh -l); do
+ UPSTREAM_BRANCHES="${UPSTREAM_BRANCHES} ${UPSTREAM_REMOTE}/${br}"
+done
+
+########################
+# Git branches to push #
+########################
+
+if [ -z "$TEST_BRANCH_PREFIX" ]; then
+
+ # maint/release push mode: push all branches.
+ #
+ # List of branches to push. Ordering is not important.
+ PUSH_BRANCHES="$(git-list-tor-branches.sh -l)"
+else
+
+ # Test branch push mode: push test branches, based on each maint branch.
+ #
+ # List of branches to push. Ordering is not important.
+ PUSH_BRANCHES=""
+ for suffix in $(git-list-tor-branches.sh -s -R); do
+ PUSH_BRANCHES="${PUSH_BRANCHES} ${TEST_BRANCH_PREFIX}${suffix}"
+ done
+fi
+
+set +e
+
+###############
+# Entry point #
+###############
+
+if [ "$TEST_BRANCH_PREFIX" ]; then
+ # Skip the test branches that are the same as the default or current
+ # upstream branches (they have already been tested)
+ UPSTREAM_SKIP_SAME_AS="$UPSTREAM_BRANCHES $DEFAULT_UPSTREAM_BRANCHES"
+else
+ # Skip the local maint-*, release-*, master branches that are the same as the
+ # current upstream branches, but ignore the default upstream
+ # (we want to update a non-default remote, even if it matches the default)
+ UPSTREAM_SKIP_SAME_AS="$UPSTREAM_BRANCHES"
+fi
+
+# Skip branches that match the relevant upstream(s)
+if [ "$PUSH_SAME" -eq 0 ]; then
+ NEW_PUSH_BRANCHES=
+ for b in $PUSH_BRANCHES; do
+ PUSH_COMMIT=$(git rev-parse "$b")
+ SKIP_UPSTREAM=
+ for u in $UPSTREAM_SKIP_SAME_AS; do
+ # Skip the branch check on error
+ UPSTREAM_COMMIT=$(git rev-parse "$u" 2>/dev/null) || continue
+ if [ "$PUSH_COMMIT" = "$UPSTREAM_COMMIT" ]; then
+ SKIP_UPSTREAM="$u"
+ fi
+ done
+ if [ "$SKIP_UPSTREAM" ]; then
+ printf "Skipping unchanged: %s matching remote: %s\\n" \
+ "$b" "$SKIP_UPSTREAM"
+ else
+ if [ "$NEW_PUSH_BRANCHES" ]; then
+ NEW_PUSH_BRANCHES="${NEW_PUSH_BRANCHES} ${b}"
+ else
+ NEW_PUSH_BRANCHES="${b}"
+ fi
+ fi
+ done
+ PUSH_BRANCHES=${NEW_PUSH_BRANCHES}
+fi
+
+if [ ! "$PUSH_BRANCHES" ]; then
+ echo "No branches to push!"
+ # We expect the rest of the script to run without errors, even if there
+ # are no branches
+fi
+
+if [ "$PUSH_DELAY" -le 0 ]; then
+ echo "Pushing $PUSH_BRANCHES"
+ # We know that there are no spaces in any branch within $PUSH_BRANCHES, so
+ # it is safe to use it unquoted. (This also applies to the other shellcheck
+ # exceptions below.)
+ #
+ # Push all the branches at the same time
+ # shellcheck disable=SC2086
+ $GIT_PUSH "$@" "$UPSTREAM_REMOTE" $PUSH_BRANCHES
+else
+ # Push the branches in optimal CI order, with a delay between each push
+ PUSH_BRANCHES=$(echo "$PUSH_BRANCHES" | tr " " "\\n" | sort -V)
+ MASTER_BRANCH=$(echo "$PUSH_BRANCHES" | tr " " "\\n" | grep master) \
+ || true # Skipped master branch
+ if [ -z "$TEST_BRANCH_PREFIX" ]; then
+ MAINT_BRANCHES=$(echo "$PUSH_BRANCHES" | tr " " "\\n" | grep maint) \
+ || true # Skipped all maint branches
+ RELEASE_BRANCHES=$(echo "$PUSH_BRANCHES" | tr " " "\\n" | grep release | \
+ tr "\\n" " ") || true # Skipped all release branches
+ else
+    # In test mode, MAINT_BRANCHES actually holds the test branches that are
+    # based on the maint branches
+ MAINT_BRANCHES=$(echo "$PUSH_BRANCHES" | tr " " "\\n" | grep -v master) \
+ || true # Skipped all maint test branches
+ # No release branches
+ RELEASE_BRANCHES=
+ fi
+ if [ "$MASTER_BRANCH" ] || [ "$MAINT_BRANCHES" ] \
+ || [ "$RELEASE_BRANCHES" ]; then
+ printf "Pushing with %ss delays, so CI runs in this order:\\n" \
+ "$PUSH_DELAY"
+ if [ "$MASTER_BRANCH" ]; then
+ printf "%s\\n" "$MASTER_BRANCH"
+ fi
+ if [ "$MAINT_BRANCHES" ]; then
+ printf "%s\\n" "$MAINT_BRANCHES"
+ fi
+ if [ "$RELEASE_BRANCHES" ]; then
+ printf "%s\\n" "$RELEASE_BRANCHES"
+ fi
+ fi
+ # shellcheck disable=SC2086
+ for b in $MASTER_BRANCH $MAINT_BRANCHES; do
+ $GIT_PUSH "$@" "$UPSTREAM_REMOTE" "$b"
+ # If we are pushing more than one branch, delay.
+ # In the unlikely scenario where we are pushing maint without master,
+ # or maint without release, there may be an extra delay
+ if [ "$MAINT_BRANCHES" ] || [ "$RELEASE_BRANCHES" ]; then
+ sleep "$PUSH_DELAY"
+ fi
+ done
+ if [ "$RELEASE_BRANCHES" ]; then
+ # shellcheck disable=SC2086
+ $GIT_PUSH "$@" "$UPSTREAM_REMOTE" $RELEASE_BRANCHES
+ fi
+fi
diff --git a/scripts/git/git-setup-dirs.sh b/scripts/git/git-setup-dirs.sh
new file mode 100755
index 0000000000..1f61eb8b83
--- /dev/null
+++ b/scripts/git/git-setup-dirs.sh
@@ -0,0 +1,508 @@
+#!/usr/bin/env bash
+
+SCRIPT_NAME=$(basename "$0")
+
+function usage()
+{
+ echo "$SCRIPT_NAME [-h] [-n] [-u]"
+ echo
+ echo " arguments:"
+ echo " -h: show this help text"
+ echo " -n: dry run mode"
+ echo " (default: run commands)"
+ echo " -u: if a directory or worktree already exists, use it"
+ echo " (default: fail and exit on existing directories)"
+ echo
+ echo " env vars:"
+ echo " required:"
+ echo " TOR_FULL_GIT_PATH: where the git repository directories reside."
+ echo " You must set this env var, we recommend \$HOME/git/"
+ echo " (default: fail if this env var is not set;"
+ echo " current: $GIT_PATH)"
+ echo
+ echo " optional:"
+ echo " TOR_MASTER: the name of the directory containing the tor.git clone"
+ echo " The tor master git directory is \$GIT_PATH/\$TOR_MASTER"
+ echo " (default: tor; current: $TOR_MASTER_NAME)"
+ echo " TOR_WKT_NAME: the name of the directory containing the tor"
+ echo " worktrees. The tor worktrees are:"
+ echo " \$GIT_PATH/\$TOR_WKT_NAME/{maint-*,release-*}"
+ echo " (default: tor-wkt; current: $TOR_WKT_NAME)"
+ echo " TOR_GIT_ORIGIN_PULL: the origin remote pull URL."
+ echo " (current: $GIT_ORIGIN_PULL)"
+ echo " TOR_GIT_ORIGIN_PUSH: the origin remote push URL"
+ echo " (current: $GIT_ORIGIN_PUSH)"
+ echo " TOR_UPSTREAM_REMOTE_NAME: the default upstream remote."
+ echo " If \$TOR_UPSTREAM_REMOTE_NAME is not 'origin', we have a"
+ echo " separate upstream remote, and we don't push to origin."
+ echo " (default: $DEFAULT_UPSTREAM_REMOTE)"
+ echo " TOR_GITHUB_PULL: the tor-github remote pull URL"
+ echo " (current: $GITHUB_PULL)"
+ echo " TOR_GITHUB_PUSH: the tor-github remote push URL"
+ echo " (current: $GITHUB_PUSH)"
+ echo " TOR_EXTRA_CLONE_ARGS: extra arguments to git clone"
+ echo " (current: $TOR_EXTRA_CLONE_ARGS)"
+ echo " TOR_EXTRA_REMOTE_NAME: the name of an extra remote"
+ echo " This remote is not pulled by this script or git-pull-all.sh."
+ echo " This remote is not pushed by git-push-all.sh."
+ echo " (current: $TOR_EXTRA_REMOTE_NAME)"
+ echo " TOR_EXTRA_REMOTE_PULL: the extra remote pull URL."
+ echo " (current: $TOR_EXTRA_REMOTE_PULL)"
+ echo " TOR_EXTRA_REMOTE_PUSH: the extra remote push URL"
+ echo " (current: $TOR_EXTRA_REMOTE_PUSH)"
+ echo " we recommend that you set these env vars in your ~/.profile"
+}
+
+#################
+# Configuration #
+#################
+
+# Don't change this configuration - set the env vars in your .profile
+
+# Where are all those git repositories?
+GIT_PATH=${TOR_FULL_GIT_PATH:-"FULL_PATH_TO_GIT_REPOSITORY_DIRECTORY"}
+# The tor master git repository directory from which all the worktrees have
+# been created.
+TOR_MASTER_NAME=${TOR_MASTER_NAME:-"tor"}
+# The worktrees location (directory).
+TOR_WKT_NAME=${TOR_WKT_NAME:-"tor-wkt"}
+
+# Origin repositories
+GIT_ORIGIN_PULL=${TOR_GIT_ORIGIN_PULL:-"https://git.torproject.org/tor.git"}
+GIT_ORIGIN_PUSH=${TOR_GIT_ORIGIN_PUSH:-"git@git-rw.torproject.org:tor.git"}
+# The upstream remote which git.torproject.org/tor.git points to.
+DEFAULT_UPSTREAM_REMOTE=${TOR_UPSTREAM_REMOTE_NAME:-"upstream"}
+# Copy the URLs from origin
+GIT_UPSTREAM_PULL="$GIT_ORIGIN_PULL"
+GIT_UPSTREAM_PUSH="$GIT_ORIGIN_PUSH"
+# And avoid pushing to origin if we have an upstream
+if [ "$DEFAULT_UPSTREAM_REMOTE" != "origin" ]; then
+ GIT_ORIGIN_PUSH="No pushes to origin, if there is an upstream"
+fi
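+# (Illustrative: once setup completes with a separate upstream remote,
+# "git remote -v" in the clone shows roughly:
+#    origin    https://git.torproject.org/tor.git (fetch)
+#    origin    No pushes to origin, if there is an upstream (push)
+#    upstream  https://git.torproject.org/tor.git (fetch)
+#    upstream  git@git-rw.torproject.org:tor.git (push) )
+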
+# GitHub repositories
+GITHUB_PULL=${TOR_GITHUB_PULL:-"https://github.com/torproject/tor.git"}
+GITHUB_PUSH=${TOR_GITHUB_PUSH:-"No_Pushing_To_GitHub"}
+
+##########################
+# Git branches to manage #
+##########################
+
+# The branches and worktrees need to be modified when there is a new branch,
+# and when an old branch is no longer supported.
+
+set -e
+eval "$(git-list-tor-branches.sh -b)"
+set +e
+
+# The master branch path is the main repository; it contains the origin
+# remote that is used to fetch updates. All the worktrees are created from
+# that repository.
+ORIGIN_PATH="$GIT_PATH/$TOR_MASTER_NAME"
+
+#######################
+# Argument processing #
+#######################
+
+# Controlled by the -n option. The dry run option will just output the command
+# that would have been executed for each worktree.
+DRY_RUN=0
+
+# Controlled by the -u option. The use-existing option checks for existing
+# directories, and re-uses them, rather than creating a new directory.
+USE_EXISTING=0
+USE_EXISTING_HINT="Use existing: '$SCRIPT_NAME -u'."
+
+while getopts "hnu" opt; do
+ case "$opt" in
+ h) usage
+ exit 0
+ ;;
+ n) DRY_RUN=1
+ echo " *** DRY RUN MODE ***"
+ ;;
+ u) USE_EXISTING=1
+ echo " *** USE EXISTING DIRECTORIES MODE ***"
+ ;;
+ *)
+ echo
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+###########################
+# Git worktrees to manage #
+###########################
+
+COUNT=${#WORKTREE[@]}
+
+#############
+# Constants #
+#############
+
+# Control characters
+CNRM=$'\x1b[0;0m' # Clear color
+
+# Bright color
+BGRN=$'\x1b[1;32m'
+BBLU=$'\x1b[1;34m'
+BRED=$'\x1b[1;31m'
+BYEL=$'\x1b[1;33m'
+IWTH=$'\x1b[3;37m'
+
+# Strings for the pretty print.
+MARKER="${BBLU}[${BGRN}+${BBLU}]${CNRM}"
+SUCCESS="${BGRN}success${CNRM}"
+SKIPPED="${BYEL}skipped${CNRM}"
+FAILED="${BRED}failed${CNRM}"
+
+####################
+# Helper functions #
+####################
+
+# Validate the given return value (error code), and print success or failed.
+# The second argument is the error output; it is printed on failure.
+# On failure, this function exits.
+function validate_ret
+{
+ if [ "$1" -eq 0 ]; then
+ printf "%s\\n" "$SUCCESS"
+ else
+ printf "%s\\n" "$FAILED"
+ printf " %s\\n" "$2"
+ exit 1
+ fi
+}
+
+# Validate the given return value (error code), and print success, skipped,
+# or failed. If $USE_EXISTING is 0, fail on error; otherwise, skip on error.
+# The second argument is the error output; it is printed on failure or skip.
+# On failure, this function exits.
+function validate_ret_skip
+{
+ if [ "$1" -ne 0 ]; then
+ if [ "$USE_EXISTING" -eq "0" ]; then
+ # Fail and exit with error
+ validate_ret "$1" "$2 $USE_EXISTING_HINT"
+ else
+ printf "%s\\n" "$SKIPPED"
+ printf " %s\\n" "${IWTH}$2${CNRM}"
+ # Tell the caller to skip the rest of the function
+ return 0
+ fi
+ fi
+ # Tell the caller to continue
+ return 1
+}
+
+# Create a directory, and any missing enclosing directories.
+# If the directory already exists: fail if $USE_EXISTING is 0, otherwise skip.
+function make_directory
+{
+ local cmd="mkdir -p '$1'"
+ printf " %s Creating directory %s..." "$MARKER" "$1"
+ local check_cmd="[ ! -d '$1' ]"
+ msg=$( eval "$check_cmd" 2>&1 )
+ if validate_ret_skip $? "Directory already exists."; then
+ return
+ fi
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Create a symlink from the first argument to the second argument
+# If the link already exists: fail if $USE_EXISTING is 0, otherwise skip.
+function make_symlink
+{
+ local cmd="ln -s '$1' '$2'"
+ printf " %s Creating symlink from %s to %s..." "$MARKER" "$1" "$2"
+ local check_cmd="[ ! -e '$2' ]"
+ msg=$( eval "$check_cmd" 2>&1 )
+ if validate_ret_skip $? "File already exists."; then
+ return
+ fi
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Go into the directory or repository, even if $DRY_RUN is non-zero.
+# If the directory does not exist, fail and log an error.
+# Otherwise, silently succeed.
+function goto_dir
+{
+ if ! cd "$1" 1>/dev/null 2>/dev/null ; then
+ printf " %s Changing to directory %s..." "$MARKER" "$1"
+ validate_ret 1 "$1: Not found. Stopping."
+ fi
+}
+
+# Clone a repository into a directory.
+# If the directory already exists: fail if $USE_EXISTING is 0, otherwise skip.
+function clone_repo
+{
+ local cmd="git clone $TOR_EXTRA_CLONE_ARGS '$1' '$2'"
+ printf " %s Cloning %s into %s..." "$MARKER" "$1" "$2"
+ local check_cmd="[ ! -d '$2' ]"
+ msg=$( eval "$check_cmd" 2>&1 )
+ if validate_ret_skip $? "Directory already exists."; then
+ # If we skip the clone, we need to do a fetch
+ goto_dir "$ORIGIN_PATH"
+ fetch_remote "origin"
+ return
+ fi
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Add a remote by name and URL.
+# If the remote already exists: fail if $USE_EXISTING is 0, otherwise skip.
+function add_remote
+{
+ local cmd="git remote add '$1' '$2'"
+ printf " %s Adding remote %s at %s..." "$MARKER" "$1" "$2"
+ local check_cmd="git remote get-url '$1'"
+ msg=$( eval "$check_cmd" 2>&1 )
+ ret=$?
+  # We want the remote to be missing, so we invert the check's exit status
+ if validate_ret_skip $(( ! ret )) \
+ "Remote already exists for $1 at $msg."; then
+ return
+ fi
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Set a remote's push URL by name and URL.
+function set_remote_push
+{
+ local cmd="git remote set-url --push '$1' '$2'"
+ printf " %s Setting remote %s push URL to '%s'..." "$MARKER" "$1" "$2"
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Fetch a remote by name.
+function fetch_remote
+{
+ local cmd="git fetch '$1'"
+ printf " %s Fetching %s..." "$MARKER" "$1"
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Replace a remote's fetch configs that match a pattern with the given config.
+function replace_fetch_config
+{
+ local cmd="git config --replace-all remote.'$1'.fetch '$2' '$3'"
+ printf " %s Replacing %s fetch configs for '%s'..." \
+ "$MARKER" "$1" "$3"
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Set up the tor-github PR config, so tor-github/pr/NNNN/head points to GitHub
+# PR NNNN. In some repositories, "/head" is optional.
+function set_tor_github_pr_fetch_config
+{
+ # Standard branches
+ replace_fetch_config tor-github \
+ "+refs/heads/*:refs/remotes/tor-github/*" \
+ "refs/heads"
+ # PRs
+ replace_fetch_config "tor-github" \
+ "+refs/pull/*:refs/remotes/tor-github/pr/*" \
+ "refs/pull.*pr"
+}
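+# (After those two calls, .git/config should contain something like:
+#    [remote "tor-github"]
+#        fetch = +refs/heads/*:refs/remotes/tor-github/*
+#        fetch = +refs/pull/*:refs/remotes/tor-github/pr/*
+# so "git fetch tor-github" makes PR NNNN available as tor-github/pr/NNNN/head.
+# This rendering is illustrative; git may lay out the file differently.)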
+
+# Add a new worktree for branch at path.
+# If the directory already exists: fail if $USE_EXISTING is 0, otherwise skip.
+function add_worktree
+{
+ local cmd="git worktree add '$2' '$1'"
+ printf " %s Adding worktree for %s at %s..." "$MARKER" "$1" "$2"
+ local check_cmd="[ ! -d '$2' ]"
+ msg=$( eval "$check_cmd" 2>&1 )
+ if validate_ret_skip $? "Directory already exists."; then
+ return
+ fi
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Switch to the given branch name.
+# If the branch does not exist: fail.
+function switch_branch
+{
+ local cmd="git checkout '$1'"
+ printf " %s Switching branch to %s..." "$MARKER" "$1"
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Checkout a new branch with the given branch name.
+# If the branch already exists: fail if $USE_EXISTING is 0, otherwise skip.
+function new_branch
+{
+ local cmd="git checkout -b '$1'"
+ printf " %s Creating new branch %s..." "$MARKER" "$1"
+ local check_cmd="git branch --list '$1'"
+ msg=$( eval "$check_cmd" 2>&1 )
+ if validate_ret_skip $? "Branch already exists."; then
+ return
+ fi
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+# Switch to an existing branch, or checkout a new branch with the given
+# branch name.
+function switch_or_new_branch
+{
+ local cmd="git rev-parse --verify '$1'"
+ if [ $DRY_RUN -eq 0 ]; then
+ # Call switch_branch if there is a branch, or new_branch if there is not
+ msg=$( eval "$cmd" 2>&1 )
+ RET=$?
+ if [ $RET -eq 0 ]; then
+ # Branch: (commit id)
+ switch_branch "$1"
+ elif [ $RET -eq 128 ]; then
+ # Not a branch: "fatal: Needed a single revision"
+ new_branch "$1"
+ else
+ # Unexpected return value
+ validate_ret $RET "$msg"
+ fi
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}, then depending on the result:"
+ switch_branch "$1"
+ new_branch "$1"
+ fi
+}
+
+# Set the upstream for branch to upstream.
+function set_upstream
+{
+ # Note the argument order is swapped
+ local cmd="git branch --set-upstream-to='$2' '$1'"
+ printf " %s Setting upstream for %s to %s..." "$MARKER" "$1" "$2"
+ if [ $DRY_RUN -eq 0 ]; then
+ msg=$( eval "$cmd" 2>&1 )
+ validate_ret $? "$msg"
+ else
+ printf "\\n %s\\n" "${IWTH}$cmd${CNRM}"
+ fi
+}
+
+###############
+# Entry point #
+###############
+
+printf "%s Setting up the repository and remote %s\\n" "$MARKER" \
+ "${BYEL}origin${CNRM}"
+# First, fetch the origin.
+ORIGIN_PARENT=$(dirname "$ORIGIN_PATH")
+make_directory "$ORIGIN_PARENT"
+# This is just cd with an error check
+goto_dir "$ORIGIN_PARENT"
+
+# clone repository / origin remote
+clone_repo "$GIT_ORIGIN_PULL" "$TOR_MASTER_NAME"
+goto_dir "$ORIGIN_PATH"
+set_remote_push "origin" "$GIT_ORIGIN_PUSH"
+
+# upstream remote, if different to origin
+if [ "$DEFAULT_UPSTREAM_REMOTE" != "origin" ]; then
+ printf "%s Setting up remote %s\\n" "$MARKER" \
+ "${BYEL}$DEFAULT_UPSTREAM_REMOTE${CNRM}"
+ add_remote "$DEFAULT_UPSTREAM_REMOTE" "$GIT_UPSTREAM_PULL"
+ set_remote_push "$DEFAULT_UPSTREAM_REMOTE" "$GIT_UPSTREAM_PUSH"
+ fetch_remote "$DEFAULT_UPSTREAM_REMOTE"
+fi
+
+# GitHub remote
+printf "%s Setting up remote %s\\n" "$MARKER" "${BYEL}tor-github${CNRM}"
+# Add remote
+add_remote "tor-github" "$GITHUB_PULL"
+set_remote_push "tor-github" "$GITHUB_PUSH"
+# Add custom fetch for PRs
+set_tor_github_pr_fetch_config
+# Now fetch them all
+fetch_remote "tor-github"
+
+# Extra remote
+if [ "$TOR_EXTRA_REMOTE_NAME" ]; then
+ printf "%s Setting up remote %s\\n" "$MARKER" \
+ "${BYEL}$TOR_EXTRA_REMOTE_NAME${CNRM}"
+ # Add remote
+ add_remote "$TOR_EXTRA_REMOTE_NAME" "$TOR_EXTRA_REMOTE_PULL"
+ set_remote_push "$TOR_EXTRA_REMOTE_NAME" "$TOR_EXTRA_REMOTE_PUSH"
+ # But leave it to the user to decide if they want to fetch it
+ #fetch_remote "$TOR_EXTRA_REMOTE_NAME"
+fi
+
+# Go over all configured worktrees.
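+# (Implementation note, for illustration: each WORKTREE entry is assumed to
+# be a reference like "maint_035[@]" to a two-element array set up by
+# "git-list-tor-branches.sh -b" above, e.g.
+#    maint_035=( "maint-0.3.5" "$GIT_PATH/$TOR_WKT_NAME/maint-0.3.5" )
+# so "${!WORKTREE[$i]:0:1}" indirectly expands element 0, the branch name,
+# and ":1:1" expands element 1, the worktree path.)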
+for ((i=0; i<COUNT; i++)); do
+ branch=${!WORKTREE[$i]:0:1}
+ repo_path=${!WORKTREE[$i]:1:1}
+
+ printf "%s Handling branch %s\\n" "$MARKER" "${BYEL}$branch${CNRM}"
+ # We cloned the repository, and master is the default branch
+ if [ "$branch" = "master" ]; then
+ if [ "$TOR_MASTER_NAME" != "master" ]; then
+ # Set up a master link in the worktree directory
+ make_symlink "$repo_path" "$GIT_PATH/$TOR_WKT_NAME/master"
+ fi
+ else
+ # git makes worktree directories if they don't exist
+ add_worktree "origin/$branch" "$repo_path"
+ fi
+ goto_dir "$repo_path"
+ switch_or_new_branch "$branch"
+ set_upstream "$branch" "origin/$branch"
+done
+
+echo
+echo "Remember to copy the git hooks from tor/scripts/git/*.git-hook to"
+echo "$ORIGIN_PATH/.git/hooks/*"
diff --git a/scripts/git/post-merge.git-hook b/scripts/git/post-merge.git-hook
new file mode 100755
index 0000000000..eae4f999e7
--- /dev/null
+++ b/scripts/git/post-merge.git-hook
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+# This is a post-merge git hook script that checks for changes in:
+# * git hook scripts
+# * helper scripts for using git efficiently.
+# If any changes are detected, a diff of them is printed.
+#
+# To install this script, copy it to .git/hooks/post-merge in a local copy of
+# the tor git repo, and make sure it has permission to execute.
+
+git_toplevel=$(git rev-parse --show-toplevel)
+
+check_for_diffs() {
+ installed="$git_toplevel/.git/hooks/$1"
+ latest="$git_toplevel/scripts/git/$1.git-hook"
+
+ if [ -e "$installed" ]
+ then
+ if ! cmp "$installed" "$latest" >/dev/null 2>&1
+ then
+ echo "ATTENTION: $1 hook has changed:"
+ echo "==============================="
+ diff -u "$installed" "$latest"
+ fi
+ fi
+}
+
+check_for_script_update() {
+ fullpath="$1"
+
+ if ! git diff ORIG_HEAD HEAD --exit-code -- "$fullpath" >/dev/null
+ then
+ echo "ATTENTION: $1 has changed:"
+ git --no-pager diff ORIG_HEAD HEAD -- "$fullpath"
+ fi
+}
+
+cur_branch=$(git rev-parse --abbrev-ref HEAD)
+if [ "$cur_branch" != "master" ]; then
+ echo "post-merge: Not a master branch. Skipping."
+ exit 0
+fi
+
+check_for_diffs "pre-push"
+check_for_diffs "pre-commit"
+check_for_diffs "post-merge"
+
+for file in "$git_toplevel"/scripts/git/* ; do
+ check_for_script_update "$file"
+done
+
diff --git a/scripts/git/pre-commit.git-hook b/scripts/git/pre-commit.git-hook
new file mode 100755
index 0000000000..f630a242bd
--- /dev/null
+++ b/scripts/git/pre-commit.git-hook
@@ -0,0 +1,99 @@
+#!/usr/bin/env bash
+#
+# To install this script, copy it to .git/hooks/pre-commit in a local copy of
+# the tor git repo, and make sure it has permission to execute.
+#
+# This is a pre-commit git hook script that prevents committing your changeset
+# if it fails our code formatting, changelog entry formatting, module include
+# rules, or best practices tracker.
+
+workdir=$(git rev-parse --show-toplevel)
+
+cd "$workdir" || exit 1
+
+set -e
+
+if [ $# -eq 0 ]; then
+ # When called in pre-commit, check the files modified in this commit
+ CHECK_FILTER="git diff --cached --name-only --diff-filter=ACMR"
+ # Use the appropriate owned tor source list to filter the changed files
+
+ # This is the layout in 0.3.5 and later.
+
+ # Keep these lists consistent:
+ # - OWNED_TOR_C_FILES in Makefile.am
+ # - CHECK_FILES in pre-commit.git-hook and pre-push.git-hook
+ # - try_parse in check_cocci_parse.sh
+ CHECK_FILES="$($CHECK_FILTER \
+ src/lib/*/*.[ch] \
+ src/core/*/*.[ch] \
+ src/feature/*/*.[ch] \
+ src/app/*/*.[ch] \
+ src/test/*.[ch] \
+ src/test/*/*.[ch] \
+ src/tools/*.[ch] \
+ )"
+else
+ # When called in pre-push, concatenate the argument array
+ # Fails on special characters in file names
+ CHECK_FILES="$*"
+fi
+
+## General File Checks
+
+if [ -n "$(ls ./changes/)" ]; then
+ python scripts/maint/lintChanges.py ./changes/*
+fi
+
+if [ -e scripts/maint/checkShellScripts.sh ]; then
+ scripts/maint/checkShellScripts.sh
+fi
+
+# Always run the practracker unit tests
+PT_DIR=scripts/maint/practracker
+
+if [ -e "${PT_DIR}/test_practracker.sh" ]; then
+ "${PT_DIR}/test_practracker.sh"
+fi
+
+if [ -e scripts/maint/checkSpaceTest.sh ]; then
+ scripts/maint/checkSpaceTest.sh
+fi
+
+if [ ! "$CHECK_FILES" ]; then
+ echo "No modified tor-owned source files, skipping further checks"
+ exit 0
+fi
+
+## Owned Source File Checks
+
+printf "Modified tor-owned source files:\\n%s\\n" "$CHECK_FILES"
+
+# We want word splitting here, because file names are space separated
+# shellcheck disable=SC2086
+perl scripts/maint/checkSpace.pl -C \
+ $CHECK_FILES
+
+if test -e scripts/maint/practracker/includes.py; then
+ python scripts/maint/practracker/includes.py
+fi
+
+# Only call practracker if ${PT_DIR}/.enable_practracker_in_hooks exists
+# We do this check so that we can enable practracker in hooks in master, and
+# disable it on maint branches
+if [ -e "${PT_DIR}/practracker.py" ]; then
+ if [ -e "${PT_DIR}/.enable_practracker_in_hooks" ]; then
+ python3 "${PT_DIR}/practracker.py" "$workdir"
+ fi
+fi
+
+if [ -e scripts/coccinelle/check_cocci_parse.sh ]; then
+
+ # Run a verbose cocci parse check on the changed files
+ # (spatch is slow, so we don't want to check all the files.)
+ #
+ # We want word splitting here, because file names are space separated
+ # shellcheck disable=SC2086
+ VERBOSE=1 scripts/coccinelle/check_cocci_parse.sh \
+ $CHECK_FILES
+fi
diff --git a/scripts/git/pre-push.git-hook b/scripts/git/pre-push.git-hook
new file mode 100755
index 0000000000..efa45b9860
--- /dev/null
+++ b/scripts/git/pre-push.git-hook
@@ -0,0 +1,129 @@
+#!/usr/bin/env bash
+
+# git pre-push hook script to:
+# 0) Call the pre-commit hook, if it is available
+# 1) prevent "fixup!" and "squash!" commit from ending up in master, release-*
+# or maint-*
+# 2) Disallow pushing branches other than master, release-*
+# and maint-* to origin (e.g. gitweb.torproject.org)
+#
+# To install this script, copy it into the .git/hooks/pre-push path in your
+# local copy of the git repository. Make sure it has permission to execute.
+# Furthermore, make sure that the TOR_UPSTREAM_REMOTE_NAME environment
+# variable is set to the local name of the git remote that corresponds to the
+# upstream repository on e.g. git.torproject.org.
+#
+# The following sample script was used as starting point:
+# https://github.com/git/git/blob/master/templates/hooks--pre-push.sample
+
+# Are you adding a new check to the git hooks?
+# - Common checks belong in the pre-commit hook
+# - Push-only checks belong in the pre-push hook
+
+echo "Running pre-push hook"
+
+z40=0000000000000000000000000000000000000000
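+# (z40 is the all-zeros object id that git passes for a nonexistent ref,
+# i.e. when a branch is being created or deleted.)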
+
+upstream_name=${TOR_UPSTREAM_REMOTE_NAME:-"upstream"}
+
+# The working directory
+workdir=$(git rev-parse --show-toplevel)
+# The .git directory
+# If $workdir is a worktree, then $gitdir is not $workdir/.git
+gitdir=$(git rev-parse --git-dir)
+
+cd "$workdir" || exit 1
+
+remote="$1"
+remote_name=$(git remote --verbose | grep "$2" | awk '{print $1}' | head -n 1)
+
+
+ref_is_upstream_branch() {
+ if [ "$1" == "refs/heads/master" ] ||
+ [[ "$1" == refs/heads/release-* ]] ||
+ [[ "$1" == refs/heads/maint-* ]]; then
+ return 1
+ fi
+}
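+# (Note: this helper returns 1, i.e. shell "false", when the ref IS an
+# upstream branch, so a 0 status at the call sites below means "not an
+# upstream branch". The literal "== 0" arguments passed there are ignored,
+# since the function only inspects its first argument.)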
+
+# shellcheck disable=SC2034
+while read -r local_ref local_sha remote_ref remote_sha
+do
+ if [ "$local_sha" = $z40 ]; then
+ # Handle delete
+ :
+ else
+ if [ "$remote_sha" = $z40 ]; then
+ # New branch, examine commits not in master
+ range="master...$local_sha"
+ else
+ # Update to existing branch, examine new commits
+ range="$remote_sha..$local_sha"
+ fi
+
+ # Call the pre-commit hook for the common checks, if it is executable
+ pre_commit=${gitdir}/hooks/pre-commit
+ if [ -x "$pre_commit" ]; then
+ # Only check the files newly modified in this branch
+ CHECK_FILTER="git diff --name-only --diff-filter=ACMR $range"
+ # Use the appropriate owned tor source list to filter the changed
+ # files
+ # This is the layout in 0.3.5
+ # Keep these lists consistent:
+ # - OWNED_TOR_C_FILES in Makefile.am
+ # - CHECK_FILES in pre-commit.git-hook and pre-push.git-hook
+ # - try_parse in check_cocci_parse.sh
+ CHECK_FILES="$($CHECK_FILTER \
+ src/lib/*/*.[ch] \
+ src/core/*/*.[ch] \
+ src/feature/*/*.[ch] \
+ src/app/*/*.[ch] \
+ src/test/*.[ch] \
+ src/test/*/*.[ch] \
+ src/tools/*.[ch] \
+ )"
+
+ # We want word splitting here, because file names are space
+ # separated
+ # shellcheck disable=SC2086
+ if ! "$pre_commit" $CHECK_FILES ; then
+ exit 1
+ fi
+ fi
+
+ if [[ "$remote_name" != "$upstream_name" ]]; then
+ echo "Not pushing to upstream - refraining from further checks"
+ continue
+ fi
+
+ if (ref_is_upstream_branch "$local_ref" == 0 ||
+ ref_is_upstream_branch "$remote_ref" == 0) &&
+ [ "$local_ref" != "$remote_ref" ]; then
+ if [ "$remote" == "origin" ]; then
+ echo >&2 "Not pushing: $local_ref to $remote_ref"
+ echo >&2 "If you really want to push this, use --no-verify."
+ exit 1
+ else
+ continue
+ fi
+ fi
+
+ # Check for fixup! commit
+ commit=$(git rev-list -n 1 --grep '^fixup!' "$range")
+ if [ -n "$commit" ]; then
+ echo >&2 "Found fixup! commit in $local_ref, not pushing"
+ echo >&2 "If you really want to push this, use --no-verify."
+ exit 1
+ fi
+
+ # Check for squash! commit
+ commit=$(git rev-list -n 1 --grep '^squash!' "$range")
+ if [ -n "$commit" ]; then
+ echo >&2 "Found squash! commit in $local_ref, not pushing"
+ echo >&2 "If you really want to push this, use --no-verify."
+ exit 1
+ fi
+ fi
+done
+
+exit 0
diff --git a/scripts/maint/add_c_file.py b/scripts/maint/add_c_file.py
new file mode 100755
index 0000000000..e1e224d8d5
--- /dev/null
+++ b/scripts/maint/add_c_file.py
@@ -0,0 +1,333 @@
+#!/usr/bin/env python3
+
+"""
+ Add a C file with matching header to the Tor codebase. Creates
+ both files from templates, and adds them to the right include.am file.
+
+ This script takes paths relative to the top-level tor directory. It
+ expects to be run from that directory.
+
+ This script creates files, and inserts them into include.am, also
+ relative to the top-level tor directory.
+
+ But the template content in those files is relative to tor's src
+ directory. (This script strips "src" from the paths used to create
+ templated comments and macros.)
+
+    This script expects posix paths, so it should be run with a python
+    where os.path is posixpath (rather than ntpath). This probably means
+ Linux, macOS, or BSD, although it might work on Windows if your python
+ was compiled with mingw, MSYS, or cygwin.
+
+ Example usage:
+
+ % add_c_file.py ./src/feature/dirauth/ocelot.c
+"""
+
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+import re
+import time
+
+def tordir_file(fname):
+ """Make fname relative to the current directory, which should be the
+ top-level tor directory. Also performs basic path simplifications."""
+ return os.path.normpath(os.path.relpath(fname))
+
+def srcdir_file(tor_fname):
+ """Make tor_fname relative to tor's "src" directory.
+ Also performs basic path simplifications.
+ (This function takes paths relative to the top-level tor directory,
+ but outputs a path that is relative to tor's src directory.)"""
+ return os.path.normpath(os.path.relpath(tor_fname, 'src'))
+
+def guard_macro(src_fname):
+ """Return the guard macro that should be used for the header file
+ 'src_fname'. This function takes paths relative to tor's src directory.
+ """
+ td = src_fname.replace(".", "_").replace("/", "_").upper()
+ return "TOR_{}".format(td)
+
+def makeext(fname, new_extension):
+ """Replace the extension for the file called 'fname' with 'new_extension'.
+ This function takes and returns paths relative to either the top-level
+ tor directory, or tor's src directory, and returns the same kind
+ of path.
+ """
+ base = os.path.splitext(fname)[0]
+ return base + "." + new_extension
+
+def instantiate_template(template, tor_fname):
+ """
+ Fill in a template with string using the fields that should be used
+ for 'tor_fname'.
+
+ This function takes paths relative to the top-level tor directory,
+ but the paths in the completed template are relative to tor's src
+ directory. (Except for one of the fields, which is just a basename).
+ """
+ src_fname = srcdir_file(tor_fname)
+ names = {
+ # The relative location of the header file.
+ 'header_path' : makeext(src_fname, "h"),
+        # The relative location of the C file.
+ 'c_file_path' : makeext(src_fname, "c"),
+ # The truncated name of the file.
+ 'short_name' : os.path.basename(src_fname),
+ # The current year, for the copyright notice
+ 'this_year' : time.localtime().tm_year,
+ # An appropriate guard macro, for the header.
+ 'guard_macro' : guard_macro(src_fname),
+ }
+
+ return template.format(**names)
+
+# This template operates on paths relative to tor's src directory
+HEADER_TEMPLATE = """\
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-{this_year}, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * @file {short_name}
+ * @brief Header for {c_file_path}
+ **/
+
+#ifndef {guard_macro}
+#define {guard_macro}
+
+#endif /* !defined({guard_macro}) */
+"""
+
+# This template operates on paths relative to tor's src directory
+C_FILE_TEMPLATE = """\
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-{this_year}, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * @file {short_name}
+ * @brief DOCDOC
+ **/
+
+#include "orconfig.h"
+#include "{header_path}"
+"""
+
+class AutomakeChunk:
+ """
+ Represents part of an automake file. If it is decorated with
+ an ADD_C_FILE comment, it has a "kind" based on what to add to it.
+ Otherwise, it only has a bunch of lines in it.
+
+ This class operates on paths relative to the top-level tor directory.
+ """
+ pat = re.compile(r'# ADD_C_FILE: INSERT (\S*) HERE', re.I)
+
+ def __init__(self):
+ self.lines = []
+ self.kind = ""
+ self.hasBlank = False # true if we end with a blank line.
+
+ def addLine(self, line):
+ """
+ Insert a line into this chunk while parsing the automake file.
+
+ Return True if we have just read the last line in the chunk, and
+ False otherwise.
+ """
+ m = self.pat.match(line)
+ if m:
+ if self.lines:
+ raise ValueError("control line not preceded by a blank line")
+ self.kind = m.group(1)
+
+ if line.strip() == "":
+ self.hasBlank = True
+ return True
+
+ self.lines.append(line)
+
+ return False
+
+ def insertMember(self, new_tor_fname):
+ """
+ Add a new file name new_tor_fname to this chunk. Try to insert it in
+ alphabetical order with matching indentation, but don't freak out too
+ much if the source isn't consistent.
+
+ Assumes that this chunk is of the form:
+ FOOBAR = \
+ X \
+ Y \
+ Z
+
+ This function operates on paths relative to the top-level tor
+ directory.
+ """
+ prespace = "\t"
+ postspace = "\t\t"
+ for lineno, line in enumerate(self.lines):
+ m = re.match(r'(\s+)(\S+)(\s+)\\', line)
+ if not m:
+ continue
+ prespace, cur_tor_fname, postspace = m.groups()
+ if cur_tor_fname > new_tor_fname:
+ self.insert_before(lineno, new_tor_fname, prespace, postspace)
+ return
+ self.insert_at_end(new_tor_fname, prespace, postspace)
+
+ def insert_before(self, lineno, new_tor_fname, prespace, postspace):
+ self.lines.insert(lineno,
+ "{}{}{}\\\n".format(prespace, new_tor_fname,
+ postspace))
+
+ def insert_at_end(self, new_tor_fname, prespace, postspace):
+ lastline = self.lines[-1].strip()
+ self.lines[-1] = '{}{}{}\\\n'.format(prespace, lastline, postspace)
+ self.lines.append("{}{}\n".format(prespace, new_tor_fname))
+
+ def dump(self, f):
+ """Write all the lines in this chunk to the file 'f'."""
+ for line in self.lines:
+ f.write(line)
+ if not line.endswith("\n"):
+ f.write("\n")
+
+ if self.hasBlank:
+ f.write("\n")
+
+class ParsedAutomake:
+ """A sort-of-parsed automake file, with identified chunks into which
+ headers and c files can be inserted.
+
+ This class operates on paths relative to the top-level tor directory.
+ """
+ def __init__(self):
+ self.chunks = []
+ self.by_type = {}
+
+ def addChunk(self, chunk):
+ """Add a newly parsed AutomakeChunk to this file."""
+ self.chunks.append(chunk)
+ self.by_type[chunk.kind.lower()] = chunk
+
+ def add_file(self, tor_fname, kind):
+ """Insert a file tor_fname of kind 'kind' to the appropriate
+ section of this file. Return True if we added it.
+
+ This function operates on paths relative to the top-level tor
+ directory.
+ """
+ if kind.lower() in self.by_type:
+ self.by_type[kind.lower()].insertMember(tor_fname)
+ return True
+ else:
+ return False
+
+ def dump(self, f):
+ """Write this file into a file 'f'."""
+ for chunk in self.chunks:
+ chunk.dump(f)
+
+def get_include_am_location(tor_fname):
+ """Find the right include.am file for introducing a new file
+ tor_fname. Return None if we can't guess one.
+
+ Note that this function is imperfect because our include.am layout is
+ not (yet) consistent.
+
+ This function operates on paths relative to the top-level tor directory.
+ """
+ # Strip src for pattern matching, but add it back when returning the path
+ src_fname = srcdir_file(tor_fname)
+ m = re.match(r'^(lib|core|feature|app)/([a-z0-9_]*)/', src_fname)
+ if m:
+ return "src/{}/{}/include.am".format(m.group(1),m.group(2))
+
+ if re.match(r'^test/', src_fname):
+ return "src/test/include.am"
+
+ return None
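+
+# (Illustrative: "src/feature/dirauth/ocelot.c" maps to
+# "src/feature/dirauth/include.am", test files map to "src/test/include.am",
+# and anything else yields None.)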
+
+def run(fname):
+ """
+ Create a new C file and H file corresponding to the filename "fname",
+ and add them to the corresponding include.am.
+
+ This function operates on paths relative to the top-level tor directory.
+ """
+
+ # Make sure we're in the top-level tor directory,
+ # which contains the src directory
+ if not os.path.isdir("src"):
+ raise RuntimeError("Could not find './src/'. "
+ "Run this script from the top-level tor source "
+ "directory.")
+
+ # And it looks like a tor/src directory
+ if not os.path.isfile("src/include.am"):
+ raise RuntimeError("Could not find './src/include.am'. "
+ "Run this script from the top-level tor source "
+ "directory.")
+
+ # Make the file name relative to the top-level tor directory
+ tor_fname = tordir_file(fname)
+ # And check that we're adding files to the "src" directory,
+ # with canonical paths
+ if tor_fname[:4] != "src/":
+ raise ValueError("Requested file path '{}' canonicalized to '{}', "
+ "but the canonical path did not start with 'src/'. "
+ "Please add files to the src directory."
+ .format(fname, tor_fname))
+
+ c_tor_fname = makeext(tor_fname, "c")
+ h_tor_fname = makeext(tor_fname, "h")
+
+ if os.path.exists(c_tor_fname):
+ print("{} already exists".format(c_tor_fname))
+ return 1
+ if os.path.exists(h_tor_fname):
+ print("{} already exists".format(h_tor_fname))
+ return 1
+
+ with open(c_tor_fname, 'w') as f:
+ f.write(instantiate_template(C_FILE_TEMPLATE, c_tor_fname))
+
+ with open(h_tor_fname, 'w') as f:
+ f.write(instantiate_template(HEADER_TEMPLATE, h_tor_fname))
+
+ iam = get_include_am_location(c_tor_fname)
+ if iam is None or not os.path.exists(iam):
+ print("Made files successfully but couldn't identify include.am for {}"
+ .format(c_tor_fname))
+ return 1
+
+ amfile = ParsedAutomake()
+ cur_chunk = AutomakeChunk()
+ with open(iam) as f:
+ for line in f:
+ if cur_chunk.addLine(line):
+ amfile.addChunk(cur_chunk)
+ cur_chunk = AutomakeChunk()
+ amfile.addChunk(cur_chunk)
+
+ amfile.add_file(c_tor_fname, "sources")
+ amfile.add_file(h_tor_fname, "headers")
+
+ with open(iam+".tmp", 'w') as f:
+ amfile.dump(f)
+
+ os.rename(iam+".tmp", iam)
+
+if __name__ == '__main__':
+ import sys
+ sys.exit(run(sys.argv[1]))
diff --git a/scripts/maint/annotate_ifdef_directives b/scripts/maint/annotate_ifdef_directives
deleted file mode 100755
index ca267a865e..0000000000
--- a/scripts/maint/annotate_ifdef_directives
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017-2019, The Tor Project, Inc.
-# See LICENSE for licensing information
-
-import re
-
-LINE_OBVIOUSNESS_LIMIT = 4
-
-class Problem(Exception):
- pass
-
-def uncomment(s):
- s = re.sub(r'//.*','',s)
- s = re.sub(r'/\*.*','',s)
- return s.strip()
-
-def translate(f_in, f_out):
- whole_file = []
- stack = []
- cur_level = whole_file
- lineno = 0
- for line in f_in:
- lineno += 1
- m = re.match(r'\s*#\s*(if|ifdef|ifndef|else|endif|elif)\b\s*(.*)',
- line)
- if not m:
- f_out.write(line)
- continue
- command,rest = m.groups()
- if command in ("if", "ifdef", "ifndef"):
- # The #if directive pushes us one level lower on the stack.
- if command == 'ifdef':
- rest = "defined(%s)"%uncomment(rest)
- elif command == 'ifndef':
- rest = "!defined(%s)"%uncomment(rest)
- elif rest.endswith("\\"):
- rest = rest[:-1]+"..."
-
- rest = uncomment(rest)
-
- new_level = [ (command, rest, lineno) ]
- stack.append(cur_level)
- cur_level = new_level
- f_out.write(line)
- elif command in ("else", "elif"):
- if len(cur_level) == 0 or cur_level[-1][0] == 'else':
- raise Problem("Unexpected #%s on %d"% (command,lineno))
- if (len(cur_level) == 1 and command == 'else' and
- lineno > cur_level[0][2] + LINE_OBVIOUSNESS_LIMIT):
- f_out.write("#else /* !(%s) */\n"%cur_level[0][1])
- else:
- f_out.write(line)
- cur_level.append((command, rest, lineno))
- else:
- assert command == 'endif'
- if len(stack) == 0:
- raise Problem("Unmatched #%s on %s"% (command,lineno))
- if lineno <= cur_level[0][2] + LINE_OBVIOUSNESS_LIMIT:
- f_out.write(line)
- elif len(cur_level) == 1 or (
- len(cur_level) == 2 and cur_level[1][0] == 'else'):
- f_out.write("#endif /* %s */\n"%cur_level[0][1])
- else:
- f_out.write("#endif /* %s || ... */\n"%cur_level[0][1])
- cur_level = stack.pop()
- if len(stack) or cur_level != whole_file:
- raise Problem("Missing #endif")
-
-import sys,os
-for fn in sys.argv[1:]:
- with open(fn+"_OUT", 'w') as output_file:
- translate(open(fn, 'r'), output_file)
- os.rename(fn+"_OUT", fn)
-
diff --git a/scripts/maint/annotate_ifdef_directives.py b/scripts/maint/annotate_ifdef_directives.py
new file mode 100755
index 0000000000..9ca090d595
--- /dev/null
+++ b/scripts/maint/annotate_ifdef_directives.py
@@ -0,0 +1,322 @@
+#!/usr/bin/env python
+# Copyright (c) 2017-2019, The Tor Project, Inc.
+# See LICENSE for licensing information
+
+r"""
+This script iterates over a list of C files. For each file, it looks at the
+#if/#else C macros, and annotates them with comments explaining what they
+match.
+
+For example, it replaces this kind of input...
+
+>>> INPUT = '''
+... #ifdef HAVE_OCELOT
+... C code here
+... #if MIMSY == BOROGROVE
+... block 1
+... block 1
+... block 1
+... block 1
+... #else
+... block 2
+... block 2
+... block 2
+... block 2
+... #endif
+... #endif
+... '''
+
+With this kind of output:
+>>> EXPECTED_OUTPUT = '''
+... #ifdef HAVE_OCELOT
+... C code here
+... #if MIMSY == BOROGROVE
+... block 1
+... block 1
+... block 1
+... block 1
+... #else /* !(MIMSY == BOROGROVE) */
+... block 2
+... block 2
+... block 2
+... block 2
+... #endif /* MIMSY == BOROGROVE */
+... #endif /* defined(HAVE_OCELOT) */
+... '''
+
+Here's how to use it:
+>>> import sys
+>>> if sys.version_info.major < 3: from cStringIO import StringIO
+>>> if sys.version_info.major >= 3: from io import StringIO
+
+>>> OUTPUT = StringIO()
+>>> translate(StringIO(INPUT), OUTPUT)
+>>> assert OUTPUT.getvalue() == EXPECTED_OUTPUT
+
+Note that only #else and #endif lines are annotated. Existing comments
+on those lines are removed.
+"""
+
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import re
+
+# Any block with fewer than this many lines does not need annotations.
+LINE_OBVIOUSNESS_LIMIT = 4
+
+# Maximum line width. This includes a terminating newline character.
+#
+# (This is the maximum before encoding, so that if the operating system
+# uses multiple characters to encode newline, that's still okay.)
+LINE_WIDTH=80
+
+class Problem(Exception):
+ pass
+
+def close_parens_needed(expr):
+ """Return the number of left-parentheses needed to make 'expr'
+ balanced.
+
+ >>> close_parens_needed("1+2")
+ 0
+ >>> close_parens_needed("(1 + 2)")
+ 0
+ >>> close_parens_needed("(1 + 2")
+ 1
+ >>> close_parens_needed("(1 + (2 *")
+ 2
+ >>> close_parens_needed("(1 + (2 * 3) + (4")
+ 2
+ """
+ return expr.count("(") - expr.count(")")
+
+def truncate_expression(expr, new_width):
+ """Given a parenthesized C expression in 'expr', try to return a new
+ expression that is similar to 'expr', but no more than 'new_width'
+ characters long.
+
+ Try to return an expression with balanced parentheses.
+
+ >>> truncate_expression("1+2+3", 8)
+ '1+2+3'
+ >>> truncate_expression("1+2+3+4+5", 8)
+ '1+2+3...'
+ >>> truncate_expression("(1+2+3+4)", 8)
+ '(1+2...)'
+ >>> truncate_expression("(1+(2+3+4))", 8)
+ '(1+...)'
+ >>> truncate_expression("(((((((((", 8)
+ '((...))'
+ """
+ if len(expr) <= new_width:
+ # The expression is already short enough.
+ return expr
+
+ ellipsis = "..."
+
+ # Start this at the minimum that we might truncate.
+ n_to_remove = len(expr) + len(ellipsis) - new_width
+
+ # Try removing characters, one by one, until we get something where
+ # re-balancing the parentheses still fits within the limit.
+ while n_to_remove < len(expr):
+ truncated = expr[:-n_to_remove] + ellipsis
+ truncated += ")" * close_parens_needed(truncated)
+ if len(truncated) <= new_width:
+ return truncated
+ n_to_remove += 1
+
+ return ellipsis
+
+def commented_line(fmt, argument, maxwidth=LINE_WIDTH):
+ # (This is a raw docstring so that our doctests can use \.)
+ r"""
+ Return fmt%argument, for use as a commented line. If the line would
+ be longer than maxwidth, truncate argument but try to keep its
+ parentheses balanced.
+
+ Requires that fmt%"..." will fit into maxwidth characters.
+
+ Requires that fmt ends with a newline.
+
+ >>> commented_line("/* %s */\n", "hello world", 32)
+ '/* hello world */\n'
+ >>> commented_line("/* %s */\n", "hello world", 15)
+ '/* hello... */\n'
+ >>> commented_line("#endif /* %s */\n", "((1+2) && defined(FOO))", 32)
+ '#endif /* ((1+2) && defi...) */\n'
+
+
+ The default line limit is 80 characters including the newline:
+
+ >>> long_argument = "long " * 100
+ >>> long_line = commented_line("#endif /* %s */\n", long_argument)
+ >>> len(long_line)
+ 80
+
+ >>> long_line[:40]
+ '#endif /* long long long long long long '
+ >>> long_line[40:]
+ 'long long long long long long lon... */\n'
+
+ If a line works out to being 80 characters naturally, it isn't truncated,
+ and no ellipsis is added.
+
+ >>> medium_argument = "a"*66
+ >>> medium_line = commented_line("#endif /* %s */\n", medium_argument)
+ >>> len(medium_line)
+ 80
+ >>> "..." in medium_line
+ False
+ >>> medium_line[:40]
+ '#endif /* aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
+ >>> medium_line[40:]
+ 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa */\n'
+
+
+ """
+ assert fmt.endswith("\n")
+ result = fmt % argument
+ if len(result) <= maxwidth:
+ return result
+ else:
+ # How long can we let the argument be? Try filling in the
+ # format with an empty argument to find out.
+ max_arg_width = maxwidth - len(fmt % "")
+ result = fmt % truncate_expression(argument, max_arg_width)
+ assert len(result) <= maxwidth
+ return result
+
+def negate(expr):
+ """Return a negated version of expr; try to avoid double-negation.
+
+ We usually wrap expressions in parentheses and add a "!".
+ >>> negate("A && B")
+ '!(A && B)'
+
+ But if we recognize the expression as negated, we can restore it.
+ >>> negate(negate("A && B"))
+ 'A && B'
+
+ The same applies for defined(FOO).
+ >>> negate("defined(FOO)")
+ '!defined(FOO)'
+ >>> negate(negate("defined(FOO)"))
+ 'defined(FOO)'
+
+ Internal parentheses don't confuse us:
+ >>> negate("!(FOO) && !(BAR)")
+ '!(!(FOO) && !(BAR))'
+
+ """
+ expr = expr.strip()
+ # See whether we match !(...), with no intervening close-parens.
+ m = re.match(r'^!\s*\(([^\)]*)\)$', expr)
+ if m:
+ return m.group(1)
+
+
+ # See whether we match !?defined(...), with no intervening close-parens.
+ m = re.match(r'^(!?)\s*(defined\([^\)]*\))$', expr)
+ if m:
+ if m.group(1) == "!":
+ prefix = ""
+ else:
+ prefix = "!"
+ return prefix + m.group(2)
+
+ return "!(%s)" % expr
+
+def uncomment(s):
+ """
+ Remove existing trailing comments from an #else or #endif line.
+ """
+ s = re.sub(r'//.*','',s)
+ s = re.sub(r'/\*.*','',s)
+ return s.strip()
+
+def translate(f_in, f_out):
+ """
+ Read a file from f_in, and write its annotated version to f_out.
+ """
+ # A stack listing our current if/else state. Each member of the stack
+ # is a list of directives. Each directive is a 3-tuple of
+ # (command, rest, lineno)
+ # where "command" is one of if/ifdef/ifndef/else/elif, and where
+ # "rest" is an expression in a format suitable for use with #if, and where
+ # lineno is the line number where the directive occurred.
+ stack = []
+ # the stack element corresponding to the top level of the file.
+ whole_file = []
+ cur_level = whole_file
+ lineno = 0
+ for line in f_in:
+ lineno += 1
+ m = re.match(r'\s*#\s*(if|ifdef|ifndef|else|endif|elif)\b\s*(.*)',
+ line)
+ if not m:
+ # no directive, so we can just write it out.
+ f_out.write(line)
+ continue
+ command,rest = m.groups()
+ if command in ("if", "ifdef", "ifndef"):
+ # The #if directive pushes us one level lower on the stack.
+ if command == 'ifdef':
+ rest = "defined(%s)"%uncomment(rest)
+ elif command == 'ifndef':
+ rest = "!defined(%s)"%uncomment(rest)
+ elif rest.endswith("\\"):
+ rest = rest[:-1]+"..."
+
+ rest = uncomment(rest)
+
+ new_level = [ (command, rest, lineno) ]
+ stack.append(cur_level)
+ cur_level = new_level
+ f_out.write(line)
+ elif command in ("else", "elif"):
+ # We stay at the same level on the stack. If we have an #else,
+ # we comment it.
+ if len(cur_level) == 0 or cur_level[-1][0] == 'else':
+ raise Problem("Unexpected #%s on %d"% (command,lineno))
+ if (len(cur_level) == 1 and command == 'else' and
+ lineno > cur_level[0][2] + LINE_OBVIOUSNESS_LIMIT):
+ f_out.write(commented_line("#else /* %s */\n",
+ negate(cur_level[0][1])))
+ else:
+ f_out.write(line)
+ cur_level.append((command, rest, lineno))
+ else:
+ # We pop one element on the stack, and comment an endif.
+ assert command == 'endif'
+ if len(stack) == 0:
+ raise Problem("Unmatched #%s on %s"% (command,lineno))
+ if lineno <= cur_level[0][2] + LINE_OBVIOUSNESS_LIMIT:
+ f_out.write(line)
+ elif len(cur_level) == 1 or (
+ len(cur_level) == 2 and cur_level[1][0] == 'else'):
+ f_out.write(commented_line("#endif /* %s */\n",
+ cur_level[0][1]))
+ else:
+ f_out.write(commented_line("#endif /* %s || ... */\n",
+ cur_level[0][1]))
+ cur_level = stack.pop()
+ if len(stack) or cur_level != whole_file:
+ raise Problem("Missing #endif")
+
+if __name__ == '__main__':
+
+ import sys,os
+
+ if sys.argv[1] == "--self-test":
+ import doctest
+ doctest.testmod()
+ sys.exit(0)
+
+ for fn in sys.argv[1:]:
+ with open(fn+"_OUT", 'w') as output_file:
+ translate(open(fn, 'r'), output_file)
+ os.rename(fn+"_OUT", fn)
diff --git a/scripts/maint/checkIncludes.py b/scripts/maint/checkIncludes.py
index 46a3f39638..ae0ccb9e12 100755
--- a/scripts/maint/checkIncludes.py
+++ b/scripts/maint/checkIncludes.py
@@ -1,115 +1,19 @@
-#!/usr/bin/python3
+#!/usr/bin/env python
# Copyright 2018 The Tor Project, Inc. See LICENSE file for licensing info.
-"""This script looks through all the directories for files matching *.c or
- *.h, and checks their #include directives to make sure that only "permitted"
- headers are included.
-
- Any #include directives with angle brackets (like #include <stdio.h>) are
- ignored -- only directives with quotes (like #include "foo.h") are
- considered.
-
- To decide what includes are permitted, this script looks at a .may_include
- file in each directory. This file contains empty lines, #-prefixed
- comments, filenames (like "lib/foo/bar.h") and file globs (like lib/*/*.h)
- for files that are permitted.
-"""
-
+# The functionality of this file has moved to practracker/includes.py.
+# This is a stub file that exists so that older git hooks will know
+# where to look.
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
from __future__ import print_function
+from __future__ import unicode_literals
-import fnmatch
-import os
-import re
-import sys
-
-# Global: Have there been any errors?
-trouble = False
-
-if sys.version_info[0] <= 2:
- def open_file(fname):
- return open(fname, 'r')
-else:
- def open_file(fname):
- return open(fname, 'r', encoding='utf-8')
-
-def err(msg):
- """ Declare that an error has happened, and remember that there has
- been an error. """
- global trouble
- trouble = True
- print(msg, file=sys.stderr)
-
-def fname_is_c(fname):
- """ Return true iff 'fname' is the name of a file that we should
- search for possibly disallowed #include directives. """
- return fname.endswith(".h") or fname.endswith(".c")
-
-INCLUDE_PATTERN = re.compile(r'\s*#\s*include\s+"([^"]*)"')
-RULES_FNAME = ".may_include"
-
-class Rules(object):
- """ A 'Rules' object is the parsed version of a .may_include file. """
- def __init__(self, dirpath):
- self.dirpath = dirpath
- self.patterns = []
- self.usedPatterns = set()
-
- def addPattern(self, pattern):
- self.patterns.append(pattern)
-
- def includeOk(self, path):
- for pattern in self.patterns:
- if fnmatch.fnmatchcase(path, pattern):
- self.usedPatterns.add(pattern)
- return True
- return False
-
- def applyToLines(self, lines, context=""):
- lineno = 0
- for line in lines:
- lineno += 1
- m = INCLUDE_PATTERN.match(line)
- if m:
- include = m.group(1)
- if not self.includeOk(include):
- err("Forbidden include of {} on line {}{}".format(
- include, lineno, context))
-
- def applyToFile(self, fname):
- with open_file(fname) as f:
- #print(fname)
- self.applyToLines(iter(f), " of {}".format(fname))
-
- def noteUnusedRules(self):
- for p in self.patterns:
- if p not in self.usedPatterns:
- print("Pattern {} in {} was never used.".format(p, self.dirpath))
-
-def load_include_rules(fname):
- """ Read a rules file from 'fname', and return it as a Rules object. """
- result = Rules(os.path.split(fname)[0])
- with open_file(fname) as f:
- for line in f:
- line = line.strip()
- if line.startswith("#") or not line:
- continue
- result.addPattern(line)
- return result
-
-list_unused = False
+import sys, os
-for dirpath, dirnames, fnames in os.walk("src"):
- if ".may_include" in fnames:
- rules = load_include_rules(os.path.join(dirpath, RULES_FNAME))
- for fname in fnames:
- if fname_is_c(fname):
- rules.applyToFile(os.path.join(dirpath,fname))
- if list_unused:
- rules.noteUnusedRules()
+dirname = os.path.split(sys.argv[0])[0]
+new_location = os.path.join(dirname, "practracker", "includes.py")
+python = sys.executable
-if trouble:
- err(
-"""To change which includes are allowed in a C file, edit the {}
-files in its enclosing directory.""".format(RULES_FNAME))
- sys.exit(1)
+os.execl(python, python, new_location, *sys.argv[1:])
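+
+# (Note: os.execl above replaces this stub process with the new script, so
+# callers see the exit status of practracker/includes.py directly.)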
diff --git a/scripts/maint/checkManpageAlpha.py b/scripts/maint/checkManpageAlpha.py
new file mode 100755
index 0000000000..70421c2fd1
--- /dev/null
+++ b/scripts/maint/checkManpageAlpha.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+
+import difflib
+import re
+import sys
+
+# Assume we only use the "== Section Name" section title syntax
+sectionheader_re = re.compile(r'^==+\s(.*)\s*$')
+
+# Assume we only use the "[[ItemName]]" anchor syntax
+anchor_re = re.compile(r'^\[\[([^]]+)\]\]')
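+# (Illustrative manpage fragment that these patterns expect, with made-up
+# names:
+#    == GENERAL OPTIONS
+#    [[BandwidthRate]] **BandwidthRate** ...
+# Each section header starts a new item list, and the anchors name the items
+# whose alphabetical order is checked.)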
+
+class Reader(object):
+ def __init__(self):
+ self.d = {}
+ # Initial state is to gather section headers
+ self.getline = self._getsec
+ self.section = None
+
+ def _getsec(self, line):
+ """Read a section header
+
+ Prepare to gather anchors from subsequent lines. Don't change
+ state if the line isn't a section header.
+ """
+ m = sectionheader_re.match(line)
+ if not m:
+ return
+ self.anchors = anchors = []
+ self.d[m.group(1)] = anchors
+ self.getline = self._getanchor
+
+ def _getanchor(self, line):
+ """Read an anchor for an item definition
+
+ Append the anchor names to the list of items in the current
+ section.
+ """
+ m = anchor_re.match(line)
+ if not m:
+ return self._getsec(line)
+ self.anchors.append(m.group(1))
+
+ def diffsort(self, key):
+ """Unified diff of unsorted and sorted item lists
+ """
+ # Append newlines because difflib works better with them
+ a = [s + '\n' for s in self.d[key]]
+ b = sorted(a, key=str.lower)
+ return difflib.unified_diff(a, b, fromfile=key+' unsorted',
+ tofile=key+' sorted')
+
+def main():
+ """Diff unsorted and sorted lists of option names in a manpage
+
+ Use the file named by the first argument, or standard input if
+ there is none.
+ """
+ try:
+ fname = sys.argv[1]
+ f = open(fname, 'r')
+ except IndexError:
+ f = sys.stdin
+
+ reader = Reader()
+ for line in f:
+ reader.getline(line)
+ for key in sorted(reader.d.keys(), key=str.lower):
+ sys.stdout.writelines(reader.diffsort(key))
+
+if __name__ == '__main__':
+ main()
diff --git a/scripts/maint/checkShellScripts.sh b/scripts/maint/checkShellScripts.sh
new file mode 100755
index 0000000000..0a423be29e
--- /dev/null
+++ b/scripts/maint/checkShellScripts.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2019 The Tor Project, Inc.
+# See LICENSE for license information
+#
+# checkShellScripts.sh
+# --------------------
+# If shellcheck is installed, check all the shell scripts that we can fix.
+
+set -e
+
+# Only run this script if shellcheck is installed
+# "command -v" prints the path to shellcheck, which is useful diagnostic output
+if ! command -v shellcheck; then
+ printf "%s: Install shellcheck to check shell scripts.\\n" "$0"
+ exit 0
+fi
+
+# Some platforms don't have realpath
+if command -v realpath ; then
+ HERE=$(dirname "$(realpath "$0")")
+else
+ HERE=$(dirname "$0")
+ if [ ! -d "$HERE" ] || [ "$HERE" = "." ]; then
+ HERE=$(dirname "$PWD/$0")
+ fi
+fi
+TOPLEVEL=$(dirname "$(dirname "$HERE")")
+
+# Check we actually have a tor/src directory
+if [ ! -d "$TOPLEVEL/src" ]; then
+ printf "Error: Couldn't find src directory in expected location: %s\\n" \
+ "$TOPLEVEL/src"
+ exit 1
+fi
+
+# Remove obsolete scripts generated from older versions of Tor
+rm -f "$TOPLEVEL/contrib/dist/suse/tor.sh" "$TOPLEVEL/contrib/dist/tor.sh"
+
+# Check *.sh scripts, but ignore the ones that we can't fix
+find "$TOPLEVEL/contrib" "$TOPLEVEL/doc" "$TOPLEVEL/scripts" "$TOPLEVEL/src" \
+ -name "*.sh" \
+ -not -path "$TOPLEVEL/src/ext/*" \
+ -not -path "$TOPLEVEL/src/rust/registry/*" \
+ -exec shellcheck {} +
+
+# Check scripts that aren't named *.sh
+if [ -d "$TOPLEVEL/scripts/test" ]; then
+ shellcheck \
+ "$TOPLEVEL/scripts/test/cov-diff" \
+ "$TOPLEVEL/scripts/test/coverage"
+fi
+if [ -e \
+ "$TOPLEVEL/contrib/dirauth-tools/nagios-check-tor-authority-cert" \
+ ]; then
+ shellcheck \
+ "$TOPLEVEL/contrib/dirauth-tools/nagios-check-tor-authority-cert"
+fi
+if [ -e "$TOPLEVEL/contrib/client-tools/torify" ]; then
+ shellcheck "$TOPLEVEL/contrib/client-tools/torify"
+fi
+if [ -d "$TOPLEVEL/scripts/git" ]; then
+ shellcheck "$TOPLEVEL/scripts/git/"*.git-hook
+fi
diff --git a/scripts/maint/checkSpace.pl b/scripts/maint/checkSpace.pl
index 633b47e314..857ce6f6f1 100755
--- a/scripts/maint/checkSpace.pl
+++ b/scripts/maint/checkSpace.pl
@@ -4,9 +4,16 @@ use strict;
use warnings;
my $found = 0;
+my $COLON_POS = 10;
+
sub msg {
$found = 1;
- print "$_[0]";
+ my $v = shift;
+ $v =~ /^\s*([^:]+):(.*)$/;
+ chomp(my $errtype = $1);
+ my $rest = $2;
+ my $padding = ' ' x ($COLON_POS - length $errtype);
+ print "$padding$errtype:$rest\n";
}
my $C = 0;
@@ -16,8 +23,29 @@ if ($ARGV[0] =~ /^-/) {
$C = ($lang eq '-C');
}
+# hashmap of things where we allow spaces between them and (.
+our %allow_space_after= map {$_, 1} qw{
+ if while for switch return int unsigned elsif WINAPI
+ void __attribute__ op size_t double uint64_t
+ bool ssize_t
+ workqueue_reply_t hs_desc_decode_status_t
+ PRStatus
+ SMARTLIST_FOREACH_BEGIN SMARTLIST_FOREACH_END
+ HT_FOREACH
+ DIGESTMAP_FOREACH_MODIFY DIGESTMAP_FOREACH
+ DIGEST256MAP_FOREACH_MODIFY DIGEST256MAP_FOREACH
+ STRMAP_FOREACH_MODIFY STRMAP_FOREACH
+ SDMAP_FOREACH EIMAP_FOREACH RIMAP_FOREACH
+ MAP_FOREACH_MODIFY MAP_FOREACH
+ TOR_SIMPLEQ_FOREACH TOR_SIMPLEQ_FOREACH_SAFE
+ TOR_LIST_FOREACH TOR_LIST_FOREACH_SAFE
+ TOR_SLIST_FOREACH TOR_SLIST_FOREACH_SAFE
+};
+
our %basenames = ();
+our %guardnames = ();
+
for my $fn (@ARGV) {
open(F, "$fn");
my $lastnil = 0;
@@ -27,27 +55,31 @@ for my $fn (@ARGV) {
my $basename = $fn;
$basename =~ s#.*/##;
if ($basenames{$basename}) {
- msg "Duplicate fnames: $fn and $basenames{$basename}.\n";
+ msg "dup fname:$fn (same as $basenames{$basename}).\n";
} else {
$basenames{$basename} = $fn;
}
+ my $isheader = ($fn =~ /\.h/);
+ my $seenguard = 0;
+ my $guardname = "<none>";
+
while (<F>) {
## Warn about windows-style newlines.
# (We insist on lines that end with a single LF character, not
# CR LF.)
if (/\r/) {
- msg " CR:$fn:$.\n";
+ msg "CR:$fn:$.\n";
}
## Warn about tabs.
# (We only use spaces)
if (/\t/) {
- msg " TAB:$fn:$.\n";
+ msg "TAB:$fn:$.\n";
}
## Warn about labels that don't have a space in front of them
# (We indent every label at least one space)
- if (/^[a-zA-Z_][a-zA-Z_0-9]*:/) {
- msg "nosplabel:$fn:$.\n";
- }
+ #if (/^[a-zA-Z_][a-zA-Z_0-9]*:/) {
+ # msg "nosplabel:$fn:$.\n";
+ #}
## Warn about trailing whitespace.
# (We don't allow whitespace at the end of the line; make your
# editor highlight it for you so you can stop adding it in.)
@@ -57,12 +89,12 @@ for my $fn (@ARGV) {
## Warn about control keywords without following space.
# (We put a space after every 'if', 'while', 'for', 'switch', etc)
if ($C && /\s(?:if|while|for|switch)\(/) {
- msg " KW(:$fn:$.\n";
+ msg "KW(:$fn:$.\n";
}
## Warn about #else #if instead of #elif.
# (We only allow #elif)
if (($lastline =~ /^\# *else/) and ($_ =~ /^\# *if/)) {
- msg " #else#if:$fn:$.\n";
+ msg "#else#if:$fn:$.\n";
}
## Warn about some K&R violations
# (We use K&R-style C, where open braces go on the same line as
@@ -77,19 +109,19 @@ for my $fn (@ARGV) {
msg "non-K&R {:$fn:$.\n";
}
if (/^\s*else/ and $lastline =~ /\}$/) {
- msg " }\\nelse:$fn:$.\n";
+ msg "}\\nelse:$fn:$.\n";
}
$lastline = $_;
## Warn about unnecessary empty lines.
# (Don't put an empty line before a line that contains nothing
# but a closing brace.)
if ($lastnil && /^\s*}\n/) {
- msg " UnnecNL:$fn:$.\n";
+ msg "UnnecNL:$fn:$.\n";
}
## Warn about multiple empty lines.
# (At most one blank line in a row.)
if ($lastnil && /^$/) {
- msg " DoubleNL:$fn:$.\n";
+ msg "DoubleNL:$fn:$.\n";
} elsif (/^$/) {
$lastnil = 1;
} else {
@@ -98,8 +130,8 @@ for my $fn (@ARGV) {
## Terminals are still 80 columns wide in my world. I refuse to
## accept double-line lines.
# (Don't make lines wider than 80 characters, including newline.)
- if (/^.{80}/) {
- msg " Wide:$fn:$.\n";
+ if (/^.{80}/ and not /LCOV_EXCL/) {
+ msg "Wide:$fn:$.\n";
}
### Juju to skip over comments and strings, since the tests
### we're about to do are okay there.
@@ -112,6 +144,23 @@ for my $fn (@ARGV) {
next;
}
}
+
+ if ($isheader) {
+ if ($seenguard == 0) {
+ if (/^\s*\#\s*ifndef\s+(\S+)/) {
+ ++$seenguard;
+ $guardname = $1;
+ }
+ } elsif ($seenguard == 1) {
+ if (/^\s*\#\s*define\s+(\S+)/) {
+ ++$seenguard;
+ if ($1 ne $guardname) {
+ msg "GUARD:$fn:$.: Header guard macro mismatch.\n";
+ }
+ }
+ }
+ }
+
if (m!/\*.*?\*/!) {
s!\s*/\*.*?\*/!!;
} elsif (m!/\*!) {
@@ -121,41 +170,34 @@ for my $fn (@ARGV) {
}
s!"(?:[^\"]+|\\.)*"!"X"!g;
next if /^\#/;
- ## Warn about C++-style comments.
- # (Use C style comments only.)
+ ## Skip C++-style comments.
if (m!//!) {
- # msg " //:$fn:$.\n";
+ # msg "//:$fn:$.\n";
s!//.*!!;
}
- ## Warn about unquoted braces preceded by non-space.
- # (No character except a space should come before a {)
- if (/([^\s'])\{/) {
- msg " $1\{:$fn:$.\n";
+ ## Warn about unquoted braces preceded by an unexpected character.
+ if (/([^\s'\)\(\{])\{/) {
+ msg "$1\{:$fn:$.\n";
}
## Warn about double semi-colons at the end of a line.
if (/;;$/) {
- msg " double semi-colons at the end of $. in $fn\n"
+ msg ";;:$fn:$.\n"
}
## Warn about multiple internal spaces.
#if (/[^\s,:]\s{2,}[^\s\\=]/) {
- # msg " X X:$fn:$.\n";
+ # msg "X X:$fn:$.\n";
#}
## Warn about { with stuff after.
#s/\s+$//;
#if (/\{[^\}\\]+$/) {
- # msg " {X:$fn:$.\n";
+ # msg "{X:$fn:$.\n";
#}
## Warn about function calls with space before parens.
# (Don't put a space between the name of a function and its
# arguments.)
if (/(\w+)\s\(([A-Z]*)/) {
- if ($1 ne "if" and $1 ne "while" and $1 ne "for" and
- $1 ne "switch" and $1 ne "return" and $1 ne "int" and
- $1 ne "elsif" and $1 ne "WINAPI" and $2 ne "WINAPI" and
- $1 ne "void" and $1 ne "__attribute__" and $1 ne "op" and
- $1 ne "size_t" and $1 ne "double" and $1 ne "uint64_t" and
- $1 ne "workqueue_reply_t") {
- msg " fn ():$fn:$.\n";
+ if (! $allow_space_after{$1} && $2 ne 'WINAPI') {
+ msg "fn ():$fn:$.\n";
}
}
## Warn about functions not declared at start of line.
@@ -165,8 +207,8 @@ for my $fn (@ARGV) {
if ($in_func_head ||
($fn !~ /\.h$/ && /^[a-zA-Z0-9_]/ &&
! /^(?:const |static )*(?:typedef|struct|union)[^\(]*$/ &&
- ! /= *\{$/ && ! /;$/)) {
- if (/.\{$/){
+ ! /= *\{$/ && ! /;$/) && ! /^[a-zA-Z0-9_]+\s*:/) {
+ if (/[^,\s]\s*\{$/){
msg "fn() {:$fn:$.\n";
$in_func_head = 0;
} elsif (/^\S[^\(]* +\**[a-zA-Z0-9_]+\(/) {
@@ -184,23 +226,32 @@ for my $fn (@ARGV) {
## Check for forbidden functions except when they are
# explicitly permitted
if (/\bassert\(/ && not /assert OK/) {
- msg "assert :$fn:$. (use tor_assert)\n";
+ msg "assert:$fn:$. (use tor_assert)\n";
}
if (/\bmemcmp\(/ && not /memcmp OK/) {
- msg "memcmp :$fn:$. (use {tor,fast}_mem{eq,neq,cmp}\n";
+ msg "memcmp:$fn:$. (use {tor,fast}_mem{eq,neq,cmp}\n";
}
# always forbidden.
if (not /\ OVERRIDE\ /) {
if (/\bstrcat\(/ or /\bstrcpy\(/ or /\bsprintf\(/) {
- msg "$& :$fn:$.\n";
+ msg "$&:$fn:$.\n";
}
if (/\bmalloc\(/ or /\bfree\(/ or /\brealloc\(/ or
/\bstrdup\(/ or /\bstrndup\(/ or /\bcalloc\(/) {
- msg "$& :$fn:$. (use tor_malloc, tor_free, etc)\n";
+ msg "$&:$fn:$. (use tor_malloc, tor_free, etc)\n";
}
}
}
}
+ if ($isheader && $C) {
+ if ($seenguard < 2) {
+ msg "noguard:$fn (No #ifndef/#define header guard pair found)\n";
+ } elsif ($guardnames{$guardname}) {
+ msg "dupguard:$fn (Guard macro $guardname also used in $guardnames{$guardname})\n";
+ } else {
+ $guardnames{$guardname} = $fn;
+ }
+ }
close(F);
}
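
Note: the header-guard state machine is the most substantial addition to
checkSpace.pl above. Here is the same logic as a small Python sketch: the
message strings and the 0 -> 1 -> 2 "seenguard" progression mirror the
Perl, while the standalone check_guard() function itself is illustrative.

    # Sketch: a header must open with "#ifndef MACRO" followed by a
    # "#define" of the same macro; reaching EOF with seenguard < 2
    # means no guard pair was found.
    import re

    def check_guard(lines, fname):
        seenguard = 0
        guardname = "<none>"
        for num, line in enumerate(lines, 1):
            if seenguard == 0:
                m = re.match(r"\s*#\s*ifndef\s+(\S+)", line)
                if m:
                    seenguard = 1
                    guardname = m.group(1)
            elif seenguard == 1:
                m = re.match(r"\s*#\s*define\s+(\S+)", line)
                if m:
                    seenguard = 2
                    if m.group(1) != guardname:
                        print("GUARD:%s:%d: Header guard macro mismatch."
                              % (fname, num))
        if seenguard < 2:
            print("noguard:%s (No #ifndef/#define header guard pair found)"
                  % fname)
        return guardname

    # A well-guarded header (like good_guard.h below) produces no output:
    check_guard(["#ifndef GUARD_MACRO_H", "#define GUARD_MACRO_H",
                 "int bar(void);", "#endif"], "good_guard.h")
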
diff --git a/scripts/maint/checkSpaceTest.sh b/scripts/maint/checkSpaceTest.sh
new file mode 100755
index 0000000000..e1d207a1a8
--- /dev/null
+++ b/scripts/maint/checkSpaceTest.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# Copyright 2019, The Tor Project, Inc.
+# See LICENSE for licensing information
+
+# Integration test for checkSpace.pl, which we want to rewrite.
+
+umask 077
+set -e
+
+# Skip this test if we're running on Windows; we expect line-ending
+# issues in that case.
+case "$(uname -s)" in
+ CYGWIN*) WINDOWS=1;;
+ MINGW*) WINDOWS=1;;
+ MSYS*) WINDOWS=1;;
+ *) WINDOWS=0;;
+esac
+if test "$WINDOWS" = 1; then
+ # This magic value tells automake that the test has been skipped.
+ exit 77
+fi
+
+# make a safe space for temporary files
+DATA_DIR=$(mktemp -d -t tor_checkspace_tests.XXXXXX)
+trap 'rm -rf "$DATA_DIR"' 0
+
+RECEIVED_FNAME="${DATA_DIR}/got.txt"
+
+cd "$(dirname "$0")/checkspace_tests"
+
+# we expect this to give an error code.
+../checkSpace.pl -C ./*.[ch] ./*/*.[ch] > "${RECEIVED_FNAME}" && exit 1
+
+diff -u expected.txt "${RECEIVED_FNAME}" || exit 1
+
+echo "OK"
diff --git a/scripts/maint/checkspace_tests/dubious.c b/scripts/maint/checkspace_tests/dubious.c
new file mode 100644
index 0000000000..59c5f8e4fe
--- /dev/null
+++ b/scripts/maint/checkspace_tests/dubious.c
@@ -0,0 +1,83 @@
+
+// The { coming up should be on its own line.
+int
+foo(void) {
+ // There should be a space before (1)
+ if(1) x += 1;
+
+ // The following empty line is unnecessary.
+
+}
+
+
+// There should be a newline between void and bar.
+void bar(void)
+{
+ // too wide:
+ testing("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
+}
+
+long
+bad_spacing()
+{
+ // here comes a tab
+ return 2;
+ // here comes a label without space:
+foo:
+ ;
+}
+
+// Here comes a CR:
+
+// Trailing space:
+
+int
+non_k_and_r(void)
+{
+ // non-k&r
+ if (foo)
+ {
+ // double-semi
+ return 1;;
+ }
+ else
+ {
+ return 2;
+ }
+}
+
+// #else #if causes a warning.
+#if 1
+#else
+#if 2
+#else
+#endif
+#endif
+
+// always space before a brace.
+foo{
+}
+
+void
+unexpected_space(void)
+{
+ // This space gives a warning.
+ foobar (77);
+}
+
+void
+bad_function_calls(long)
+{
+ // These are forbidden:
+ assert(1);
+ memcmp("a","b",1);
+ strcat(foo,x);
+ strcpy(foo,y);
+ sprintf(foo,"x");
+ malloc(7);
+ free(p);
+ realloc(p);
+ strdup(s);
+ strndup(s,10);
+ calloc(a,b);
+}
diff --git a/scripts/maint/checkspace_tests/dubious.h b/scripts/maint/checkspace_tests/dubious.h
new file mode 100644
index 0000000000..744ec33955
--- /dev/null
+++ b/scripts/maint/checkspace_tests/dubious.h
@@ -0,0 +1,4 @@
+
+// no guards.
+
+int foo(int);
diff --git a/scripts/maint/checkspace_tests/expected.txt b/scripts/maint/checkspace_tests/expected.txt
new file mode 100644
index 0000000000..38595ed373
--- /dev/null
+++ b/scripts/maint/checkspace_tests/expected.txt
@@ -0,0 +1,30 @@
+ fn() {:./dubious.c:4
+ KW(:./dubious.c:6
+ UnnecNL:./dubious.c:10
+ DoubleNL:./dubious.c:12
+ tp fn():./dubious.c:15
+ Wide:./dubious.c:17
+ TAB:./dubious.c:24
+ CR:./dubious.c:30
+ Space@EOL:./dubious.c:32
+ non-K&R {:./dubious.c:39
+ ;;:./dubious.c:41
+ }\nelse:./dubious.c:43
+ #else#if:./dubious.c:52
+ o{:./dubious.c:58
+ fn() {:./dubious.c:58
+ fn ():./dubious.c:65
+ assert:./dubious.c:72 (use tor_assert)
+ memcmp:./dubious.c:73 (use {tor,fast}_mem{eq,neq,cmp})
+ strcat(:./dubious.c:74
+ strcpy(:./dubious.c:75
+ sprintf(:./dubious.c:76
+ malloc(:./dubious.c:77 (use tor_malloc, tor_free, etc)
+ free(:./dubious.c:78 (use tor_malloc, tor_free, etc)
+ realloc(:./dubious.c:79 (use tor_malloc, tor_free, etc)
+ strdup(:./dubious.c:80 (use tor_malloc, tor_free, etc)
+ strndup(:./dubious.c:81 (use tor_malloc, tor_free, etc)
+ calloc(:./dubious.c:82 (use tor_malloc, tor_free, etc)
+ noguard:./dubious.h (No #ifndef/#define header guard pair found)
+ dupguard:./same_guard.h (Guard macro GUARD_MACRO_H also used in ./good_guard.h)
+ dup fname:./subdir/dubious.c (same as ./dubious.c).
diff --git a/scripts/maint/checkspace_tests/good_guard.h b/scripts/maint/checkspace_tests/good_guard.h
new file mode 100644
index 0000000000..b792912d90
--- /dev/null
+++ b/scripts/maint/checkspace_tests/good_guard.h
@@ -0,0 +1,6 @@
+#ifndef GUARD_MACRO_H
+#define GUARD_MACRO_H
+
+int bar(void);
+
+#endif
diff --git a/scripts/maint/checkspace_tests/same_guard.h b/scripts/maint/checkspace_tests/same_guard.h
new file mode 100644
index 0000000000..b792912d90
--- /dev/null
+++ b/scripts/maint/checkspace_tests/same_guard.h
@@ -0,0 +1,6 @@
+#ifndef GUARD_MACRO_H
+#define GUARD_MACRO_H
+
+int bar(void);
+
+#endif
diff --git a/scripts/maint/checkspace_tests/subdir/dubious.c b/scripts/maint/checkspace_tests/subdir/dubious.c
new file mode 100644
index 0000000000..7f22bf79bf
--- /dev/null
+++ b/scripts/maint/checkspace_tests/subdir/dubious.c
@@ -0,0 +1 @@
+// Nothing wrong with this file, but the name is a duplicate.
diff --git a/scripts/maint/clang-format.sh b/scripts/maint/clang-format.sh
new file mode 100755
index 0000000000..59832117b4
--- /dev/null
+++ b/scripts/maint/clang-format.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+# Copyright 2020, The Tor Project, Inc.
+# See LICENSE for licensing information.
+
+#
+# DO NOT COMMIT OR MERGE CODE THAT IS RUN THROUGH THIS TOOL YET.
+#
+# WE ARE STILL DISCUSSING OUR DESIRED STYLE AND ITERATING ON IT.
+# (12 Feb 2020)
+#
+
+# This script runs "clang-format" and "codetool" in sequence over each of
+# our source files, and replaces the original file only if it has changed.
+#
+# We can't just use clang-format -i, since we also want to use codetool to
+# reformat a few things back to how we want them, and we want to avoid
+# changing the mtime on files that didn't actually change.
+
+set -e
+
+cd "$(dirname "$0")/../../src/"
+
+# Shellcheck complains that a for loop over find's output is unreliable,
+# since there might be special characters in the output. But we happen
+# to know that none of our C files have special characters or spaces in
+# their names, so this is safe.
+#
+# shellcheck disable=SC2044
+for fname in $(find lib core feature app test tools -name '[^.]*.[ch]'); do
+ tmpfname="${fname}.clang_fmt.tmp"
+ rm -f "${tmpfname}"
+ clang-format --style=file "${fname}" > "${tmpfname}"
+ ../scripts/maint/codetool.py "${tmpfname}"
+ if cmp "${fname}" "${tmpfname}" >/dev/null 2>&1; then
+ echo "No change in ${fname}"
+ rm -f "${tmpfname}"
+ else
+ echo "Change in ${fname}"
+ mv "${tmpfname}" "${fname}"
+ fi
+done
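
Note: the temp-file-and-compare dance exists so that files whose
formatting is already canonical keep their mtime, and build systems do
not rebuild them. Here is a hedged Python sketch of the same idiom;
clang-format and codetool.py are the real tools invoked above, but
format_in_place() and its working-directory assumptions are illustrative.

    # Sketch: format into a temp file, post-process it, and only replace
    # the original when the bytes actually changed. Assumes the same
    # working directory as clang-format.sh (the src/ subtree).
    import os
    import subprocess

    def format_in_place(fname):
        tmpname = fname + ".clang_fmt.tmp"
        with open(tmpname, "w") as out:
            subprocess.run(["clang-format", "--style=file", fname],
                           stdout=out, check=True)
        subprocess.run(["../scripts/maint/codetool.py", tmpname], check=True)
        with open(fname) as a, open(tmpname) as b:
            changed = a.read() != b.read()
        if changed:
            os.rename(tmpname, fname)   # replace; mtime updates
        else:
            os.unlink(tmpname)          # no change; mtime preserved
        return changed
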
diff --git a/scripts/maint/codetool.py b/scripts/maint/codetool.py
new file mode 100755
index 0000000000..725712c0cc
--- /dev/null
+++ b/scripts/maint/codetool.py
@@ -0,0 +1,182 @@
+#!/usr/bin/env python3
+# Copyright (c) 2020, The Tor Project, Inc.
+# See LICENSE for licensing information.
+
+#
+# DO NOT COMMIT OR MERGE CODE THAT IS RUN THROUGH THIS TOOL YET.
+#
+# WE ARE STILL DISCUSSING OUR DESIRED STYLE AND ITERATING ON IT,
+# ALONG WITH THE TOOLS THAT ACHIEVE IT.
+# (12 Feb 2020)
+#
+
+"""
+ This program uses a set of pluggable filters to inspect and transform
+ our C code.
+"""
+
+import os
+import re
+import sys
+
+class Filter:
+ """A Filter transforms a string containing a C program."""
+ def __init__(self):
+ pass
+
+ def transform(self, s):
+ return s
+
+class CompoundFilt(Filter):
+ """A CompoundFilt runs another set of filters, in sequence."""
+ def __init__(self, items=()):
+ super().__init__()
+ self._filters = list(items)
+
+ def add(self, filt):
+ self._filters.append(filt)
+ return self
+
+ def transform(self, s):
+ for f in self._filters:
+ s = f.transform(s)
+
+ return s
+
+class SplitError(Exception):
+ """Exception: raised if split_comments() can't understand a C file."""
+ pass
+
+def split_comments(s):
+ r"""Iterate over the C code in 's', and yield a sequence of (code,
+ comment) pairs. Each pair will contain either a nonempty piece
+ of code, a nonempty comment, or both.
+
+ >>> list(split_comments("hello // world\n"))
+ [('hello ', '// world'), ('\n', '')]
+
+ >>> list(split_comments("a /* b cd */ efg // hi"))
+ [('a ', '/* b cd */'), (' efg ', '// hi')]
+ """
+
+ # Matches a block of code without any comments.
+ PAT_CODE = re.compile(r'''^(?: [^/"']+ |
+ "(?:[^\\"]+|\\.)*" |
+ '(?:[^\\']+|\\.)*' |
+ /[^/*]
+ )*''', re.VERBOSE|re.DOTALL)
+
+ # Matches a C99 "//" comment.
+ PAT_C99_COMMENT = re.compile(r'^//.*$', re.MULTILINE)
+
+ # Matches a C "/* */" comment.
+ PAT_C_COMMENT = re.compile(r'^/\*(?:[^*]|\*+[^*/])*\*+/', re.DOTALL)
+
+ while True:
+ # Find some non-comment code at the start of the string.
+ m = PAT_CODE.match(s)
+
+ # If we found some code here, save it and advance the string.
+ # Otherwise set 'code' to "".
+ if m:
+ code = m.group(0)
+ s = s[m.end():]
+ else:
+ code = ""
+
+ # Now we have a comment, or the end of the string. Find out which
+ # one, and how long it is.
+ if s.startswith("//"):
+ m = PAT_C99_COMMENT.match(s)
+ else:
+ m = PAT_C_COMMENT.match(s)
+
+ # If we got a comment, save it and advance the string. Otherwise
+ # set 'comment' to "".
+ if m:
+ comment = m.group(0)
+ s = s[m.end():]
+ else:
+ comment = ""
+
+ # If we found no code and no comment, we should be at the end of
+ # the string...
+ if code == "" and comment == "":
+ if s:
+ # But in case we *aren't* at the end of the string, raise
+ # an error.
+ raise SplitError()
+ # ... all is well, we're done scanning the code.
+ return
+
+ yield (code, comment)
+
+class IgnoreCommentsFilt(Filter):
+ """Wrapper: applies another filter to C code only, excluding comments.
+ """
+ def __init__(self, filt):
+ super().__init__()
+ self._filt = filt
+
+ def transform(self, s):
+ result = []
+ for code, comment in split_comments(s):
+ result.append(self._filt.transform(code))
+ result.append(comment)
+ return "".join(result)
+
+
+class RegexFilt(Filter):
+ """A regex filter applies a regular expression to some C code."""
+ def __init__(self, pat, replacement, flags=0):
+ super().__init__()
+ self._pat = re.compile(pat, flags)
+ self._replacement = replacement
+
+ def transform(self, s):
+ s, _ = self._pat.subn(self._replacement, s)
+ return s
+
+def revise(fname, filt):
+ """Run 'filt' on the contents of the file in 'fname'. If any
+ changes are made, then replace the file with its new contents.
+ Otherwise, leave the file alone.
+ """
+ contents = open(fname, 'r').read()
+ result = filt.transform(contents)
+ if result == contents:
+ return
+
+ tmpname = "{}_codetool_tmp".format(fname)
+ try:
+ with open(tmpname, 'w') as f:
+ f.write(result)
+ os.rename(tmpname, fname)
+ except:
+ os.unlink(tmpname)
+ raise
+
+##############################
+# Filtering rules.
+##############################
+
+# Make sure that there is a newline after the first comma in a MOCK_IMPL()
+BREAK_MOCK_IMPL = RegexFilt(
+ r'^MOCK_IMPL\(([^,]+),\s*(\S+)',
+ r'MOCK_IMPL(\1,\n\2',
+ re.MULTILINE)
+
+# Make sure there is no newline between } and a loop iteration terminator.
+RESTORE_SMARTLIST_END = RegexFilt(
+ r'}\s*(SMARTLIST|DIGESTMAP|DIGEST256MAP|STRMAP|MAP)_FOREACH_END\s*\(',
+ r'} \1_FOREACH_END (',
+ re.MULTILINE)
+
+F = CompoundFilt()
+F.add(IgnoreCommentsFilt(CompoundFilt([
+ RESTORE_SMARTLIST_END,
+ BREAK_MOCK_IMPL])))
+
+if __name__ == '__main__':
+ for fname in sys.argv[1:]:
+ revise(fname, F)
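
Note: to see the two rules and the comment protection work together,
here is a short demonstration. It assumes codetool.py is importable as a
module (for example, by running Python from scripts/maint); F and
IgnoreCommentsFilt are the real names from the file above, while the
sample input is made up.

    import codetool

    SAMPLE = ("/* MOCK_IMPL(int, commented, (void)) is left alone. */\n"
              "MOCK_IMPL(int, foo,(void))\n"
              "{\n"
              "}\n"
              "SMARTLIST_FOREACH_END (sl);\n")

    print(codetool.F.transform(SAMPLE))
    # The uncommented MOCK_IMPL gains a newline after its first comma,
    # the "}" is rejoined with SMARTLIST_FOREACH_END on one line, and
    # the comment is untouched, because IgnoreCommentsFilt only feeds
    # code (never comments) to the regex filters.
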
diff --git a/scripts/maint/fallback.whitelist b/scripts/maint/fallback.whitelist
deleted file mode 100644
index 79551948c6..0000000000
--- a/scripts/maint/fallback.whitelist
+++ /dev/null
@@ -1,997 +0,0 @@
-# updateFallbackDirs.py directory mirror whitelist
-#
-# Format:
-# IPv4:DirPort orport=<ORPort> id=<ID> [ ipv6=<IPv6>:<IPv6 ORPort> ]
-# or use:
-# scripts/maint/generateFallbackDirLine.py fingerprint ...
-#
-# All attributes must match for the directory mirror to be included.
-# If the fallback has an ipv6 key, the whitelist line must also have
-# it, and vice versa, otherwise they don't match.
-# (The blacklist overrides the whitelist.)
-
-# To replace this list with the hard-coded fallback list (for testing), use
-# a command similar to:
-# cat src/app/config/fallback_dirs.inc | grep \" | grep -v weight | \
-# tr -d '\n' | \
-# sed 's/"" / /g' | sed 's/""/"/g' | tr \" '\n' | grep -v '^$' \
-# > scripts/maint/fallback.whitelist
-#
-# When testing before a release, exclusions due to changed details will result
-# in a warning, unless the IPv4 address or port change happened recently.
-# Then it is only logged at info level, as part of the eligibility check.
-# Exclusions due to stability also are only shown at info level.
-#
-# Add the number of selected, slow, and excluded relays, and compare that to
-# the number of hard-coded relays. If it's less, use info-level logs to find
-# out why each of the missing relays was excluded.
-
-# If a relay operator wants their relay to be a FallbackDir,
-# enter the following information here:
-# <IPv4>:<DirPort> orport=<ORPort> id=<ID> [ ipv6=<IPv6>:<IPv6 ORPort> ]
-
-# https://lists.torproject.org/pipermail/tor-relays/2015-December/008362.html
-# https://trac.torproject.org/projects/tor/ticket/22321#comment:22
-78.47.18.110:443 orport=80 id=F8D27B163B9247B232A2EEE68DD8B698695C28DE ipv6=[2a01:4f8:120:4023::110]:80 # fluxe3
-131.188.40.188:1443 orport=80 id=EBE718E1A49EE229071702964F8DB1F318075FF8 ipv6=[2001:638:a000:4140::ffff:188]:80 # fluxe4
-
-# https://lists.torproject.org/pipermail/tor-relays/2015-December/008366.html
-5.39.88.19:9030 orport=9001 id=7CB8C31432A796731EA7B6BF4025548DFEB25E0C ipv6=[2001:41d0:8:9a13::1]:9050
-
-# https://lists.torproject.org/pipermail/tor-relays/2015-December/008370.html
-# https://lists.torproject.org/pipermail/tor-relays/2016-January/008517.html
-# https://lists.torproject.org/pipermail/tor-relays/2016-January/008555.html
-212.47.237.95:9030 orport=9001 id=3F5D8A879C58961BB45A3D26AC41B543B40236D6
-212.47.237.95:9130 orport=9101 id=6FB38EB22E57EF7ED5EF00238F6A48E553735D88
-
-# https://lists.torproject.org/pipermail/tor-relays/2015-December/008372.html
-# IPv6 tunnel available on request (is this a good idea?)
-108.53.208.157:80 orport=443 id=4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2
-
-# https://lists.torproject.org/pipermail/tor-relays/2015-December/008373.html
-167.114.35.28:9030 orport=9001 id=E65D300F11E1DB12C534B0146BDAB6972F1A8A48
-
-# https://lists.torproject.org/pipermail/tor-relays/2015-December/008378.html
-144.76.14.145:110 orport=143 id=14419131033443AE6E21DA82B0D307F7CAE42BDB ipv6=[2a01:4f8:190:9490::dead]:443
-
-# https://lists.torproject.org/pipermail/tor-relays/2015-December/008379.html
-# Email sent directly to teor, verified using relay contact info
-91.121.84.137:4951 orport=4051 id=6DE61A6F72C1E5418A66BFED80DFB63E4C77668F
-91.121.84.137:4952 orport=4052 id=9FBEB75E8BC142565F12CBBE078D63310236A334
-
-# https://lists.torproject.org/pipermail/tor-relays/2015-December/008381.html
-# Sent additional emails to teor with updated relays
-81.7.11.96:9030 orport=9001 id=8FA37B93397015B2BC5A525C908485260BE9F422 # Doedel22
-# 9F5068310818ED7C70B0BC4087AB55CB12CB4377 not found in current consensus
-178.254.19.101:80 orport=443 id=F9246DEF2B653807236DA134F2AEAB103D58ABFE # Freebird31
-178.254.19.101:9030 orport=9001 id=0C475BA4D3AA3C289B716F95954CAD616E50C4E5 # Freebird32
-81.7.14.253:9001 orport=443 id=1AE039EE0B11DB79E4B4B29CBA9F752864A0259E # Ichotolot60
-81.7.11.186:1080 orport=443 id=B86137AE9681701901C6720E55C16805B46BD8E3 # BeastieJoy60
-85.25.213.211:465 orport=80 id=CE47F0356D86CF0A1A2008D97623216D560FB0A8 # BeastieJoy61
-85.25.159.65:995 orport=80 id=52BFADA8BEAA01BA46C8F767F83C18E2FE50C1B9 # BeastieJoy63
-81.7.3.67:993 orport=443 id=A2E6BB5C391CD46B38C55B4329C35304540771F1 # BeastieJoy62
-81.7.14.31:9001 orport=443 id=7600680249A22080ECC6173FBBF64D6FCF330A61 # Ichotolot62
-
-# https://lists.torproject.org/pipermail/tor-relays/2015-December/008382.html
-51.255.33.237:9091 orport=9001 id=A360C21FA87FFA2046D92C17086A6B47E5C68109
-
-# https://lists.torproject.org/pipermail/tor-relays/2015-December/008383.html
-81.7.14.246:80 orport=443 id=CE75BF0972ADD52AF8807602374E495C815DB304 ipv6=[2a02:180:a:51::dead]:443
-
-# https://lists.torproject.org/pipermail/tor-relays/2015-December/008384.html
-# Sent additional email to teor with fingerprint change
-149.202.98.161:80 orport=443 id=FC64CD763F8C1A319BFBBF62551684F4E1E42332 ipv6=[2001:41d0:8:4528::161]:443
-193.111.136.162:80 orport=443 id=C79552275DFCD486B942510EF663ED36ACA1A84B ipv6=[2001:4ba0:cafe:10d0::1]:443
-
-# https://lists.torproject.org/pipermail/tor-relays/2015-December/008416.html
-185.100.84.212:80 orport=443 id=330CD3DB6AD266DC70CDB512B036957D03D9BC59 ipv6=[2a06:1700:0:7::1]:443
-
-# https://lists.torproject.org/pipermail/tor-relays/2015-December/008417.html
-178.16.208.56:80 orport=443 id=2CDCFED0142B28B002E89D305CBA2E26063FADE2 ipv6=[2a00:1c20:4089:1234:cd49:b58a:9ebe:67ec]:443
-178.16.208.57:80 orport=443 id=92CFD9565B24646CAC2D172D3DB503D69E777B8A ipv6=[2a00:1c20:4089:1234:7825:2c5d:1ecd:c66f]:443
-
-# https://lists.torproject.org/pipermail/tor-relays/2016-January/008513.html
-178.62.173.203:9030 orport=9001 id=DD85503F2D1F52EF9EAD621E942298F46CD2FC10 ipv6=[2a03:b0c0:0:1010::a4:b001]:9001
-
-# https://lists.torproject.org/pipermail/tor-relays/2016-January/008534.html
-5.9.110.236:9030 orport=9001 id=0756B7CD4DFC8182BE23143FAC0642F515182CEB ipv6=[2a01:4f8:162:51e2::2]:9001
-
-# https://lists.torproject.org/pipermail/tor-relays/2016-January/008542.html
-178.62.199.226:80 orport=443 id=CBEFF7BA4A4062045133C053F2D70524D8BBE5BE ipv6=[2a03:b0c0:2:d0::b7:5001]:443
-
-# Email sent directly to teor, verified using relay contact info
-94.23.204.175:9030 orport=9001 id=5665A3904C89E22E971305EE8C1997BCA4123C69
-
-# Email sent directly to teor, verified using relay contact info
-171.25.193.77:80 orport=443 id=A10C4F666D27364036B562823E5830BC448E046A ipv6=[2001:67c:289c:3::77]:443
-171.25.193.78:80 orport=443 id=A478E421F83194C114F41E94F95999672AED51FE ipv6=[2001:67c:289c:3::78]:443
-171.25.193.20:80 orport=443 id=DD8BD7307017407FCC36F8D04A688F74A0774C02 ipv6=[2001:67c:289c::20]:443
-# same machine as DD8BD7307017407FCC36F8D04A688F74A0774C02
-171.25.193.25:80 orport=443 id=185663B7C12777F052B2C2D23D7A239D8DA88A0F ipv6=[2001:67c:289c::25]:443
-
-# Email sent directly to teor, verified using relay contact info
-212.47.229.2:9030 orport=9001 id=20462CBA5DA4C2D963567D17D0B7249718114A68 ipv6=[2001:bc8:4400:2100::f03]:9001
-93.115.97.242:9030 orport=9001 id=B5212DB685A2A0FCFBAE425738E478D12361710D
-46.28.109.231:9030 orport=9001 id=F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610 ipv6=[2a02:2b88:2:1::4205:1]:9001
-
-# Email sent directly to teor, verified using relay contact info
-85.235.250.88:80 orport=443 id=72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE # TykRelay01
-185.96.88.29:80 orport=443 id=86C281AD135058238D7A337D546C902BE8505DDE # TykRelay051
-# This fallback opted-in in previous releases, then changed its details,
-# and so we blacklisted it. Now we want to whitelist changes.
-# Assume details update is permanent
-185.96.180.29:80 orport=443 id=F93D8F37E35C390BCAD9F9069E13085B745EC216 # TykRelay06
-
-# Email sent directly to teor, verified using relay contact info
-185.11.180.67:80 orport=9001 id=794D8EA8343A4E820320265D05D4FA83AB6D1778
-
-# Email sent directly to teor, verified using relay contact info
-178.16.208.62:80 orport=443 id=5CF8AFA5E4B0BB88942A44A3F3AAE08C3BDFD60B ipv6=[2a00:1c20:4089:1234:a6a4:2926:d0af:dfee]:443
-46.165.221.166:80 orport=443 id=EE5F897C752D46BCFF531641B853FC6BC78DD4A7
-178.16.208.60:80 orport=443 id=B44FBE5366AD98B46D829754FA4AC599BAE41A6A ipv6=[2a00:1c20:4089:1234:67bc:79f3:61c0:6e49]:443
-178.16.208.55:80 orport=443 id=C4AEA05CF380BAD2230F193E083B8869B4A29937 ipv6=[2a00:1c20:4089:1234:7b2c:11c5:5221:903e]:443
-178.16.208.61:80 orport=443 id=3B52392E2256C35CDCF7801FF898FC88CE6D431A ipv6=[2a00:1c20:4089:1234:2712:a3d0:666b:88a6]:443
-81.89.96.88:80 orport=443 id=55ED4BB49F6D3F36D8D9499BE43500E017A5EF82 ipv6=[2a02:180:1:1:14c5:b0b7:2d7d:5f3a]:443
-209.222.8.196:80 orport=443 id=C86D2F3DEFE287A0EEB28D4887AF14E35C172733 ipv6=[2001:19f0:1620:41c1:426c:5adf:2ed5:4e88]:443
-81.89.96.89:80 orport=443 id=28651F419F5A1CF74511BB500C58112192DD4943 ipv6=[2a02:180:1:1:2ced:24e:32ea:a03b]:443
-46.165.221.166:9030 orport=9001 id=8C7106C880FE8AA1319DD71B59623FCB8914C9F1
-178.16.208.62:80 orport=443 id=5CF8AFA5E4B0BB88942A44A3F3AAE08C3BDFD60B ipv6=[2a00:1c20:4089:1234:a6a4:2926:d0af:dfee]:443"
-46.165.221.166:80 orport=443 id=EE5F897C752D46BCFF531641B853FC6BC78DD4A7
-178.16.208.60:80 orport=443 id=B44FBE5366AD98B46D829754FA4AC599BAE41A6A ipv6=[2a00:1c20:4089:1234:67bc:79f3:61c0:6e49]:443
-178.16.208.55:80 orport=443 id=C4AEA05CF380BAD2230F193E083B8869B4A29937 ipv6=[2a00:1c20:4089:1234:7b2c:11c5:5221:903e]:443
-178.16.208.61:80 orport=443 id=3B52392E2256C35CDCF7801FF898FC88CE6D431A ipv6=[2a00:1c20:4089:1234:2712:a3d0:666b:88a6]:443
-81.89.96.88:80 orport=443 id=55ED4BB49F6D3F36D8D9499BE43500E017A5EF82 ipv6=[2a02:180:1:1:14c5:b0b7:2d7d:5f3a]:443
-209.222.8.196:80 orport=443 id=C86D2F3DEFE287A0EEB28D4887AF14E35C172733 ipv6=[2001:19f0:1620:41c1:426c:5adf:2ed5:4e88]:443
-81.89.96.89:80 orport=443 id=28651F419F5A1CF74511BB500C58112192DD4943 ipv6=[2a02:180:1:1:2ced:24e:32ea:a03b]:443
-46.165.221.166:9030 orport=9001 id=8C7106C880FE8AA1319DD71B59623FCB8914C9F1
-178.16.208.56:80 orport=443 id=2CDCFED0142B28B002E89D305CBA2E26063FADE2 ipv6=[2a00:1c20:4089:1234:cd49:b58a:9ebe:67ec]:443
-178.16.208.58:80 orport=443 id=A4C98CEA3F34E05299417E9F885A642C88EF6029 ipv6=[2a00:1c20:4089:1234:cdae:1b3e:cc38:3d45]:443
-178.16.208.57:80 orport=443 id=92CFD9565B24646CAC2D172D3DB503D69E777B8A ipv6=[2a00:1c20:4089:1234:7825:2c5d:1ecd:c66f]:443
-178.16.208.59:80 orport=443 id=136F9299A5009A4E0E96494E723BDB556FB0A26B ipv6=[2a00:1c20:4089:1234:bff6:e1bb:1ce3:8dc6]:443
-
-# Email sent directly to teor, verified using relay contact info
-5.39.76.158:80 orport=443 id=C41F60F8B00E7FEF5CCC5BC6BB514CA1B8AAB651
-
-# Email sent directly to teor, verified using relay contact info
-109.163.234.2:80 orport=443 id=14F92FF956105932E9DEC5B82A7778A0B1BD9A52
-109.163.234.4:80 orport=443 id=4888770464F0E900EFEF1BA181EA873D13F7713C
-109.163.234.5:80 orport=443 id=5EB8D862E70981B8690DEDEF546789E26AB2BD24
-109.163.234.7:80 orport=443 id=23038A7F2845EBA2234ECD6651BD4A7762F51B18
-109.163.234.8:80 orport=443 id=0818DAE0E2DDF795AEDEAC60B15E71901084F281
-109.163.234.9:80 orport=443 id=ABF7FBF389C9A747938B639B20E80620B460B2A9
-62.102.148.67:80 orport=443 id=4A0C3E177AF684581EF780981AEAF51A98A6B5CF
-# Assume details update is permanent
-77.247.181.166:80 orport=443 id=77131D7E2EC1CA9B8D737502256DA9103599CE51 # CriticalMass
-77.247.181.164:80 orport=443 id=204DFD2A2C6A0DC1FA0EACB495218E0B661704FD # HaveHeart
-77.247.181.162:80 orport=443 id=7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55 # sofia
-
-# https://twitter.com/biotimylated/status/718994247500718080
-212.47.252.149:9030 orport=9001 id=2CAC39BAA996791CEFAADC9D4754D65AF5EB77C0
-
-# Email sent directly to teor, verified using relay contact info
-46.165.230.5:80 orport=443 id=A0F06C2FADF88D3A39AA3072B406F09D7095AC9E
-
-# Email sent directly to teor, verified using relay contact info
-94.242.246.24:23 orport=8080 id=EC116BCB80565A408CE67F8EC3FE3B0B02C3A065 ipv6=[2a01:608:ffff:ff07::1:24]:9004
-176.126.252.11:443 orport=9001 id=B0279A521375F3CB2AE210BDBFC645FDD2E1973A ipv6=[2a02:59e0:0:7::11]:9003
-176.126.252.12:21 orport=8080 id=379FB450010D17078B3766C2273303C358C3A442 ipv6=[2a02:59e0:0:7::12]:81
-94.242.246.23:443 orport=9001 id=F65E0196C94DFFF48AFBF2F5F9E3E19AAE583FD0 ipv6=[2a01:608:ffff:ff07::1:23]:9003
-85.248.227.164:444 orport=9002 id=B84F248233FEA90CAD439F292556A3139F6E1B82 ipv6=[2a00:1298:8011:212::164]:9004
-85.248.227.163:443 orport=9001 id=C793AB88565DDD3C9E4C6F15CCB9D8C7EF964CE9 ipv6=[2a00:1298:8011:212::163]:9003
-
-# Email sent directly to teor, verified using relay contact info
-148.251.190.229:9030 orport=9010 id=BF0FB582E37F738CD33C3651125F2772705BB8E8 ipv6=[2a01:4f8:211:c68::2]:9010
-
-# Email sent directly to teor, verified using relay contact info
-5.79.68.161:81 orport=443 id=9030DCF419F6E2FBF84F63CBACBA0097B06F557E ipv6=[2001:1af8:4700:a012:1::1]:443
-5.79.68.161:9030 orport=9001 id=B7EC0C02D7D9F1E31B0C251A6B058880778A0CD1 ipv6=[2001:1af8:4700:a012:1::1]:9001
-
-# Email sent directly to teor, verified using relay contact info
-62.210.92.11:9030 orport=9001 id=0266B0660F3F20A7D1F3D8335931C95EF50F6C6B ipv6=[2001:bc8:338c::1]:9001
-62.210.92.11:9130 orport=9101 id=387B065A38E4DAA16D9D41C2964ECBC4B31D30FF ipv6=[2001:bc8:338c::1]:9101
-
-# Email sent directly to teor, verified using relay contact info
-188.165.194.195:9030 orport=9001 id=49E7AD01BB96F6FE3AB8C3B15BD2470B150354DF
-
-# Message sent directly to teor, verified using relay contact info
-95.215.44.110:80 orport=443 id=D56AA4A1AA71961F5279FB70A6DCF7AD7B993EB5
-95.215.44.122:80 orport=443 id=998D8FE06B867AA3F8D257A7D28FFF16964D53E2
-95.215.44.111:80 orport=443 id=A7C7FD510B20BC8BE8F2A1D911364E1A23FBD09F
-
-# Email sent directly to teor, verified using relay contact info
-86.59.119.88:80 orport=443 id=ACD889D86E02EDDAB1AFD81F598C0936238DC6D0
-86.59.119.83:80 orport=443 id=FC9AC8EA0160D88BCCFDE066940D7DD9FA45495B
-
-# Email sent directly to teor, verified using relay contact info
-193.11.164.243:9030 orport=9001 id=FFA72BD683BC2FCF988356E6BEC1E490F313FB07 ipv6=[2001:6b0:7:125::243]:9001
-109.105.109.162:52860 orport=60784 id=32EE911D968BE3E016ECA572BB1ED0A9EE43FC2F ipv6=[2001:948:7:2::163]:5001
-
-# Email sent directly to teor, verified using relay contact info
-146.0.32.144:9030 orport=9001 id=35E8B344F661F4F2E68B17648F35798B44672D7E
-
-# Email sent directly to teor, verified using relay contact info
-46.252.26.2:45212 orport=49991 id=E589316576A399C511A9781A73DA4545640B479D
-
-# Email sent directly to teor, verified using relay contact info
-89.187.142.208:80 orport=443 id=64186650FFE4469EBBE52B644AE543864D32F43C
-
-# Email sent directly to teor
-# Assume details update is permanent
-212.51.134.123:9030 orport=9001 id=50586E25BE067FD1F739998550EDDCB1A14CA5B2 # Jans
-
-# Email sent directly to teor, verified using relay contact info
-46.101.143.173:80 orport=443 id=F960DF50F0FD4075AC9B505C1D4FFC8384C490FB
-
-# Email sent directly to teor, verified using relay contact info
-193.171.202.146:9030 orport=9001 id=01A9258A46E97FF8B2CAC7910577862C14F2C524
-
-# Email sent directly to teor, verified using relay contact info
-# Assume details update is permanent
-197.231.221.211:9030 orport=443 id=BC630CBBB518BE7E9F4E09712AB0269E9DC7D626 # IPredator
-
-# Email sent directly to teor, verified using relay contact info
-185.61.138.18:8080 orport=4443 id=2541759BEC04D37811C2209A88E863320271EC9C
-
-# Email sent directly to teor, verified using relay contact info
-193.11.114.45:9031 orport=9002 id=80AAF8D5956A43C197104CEF2550CD42D165C6FB
-193.11.114.43:9030 orport=9001 id=12AD30E5D25AA67F519780E2111E611A455FDC89 ipv6=[2001:6b0:30:1000::99]:9050
-193.11.114.46:9032 orport=9003 id=B83DC1558F0D34353BB992EF93AFEAFDB226A73E
-
-# Email sent directly to teor, verified using relay contact info
-138.201.250.33:9012 orport=9011 id=2BA2C8E96B2590E1072AECE2BDB5C48921BF8510
-
-# Email sent directly to teor, verified using relay contact info
-37.221.162.226:9030 orport=9001 id=D64366987CB39F61AD21DBCF8142FA0577B92811
-
-# Email sent directly to teor, verified using relay contact info
-91.219.237.244:80 orport=443 id=92ECC9E0E2AF81BB954719B189AC362E254AD4A5
-
-# Email sent directly to teor, verified using relay contact info
-185.21.100.50:9030 orport=9001 id=58ED9C9C35E433EE58764D62892B4FFD518A3CD0 ipv6=[2a00:1158:2:cd00:0:74:6f:72]:443
-
-# Email sent directly to teor, verified using relay contact info
-193.35.52.53:9030 orport=9001 id=DAA39FC00B196B353C2A271459C305C429AF09E4
-
-# Email sent directly to teor, verified using relay contact info
-134.119.3.164:9030 orport=9001 id=D1B8AAA98C65F3DF7D8BB3AF881CAEB84A33D8EE
-
-# Email sent directly to teor, verified using relay contact info
-173.212.254.192:31336 orport=31337 id=99E246DB480B313A3012BC3363093CC26CD209C7
-
-# Email sent directly to teor, verified using relay contact info
-178.62.22.36:80 orport=443 id=A0766C0D3A667A3232C7D569DE94A28F9922FCB1 ipv6=[2a03:b0c0:1:d0::174:1]:9050
-188.166.23.127:80 orport=443 id=8672E8A01B4D3FA4C0BBE21C740D4506302EA487 ipv6=[2a03:b0c0:2:d0::27b:7001]:9050
-198.199.64.217:80 orport=443 id=B1D81825CFD7209BD1B4520B040EF5653C204A23 ipv6=[2604:a880:400:d0::1a9:b001]:9050
-159.203.32.149:80 orport=443 id=55C7554AFCEC1062DCBAC93E67B2E03C6F330EFC ipv6=[2604:a880:cad:d0::105:f001]:9050
-
-# Email sent directly to teor, verified using relay contact info
-5.196.31.80:9030 orport=9900 id=DFB2EB472643FAFCD5E73D2E37D51DB67203A695 ipv6=[2001:41d0:52:400::a65]:9900
-
-# Email sent directly to teor, verified using relay contact info
-188.138.112.60:1433 orport=1521 id=C414F28FD2BEC1553024299B31D4E726BEB8E788
-
-# Email sent directly to teor, verified using relay contact info
-213.61.66.118:9031 orport=9001 id=30648BC64CEDB3020F4A405E4AB2A6347FB8FA22
-213.61.66.117:9032 orport=9002 id=6E44A52E3D1FF7683FE5C399C3FB5E912DE1C6B4
-213.61.66.115:9034 orport=9004 id=480CCC94CEA04D2DEABC0D7373868E245D4C2AE2
-213.61.66.116:9033 orport=9003 id=A9DEB920B42B4EC1DE6249034039B06D61F38690
-
-# Email sent directly to teor, verified using relay contact info
-136.243.187.165:9030 orport=443 id=1AC65257D7BFDE7341046625470809693A8ED83E
-
-# Email sent directly to teor, verified using relay contact info
-212.47.230.49:9030 orport=9001 id=3D6D0771E54056AEFC28BB1DE816951F11826E97
-
-# Email sent directly to teor, verified using relay contact info
-192.99.55.69:80 orport=443 id=0682DE15222A4A4A0D67DBA72A8132161992C023
-192.99.59.140:80 orport=443 id=3C9148DA49F20654730FAC83FFF693A4D49D0244
-51.254.215.13:80 orport=443 id=73C30C8ABDD6D9346C822966DE73B9F82CB6178A
-51.254.215.129:80 orport=443 id=7B4491D05144B20AE8519AE784B94F0525A8BB79
-192.99.59.139:80 orport=443 id=82EC878ADA7C205146B9F5193A7310867FAA0D7B
-51.254.215.124:80 orport=443 id=98999EBE89B5FA9AA0C58421F0B46C3D0AF51CBA
-51.254.214.208:80 orport=443 id=C3F0D1417848EAFC41277A73DEB4A9F2AEC23DDF
-192.99.59.141:80 orport=443 id=F45426551795B9DA78BEDB05CD5F2EACED8132E4
-192.99.59.14:80 orport=443 id=161A1B29A37EBF096D2F8A9B1E176D6487FE42AE
-
-# Email sent directly to teor, verified using relay contact info
-151.80.42.103:9030 orport=9001 id=9007C1D8E4F03D506A4A011B907A9E8D04E3C605 ipv6=[2001:41d0:e:f67::114]:9001
-
-# Email sent directly to teor, verified using relay contact info
-5.39.92.199:80 orport=443 id=0BEA4A88D069753218EAAAD6D22EA87B9A1319D6 ipv6=[2001:41d0:8:b1c7::1]:443
-
-# Email sent directly to teor, verified using relay contact info
-176.31.159.231:80 orport=443 id=D5DBCC0B4F029F80C7B8D33F20CF7D97F0423BB1
-176.31.159.230:80 orport=443 id=631748AFB41104D77ADBB7E5CD4F8E8AE876E683
-195.154.79.128:80 orport=443 id=C697612CA5AED06B8D829FCC6065B9287212CB2F
-195.154.9.161:80 orport=443 id=B6295A9960F89BD0C743EEBC5670450EA6A34685
-46.148.18.74:8080 orport=443 id=6CACF0B5F03C779672F3C5C295F37C8D234CA3F7
-
-# Email sent directly to teor, verified using relay contact info
-37.187.102.108:80 orport=443 id=F4263275CF54A6836EE7BD527B1328836A6F06E1 ipv6=[2001:41d0:a:266c::1]:443 # EvilMoe
-212.47.241.21:80 orport=443 id=892F941915F6A0C6E0958E52E0A9685C190CF45C # EvilMoe
-
-# Email sent directly to teor, verified using relay contact info
-212.129.38.254:9030 orport=9001 id=FDF845FC159C0020E2BDDA120C30C5C5038F74B4
-
-# Email sent directly to teor
-37.157.195.87:8030 orport=443 id=12FD624EE73CEF37137C90D38B2406A66F68FAA2 # thanatosCZ
-5.189.169.190:8030 orport=8080 id=8D79F73DCD91FC4F5017422FAC70074D6DB8DD81 # thanatosDE
-
-# Email sent directly to teor, verified using relay contact info
-37.187.7.74:80 orport=443 id=AEA43CB1E47BE5F8051711B2BF01683DB1568E05 ipv6=[2001:41d0:a:74a::1]:443
-
-# Email sent directly to teor, verified using relay contact info
-185.66.250.141:9030 orport=9001 id=B1726B94885CE3AC3910CA8B60622B97B98E2529
-
-# Email sent directly to teor, verified using relay contact info
-185.104.120.7:9030 orport=443 id=445F1C853966624FB3CF1E12442570DC553CC2EC ipv6=[2a06:3000::120:7]:443
-185.104.120.2:9030 orport=21 id=518FF8708698E1DA09C823C36D35DF89A2CAD956
-185.104.120.4:9030 orport=9001 id=F92B3CB9BBE0CB22409843FB1AE4DBCD5EFAC835
-185.104.120.3:9030 orport=21 id=707C1B61AC72227B34487B56D04BAA3BA1179CE8 ipv6=[2a06:3000::120:3]:21
-
-# Email sent directly to teor, verified using relay contact info
-37.187.102.186:9030 orport=9001 id=489D94333DF66D57FFE34D9D59CC2D97E2CB0053 ipv6=[2001:41d0:a:26ba::1]:9001
-
-# Email sent directly to teor, verified using relay contact info
-198.96.155.3:8080 orport=5001 id=BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E
-
-# Email sent directly to teor, verified using relay contact info
-212.83.154.33:8888 orport=443 id=3C79699D4FBC37DE1A212D5033B56DAE079AC0EF
-212.83.154.33:8080 orport=8443 id=322C6E3A973BC10FC36DE3037AD27BC89F14723B
-
-# Email sent directly to teor, verified using relay contact info
-51.255.41.65:9030 orport=9001 id=9231DF741915AA1630031A93026D88726877E93A
-
-# Email sent directly to teor, verified using relay contact info
-78.142.142.246:80 orport=443 id=5A5E03355C1908EBF424CAF1F3ED70782C0D2F74
-
-# Email sent directly to teor, verified using relay contact info
-195.154.97.91:80 orport=443 id=BD33C50D50DCA2A46AAED54CA319A1EFEBF5D714
-
-# Email sent directly to teor, verified using relay contact info
-62.210.129.246:80 orport=443 id=79E169B25E4C7CE99584F6ED06F379478F23E2B8
-
-# Email sent directly to teor, verified using relay contact info
-5.196.74.215:9030 orport=9001 id=5818055DFBAF0FA7F67E8125FD63E3E7F88E28F6
-
-# Email sent directly to teor, verified using relay contact info
-212.47.233.86:9030 orport=9001 id=B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20
-
-# Email sent directly to teor, verified using relay contact info
-85.214.206.219:9030 orport=9001 id=98F8D5F359949E41DE8DF3DBB1975A86E96A84A0
-
-# Email sent directly to teor, verified using relay contact info
-46.166.170.4:80 orport=443 id=19F42DB047B72C7507F939F5AEA5CD1FA4656205
-46.166.170.5:80 orport=443 id=DA705AD4591E7B4708FA2CAC3D53E81962F3E6F6
-
-# Email sent directly to teor, verified using relay contact info
-5.189.157.56:80 orport=443 id=77F6D6A6B6EAFB8F5DADDC07A918BBF378ED6725
-
-# Email sent directly to teor, verified using relay contact info
-46.28.110.244:80 orport=443 id=9F7D6E6420183C2B76D3CE99624EBC98A21A967E
-185.13.39.197:80 orport=443 id=001524DD403D729F08F7E5D77813EF12756CFA8D
-95.130.12.119:80 orport=443 id=587E0A9552E4274B251F29B5B2673D38442EE4BF
-
-# Email sent directly to teor, verified using relay contact info
-212.129.62.232:80 orport=443 id=B143D439B72D239A419F8DCE07B8A8EB1B486FA7
-
-# Email sent directly to teor, verified using relay contact info
-91.219.237.229:80 orport=443 id=1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7
-
-# Email sent directly to teor, verified using relay contact info
-46.101.151.222:80 orport=443 id=1DBAED235E3957DE1ABD25B4206BE71406FB61F8
-178.62.60.37:80 orport=443 id=175921396C7C426309AB03775A9930B6F611F794
-
-# Email sent directly to teor, verified using relay contact info
-178.62.197.82:80 orport=443 id=0D3EBA17E1C78F1E9900BABDB23861D46FCAF163
-
-# Email sent directly to teor, verified using relay contact info
-82.223.21.74:9030 orport=9001 id=7A32C9519D80CA458FC8B034A28F5F6815649A98 ipv6=[2001:470:53e0::cafe]:9050
-
-# Email sent directly to teor, verified using relay contact info
-146.185.177.103:80 orport=9030 id=9EC5E097663862DF861A18C32B37C5F82284B27D
-
-# Email sent directly to teor, verified using relay contact info
-37.187.22.87:9030 orport=9001 id=36B9E7AC1E36B62A9D6F330ABEB6012BA7F0D400 ipv6=[2001:41d0:a:1657::1]:9001
-
-# Email sent directly to teor, verified using relay contact info
-37.59.46.159:9030 orport=9001 id=CBD0D1BD110EC52963082D839AC6A89D0AE243E7
-
-# Email sent directly to teor, verified using relay contact info
-212.47.250.243:9030 orport=9001 id=5B33EDBAEA92F446768B3753549F3B813836D477
-# Confirm with operator before adding these
-#163.172.133.36:9030 orport=9001 id=D8C2BD36F01FA86F4401848A0928C4CB7E5FDFF9
-#158.69.216.70:9030 orport=9001 id=0ACE25A978D4422C742D6BC6345896719BF6A7EB
-
-# Email sent directly to teor, verified using relay contact info
-5.199.142.236:9030 orport=9001 id=F4C0EDAA0BF0F7EC138746F8FEF1CE26C7860265
-
-# Email sent directly to teor
-188.166.133.133:9030 orport=9001 id=774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7 ipv6=[2a03:b0c0:2:d0::26c0:1]:9001 # dropsy
-
-# Email sent directly to teor, verified using relay contact info
-46.8.249.10:80 orport=443 id=31670150090A7C3513CB7914B9610E786391A95D
-
-# Email sent directly to teor, verified using relay contact info
-144.76.163.93:9030 orport=9001 id=22F08CF09764C4E8982640D77F71ED72FF26A9AC
-
-# Email sent directly to teor, verified using relay contact info
-46.4.24.161:9030 orport=9001 id=DB4C76A3AD7E234DA0F00D6F1405D8AFDF4D8DED
-46.4.24.161:9031 orport=9002 id=7460F3D12EBE861E4EE073F6233047AACFE46AB4
-46.38.51.132:9030 orport=9001 id=810DEFA7E90B6C6C383C063028EC397A71D7214A
-163.172.194.53:9030 orport=9001 id=8C00FA7369A7A308F6A137600F0FA07990D9D451 ipv6=[2001:bc8:225f:142:6c69:7461:7669:73]:9001
-
-# Email sent directly to teor, verified using relay contact info
-176.10.107.180:9030 orport=9001 id=3D7E274A87D9A89AF064C13D1EE4CA1F184F2600
-
-# Email sent directly to teor, verified using relay contact info
-46.28.207.19:80 orport=443 id=5B92FA5C8A49D46D235735504C72DBB3472BA321
-46.28.207.141:80 orport=443 id=F69BED36177ED727706512BA6A97755025EEA0FB
-46.28.205.170:80 orport=443 id=AF322D83A4D2048B22F7F1AF5F38AFF4D09D0B76
-95.183.48.12:80 orport=443 id=7187CED1A3871F837D0E60AC98F374AC541CB0DA
-
-# Email sent directly to teor, verified using relay contact info
-93.180.156.84:9030 orport=9001 id=8844D87E9B038BE3270938F05AF797E1D3C74C0F
-
-# Email sent directly to teor, verified using relay contact info
-37.187.115.157:9030 orport=9001 id=D5039E1EBFD96D9A3F9846BF99EC9F75EDDE902A
-
-# Email sent directly to teor, verified using relay contact info
-5.34.183.205:80 orport=443 id=DDD7871C1B7FA32CB55061E08869A236E61BDDF8
-
-# Email sent directly to teor, verified using relay contact info
-51.254.246.203:9030 orport=9001 id=47B596B81C9E6277B98623A84B7629798A16E8D5
-
-# Email sent directly to teor, verified using relay contact info
-5.9.146.203:80 orport=443 id=1F45542A24A61BF9408F1C05E0DCE4E29F2CBA11
-
-# Email sent directly to teor, verified using relay contact info
-# Updated details from atlas based on ticket #20010
-163.172.176.167:80 orport=443 id=230A8B2A8BA861210D9B4BA97745AEC217A94207
-163.172.149.155:80 orport=443 id=0B85617241252517E8ECF2CFC7F4C1A32DCD153F
-163.172.149.122:80 orport=443 id=A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2
-
-# Email sent directly to teor, verified using relay contact info
-204.11.50.131:9030 orport=9001 id=185F2A57B0C4620582602761097D17DB81654F70
-
-# Email sent directly to teor, verified using relay contact info
-151.236.222.217:44607 orport=9001 id=94D58704C2589C130C9C39ED148BD8EA468DBA54
-
-# Email sent directly to teor, verified using relay contact info
-185.35.202.221:9030 orport=9001 id=C13B91384CDD52A871E3ECECE4EF74A7AC7DCB08 ipv6=[2a02:ed06::221]:9001
-
-# Email sent directly to teor, verified using relay contact info
-5.9.151.241:9030 orport=4223 id=9BF04559224F0F1C3C953D641F1744AF0192543A ipv6=[2a01:4f8:190:34f0::2]:4223
-
-# Email sent directly to teor, verified using relay contact info
-89.40.71.149:8081 orport=8080 id=EC639EDAA5121B47DBDF3D6B01A22E48A8CB6CC7
-
-# Email sent directly to teor, verified using relay contact info
-92.222.20.130:80 orport=443 id=0639612FF149AA19DF3BCEA147E5B8FED6F3C87C
-
-# Email sent directly to teor, verified using relay contact info
-80.112.155.100:9030 orport=9001 id=53B000310984CD86AF47E5F3CD0BFF184E34B383 ipv6=[2001:470:7b02::38]:9001
-
-# Email sent directly to teor, verified using relay contact info
-83.212.99.68:80 orport=443 id=DDBB2A38252ADDA53E4492DDF982CA6CC6E10EC0 ipv6=[2001:648:2ffc:1225:a800:bff:fe3d:67b5]:443
-
-# Email sent directly to teor, verified using relay contact info
-95.130.11.147:9030 orport=443 id=6B697F3FF04C26123466A5C0E5D1F8D91925967A
-
-# Email sent directly to teor, verified using relay contact info
-128.199.55.207:9030 orport=9001 id=BCEF908195805E03E92CCFE669C48738E556B9C5 ipv6=[2a03:b0c0:2:d0::158:3001]:9001
-
-# Email sent directly to teor, verified using relay contact info
-178.32.216.146:9030 orport=9001 id=17898F9A2EBC7D69DAF87C00A1BD2FABF3C9E1D2
-
-# Email sent directly to teor, verified using relay contact info
-212.83.40.238:9030 orport=9001 id=F409FA7902FD89270E8DE0D7977EA23BC38E5887
-
-# Email sent directly to teor, verified using relay contact info
-204.8.156.142:80 orport=443 id=94C4B7B8C50C86A92B6A20107539EE2678CF9A28
-
-# Email sent directly to teor, verified using relay contact info
-80.240.139.111:80 orport=443 id=DD3BE7382C221F31723C7B294310EF9282B9111B
-
-# Email sent directly to teor, verified using relay contact info
-185.97.32.18:9030 orport=9001 id=04250C3835019B26AA6764E85D836088BE441088
-
-# Email sent directly to teor
-149.56.45.200:9030 orport=9001 id=FE296180018833AF03A8EACD5894A614623D3F76 ipv6=[2607:5300:201:3000::17d3]:9002 # PiotrTorpotkinOne
-
-# Email sent directly to teor, verified using relay contact info
-81.2.209.10:443 orport=80 id=B6904ADD4C0D10CDA7179E051962350A69A63243 ipv6=[2001:15e8:201:1::d10a]:80
-
-# Email sent directly to teor, verified using relay contact info
-# IPv6 address unreliable
-195.154.164.243:80 orport=443 id=AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C #ipv6=[2001:bc8:399f:f000::1]:993
-138.201.26.2:80 orport=443 id=6D3A3ED5671E4E3F58D4951438B10AE552A5FA0F
-81.7.16.182:80 orport=443 id=51E1CF613FD6F9F11FE24743C91D6F9981807D82 ipv6=[2a02:180:1:1::517:10b6]:993
-134.119.36.135:80 orport=443 id=763C9556602BD6207771A7A3D958091D44C43228 ipv6=[2a00:1158:3::2a8]:993
-46.228.199.19:80 orport=443 id=E26AFC5F718E21AC502899B20C653AEFF688B0D2 ipv6=[2001:4ba0:cafe:4a::1]:993
-37.200.98.5:80 orport=443 id=231C2B9C8C31C295C472D031E06964834B745996 ipv6=[2a00:1158:3::11a]:993
-46.23.70.195:80 orport=443 id=C9933B3725239B6FAB5227BA33B30BE7B48BB485
-185.15.244.124:80 orport=443 id=935BABE2564F82016C19AEF63C0C40B5753BA3D2 ipv6=[2001:4ba0:cafe:e35::1]:993
-195.154.116.232:80 orport=443 id=B35C5739C8C5AB72094EB2B05738FD1F8EEF6EBD ipv6=[2001:bc8:399f:200::1]:993
-195.154.121.198:80 orport=443 id=0C77421C890D16B6D201283A2244F43DF5BC89DD ipv6=[2001:bc8:399f:100::1]:993
-37.187.20.59:80 orport=443 id=91D23D8A539B83D2FB56AA67ECD4D75CC093AC55 ipv6=[2001:41d0:a:143b::1]:993
-217.12.208.117:80 orport=443 id=E6E18151300F90C235D3809F90B31330737CEB43 ipv6=[2a00:1ca8:a7::1bb]:993
-81.7.10.251:80 orport=443 id=8073670F8F852971298F8AF2C5B23AE012645901 ipv6=[2a02:180:1:1::517:afb]:993
-46.36.39.50:80 orport=443 id=ED4B0DBA79AEF5521564FA0231455DCFDDE73BB6 ipv6=[2a02:25b0:aaaa:aaaa:8d49:b692:4852:0]:995
-91.194.90.103:80 orport=443 id=75C4495F4D80522CA6F6A3FB349F1B009563F4B7 ipv6=[2a02:c205:3000:5449::1]:993
-163.172.25.118:80 orport=22 id=0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F
-188.138.88.42:80 orport=443 id=70C55A114C0EF3DC5784A4FAEE64388434A3398F
-81.7.13.84:80 orport=443 id=0C1E7DD9ED0676C788933F68A9985ED853CA5812 ipv6=[2a02:180:1:1::5b8f:538c]:993
-213.246.56.95:80 orport=443 id=27E6E8E19C46751E7312420723C6162FF3356A4C ipv6=[2a00:c70:1:213:246:56:95:1]:993
-94.198.100.18:80 orport=443 id=BAACCB29197DB833F107E410E2BFAE5009EE7583
-217.12.203.46:80 orport=443 id=6A29FD8C00D573E6C1D47852345B0E5275BA3307
-212.117.180.107:80 orport=443 id=0B454C7EBA58657B91133A587C1BDAEDC6E23142
-217.12.199.190:80 orport=443 id=A37C47B03FF31CA6937D3D68366B157997FE7BCD ipv6=[2a02:27a8:0:2::486]:993
-216.230.230.247:80 orport=443 id=4C7BF55B1BFF47993DFF995A2926C89C81E4F04A
-69.30.215.42:80 orport=443 id=510176C07005D47B23E6796F02C93241A29AA0E9 ipv6=[2604:4300:a:2e::2]:993
-89.46.100.162:80 orport=443 id=6B7191639E179965FD694612C9B2C8FB4267B27D
-107.181.174.22:80 orport=443 id=5A551BF2E46BF26CC50A983F7435CB749C752553 ipv6=[2607:f7a0:3:4::4e]:993
-
-# Email sent directly to teor, verified using relay contact info
-212.238.208.48:9030 orport=9001 id=F406219CDD339026D160E53FCA0EF6857C70F109 ipv6=[2001:984:a8fb:1:ba27:ebff:feac:c109]:9001
-
-# Email sent directly to teor
-176.158.236.102:9030 orport=9001 id=DC163DDEF4B6F0C6BC226F9F6656A5A30C5C5686 # Underworld
-
-# Email sent directly to teor, verified using relay contact info
-91.229.20.27:9030 orport=9001 id=9A0D54D3A6D2E0767596BF1515E6162A75B3293F
-
-# Email sent directly to teor, verified using relay contact info
-80.127.137.19:80 orport=443 id=6EF897645B79B6CB35E853B32506375014DE3621 ipv6=[2001:981:47c1:1::6]:443
-
-# Email sent directly to teor
-163.172.138.22:80 orport=443 id=16102E458460349EE45C0901DAA6C30094A9BBEA ipv6=[2001:bc8:4400:2100::1:3]:443 # mkultra
-
-# Email sent directly to teor, verified using relay contact info
-97.74.237.196:9030 orport=9001 id=2F0F32AB1E5B943CA7D062C03F18960C86E70D94
-
-# Email sent directly to teor, verified using relay contact info
-192.187.124.98:9030 orport=9001 id=FD1871854BFC06D7B02F10742073069F0528B5CC
-
-# Email sent directly to teor, verified using relay contact info
-178.62.98.160:9030 orport=9001 id=8B92044763E880996A988831B15B2B0E5AD1544A
-
-# Email sent directly to teor, verified using relay contact info
-163.172.217.50:9030 orport=9001 id=02ECD99ECD596013A8134D46531560816ECC4BE6
-
-# Email sent directly to teor, verified using relay contact info
-185.100.86.100:80 orport=443 id=0E8C0C8315B66DB5F703804B3889A1DD66C67CE0
-185.100.84.82:80 orport=443 id=7D05A38E39FC5D29AFE6BE487B9B4DC9E635D09E
-
-# Email sent directly to teor, verified using relay contact info
-164.132.77.175:9030 orport=9001 id=3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B
-78.24.75.53:9030 orport=9001 id=DEB73705B2929AE9BE87091607388939332EF123
-
-# Email sent directly to teor, verified using relay contact info
-46.101.237.246:9030 orport=9001 id=75F1992FD3F403E9C082A5815EB5D12934CDF46C ipv6=[2a03:b0c0:3:d0::208:5001]:9050
-178.62.86.96:9030 orport=9001 id=439D0447772CB107B886F7782DBC201FA26B92D1 ipv6=[2a03:b0c0:1:d0::3cf:7001]:9050
-
-# Email sent directly to teor, verified using relay contact info
-# Very low bandwidth, stale consensues, excluded to cut down on warnings
-#91.233.106.121:80 orport=443 id=896364B7996F5DFBA0E15D1A2E06D0B98B555DD6
-
-# Email sent directly to teor, verified using relay contact info
-167.114.113.48:9030 orport=403 id=2EC0C66EA700C44670444280AABAB1EC78B722A0
-
-# Email sent directly to teor, verified using relay contact info
-# Assume details update is permanent
-213.141.138.174:9030 orport=9001 id=BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6 # Schakalium
-
-# Email sent directly to teor, verified using relay contact info
-95.128.43.164:80 orport=443 id=616081EC829593AF4232550DE6FFAA1D75B37A90 ipv6=[2a02:ec0:209:10::4]:443
-
-# Email sent directly to teor, verified using relay contact info
-166.82.21.200:9030 orport=9029 id=D5C33F3E203728EDF8361EA868B2939CCC43FAFB
-
-# Email sent directly to teor, verified using relay contact info
-91.121.54.8:9030 orport=9001 id=CBEE0F3303C8C50462A12107CA2AE061831931BC
-
-# Email sent directly to teor, verified using relay contact info
-178.217.184.32:8080 orport=443 id=8B7F47AE1A5D954A3E58ACDE0865D09DBA5B738D
-
-# Email sent directly to teor, verified using relay contact info
-85.10.201.47:9030 orport=9001 id=D8B7A3A6542AA54D0946B9DC0257C53B6C376679 ipv6=[2a01:4f8:a0:43eb::beef]:9001
-
-# Email sent directly to teor, verified using relay contact info
-120.29.217.46:80 orport=443 id=5E853C94AB1F655E9C908924370A0A6707508C62
-
-# Email sent directly to teor, verified using relay contact info
-37.153.1.10:9030 orport=9001 id=9772EFB535397C942C3AB8804FB35CFFAD012438
-
-# Email sent directly to teor, verified using relay contact info
-92.222.4.102:9030 orport=9001 id=1A6B8B8272632D8AD38442027F822A367128405C
-
-# Email sent directly to teor, verified using relay contact info
-31.31.78.49:80 orport=443 id=46791D156C9B6C255C2665D4D8393EC7DBAA7798
-
-# Email sent directly to teor
-192.160.102.169:80 orport=9001 id=C0192FF43E777250084175F4E59AC1BA2290CE38 ipv6=[2620:132:300c:c01d::9]:9002 # manipogo
-192.160.102.166:80 orport=9001 id=547DA56F6B88B6C596B3E3086803CDA4F0EF8F21 ipv6=[2620:132:300c:c01d::6]:9002 # chaucer
-192.160.102.170:80 orport=9001 id=557ACEC850F54EEE65839F83CACE2B0825BE811E ipv6=[2620:132:300c:c01d::a]:9002 # ogopogo
-192.160.102.164:80 orport=9001 id=823AA81E277F366505545522CEDC2F529CE4DC3F ipv6=[2620:132:300c:c01d::4]:9002 # snowfall
-192.160.102.165:80 orport=9001 id=C90CA3B7FE01A146B8268D56977DC4A2C024B9EA ipv6=[2620:132:300c:c01d::5]:9002 # cowcat
-192.160.102.168:80 orport=9001 id=F6A358DD367B3282D6EF5824C9D45E1A19C7E815 ipv6=[2620:132:300c:c01d::8]:9002 # prawksi
-
-# Email sent directly to teor, verified using relay contact info
-136.243.214.137:80 orport=443 id=B291D30517D23299AD7CEE3E60DFE60D0E3A4664
-
-# Email sent directly to teor, verified using relay contact info
-192.87.28.28:9030 orport=9001 id=ED2338CAC2711B3E331392E1ED2831219B794024
-192.87.28.82:9030 orport=9001 id=844AE9CAD04325E955E2BE1521563B79FE7094B7
-
-# Email sent directly to teor, verified using relay contact info
-192.87.28.28:9030 orport=9001 id=ED2338CAC2711B3E331392E1ED2831219B794024
-# same machine as ED2338CAC2711B3E331392E1ED2831219B794024
-192.87.28.82:9030 orport=9001 id=844AE9CAD04325E955E2BE1521563B79FE7094B7
-
-# https://twitter.com/kosjoli/status/719507270904758272
-85.10.202.87:9030 orport=9001 id=971AFB23C168DCD8EDA17473C1C452B359DE3A5A
-176.9.5.116:9030 orport=9001 id=A1EB8D8F1EE28DB98BBB1EAA3B4BEDD303BAB911
-46.4.111.124:9030 orport=9001 id=D9065F9E57899B3D272AA212317AF61A9B14D204
-
-# Email sent directly to teor, verified using relay contact info
-185.100.85.61:80 orport=443 id=025B66CEBC070FCB0519D206CF0CF4965C20C96E
-
-# Email sent directly to teor, verified using relay contact info
-108.166.168.158:80 orport=443 id=CDAB3AE06A8C9C6BF817B3B0F1877A4B91465699
-
-# Email sent directly to teor, verified using relay contact info
-91.219.236.222:80 orport=443 id=20704E7DD51501DC303FA51B738D7B7E61397CF6
-
-# Email sent directly to teor, verified using relay contact info
-185.14.185.240:9030 orport=443 id=D62FB817B0288085FAC38A6DC8B36DCD85B70260
-192.34.63.137:9030 orport=443 id=ABCB4965F1FEE193602B50A365425105C889D3F8
-128.199.197.16:9030 orport=443 id=DEE5298B3BA18CDE651421CD2DCB34A4A69F224D
-
-# Email sent directly to teor, verified using relay contact info
-185.13.38.75:9030 orport=9001 id=D2A1703758A0FBBA026988B92C2F88BAB59F9361
-
-# Email sent directly to teor, verified using relay contact info
-128.204.39.106:9030 orport=9001 id=6F0F3C09AF9580F7606B34A7678238B3AF7A57B7
-
-# Email sent directly to teor, verified using relay contact info
-198.50.191.95:80 orport=443 id=39F096961ED2576975C866D450373A9913AFDC92
-
-# Email sent directly to teor, verified using relay contact info
-167.114.66.61:9696 orport=443 id=DE6CD5F09DF26076F26321B0BDFBE78ACD935C65 ipv6=[2607:5300:100::78d]:443
-
-# Email sent directly to teor, verified using relay contact info
-66.111.2.20:9030 orport=9001 id=9A68B85A02318F4E7E87F2828039FBD5D75B0142
-66.111.2.16:9030 orport=9001 id=3F092986E9B87D3FDA09B71FA3A602378285C77A
-
-# Email sent directly to teor, verified using relay contact info
-92.222.38.67:80 orport=443 id=DED6892FF89DBD737BA689698A171B2392EB3E82
-
-# Email sent directly to teor, verified using relay contact info
-212.47.228.115:9030 orport=443 id=BCA017ACDA48330D02BB70716639ED565493E36E
-
-# Email sent directly to teor, verified using relay contact info
-185.100.84.175:80 orport=443 id=39B59AF4FE54FAD8C5085FA9C15FDF23087250DB
-
-# Email sent directly to teor, verified using relay contact info
-166.70.207.2:9030 orport=9001 id=E3DB2E354B883B59E8DC56B3E7A353DDFD457812
-
-# Emails sent directly to teor, verified using relay contact info
-69.162.139.9:9030 orport=9001 id=4791FC0692EAB60DF2BCCAFF940B95B74E7654F6 ipv6=[2607:f128:40:1212::45a2:8b09]:9001
-
-# Email sent directly to teor, verified using relay contact info
-213.239.217.18:1338 orport=1337 id=C37BC191AC389179674578C3E6944E925FE186C2 ipv6=[2a01:4f8:a0:746a:101:1:1:1]:1337
-
-# Email sent directly to teor, verified using relay contact info
-# Assume details update is permanent
-188.40.128.246:9030 orport=9001 id=AD19490C7DBB26D3A68EFC824F67E69B0A96E601 ipv6=[2a01:4f8:221:1ac1:dead:beef:7005:9001]:9001 # sputnik
-
-# Email sent directly to teor, verified using relay contact info
-88.198.253.13:9030 orport=9001 id=DF924196D69AAE3C00C115A9CCDF7BB62A175310 ipv6=[2a01:4f8:11a:b1f::2]:9001
-
-# Email sent directly to teor, verified using relay contact info
-185.100.86.128:9030 orport=9001 id=9B31F1F1C1554F9FFB3455911F82E818EF7C7883
-46.36.36.127:9030 orport=9001 id=C80DF89B21FF932DEC0D7821F679B6C79E1449C3
-
-# Email sent directly to teor, verified using relay contact info
-176.10.104.240:80 orport=443 id=0111BA9B604669E636FFD5B503F382A4B7AD6E80
-176.10.104.240:8080 orport=8443 id=AD86CD1A49573D52A7B6F4A35750F161AAD89C88
-176.10.104.243:80 orport=443 id=88487BDD980BF6E72092EE690E8C51C0AA4A538C
-176.10.104.243:8080 orport=8443 id=95DA61AEF23A6C851028C1AA88AD8593F659E60F
-
-# Email sent directly to teor, verified using relay contact info
-107.170.101.39:9030 orport=443 id=30973217E70AF00EBE51797FF6D9AA720A902EAA
-
-# Email sent directly to teor
-193.70.112.165:80 orport=443 id=F10BDE279AE71515DDCCCC61DC19AC8765F8A3CC # ParkBenchInd001
-
-# Email sent directly to teor
-185.220.101.6:10006 orport=20006 id=C08DE49658E5B3CFC6F2A952B453C4B608C9A16A # niftyvolcanorabbit
-185.220.101.13:10013 orport=20013 id=71AB4726D830FAE776D74AEF790CF04D8E0151B4 # niftycottontail
-185.220.101.5:10005 orport=20005 id=1084200B44021D308EA4253F256794671B1D099A # niftyhedgehog
-185.220.101.9:10009 orport=20009 id=14877C6384A9E793F422C8D1DDA447CACA4F7C4B # niftywoodmouse
-185.220.101.8:10008 orport=20008 id=24E91955D969AEA1D80413C64FE106FAE7FD2EA9 # niftymouse
-185.220.101.1:10001 orport=20001 id=28F4F392F8F19E3FBDE09616D9DB8143A1E2DDD3 # niftycottonmouse
-185.220.101.21:10021 orport=20021 id=348B89013EDDD99E4755951D1EC284D9FED71226 # niftysquirrel
-185.220.101.10:10010 orport=20010 id=4031460683AE9E0512D3620C2758D98758AC6C93 # niftyeuropeanrabbit
-185.220.101.34:10034 orport=20034 id=47C42E2094EE482E7C9B586B10BABFB67557030B # niftyquokka
-185.220.101.18:10018 orport=20018 id=5D5006E4992F2F97DF4F8B926C3688870EB52BD8 # niftyplagiodontia
-185.220.101.28:10028 orport=20028 id=609E598FB6A00BCF7872906B602B705B64541C50 # niftychipmunk
-185.220.101.20:10020 orport=20020 id=619349D82424C601CAEB94161A4CF778993DAEE7 # niftytucotuco
-185.220.101.17:10017 orport=20017 id=644DECC5A1879C0FE23DE927DD7049F58BBDF349 # niftyhutia
-185.220.101.0:10000 orport=20000 id=6E94866ED8CA098BACDFD36D4E8E2B459B8A734E # niftybeaver
-185.220.101.30:10030 orport=20030 id=71CFDEB4D9E00CCC3E31EC4E8A29E109BBC1FB36 # niftypedetidae
-185.220.101.29:10029 orport=20029 id=7DC52AE6667A30536BA2383CD102CFC24F20AD71 # niftyllipika
-185.220.101.41:10041 orport=20041 id=7E281CD2C315C4F7A84BC7C8721C3BC974DDBFA3 # niftyporcupine
-185.220.101.25:10025 orport=20025 id=8EE0534532EA31AA5172B1892F53B2F25C76EB02 # niftyjerboa
-185.220.101.33:10033 orport=20033 id=906DCB390F2BA987AE258D745E60BAAABAD31DE8 # niftyquokka
-185.220.101.26:10026 orport=20026 id=92A6085EABAADD928B6F8E871540A1A41CBC08BA # niftypedetes
-185.220.101.40:10040 orport=20040 id=9A857254F379194D1CD76F4A79A20D2051BEDA3F # niftynutria
-185.220.101.42:10042 orport=20042 id=9B816A5B3EB20B8E4E9B9D1FBA299BD3F40F0320 # niftypygmyjerboa
-185.220.101.2:10002 orport=20002 id=B740BCECC4A9569232CDD45C0E1330BA0D030D33 # niftybunny
-185.220.101.32:10032 orport=20032 id=B771AA877687F88E6F1CA5354756DF6C8A7B6B24 # niftypika
-185.220.101.12:10012 orport=20012 id=BC82F2190DE2E97DE65F49B4A95572374BDC0789 # niftycapybara
-185.220.101.22:10022 orport=20022 id=CA37CD46799449D83B6B98B8C22C649906307888 # niftyjackrabbit
-185.220.101.4:10004 orport=20004 id=CDA2EA326E2272C57ACB26773D7252C211795B78 # niftygerbil
-185.220.101.14:10014 orport=20014 id=E7EBA5D8A4E09684D11A1DF24F75362817333768 # niftyhare
-185.220.101.16:10016 orport=20016 id=EC1997D51892E4607C68E800549A1E7E4694005A # niftyguineapig
-185.220.101.24:10024 orport=20024 id=FDA70EC93DB01E3CB418CB6943B0C68464B18B4C # niftyrat
-
-# Email sent directly to teor, verified using relay contact info
-64.113.32.29:9030 orport=9001 id=30C19B81981F450C402306E2E7CFB6C3F79CB6B2
-
-# Emails sent directly to teor, verified using relay contact info
-51.254.101.242:9002 orport=9001 id=4CC9CC9195EC38645B699A33307058624F660CCF
-
-# Emails sent directly to teor, verified using relay contact info
-85.214.62.48:80 orport=443 id=6A7551EEE18F78A9813096E82BF84F740D32B911
-
-# Email sent directly to teor, verified using relay contact info
-173.255.245.116:9030 orport=9001 id=91E4015E1F82DAF0121D62267E54A1F661AB6DC7
-
-# Email sent directly to teor, verified using relay contact info
-62.216.5.120:9030 orport=9001 id=D032D4D617140D6B828FC7C4334860E45E414FBE
-
-# Email sent directly to teor, verified using relay contact info
-51.254.136.195:80 orport=443 id=7BB70F8585DFC27E75D692970C0EEB0F22983A63
-
-# Email sent directly to teor, verified using relay contact info
-163.172.13.165:9030 orport=9001 id=33DA0CAB7C27812EFF2E22C9705630A54D101FEB ipv6=[2001:bc8:38cb:201::8]:9001
-
-# Email sent directly to teor, verified using relay contact info
-5.196.88.122:9030 orport=9001 id=0C2C599AFCB26F5CFC2C7592435924C1D63D9484 ipv6=[2001:41d0:a:fb7a::1]:9001
-
-# Email sent directly to teor, verified using relay contact info
-5.9.158.75:80 orport=443 id=1AF72E8906E6C49481A791A6F8F84F8DFEBBB2BA ipv6=[2a01:4f8:190:514a::2]:443
-
-# Email sent directly to teor, verified using relay contact info
-46.101.169.151:9030 orport=9001 id=D760C5B436E42F93D77EF2D969157EEA14F9B39C ipv6=[2a03:b0c0:3:d0::74f:a001]:9001
-
-# Email sent directly to teor, verified using relay contact info
-199.249.223.81:80 orport=443 id=F7447E99EB5CBD4D5EB913EE0E35AC642B5C1EF3
-199.249.223.79:80 orport=443 id=D33292FEDE24DD40F2385283E55C87F85C0943B6
-199.249.223.78:80 orport=443 id=EC15DB62D9101481F364DE52EB8313C838BDDC29
-199.249.223.77:80 orport=443 id=CC4A3AE960E3617F49BF9887B79186C14CBA6813
-199.249.223.76:80 orport=443 id=43209F6D50C657A56FE79AF01CA69F9EF19BD338
-199.249.223.75:80 orport=443 id=60D3667F56AEC5C69CF7E8F557DB21DDF6C36060
-199.249.223.74:80 orport=443 id=5F4CD12099AF20FAF9ADFDCEC65316A376D0201C
-199.249.223.73:80 orport=443 id=5649CB2158DA94FB747415F26628BEC07FA57616
-199.249.223.72:80 orport=443 id=B028707969D8ED84E6DEA597A884F78AAD471971
-199.249.223.71:80 orport=443 id=B6320E44A230302C7BF9319E67597A9B87882241
-199.249.223.60:80 orport=443 id=B7047FBDE9C53C39011CA84E5CB2A8E3543066D0
-199.249.223.61:80 orport=443 id=40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD
-199.249.223.62:80 orport=443 id=0077BCBA7244DB3E6A5ED2746E86170066684887
-199.249.223.63:80 orport=443 id=1DB25DF59DAA01B5BE3D3CEB8AFED115940EBE8B
-199.249.223.64:80 orport=443 id=9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A
-199.249.223.65:80 orport=443 id=9D21F034C3BFF4E7737D08CF775DC1745706801F
-199.249.223.66:80 orport=443 id=C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2
-199.249.223.67:80 orport=443 id=155D6F57425F16C0624D77777641E4EB1B47C6F0
-199.249.223.68:80 orport=443 id=DF20497E487A979995D851A5BCEC313DF7E5BC51
-199.249.223.69:80 orport=443 id=7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3
-
-# https://lists.torproject.org/pipermail/tor-relays/2016-December/011114.html
-86.105.212.130:9030 orport=443 id=9C900A7F6F5DD034CFFD192DAEC9CCAA813DB022
-
-# Email sent directly to teor, verified using relay contact info
-178.33.183.251:80 orport=443 id=DD823AFB415380A802DCAEB9461AE637604107FB ipv6=[2001:41d0:2:a683::251]:443
-
-# Email sent directly to teor, verified using relay contact info
-31.185.104.19:80 orport=443 id=9EAD5B2D3DBD96DBC80DCE423B0C345E920A758D
-# same machine as 9EAD5B2D3DBD96DBC80DCE423B0C345E920A758D
-31.185.104.20:80 orport=443 id=ADB2C26629643DBB9F8FE0096E7D16F9414B4F8D
-31.185.104.21:80 orport=443 id=C2AAB088555850FC434E68943F551072042B85F1
-31.185.104.22:80 orport=443 id=5BA3A52760A0EABF7E7C3ED3048A77328FF0F148
-
-# Email sent directly to teor, verified using relay contact info
-185.34.60.114:80 orport=443 id=7F7A695DF6F2B8640A70B6ADD01105BC2EBC5135
-
-# https://lists.torproject.org/pipermail/tor-relays/2017-December/013939.html
-94.142.242.84:80 orport=443 id=AA0D167E03E298F9A8CD50F448B81FBD7FA80D56 ipv6=[2a02:898:24:84::1]:443 # rejozenger
-
-# Email sent directly to teor, verified using relay contact info
-185.129.62.62:9030 orport=9001 id=ACDD9E85A05B127BA010466C13C8C47212E8A38F ipv6=[2a06:d380:0:3700::62]:9001
-
-# Email sent directly to teor, verified using relay contact info
-# The e84 part of the IPv6 address does not have a leading 0 in the consensus
-81.30.158.213:9030 orport=9001 id=789EA6C9AE9ADDD8760903171CFA9AC5741B0C70 ipv6=[2001:4ba0:cafe:e84::1]:9001
-
-# https://lists.torproject.org/pipermail/tor-relays/2016-December/011209.html
-5.9.159.14:9030 orport=9001 id=0F100F60C7A63BED90216052324D29B08CFCF797
-
-# Email sent directly to teor, verified using relay contact info
-45.62.255.25:80 orport=443 id=3473ED788D9E63361D1572B7E82EC54338953D2A
-
-# Email sent directly to teor, verified using relay contact info
-217.79.179.177:9030 orport=9001 id=3E53D3979DB07EFD736661C934A1DED14127B684 ipv6=[2001:4ba0:fff9:131:6c4f::90d3]:9001
-
-# Email sent directly to teor, verified using relay contact info
-212.47.244.38:8080 orport=443 id=E81EF60A73B3809F8964F73766B01BAA0A171E20
-163.172.157.213:8080 orport=443 id=4623A9EC53BFD83155929E56D6F7B55B5E718C24
-163.172.139.104:8080 orport=443 id=68F175CCABE727AA2D2309BCD8789499CEE36ED7
-
-# Email sent directly to teor, verified using relay contact info
-163.172.223.200:80 orport=443 id=998BF3ED7F70E33D1C307247B9626D9E7573C438
-195.154.122.54:80 orport=443 id=64E99CB34C595A02A3165484BD1215E7389322C6
-
-# Email sent directly to teor, verified using relay contact info
-185.100.86.128:9030 orport=9001 id=9B31F1F1C1554F9FFB3455911F82E818EF7C7883
-185.100.85.101:9030 orport=9001 id=4061C553CA88021B8302F0814365070AAE617270
-31.171.155.108:9030 orport=9001 id=D3E5EDDBE5159388704D6785BE51930AAFACEC6F
-
-# Email sent directly to teor, verified using relay contact info
-89.163.247.43:9030 orport=9001 id=BC7ACFAC04854C77167C7D66B7E471314ED8C410 ipv6=[2001:4ba0:fff7:25::5]:9001
-
-# Email sent directly to teor, verified using relay contact info
-95.85.8.226:80 orport=443 id=1211AC1BBB8A1AF7CBA86BCE8689AA3146B86423
-
-# Email sent directly to teor, verified using relay contact info
-85.214.151.72:9030 orport=9001 id=722D365140C8C52DBB3C9FF6986E3CEFFE2BA812
-
-# email sent directly to teor
-72.52.75.27:9030 orport=9001 id=8567AD0A6369ED08527A8A8533A5162AC00F7678 # piecoopdotnet
-
-# Email sent directly to teor, verified using relay contact info
-5.9.146.203:80 orport=443 id=1F45542A24A61BF9408F1C05E0DCE4E29F2CBA11
-5.9.159.14:9030 orport=9001 id=0F100F60C7A63BED90216052324D29B08CFCF797
-
-# Email sent directly to teor, verified using relay contact info
-# Assume details update is permanent
-5.9.147.226:9030 orport=9001 id=B0553175AADB0501E5A61FC61CEA3970BE130FF2 ipv6=[2a01:4f8:190:30e1::2]:9001 # zwiubel
-
-# https://trac.torproject.org/projects/tor/ticket/22527#comment:1
-199.184.246.250:80 orport=443 id=1F6ABD086F40B890A33C93CC4606EE68B31C9556 ipv6=[2620:124:1009:1::171]:443
-
-# https://trac.torproject.org/projects/tor/ticket/24695
-163.172.53.84:143 orport=21 id=1C90D3AEADFF3BCD079810632C8B85637924A58E ipv6=[2001:bc8:24f8::]:21 # Multivac
-
-# Email sent directly to teor
-54.36.237.163:80 orport=443 id=DB2682153AC0CCAECD2BD1E9EBE99C6815807A1E # GermanCraft2
-
-# Email sent directly to teor
-62.138.7.171:9030 orport=9001 id=9844B981A80B3E4B50897098E2D65167E6AEF127 # 0x3d004
-62.138.7.171:8030 orport=8001 id=9285B22F7953D7874604EEE2B470609AD81C74E9 # 0x3d005
-91.121.23.100:9030 orport=9001 id=3711E80B5B04494C971FB0459D4209AB7F2EA799 # 0x3d002
-91.121.23.100:8030 orport=8001 id=CFBBA0D858F02E40B1432A65F6D13C9BDFE7A46B # 0x3d001
-51.15.13.245:9030 orport=9001 id=CED527EAC230E7B56E5B363F839671829C3BA01B # 0x3d006
-51.15.13.245:8030 orport=8001 id=8EBB8D1CF48FE2AB95C451DA8F10DB6235F40F8A # 0x3d007
-
-# Email sent directly to teor
-104.192.5.248:9030 orport=9001 id=BF735F669481EE1CCC348F0731551C933D1E2278 # Freeway11
-
-# Email sent directly to teor
-# https://lists.torproject.org/pipermail/tor-relays/2017-December/013961.html
-178.17.174.14:9030 orport=9001 id=B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1 # TorExitMoldova
-178.17.170.156:9030 orport=9001 id=41C59606AFE1D1AA6EC6EF6719690B856F0B6587 # TorExitMoldova2
-
-# Email sent directly to teor
-163.172.221.44:59030 orport=59001 id=164604F5C86FC8CC9C0288BD9C02311958427597 # altego
-
-# Email sent directly to teor
-46.38.237.221:9030 orport=9001 id=D30E9D4D639068611D6D96861C95C2099140B805 # mine
-
-# https://lists.torproject.org/pipermail/tor-relays/2017-December/013911.html
-# https://lists.torproject.org/pipermail/tor-relays/2017-December/013912.html
-199.249.223.62:80 orport=443 id=0077BCBA7244DB3E6A5ED2746E86170066684887 # Quintex13
-199.249.224.45:80 orport=443 id=041646640AB306EA74B001966E86169B04CC88D2 # QuintexAirVPN26
-199.249.223.67:80 orport=443 id=155D6F57425F16C0624D77777641E4EB1B47C6F0 # Quintex18
-199.249.223.45:80 orport=443 id=1AE949967F82BBE7534A3D6BA77A7EBE1CED4369 # Quintex36
-199.249.223.63:80 orport=443 id=1DB25DF59DAA01B5BE3D3CEB8AFED115940EBE8B # Quintex14
-199.249.224.63:80 orport=443 id=1E5136DDC52FAE1219208F0A6BADB0BA62587EE6 # Quintex43
-199.249.224.46:80 orport=443 id=2ED4D25766973713EB8C56A290BF07E06B85BF12 # QuintexAirVPN27
-199.249.223.42:80 orport=443 id=3687FEC7E73F61AC66F7AE251E7DEE6BBD8C0252 # Quintex33
-199.249.223.49:80 orport=443 id=36D68478366CB8627866757EBCE7FB3C17FC1CB8 # Quintex40
-199.249.224.49:80 orport=443 id=3CA0D15567024D2E0B557DC0CF3E962B37999A79 # QuintexAirVPN30
-199.249.223.61:80 orport=443 id=40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD # Quintex12
-199.249.223.76:80 orport=443 id=43209F6D50C657A56FE79AF01CA69F9EF19BD338 # QuintexAirVPN5
-199.249.224.41:80 orport=443 id=54A4820B46E65509BF3E2B892E66930A41759DE9 # QuintexAirVPN22
-199.249.223.73:80 orport=443 id=5649CB2158DA94FB747415F26628BEC07FA57616 # QuintexAirVPN8
-199.249.223.74:80 orport=443 id=5F4CD12099AF20FAF9ADFDCEC65316A376D0201C # QuintexAirVPN7
-199.249.223.75:80 orport=443 id=60D3667F56AEC5C69CF7E8F557DB21DDF6C36060 # QuintexAirVPN6
-199.249.223.46:80 orport=443 id=66E19E8C4773086F669A1E06A3F8C23B6C079129 # Quintex37
-199.249.224.65:80 orport=443 id=764BF8A03868F84C8F323C1A676AA254B80DC3BF # Quintex45
-199.249.223.48:80 orport=443 id=7A3DD280EA4CD4DD16EF8C67B93D9BDE184D1A81 # Quintex39
-199.249.224.68:80 orport=443 id=7E6E9A6FDDB8DC7C92F0CFCC3CBE76C29F061799 # Quintex48
-199.249.223.69:80 orport=443 id=7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3 # Quintex20
-199.249.223.44:80 orport=443 id=8B80169BEF71450FC4069A190853523B7AEA45E1 # Quintex35
-199.249.224.60:80 orport=443 id=9314BD9503B9014261A65C221D77E57389DBCCC1 # Quintex50
-199.249.224.40:80 orport=443 id=9C1E7D92115D431385B8CAEA6A7C15FB89CE236B # QuintexAirVPN21
-199.249.223.65:80 orport=443 id=9D21F034C3BFF4E7737D08CF775DC1745706801F # Quintex16
-199.249.224.67:80 orport=443 id=9E2D7C6981269404AA1970B53891701A20424EF8 # Quintex47
-199.249.223.64:80 orport=443 id=9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A # Quintex15
-199.249.224.48:80 orport=443 id=A0DB820FEC87C0405F7BF05DEE5E4ADED2BB9904 # QuintexAirVPN29
-199.249.224.64:80 orport=443 id=A4A393FEF48640961AACE92D041934B55348CEF9 # Quintex44
-199.249.223.72:80 orport=443 id=B028707969D8ED84E6DEA597A884F78AAD471971 # QuintexAirVPN9
-199.249.223.40:80 orport=443 id=B0CD9F9B5B60651ADC5919C0F1EAA87DBA1D9249 # Quintex31
-199.249.224.61:80 orport=443 id=B2197C23A4FF5D1C49EE45BA7688BA8BCCD89A0B # Quintex41
-199.249.223.71:80 orport=443 id=B6320E44A230302C7BF9319E67597A9B87882241 # QuintexAirVPN10
-199.249.223.60:80 orport=443 id=B7047FBDE9C53C39011CA84E5CB2A8E3543066D0 # Quintex11
-199.249.224.66:80 orport=443 id=C78AFFEEE320EA0F860961763E613FD2FAC855F5 # Quintex46
-199.249.224.44:80 orport=443 id=CB7C0D841FE376EF43F7845FF201B0290C0A239E # QuintexAirVPN25
-199.249.223.47:80 orport=443 id=CC14C97F1D23EE97766828FC8ED8582E21E11665 # Quintex38
-199.249.223.77:80 orport=443 id=CC4A3AE960E3617F49BF9887B79186C14CBA6813 # QuintexAirVPN4
-199.249.223.41:80 orport=443 id=D25210CE07C49F2A4F2BC7A506EB0F5EA7F5E2C2 # Quintex32
-199.249.223.79:80 orport=443 id=D33292FEDE24DD40F2385283E55C87F85C0943B6 # QuintexAirVPN2
-199.249.224.47:80 orport=443 id=D6FF2697CEA5C0C7DA84797C2E71163814FC2466 # QuintexAirVPN28
-199.249.223.68:80 orport=443 id=DF20497E487A979995D851A5BCEC313DF7E5BC51 # Quintex19
-199.249.223.43:80 orport=443 id=E480D577F58E782A5BC4FA6F49A6650E9389302F # Quintex34
-199.249.224.69:80 orport=443 id=EABC2DD0D47B5DB11F2D37EB3C60C2A4D91C10F2 # Quintex49
-199.249.223.78:80 orport=443 id=EC15DB62D9101481F364DE52EB8313C838BDDC29 # QuintexAirVPN3
-199.249.224.42:80 orport=443 id=F21DE9C7DE31601D9716781E17E24380887883D1 # QuintexAirVPN23
-199.249.223.81:80 orport=443 id=F7447E99EB5CBD4D5EB913EE0E35AC642B5C1EF3 # QuintexAirVPN1
-199.249.224.43:80 orport=443 id=FDD700C791CC6BB0AC1C2099A82CBC367AD4B764 # QuintexAirVPN24
-199.249.224.62:80 orport=443 id=FE00A3A835680E67FBBC895A724E2657BB253E97 # Quintex42
-199.249.223.66:80 orport=443 id=C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2 # Quintex17
-
-# https://lists.torproject.org/pipermail/tor-relays/2017-December/013914.html
-5.196.23.64:9030 orport=9001 id=775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E # Aerodynamik01
-217.182.75.181:9030 orport=9001 id=EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F # Aerodynamik02
-193.70.43.76:9030 orport=9001 id=484A10BA2B8D48A5F0216674C8DD50EF27BC32F3 # Aerodynamik03
-149.56.141.138:9030 orport=9001 id=1938EBACBB1A7BFA888D9623C90061130E63BB3F # Aerodynamik04
-
-# https://lists.torproject.org/pipermail/tor-relays/2017-December/013917.html
-104.200.20.46:80 orport=9001 id=78E2BE744A53631B4AAB781468E94C52AB73968B # bynumlawtor
-
-# https://lists.torproject.org/pipermail/tor-relays/2017-December/013929.html
-139.99.130.178:80 orport=443 id=867B95CACD64653FEEC4D2CEFC5C49B4620307A7 # coffswifi2
-
-# https://lists.torproject.org/pipermail/tor-relays/2017-December/013946.html
-172.98.193.43:80 orport=443 id=5E56738E7F97AA81DEEF59AF28494293DFBFCCDF # Backplane
-
-# Email sent directly to teor
-62.210.254.132:80 orport=443 id=8456DFA94161CDD99E480C2A2992C366C6564410 # turingmachine
-
-# Email sent directly to teor
-80.127.117.180:80 orport=443 id=328E54981C6DDD7D89B89E418724A4A7881E3192 ipv6=[2001:985:e77:10::4]:443 # sjc01
-
-# https://lists.torproject.org/pipermail/tor-relays/2017-December/013960.html
-51.15.205.214:9030 orport=9001 id=8B6556601612F1E2AFCE2A12FFFAF8482A76DD1F ipv6=[2001:bc8:4400:2500::5:b07]:9001 # titania1
-51.15.205.214:9031 orport=9002 id=5E363D72488276160D062DDD2DFA25CFEBAF5EA9 ipv6=[2001:bc8:4400:2500::5:b07]:9002 # titania2
-
-# Email sent directly to teor
-185.129.249.124:9030 orport=9001 id=1FA8F638298645BE58AC905276680889CB795A94 # treadstone
-
-# https://lists.torproject.org/pipermail/tor-relays/2017-December/014000.html
-24.117.231.229:34175 orport=45117 id=CE24412AD69444954B4015E293AE53DDDAFEA3D6 # Anosognosia
-
-# https://lists.torproject.org/pipermail/tor-relays/2018-January/014012.html
-128.31.0.13:80 orport=443 id=A53C46F5B157DD83366D45A8E99A244934A14C46 # csailmitexit
-
-# Email sent directly to teor
-82.247.103.117:110 orport=995 id=C9B3C1661A9577BA24C1C2C6123918921A495509 # Casper01
-109.238.2.79:110 orport=995 id=7520892E3DD133D0B0464D01A158B54B8E2A8B75 # Casper02
-51.15.179.153:110 orport=995 id=BB60F5BA113A0B8B44B7B37DE3567FE561E92F78 # Casper04
-
-# Email sent directly to teor
-80.127.107.179:80 orport=443 id=BC6B2E2F62ACC5EDECBABE64DA1E48F84DD98B78 ipv6=[2001:981:4a22:c::6]:443 # TVISION02
-
-# https://lists.torproject.org/pipermail/tor-relays/2018-January/014020.html
-37.120.174.249:80 orport=443 id=11DF0017A43AF1F08825CD5D973297F81AB00FF3 ipv6=[2a03:4000:6:724c:df98:15f9:b34d:443]:443 # gGDHjdcC6zAlM8k08lX
-
-# These fallbacks opted-in in previous releases, then changed their details,
-# and so we blacklisted them. Now we want to whitelist changes.
-# Assume details update is permanent
-85.230.184.93:9030 orport=443 id=855BC2DABE24C861CD887DB9B2E950424B49FC34 # Logforme
-176.31.180.157:143 orport=22 id=E781F4EC69671B3F1864AE2753E0890351506329 ipv6=[2001:41d0:8:eb9d::1]:22 # armbrust
-
-# https://lists.torproject.org/pipermail/tor-relays/2018-January/014024.html
-82.161.212.209:9030 orport=9001 id=4E8CE6F5651E7342C1E7E5ED031E82078134FB0D ipv6=[2001:980:d7ed:1:ff:b0ff:fe00:d0b]:9001 # ymkeo
diff --git a/scripts/maint/format_changelog.py b/scripts/maint/format_changelog.py
index 8dce4b6e51..93ab56e257 100755
--- a/scripts/maint/format_changelog.py
+++ b/scripts/maint/format_changelog.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
# Copyright (c) 2014-2019, The Tor Project, Inc.
# See LICENSE for licensing information
#
@@ -9,6 +9,11 @@
# To run it, pipe a section of the changelog (starting with "Changes
# in Tor 0.x.y.z-alpha") through the script.
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
import os
import re
import sys
@@ -190,7 +195,7 @@ def body_parser(line):
elif re.match(r'^\s+\S', line):
return TP_HEADTEXT
else:
- print "Weird line %r"%line
+ print("Weird line %r"%line, file=sys.stderr)
def clean_head(head):
return head
@@ -198,7 +203,7 @@ def clean_head(head):
def head_score(s):
m = re.match(r'^ +o (.*)', s)
if not m:
- print >>sys.stderr, "Can't score %r"%s
+ print("Can't score %r"%s, file=sys.stderr)
return 99999
lw = m.group(1).lower()
if lw.startswith("security") and "feature" not in lw:
@@ -286,12 +291,12 @@ class ChangeLog(object):
self.curgraf.append(line)
else:
- assert "This" is "unreachable"
+ assert False # This should be unreachable.
def lint_head(self, line, head):
m = re.match(r'^ *o ([^\(]+)((?:\([^\)]+\))?):', head)
if not m:
- print >>sys.stderr, "Weird header format on line %s"%line
+ print("Weird header format on line %s"%line, file=sys.stderr)
def lint_item(self, line, grafs, head_type):
pass
@@ -306,7 +311,7 @@ class ChangeLog(object):
def dumpGraf(self,par,indent1,indent2=-1):
if not self.wrapText:
for line in par:
- print line
+ print(line)
return
if indent2 == -1:
@@ -320,17 +325,17 @@ class ChangeLog(object):
def dumpPreheader(self, graf):
self.dumpGraf(graf, 0)
- print
+ print()
def dumpMainhead(self, head):
- print head
+ print(head)
def dumpHeadGraf(self, graf):
self.dumpGraf(graf, 2)
- print
+ print()
def dumpSectionHeader(self, header):
- print header
+ print(header)
def dumpStartOfSections(self):
pass
@@ -339,10 +344,10 @@ class ChangeLog(object):
pass
def dumpEndOfSection(self):
- print
+ print()
def dumpEndOfChangelog(self):
- print
+ print()
def dumpDrupalBreak(self):
pass
@@ -350,7 +355,7 @@ class ChangeLog(object):
def dumpItem(self, grafs):
self.dumpGraf(grafs[0],4,6)
for par in grafs[1:]:
- print
+ print()
self.dumpGraf(par,6,6)
def collateAndSortSections(self):
@@ -389,7 +394,7 @@ class ChangeLog(object):
self.dumpStartOfSections()
for _,head,items in self.sections:
if not head.endswith(':'):
- print >>sys.stderr, "adding : to %r"%head
+ print("adding : to %r"%head, file=sys.stderr)
head = head + ":"
self.dumpSectionHeader(head)
for _,grafs in items:
@@ -400,10 +405,31 @@ class ChangeLog(object):
self.dumpEndOfSections()
self.dumpEndOfChangelog()
+# Map from issue prefix to pair of (visible prefix, url prefix)
+ISSUE_PREFIX_MAP = {
+ "" : ( "", "tpo/core/tor" ),
+ "tor#" : ( "", "tpo/core/tor" ),
+ "chutney#" : ( "chutney#", "tpo/core/chutney" ),
+ "torspec#" : ( "torspec#", "tpo/core/torspec" ),
+ "trunnel#" : ( "trunnel#", "tpo/core/trunnel" ),
+ "torsocks#" : ( "torsocks#", "tpo/core/torsocks"),
+}
+
# Let's turn bugs to html.
-BUG_PAT = re.compile('(bug|ticket|feature)\s+(\d{4,5})', re.I)
+BUG_PAT = re.compile(r'(bug|ticket|issue|feature)\s+([\w/]+#)?(\d{4,6})', re.I)
def bug_html(m):
- return "%s <a href='https://bugs.torproject.org/%s'>%s</a>" % (m.group(1), m.group(2), m.group(2))
+ kind = m.group(1)
+ prefix = m.group(2) or ""
+ bugno = m.group(3)
+ try:
+ disp_prefix, url_prefix = ISSUE_PREFIX_MAP[prefix]
+ except KeyError:
+ print("Can't figure out URL for {}{}".formt(prefix,bugno),
+ file=sys.stderr)
+ return "{} {}{}".format(kind, prefix, bugno)
+
+ return "{} <a href='https://bugs.torproject.org/{}/{}'>{}{}</a>".format(
+ kind, url_prefix, bugno, disp_prefix, bugno)
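+
+# For example (a made-up entry, not from any real changelog), the map
+# above turns the text "bug tor#12345" into:
+#   bug <a href='https://bugs.torproject.org/tpo/core/tor/12345'>12345</a>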
class HTMLChangeLog(ChangeLog):
def __init__(self, *args, **kwargs):
@@ -445,16 +471,16 @@ class HTMLChangeLog(ChangeLog):
pass
def dumpStartOfSections(self):
- print "<ul>\n"
+ print("<ul>\n")
def dumpEndOfSections(self):
- print "</ul>\n"
+ print("</ul>\n")
def dumpDrupalBreak(self):
- print "\n</ul>\n"
- print "<p>&nbsp;</p>"
- print "\n<!--break-->\n\n"
- print "<ul>"
+ print("\n</ul>\n")
+ print("<p>&nbsp;</p>")
+ print("\n<!--break-->\n\n")
+ print("<ul>")
def dumpItem(self, grafs):
grafs[0][0] = grafs[0][0].replace(" - ", "", 1).lstrip()
@@ -464,7 +490,7 @@ class HTMLChangeLog(ChangeLog):
self.htmlPar(par)
else:
self.htmlText(grafs[0])
- print
+ print()
op = optparse.OptionParser(usage="usage: %prog [options] [filename]")
op.add_option('-W', '--no-wrap', action='store_false',
@@ -560,7 +586,7 @@ if options.firstOnly:
sys.exit(0)
if nextline is not None:
- print nextline
+ print(nextline)
for line in sys.stdin:
sys.stdout.write(line)
diff --git a/scripts/maint/generateFallbackDirLine.py b/scripts/maint/generateFallbackDirLine.py
deleted file mode 100755
index b856c938bf..0000000000
--- a/scripts/maint/generateFallbackDirLine.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-
-# Generate a fallback directory whitelist/blacklist line for every fingerprint
-# passed as an argument.
-#
-# Usage:
-# generateFallbackDirLine.py fingerprint ...
-
-import sys
-import urllib2
-
-import stem.descriptor.remote
-import stem.util.tor_tools
-
-if len(sys.argv) <= 1:
- print('Usage: %s fingerprint ...' % sys.argv[0])
- sys.exit(1)
-
-for fingerprint in sys.argv[1:]:
- if not stem.util.tor_tools.is_valid_fingerprint(fingerprint):
- print("'%s' isn't a valid relay fingerprint" % fingerprint)
- sys.exit(1)
-
- try:
- desc = stem.descriptor.remote.get_server_descriptors(fingerprint).run()[0]
- except urllib2.HTTPError as exc:
- if exc.code == 404:
- print('# %s not found in recent descriptors' % fingerprint)
- continue
- else:
- raise
-
- if not desc.dir_port:
- print("# %s needs a DirPort" % fingerprint)
- else:
- ipv6_addresses = [(address, port) for address, port, is_ipv6 in desc.or_addresses if is_ipv6]
- ipv6_field = ' ipv6=[%s]:%s' % ipv6_addresses[0] if ipv6_addresses else ''
- print('%s:%s orport=%s id=%s%s # %s' % (desc.address, desc.dir_port, desc.or_port, fingerprint, ipv6_field, desc.nickname))
diff --git a/scripts/maint/lintChanges.py b/scripts/maint/lintChanges.py
index d5b8fcae5c..cf7b09fcc3 100755
--- a/scripts/maint/lintChanges.py
+++ b/scripts/maint/lintChanges.py
@@ -1,7 +1,10 @@
-#!/usr/bin/python
+#!/usr/bin/env python
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
from __future__ import print_function
-from __future__ import with_statement
+from __future__ import unicode_literals
+
import sys
import re
import os
@@ -22,7 +25,12 @@ KNOWN_GROUPS = set([
"Code simplification and refactoring",
"Removed features",
"Deprecated features",
- "Directory authority changes"])
+ "Directory authority changes",
+
+ # These aren't preferred, but sortChanges knows how to clean them up.
+ "Code simplifications and refactoring",
+ "Code simplification and refactorings",
+ "Code simplifications and refactorings"])
NEEDS_SUBCATEGORIES = set([
"Minor bugfix",
@@ -35,6 +43,36 @@ NEEDS_SUBCATEGORIES = set([
"Major features",
])
+def split_tor_version(version):
+ '''
+ Return the initial numeric components of the Tor version as a list of ints.
+ For versions earlier than 0.1.0, returns MAJOR, MINOR, and MICRO.
+ For versions 0.1.0 and later, returns MAJOR, MINOR, MICRO, and PATCHLEVEL if present.
+
+ If the version is malformed, returns None.
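+
+ For example (illustrative): split_tor_version("0.3.4.8") returns
+ [0, 3, 4, 8], and split_tor_version("0.1.2") returns [0, 1, 2].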
+ '''
+ version_match = re.match(r'([0-9]+)\.([0-9]+)\.([0-9]+)(\.([0-9]+))?', version)
+ if version_match is None:
+ return None
+
+ version_groups = version_match.groups()
+ if version_groups is None:
+ return None
+ if len(version_groups) < 3:
+ return None
+
+ if len(version_groups) != 5:
+ return None
+ version_components = version_groups[0:3]
+ version_components += version_groups[4:5]
+
+ try:
+ version_list = [int(v) for v in version_components if v is not None]
+ except ValueError:
+ return None
+
+ return version_list
+
def lintfile(fname):
have_warned = []
@@ -87,6 +125,32 @@ def lintfile(fname):
warn("Bugfix does not say 'Fixes bug X; bugfix on Y'")
elif re.search('tor-([0-9]+)', contents):
warn("Do not prefix versions with 'tor-'. ('0.1.2', not 'tor-0.1.2'.)")
+ else:
+ bugfix_match = re.search(r'bugfix on ([0-9]+\.[0-9]+\.[0-9]+)', contents)
+ if bugfix_match is None:
+ warn("Versions must have at least 3 digits. ('0.1.2', '0.3.4.8', or '0.3.5.1-alpha'.)")
+ elif bugfix_match.group(0) is None:
+ warn("Versions must have at least 3 digits. ('0.1.2', '0.3.4.8', or '0.3.5.1-alpha'.)")
+ else:
+ bugfix_match = re.search(r'bugfix on ([0-9a-z][-.0-9a-z]+[0-9a-z])', contents)
+ bugfix_group = bugfix_match.groups() if bugfix_match is not None else None
+ bugfix_version = bugfix_group[0] if bugfix_group is not None else None
+ package_version = os.environ.get('PACKAGE_VERSION', None)
+ if bugfix_version is None:
+ # This should be unreachable, unless the patterns are out of sync
+ warn("Malformed bugfix version.")
+ elif package_version is not None:
+ # If $PACKAGE_VERSION isn't set, skip this check
+ bugfix_split = split_tor_version(bugfix_version)
+ package_split = split_tor_version(package_version)
+ if bugfix_split is None:
+ # This should be unreachable, unless the patterns are out of sync
+ warn("Malformed bugfix version: '{}'.".format(bugfix_version))
+ elif package_split is None:
+ # This should be unreachable, unless the patterns are out of sync, or the package versioning scheme has changed
+ warn("Malformed $PACKAGE_VERSION: '{}'.".format(package_version))
+ elif bugfix_split > package_split:
+ warn("Bugfixes must be made on earlier versions (or this version). (Bugfix on version: '{}', current tor package version: '{}'.)".format(bugfix_version, package_version))
return have_warned != []
diff --git a/scripts/maint/locatemissingdoxygen.py b/scripts/maint/locatemissingdoxygen.py
index 797bf8176f..a2844346d6 100755
--- a/scripts/maint/locatemissingdoxygen.py
+++ b/scripts/maint/locatemissingdoxygen.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
"""
This script parses the stderr output of doxygen and looks for undocumented
@@ -7,6 +7,11 @@
to highlight the undocumented stuff.
"""
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
import os
import re
import shutil
@@ -29,10 +34,10 @@ def buildWarnings():
def count(fn):
if os.path.abspath(fn) not in warnings:
- print "0\t%s"%fn
+ print("0\t%s"%fn)
else:
n = len(warnings[os.path.abspath(fn)])
- print "%d\t%s"%(n,fn)
+ print("%d\t%s"%(n,fn))
def getIndentation(line):
s = line.lstrip()
@@ -62,7 +67,7 @@ def annotate(filename):
if __name__ == '__main__':
if len(sys.argv) == 1:
- print "Usage: locatemissingdoxygen.py [-A] filename... <doxygen_log"
+ print("Usage: locatemissingdoxygen.py [-A] filename... <doxygen_log")
sys.exit(1)
buildWarnings()
if sys.argv[1] == '-A':
diff --git a/scripts/maint/lookupFallbackDirContact.py b/scripts/maint/lookupFallbackDirContact.py
deleted file mode 100755
index 14c53d1282..0000000000
--- a/scripts/maint/lookupFallbackDirContact.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env python
-
-# Lookup fallback directory contact lines for every fingerprint passed as an
-# argument.
-#
-# Usage:
-# lookupFallbackDirContact.py fingerprint ...
-
-import sys
-
-import stem.descriptor.remote as remote
-
-if len(sys.argv) <= 1:
- print "Usage: {} fingerprint ...".format(sys.argv[0])
- sys.exit(-1)
-
-# we need descriptors, because the consensus does not have contact infos
-descriptor_list = remote.get_server_descriptors(fingerprints=sys.argv[1:]).run()
-
-descriptor_list_fingerprints = []
-for d in descriptor_list:
- assert d.fingerprint in sys.argv[1:]
- descriptor_list_fingerprints.append(d.fingerprint)
- print "{} {}".format(d.fingerprint, d.contact)
-
-for fingerprint in sys.argv[1:]:
- if fingerprint not in descriptor_list_fingerprints:
- print "{} not found in current descriptors".format(fingerprint)
diff --git a/scripts/maint/practracker/README b/scripts/maint/practracker/README
new file mode 100644
index 0000000000..d978b39806
--- /dev/null
+++ b/scripts/maint/practracker/README
@@ -0,0 +1,21 @@
+Practracker is a simple Python tool that keeps track of places where
+our code is ugly, and tries to warn us about new ones or ones that
+get worse.
+
+Right now, practracker looks for the following kinds of
+best-practices violations:
+
+ .c files greater than 3000 lines long
+ .h files greater than 500 lines long
+ .c files with more than 50 includes
+ .h files with more than 15 includes
+
+ All files that include a local header not listed in a .may_include
+ file in the same directory, when that .may_include file has an
+ "!advisory" marker.
+
+The list of current violations is tracked in exceptions.txt; slight
+deviations from the current exceptions cause warnings, whereas large
+ones cause practracker to fail.
+
+For usage information, run "practracker.py --help".
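+
+For example, from the top of a Tor checkout, an invocation like this
+should scan the tree (an illustrative command; --help is authoritative):
+
+  python scripts/maint/practracker/practracker.py .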
diff --git a/scripts/maint/practracker/exceptions.txt b/scripts/maint/practracker/exceptions.txt
new file mode 100644
index 0000000000..711ef67d36
--- /dev/null
+++ b/scripts/maint/practracker/exceptions.txt
@@ -0,0 +1,325 @@
+# Welcome to the exceptions file for Tor's best-practices tracker!
+#
+# Each line of this file represents a single violation of Tor's best
+# practices -- typically, a violation that we had before practracker.py
+# first existed.
+#
+# There are four kinds of problems that we recognize right now:
+# function-size -- a function of more than 100 lines.
+# file-size -- a .c file of more than 3000 lines, or a .h
+# file with more than 500 lines.
+# include-count -- a .c file with more than 50 #includes,
+# or a .h file with more than 15 #includes.
+# dependency-violation -- a file includes a header that it should
+# not, according to an advisory .may_include file.
+#
+# Each line below represents a single exception that practracker should
+# _ignore_. Each line has four parts:
+# 1. The word "problem".
+# 2. The kind of problem.
+# 3. The location of the problem: either a filename, or a
+# filename:functionname pair.
+# 4. The magnitude of the problem to ignore.
+#
+# So for example, consider this line:
+# problem file-size /src/core/or/connection_or.c 3200
+#
+# It tells practracker to allow the mentioned file to be up to 3200 lines
+# long, even though ordinarily it would warn about any file with more than
+# 3000 lines.
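+#
+# Likewise, a line such as this (an illustrative entry, not one from
+# the list below):
+#   problem function-size /src/core/or/example.c:example_func() 120
+# would let example_func() grow to 120 lines before practracker
+# complains, instead of the usual 100.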
+#
+# You can either edit this file by hand, or regenerate it completely by
+# running `make practracker-regen`.
+#
+# Remember: It is better to fix the problem than to add a new exception!
+
+problem file-size /src/app/config/config.c 7525
+problem include-count /src/app/config/config.c 80
+problem function-size /src/app/config/config.c:options_act() 381
+problem function-size /src/app/config/config.c:options_validate_cb() 794
+problem function-size /src/app/config/config.c:options_init_from_torrc() 192
+problem function-size /src/app/config/config.c:options_init_from_string() 103
+problem function-size /src/app/config/config.c:options_init_logs() 125
+problem function-size /src/app/config/config.c:parse_bridge_line() 104
+problem function-size /src/app/config/config.c:pt_parse_transport_line() 190
+problem function-size /src/app/config/config.c:parse_dir_authority_line() 150
+problem function-size /src/app/config/config.c:parse_dir_fallback_line() 101
+problem function-size /src/app/config/config.c:port_parse_config() 435
+problem function-size /src/app/config/config.c:parse_ports() 132
+problem function-size /src/app/config/resolve_addr.c:resolve_my_address() 191
+problem file-size /src/app/config/or_options_st.h 1050
+problem include-count /src/app/main/main.c 68
+problem function-size /src/app/main/main.c:dumpstats() 102
+problem function-size /src/app/main/main.c:tor_init() 101
+problem function-size /src/app/main/main.c:sandbox_init_filter() 291
+problem function-size /src/app/main/main.c:run_tor_main_loop() 105
+problem function-size /src/app/main/ntmain.c:nt_service_install() 126
+problem dependency-violation /src/core/crypto/hs_ntor.c 1
+problem dependency-violation /src/core/crypto/hs_ntor.h 1
+problem dependency-violation /src/core/crypto/onion_crypto.c 5
+problem dependency-violation /src/core/crypto/onion_fast.c 1
+problem dependency-violation /src/core/crypto/onion_tap.c 3
+problem dependency-violation /src/core/crypto/relay_crypto.c 9
+problem file-size /src/core/mainloop/connection.c 5700
+problem include-count /src/core/mainloop/connection.c 65
+problem function-size /src/core/mainloop/connection.c:connection_free_minimal() 181
+problem function-size /src/core/mainloop/connection.c:connection_listener_new() 325
+problem function-size /src/core/mainloop/connection.c:connection_handle_listener_read() 161
+problem function-size /src/core/mainloop/connection.c:connection_read_proxy_handshake() 153
+problem function-size /src/core/mainloop/connection.c:retry_listener_ports() 112
+problem function-size /src/core/mainloop/connection.c:connection_handle_read_impl() 111
+problem function-size /src/core/mainloop/connection.c:connection_buf_read_from_socket() 186
+problem function-size /src/core/mainloop/connection.c:connection_handle_write_impl() 241
+problem function-size /src/core/mainloop/connection.c:assert_connection_ok() 143
+problem dependency-violation /src/core/mainloop/connection.c 47
+problem dependency-violation /src/core/mainloop/cpuworker.c 12
+problem include-count /src/core/mainloop/mainloop.c 64
+problem function-size /src/core/mainloop/mainloop.c:conn_close_if_marked() 107
+problem function-size /src/core/mainloop/mainloop.c:run_connection_housekeeping() 123
+problem dependency-violation /src/core/mainloop/mainloop.c 50
+problem dependency-violation /src/core/mainloop/mainloop_pubsub.c 1
+problem dependency-violation /src/core/mainloop/mainloop_sys.c 1
+problem dependency-violation /src/core/mainloop/netstatus.c 4
+problem dependency-violation /src/core/mainloop/periodic.c 2
+problem dependency-violation /src/core/or/address_set.c 1
+problem dependency-violation /src/core/or/cell_queue_st.h 1
+problem file-size /src/core/or/channel.c 3500
+problem dependency-violation /src/core/or/channel.c 9
+problem file-size /src/core/or/channel.h 800
+problem dependency-violation /src/core/or/channel.h 1
+problem dependency-violation /src/core/or/channelpadding.c 6
+problem function-size /src/core/or/channeltls.c:channel_tls_handle_var_cell() 160
+problem function-size /src/core/or/channeltls.c:channel_tls_process_versions_cell() 170
+problem function-size /src/core/or/channeltls.c:channel_tls_process_netinfo_cell() 214
+problem function-size /src/core/or/channeltls.c:channel_tls_process_certs_cell() 246
+problem function-size /src/core/or/channeltls.c:channel_tls_process_authenticate_cell() 202
+problem dependency-violation /src/core/or/channeltls.c 11
+problem include-count /src/core/or/circuitbuild.c 53
+problem function-size /src/core/or/circuitbuild.c:get_unique_circ_id_by_chan() 128
+problem function-size /src/core/or/circuitbuild.c:choose_good_exit_server_general() 206
+problem dependency-violation /src/core/or/circuitbuild.c 25
+problem include-count /src/core/or/circuitlist.c 55
+problem function-size /src/core/or/circuitlist.c:HT_PROTOTYPE() 109
+problem function-size /src/core/or/circuitlist.c:circuit_free_() 146
+problem function-size /src/core/or/circuitlist.c:circuit_find_to_cannibalize() 101
+problem function-size /src/core/or/circuitlist.c:circuits_handle_oom() 117
+problem dependency-violation /src/core/or/circuitlist.c 19
+problem dependency-violation /src/core/or/circuitlist.h 1
+problem function-size /src/core/or/circuitmux.c:circuitmux_set_policy() 109
+problem function-size /src/core/or/circuitmux.c:circuitmux_attach_circuit() 113
+problem dependency-violation /src/core/or/circuitmux_ewma.c 2
+problem file-size /src/core/or/circuitpadding.c 3101
+problem function-size /src/core/or/circuitpadding.c:circpad_machine_schedule_padding() 113
+problem dependency-violation /src/core/or/circuitpadding.c 6
+problem file-size /src/core/or/circuitpadding.h 813
+problem function-size /src/core/or/circuitpadding_machines.c:circpad_machine_relay_hide_intro_circuits() 103
+problem function-size /src/core/or/circuitpadding_machines.c:circpad_machine_client_hide_rend_circuits() 112
+problem dependency-violation /src/core/or/circuitpadding_machines.c 1
+problem function-size /src/core/or/circuitstats.c:circuit_build_times_parse_state() 123
+problem dependency-violation /src/core/or/circuitstats.c 11
+problem file-size /src/core/or/circuituse.c 3195
+problem function-size /src/core/or/circuituse.c:circuit_is_acceptable() 128
+problem function-size /src/core/or/circuituse.c:circuit_expire_building() 389
+problem function-size /src/core/or/circuituse.c:circuit_log_ancient_one_hop_circuits() 126
+problem function-size /src/core/or/circuituse.c:circuit_build_failed() 149
+problem function-size /src/core/or/circuituse.c:circuit_launch_by_extend_info() 108
+problem function-size /src/core/or/circuituse.c:circuit_get_open_circ_or_launch() 351
+problem function-size /src/core/or/circuituse.c:connection_ap_handshake_attach_circuit() 244
+problem dependency-violation /src/core/or/circuituse.c 24
+problem function-size /src/core/or/command.c:command_process_create_cell() 156
+problem function-size /src/core/or/command.c:command_process_relay_cell() 132
+problem dependency-violation /src/core/or/command.c 9
+problem file-size /src/core/or/connection_edge.c 4655
+problem include-count /src/core/or/connection_edge.c 65
+problem function-size /src/core/or/connection_edge.c:connection_ap_expire_beginning() 117
+problem function-size /src/core/or/connection_edge.c:connection_ap_handshake_rewrite() 193
+problem function-size /src/core/or/connection_edge.c:connection_ap_handle_onion() 185
+problem function-size /src/core/or/connection_edge.c:connection_ap_handshake_rewrite_and_attach() 420
+problem function-size /src/core/or/connection_edge.c:connection_ap_handshake_send_begin() 111
+problem function-size /src/core/or/connection_edge.c:connection_ap_handshake_socks_resolved() 101
+problem function-size /src/core/or/connection_edge.c:connection_exit_begin_conn() 185
+problem function-size /src/core/or/connection_edge.c:connection_exit_connect() 130
+problem dependency-violation /src/core/or/connection_edge.c 27
+problem dependency-violation /src/core/or/connection_edge.h 1
+problem function-size /src/core/or/connection_or.c:connection_or_group_set_badness_() 105
+problem function-size /src/core/or/connection_or.c:connection_or_client_learned_peer_id() 142
+problem dependency-violation /src/core/or/connection_or.c 21
+problem dependency-violation /src/core/or/dos.c 6
+problem dependency-violation /src/core/or/onion.c 2
+problem file-size /src/core/or/or.h 1105
+problem include-count /src/core/or/or.h 48
+problem dependency-violation /src/core/or/or.h 1
+problem dependency-violation /src/core/or/or_periodic.c 1
+problem file-size /src/core/or/policies.c 3182
+problem function-size /src/core/or/policies.c:policy_summarize() 107
+problem dependency-violation /src/core/or/policies.c 14
+problem function-size /src/core/or/protover.c:protover_all_supported() 117
+problem dependency-violation /src/core/or/reasons.c 2
+problem file-size /src/core/or/relay.c 3300
+problem function-size /src/core/or/relay.c:circuit_receive_relay_cell() 127
+problem function-size /src/core/or/relay.c:relay_send_command_from_edge_() 109
+problem function-size /src/core/or/relay.c:connection_ap_process_end_not_open() 192
+problem function-size /src/core/or/relay.c:connection_edge_process_relay_cell_not_open() 137
+problem function-size /src/core/or/relay.c:handle_relay_cell_command() 369
+problem function-size /src/core/or/relay.c:connection_edge_package_raw_inbuf() 128
+problem function-size /src/core/or/relay.c:circuit_resume_edge_reading_helper() 146
+problem dependency-violation /src/core/or/relay.c 17
+problem dependency-violation /src/core/or/scheduler.c 1
+problem function-size /src/core/or/scheduler_kist.c:kist_scheduler_run() 171
+problem dependency-violation /src/core/or/scheduler_kist.c 2
+problem function-size /src/core/or/scheduler_vanilla.c:vanilla_scheduler_run() 109
+problem dependency-violation /src/core/or/scheduler_vanilla.c 1
+problem dependency-violation /src/core/or/sendme.c 2
+problem dependency-violation /src/core/or/status.c 13
+problem function-size /src/core/or/versions.c:tor_version_parse() 104
+problem dependency-violation /src/core/proto/proto_cell.c 3
+problem dependency-violation /src/core/proto/proto_control0.c 1
+problem dependency-violation /src/core/proto/proto_ext_or.c 2
+problem dependency-violation /src/core/proto/proto_http.c 1
+problem function-size /src/core/proto/proto_socks.c:parse_socks_client() 110
+problem dependency-violation /src/core/proto/proto_socks.c 8
+problem function-size /src/feature/client/addressmap.c:addressmap_rewrite() 109
+problem function-size /src/feature/client/bridges.c:rewrite_node_address_for_bridge() 125
+problem function-size /src/feature/client/circpathbias.c:pathbias_measure_close_rate() 108
+problem function-size /src/feature/client/dnsserv.c:evdns_server_callback() 153
+problem file-size /src/feature/client/entrynodes.c 4000
+problem function-size /src/feature/client/entrynodes.c:entry_guards_upgrade_waiting_circuits() 155
+problem function-size /src/feature/client/entrynodes.c:entry_guard_parse_from_state() 246
+problem file-size /src/feature/client/entrynodes.h 700
+problem function-size /src/feature/client/transports.c:handle_proxy_line() 108
+problem function-size /src/feature/client/transports.c:parse_method_line_helper() 110
+problem function-size /src/feature/client/transports.c:create_managed_proxy_environment() 111
+problem function-size /src/feature/control/control.c:connection_control_process_inbuf() 113
+problem function-size /src/feature/control/control_auth.c:handle_control_authenticate() 186
+problem function-size /src/feature/control/control_cmd.c:handle_control_extendcircuit() 150
+problem function-size /src/feature/control/control_cmd.c:handle_control_add_onion() 256
+problem function-size /src/feature/control/control_cmd.c:add_onion_helper_keyarg() 118
+problem function-size /src/feature/control/control_events.c:control_event_stream_status() 124
+problem include-count /src/feature/control/control_getinfo.c 56
+problem function-size /src/feature/control/control_getinfo.c:getinfo_helper_misc() 108
+problem function-size /src/feature/control/control_getinfo.c:getinfo_helper_dir() 297
+problem function-size /src/feature/control/control_getinfo.c:getinfo_helper_events() 234
+problem function-size /src/feature/dirauth/bwauth.c:dirserv_read_measured_bandwidths() 121
+problem file-size /src/feature/dirauth/dirvote.c 4734
+problem include-count /src/feature/dirauth/dirvote.c 55
+problem function-size /src/feature/dirauth/dirvote.c:format_networkstatus_vote() 230
+problem function-size /src/feature/dirauth/dirvote.c:networkstatus_compute_bw_weights_v10() 233
+problem function-size /src/feature/dirauth/dirvote.c:networkstatus_compute_consensus() 952
+problem function-size /src/feature/dirauth/dirvote.c:networkstatus_add_detached_signatures() 119
+problem function-size /src/feature/dirauth/dirvote.c:dirvote_add_vote() 161
+problem function-size /src/feature/dirauth/dirvote.c:dirvote_compute_consensuses() 164
+problem function-size /src/feature/dirauth/dirvote.c:dirserv_generate_networkstatus_vote_obj() 281
+problem function-size /src/feature/dirauth/dsigs_parse.c:networkstatus_parse_detached_signatures() 196
+problem function-size /src/feature/dirauth/guardfraction.c:dirserv_read_guardfraction_file_from_str() 109
+problem function-size /src/feature/dirauth/process_descs.c:dirserv_add_descriptor() 125
+problem function-size /src/feature/dirauth/shared_random.c:should_keep_commit() 109
+problem function-size /src/feature/dirauth/voteflags.c:dirserv_compute_performance_thresholds() 175
+problem function-size /src/feature/dircache/consdiffmgr.c:consdiffmgr_cleanup() 115
+problem function-size /src/feature/dircache/consdiffmgr.c:consdiffmgr_rescan_flavor_() 111
+problem function-size /src/feature/dircache/consdiffmgr.c:consensus_diff_worker_threadfn() 132
+problem function-size /src/feature/dircache/dircache.c:handle_get_current_consensus() 165
+problem function-size /src/feature/dircache/dircache.c:directory_handle_command_post() 124
+problem file-size /src/feature/dirclient/dirclient.c 3204
+problem include-count /src/feature/dirclient/dirclient.c 54
+problem function-size /src/feature/dirclient/dirclient.c:directory_get_from_dirserver() 126
+problem function-size /src/feature/dirclient/dirclient.c:directory_initiate_request() 201
+problem function-size /src/feature/dirclient/dirclient.c:directory_send_command() 239
+problem function-size /src/feature/dirclient/dirclient.c:dir_client_decompress_response_body() 111
+problem function-size /src/feature/dirclient/dirclient.c:connection_dir_client_reached_eof() 199
+problem function-size /src/feature/dirclient/dirclient.c:handle_response_fetch_consensus() 104
+problem function-size /src/feature/dircommon/consdiff.c:gen_ed_diff() 203
+problem function-size /src/feature/dircommon/consdiff.c:apply_ed_diff() 158
+problem function-size /src/feature/dirparse/authcert_parse.c:authority_cert_parse_from_string() 181
+problem function-size /src/feature/dirparse/ns_parse.c:routerstatus_parse_entry_from_string() 280
+problem function-size /src/feature/dirparse/ns_parse.c:networkstatus_verify_bw_weights() 389
+problem function-size /src/feature/dirparse/ns_parse.c:networkstatus_parse_vote_from_string() 635
+problem function-size /src/feature/dirparse/parsecommon.c:tokenize_string() 101
+problem function-size /src/feature/dirparse/parsecommon.c:get_next_token() 165
+problem function-size /src/feature/dirparse/routerparse.c:router_parse_entry_from_string() 554
+problem function-size /src/feature/dirparse/routerparse.c:extrainfo_parse_entry_from_string() 208
+problem function-size /src/feature/hibernate/hibernate.c:accounting_parse_options() 109
+problem function-size /src/feature/hs/hs_cell.c:hs_cell_build_establish_intro() 115
+problem function-size /src/feature/hs/hs_cell.c:hs_cell_parse_introduce2() 134
+problem function-size /src/feature/hs/hs_client.c:send_introduce1() 108
+problem function-size /src/feature/hs/hs_common.c:hs_get_responsible_hsdirs() 102
+problem function-size /src/feature/hs/hs_descriptor.c:decrypt_desc_layer() 111
+problem function-size /src/feature/hs/hs_descriptor.c:decode_introduction_point() 122
+problem function-size /src/feature/hs/hs_descriptor.c:desc_decode_superencrypted_v3() 107
+problem function-size /src/feature/hs/hs_descriptor.c:desc_decode_encrypted_v3() 109
+problem file-size /src/feature/hs/hs_service.c 4300
+problem function-size /src/feature/keymgt/loadkey.c:ed_key_init_from_file() 326
+problem function-size /src/feature/nodelist/authcert.c:trusted_dirs_load_certs_from_string() 123
+problem function-size /src/feature/nodelist/authcert.c:authority_certs_fetch_missing() 295
+problem function-size /src/feature/nodelist/fmt_routerstatus.c:routerstatus_format_entry() 158
+problem function-size /src/feature/nodelist/microdesc.c:microdesc_cache_rebuild() 134
+problem include-count /src/feature/nodelist/networkstatus.c 65
+problem function-size /src/feature/nodelist/networkstatus.c:networkstatus_check_consensus_signature() 175
+problem function-size /src/feature/nodelist/networkstatus.c:networkstatus_set_current_consensus() 289
+problem function-size /src/feature/nodelist/node_select.c:router_pick_directory_server_impl() 122
+problem function-size /src/feature/nodelist/node_select.c:compute_weighted_bandwidths() 204
+problem function-size /src/feature/nodelist/node_select.c:router_pick_trusteddirserver_impl() 112
+problem function-size /src/feature/nodelist/nodelist.c:compute_frac_paths_available() 190
+problem file-size /src/feature/nodelist/routerlist.c 3247
+problem function-size /src/feature/nodelist/routerlist.c:router_rebuild_store() 148
+problem function-size /src/feature/nodelist/routerlist.c:router_add_to_routerlist() 168
+problem function-size /src/feature/nodelist/routerlist.c:routerlist_remove_old_routers() 121
+problem function-size /src/feature/nodelist/routerlist.c:update_consensus_router_descriptor_downloads() 142
+problem function-size /src/feature/nodelist/routerlist.c:update_extrainfo_downloads() 103
+problem function-size /src/feature/relay/dns.c:dns_resolve_impl() 131
+problem function-size /src/feature/relay/dns.c:configure_nameservers() 161
+problem function-size /src/feature/relay/dns.c:evdns_callback() 108
+problem function-size /src/feature/relay/relay_handshake.c:connection_or_compute_authenticate_cell_body() 231
+problem file-size /src/feature/relay/router.c 3600
+problem include-count /src/feature/relay/router.c 57
+problem function-size /src/feature/relay/router.c:init_keys() 254
+problem function-size /src/feature/relay/router.c:get_my_declared_family() 114
+problem function-size /src/feature/relay/router.c:router_build_fresh_unsigned_routerinfo() 113
+problem function-size /src/feature/relay/router.c:router_dump_router_to_string() 372
+problem function-size /src/feature/relay/routerkeys.c:load_ed_keys() 294
+problem function-size /src/feature/rend/rendcache.c:rend_cache_store_v2_desc_as_client() 190
+problem function-size /src/feature/rend/rendclient.c:rend_client_send_introduction() 219
+problem function-size /src/feature/rend/rendcommon.c:rend_encode_v2_descriptors() 221
+problem function-size /src/feature/rend/rendmid.c:rend_mid_establish_intro_legacy() 105
+problem function-size /src/feature/rend/rendparse.c:rend_parse_v2_service_descriptor() 181
+problem function-size /src/feature/rend/rendparse.c:rend_parse_introduction_points() 129
+problem file-size /src/feature/rend/rendservice.c 4504
+problem function-size /src/feature/rend/rendservice.c:rend_service_prune_list_impl_() 107
+problem function-size /src/feature/rend/rendservice.c:rend_config_service() 143
+problem function-size /src/feature/rend/rendservice.c:rend_service_load_auth_keys() 178
+problem function-size /src/feature/rend/rendservice.c:rend_service_receive_introduction() 334
+problem function-size /src/feature/rend/rendservice.c:rend_service_parse_intro_for_v3() 111
+problem function-size /src/feature/rend/rendservice.c:rend_service_decrypt_intro() 112
+problem function-size /src/feature/rend/rendservice.c:rend_service_intro_has_opened() 126
+problem function-size /src/feature/rend/rendservice.c:rend_service_rendezvous_has_opened() 117
+problem function-size /src/feature/rend/rendservice.c:directory_post_to_hs_dir() 106
+problem function-size /src/feature/rend/rendservice.c:upload_service_descriptor() 111
+problem function-size /src/feature/rend/rendservice.c:rend_consider_services_intro_points() 170
+problem function-size /src/feature/stats/rephist.c:rep_hist_load_mtbf_data() 185
+problem function-size /src/feature/stats/rephist.c:rep_hist_format_exit_stats() 148
+problem function-size /src/lib/compress/compress.c:tor_compress_impl() 127
+problem function-size /src/lib/compress/compress_zstd.c:tor_zstd_compress_process() 123
+problem function-size /src/lib/container/smartlist.c:smartlist_bsearch_idx() 107
+problem function-size /src/lib/crypt_ops/crypto_rand.c:crypto_strongest_rand_syscall() 102
+problem function-size /src/lib/encoding/binascii.c:base64_encode() 106
+problem function-size /src/lib/encoding/confline.c:parse_config_line_from_str_verbose() 117
+problem function-size /src/lib/encoding/cstring.c:unescape_string() 108
+problem function-size /src/lib/fs/dir.c:check_private_dir() 230
+problem function-size /src/lib/math/prob_distr.c:sample_uniform_interval() 145
+problem function-size /src/lib/net/address.c:tor_addr_parse_mask_ports() 195
+problem function-size /src/lib/net/address.c:tor_addr_compare_masked() 110
+problem function-size /src/lib/net/inaddr.c:tor_inet_pton() 107
+problem function-size /src/lib/net/socketpair.c:tor_ersatz_socketpair() 102
+problem function-size /src/lib/process/process_unix.c:process_unix_exec() 213
+problem function-size /src/lib/process/process_win32.c:process_win32_exec() 151
+problem function-size /src/lib/process/process_win32.c:process_win32_create_pipe() 109
+problem function-size /src/lib/process/restrict.c:set_max_file_descriptors() 102
+problem function-size /src/lib/process/setuid.c:switch_id() 156
+problem function-size /src/lib/sandbox/sandbox.c:prot_strings() 104
+problem function-size /src/lib/string/scanf.c:tor_vsscanf() 112
+problem function-size /src/lib/tls/tortls_nss.c:tor_tls_context_new() 152
+problem function-size /src/lib/tls/tortls_openssl.c:tor_tls_context_new() 170
+problem function-size /src/lib/tls/x509_nss.c:tor_tls_create_certificate_internal() 121
+problem function-size /src/tools/tor-gencert.c:parse_commandline() 111
+problem function-size /src/tools/tor-resolve.c:build_socks5_resolve_request() 102
+problem function-size /src/tools/tor-resolve.c:do_resolve() 171
+problem function-size /src/tools/tor-resolve.c:main() 112
diff --git a/scripts/maint/practracker/includes.py b/scripts/maint/practracker/includes.py
new file mode 100755
index 0000000000..a5ee728824
--- /dev/null
+++ b/scripts/maint/practracker/includes.py
@@ -0,0 +1,381 @@
+#!/usr/bin/env python
+# Copyright 2018 The Tor Project, Inc. See LICENSE file for licensing info.
+
+"""This script looks through all the directories for files matching *.c or
+ *.h, and checks their #include directives to make sure that only "permitted"
+ headers are included.
+
+ Any #include directives with angle brackets (like #include <stdio.h>) are
+ ignored -- only directives with quotes (like #include "foo.h") are
+ considered.
+
+ To decide what includes are permitted, this script looks at a .may_include
+ file in each directory. This file contains empty lines, #-prefixed
+ comments, filenames (like "lib/foo/bar.h") and file globs (like lib/*/*.h)
+ for files that are permitted.
+
+ The script exits with an error if any non-permitted includes are found.
+ .may_include files that contain "!advisory" are considered advisory.
+ Advisory .may_include files only result in warnings, rather than errors.
+"""
+
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import fnmatch
+import os
+import re
+import sys
+
+if sys.version_info[0] <= 2:
+ def open_file(fname):
+ return open(fname, 'r')
+else:
+ def open_file(fname):
+ return open(fname, 'r', encoding='utf-8')
+
+def warn(msg):
+ print(msg, file=sys.stderr)
+
+def fname_is_c(fname):
+ """ Return true iff 'fname' is the name of a file that we should
+ search for possibly disallowed #include directives. """
+ if fname.endswith(".h") or fname.endswith(".c"):
+ bname = os.path.basename(fname)
+ return not (bname.startswith(".") or bname.startswith("#"))
+ else:
+ return False
+
+INCLUDE_PATTERN = re.compile(r'\s*#\s*include\s+"([^"]*)"')
+RULES_FNAME = ".may_include"
+
+ALLOWED_PATTERNS = [
+ re.compile(r'^.*\*\.(h|inc)$'),
+ re.compile(r'^.*/.*\.h$'),
+ re.compile(r'^ext/.*\.c$'),
+ re.compile(r'^orconfig.h$'),
+ re.compile(r'^micro-revision.i$'),
+]
+
+TOPDIR = "src"
+
+def pattern_is_normal(s):
+ for p in ALLOWED_PATTERNS:
+ if p.match(s):
+ return True
+ return False
+
+class Error(object):
+ def __init__(self, location, msg, is_advisory=False):
+ self.location = location
+ self.msg = msg
+ self.is_advisory = is_advisory
+
+ def __str__(self):
+ return "{} at {}".format(self.msg, self.location)
+
+class Rules(object):
+ """ A 'Rules' object is the parsed version of a .may_include file. """
+ def __init__(self, dirpath):
+ self.dirpath = dirpath
+ if dirpath.startswith("src/"):
+ self.incpath = dirpath[4:]
+ else:
+ self.incpath = dirpath
+ self.patterns = []
+ self.usedPatterns = set()
+ self.is_advisory = False
+
+ def addPattern(self, pattern):
+ if pattern == "!advisory":
+ self.is_advisory = True
+ return
+ if not pattern_is_normal(pattern):
+ warn("Unusual pattern {} in {}".format(pattern, self.dirpath))
+ self.patterns.append(pattern)
+
+ def includeOk(self, path):
+ for pattern in self.patterns:
+ if fnmatch.fnmatchcase(path, pattern):
+ self.usedPatterns.add(pattern)
+ return True
+ return False
+
+ def applyToLines(self, lines, loc_prefix=""):
+ lineno = 0
+ for line in lines:
+ lineno += 1
+ m = INCLUDE_PATTERN.match(line)
+ if m:
+ include = m.group(1)
+ if not self.includeOk(include):
+                    yield Error("{}{}".format(loc_prefix, lineno),
+ "Forbidden include of {}".format(include),
+ is_advisory=self.is_advisory)
+
+ def applyToFile(self, fname, f):
+ for error in self.applyToLines(iter(f), "{}:".format(fname)):
+ yield error
+
+ def noteUnusedRules(self):
+ for p in self.patterns:
+ if p not in self.usedPatterns:
+ warn("Pattern {} in {} was never used.".format(p, self.dirpath))
+
+ def getAllowedDirectories(self):
+ allowed = []
+ for p in self.patterns:
+ m = re.match(r'^(.*)/\*\.(h|inc)$', p)
+ if m:
+ allowed.append(m.group(1))
+ continue
+ m = re.match(r'^(.*)/[^/]*$', p)
+ if m:
+ allowed.append(m.group(1))
+ continue
+
+ return allowed
+
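+# A doctest-style sketch of how a Rules object behaves (hypothetical
+# patterns; each quoted #include path is matched with fnmatch):
+#
+#     >>> r = Rules("src/lib/log")
+#     >>> r.addPattern("lib/err/*.h")
+#     >>> r.includeOk("lib/err/torerr.h")
+#     True
+#     >>> r.includeOk("core/or/or.h")
+#     False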
+
+def normalize_srcdir(fname):
+ """given the name of a source directory or file, return its name
+ relative to `src` in a unix-like format.
+ """
+ orig = fname
+ dirname, dirfile = os.path.split(fname)
+ if re.match(r'.*\.[ch]$', dirfile):
+ fname = dirname
+
+ # Now we have a directory.
+ dirname, result = os.path.split(fname)
+ for _ in range(100):
+ # prevent excess looping in case I missed a tricky case
+ dirname, dirpart = os.path.split(dirname)
+ if dirpart == 'src' or dirname == "":
+ #print(orig,"=>",result)
+ return result
+ result = "{}/{}".format(dirpart,result)
+
+ print("No progress!")
+ assert False
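+
+# e.g. normalize_srcdir("src/lib/log/util_bug.c") and
+# normalize_srcdir("src/lib/log") both return "lib/log".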
+
+include_rules_cache = {}
+
+def load_include_rules(fname):
+ """ Read a rules file from 'fname', and return it as a Rules object.
+ Return 'None' if fname does not exist.
+ """
+ if fname in include_rules_cache:
+ return include_rules_cache[fname]
+ if not os.path.exists(fname):
+ include_rules_cache[fname] = None
+ return None
+ result = Rules(os.path.split(fname)[0])
+ with open_file(fname) as f:
+ for line in f:
+ line = line.strip()
+ if line.startswith("#") or not line:
+ continue
+ result.addPattern(line)
+ include_rules_cache[fname] = result
+ return result
+
+def get_all_include_rules():
+ """Return a list of all the Rules objects we have loaded so far,
+ sorted by their directory names."""
+ return [ rules for (fname,rules) in
+ sorted(include_rules_cache.items())
+ if rules is not None ]
+
+def remove_self_edges(graph):
+ """Takes a directed graph in as an adjacency mapping (a mapping from
+ node to a list of the nodes to which it connects).
+
+ Remove all edges from a node to itself."""
+
+ for k in list(graph):
+ graph[k] = [ d for d in graph[k] if d != k ]
+
+def closure(graph):
+ """Takes a directed graph in as an adjacency mapping (a mapping from
+ node to a list of the nodes to which it connects), and completes
+ its closure.
+ """
+ graph = graph.copy()
+ changed = False
+ for k in graph.keys():
+ graph[k] = set(graph[k])
+ while True:
+ for k in graph.keys():
+ sz = len(graph[k])
+ for v in list(graph[k]):
+ graph[k].update(graph.get(v, []))
+ if sz != len(graph[k]):
+ changed = True
+
+ if not changed:
+ return graph
+ changed = False
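+
+# e.g. closure({"a": ["b"], "b": ["c"], "c": []}) -- a hypothetical
+# three-node graph -- returns {"a": {"b", "c"}, "b": {"c"}, "c": set()};
+# note that the values come back as sets.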
+
+def toposort(graph, limit=100):
+ """Takes a directed graph in as an adjacency mapping (a mapping from
+ node to a list of the nodes to which it connects). Tries to
+ perform a topological sort on the graph, arranging the nodes into
+ "levels", such that every member of each level is only reachable
+ by members of later levels.
+
+ Returns a list of the members of each level.
+
+ Modifies the input graph, removing every member that could be
+ sorted. If the graph does not become empty, then it contains a
+ cycle.
+
+ "limit" is the max depth of the graph after which we give up trying
+ to sort it and conclude we have a cycle.
+ """
+ all_levels = []
+
+ n = 0
+ while graph:
+ cur_level = []
+ all_levels.append(cur_level)
+ for k in list(graph):
+ graph[k] = [ d for d in graph[k] if d in graph ]
+ if graph[k] == []:
+ cur_level.append(k)
+ for k in cur_level:
+ del graph[k]
+ n += 1
+ if n > limit:
+ break
+
+ return all_levels
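+
+# On the same hypothetical graph, toposort({"a": ["b"], "b": ["c"],
+# "c": []}) returns [["c"], ["b"], ["a"]] and empties the graph, while a
+# cycle such as {"x": ["y"], "y": ["x"]} is left in the graph once the
+# pass limit is reached.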
+
+def consider_include_rules(fname, f):
+ dirpath = os.path.split(fname)[0]
+ rules_fname = os.path.join(dirpath, RULES_FNAME)
+    rules = load_include_rules(rules_fname)
+ if rules is None:
+ return
+
+ for err in rules.applyToFile(fname, f):
+ yield err
+
+
+def walk_c_files(topdir="src"):
+ """Run through all .c and .h files under topdir, looking for
+ include-rule violations. Yield those violations."""
+
+ for dirpath, dirnames, fnames in os.walk(topdir):
+ for fname in fnames:
+ if fname_is_c(fname):
+                fullpath = os.path.join(dirpath, fname)
+                with open_file(fullpath) as f:
+ for err in consider_include_rules(fullpath, f):
+ yield err
+
+def open_or_stdin(fname):
+ if fname == '-':
+ return sys.stdin
+ else:
+ return open(fname)
+
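+# Each line of a subsystem-order file is expected to hold three
+# whitespace-separated fields, e.g. "25 or core/or" (a hypothetical
+# entry): a level (ignored here), a subsystem name, and its directory.
+# An INVERSION is reported when an earlier (lower-level) subsystem turns
+# out to use one listed later.
+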
+def check_subsys_file(fname, uses_dirs):
+ if not uses_dirs:
+ # We're doing a distcheck build, or for some other reason there are
+ # no .may_include files.
+ print("SKIPPING")
+ return False
+
+ uses_dirs = { normalize_srcdir(k) : { normalize_srcdir(d) for d in v }
+ for (k,v) in uses_dirs.items() }
+ uses_closure = closure(uses_dirs)
+ ok = True
+ previous_subsystems = []
+
+ with open_or_stdin(fname) as f:
+ for line in f:
+            _, name, sys_dir = line.split()
+            sys_dir = normalize_srcdir(sys_dir)
+            for prev in previous_subsystems:
+                if sys_dir in uses_closure[prev]:
+                    print("INVERSION: {} uses {}".format(prev, sys_dir))
+                    ok = False
+            previous_subsystems.append(sys_dir)
+ return not ok
+
+def run_check_includes(topdir, list_unused=False, log_sorted_levels=False,
+ list_advisories=False, check_subsystem_order=None):
+ trouble = False
+
+ for err in walk_c_files(topdir):
+ if err.is_advisory and not list_advisories:
+ continue
+ print(err, file=sys.stderr)
+ if not err.is_advisory:
+ trouble = True
+
+ if trouble:
+ warn(
+ """To change which includes are allowed in a C file, edit the {}
+            file in its enclosing directory.""".format(RULES_FNAME))
+ sys.exit(1)
+
+ if list_unused:
+ for rules in get_all_include_rules():
+ rules.noteUnusedRules()
+
+ uses_dirs = { }
+ for rules in get_all_include_rules():
+ uses_dirs[rules.incpath] = rules.getAllowedDirectories()
+
+ remove_self_edges(uses_dirs)
+
+ if check_subsystem_order:
+ if check_subsys_file(check_subsystem_order, uses_dirs):
+ sys.exit(1)
+
+ all_levels = toposort(uses_dirs)
+
+ if log_sorted_levels:
+ for (n, cur_level) in enumerate(all_levels):
+ if cur_level:
+ print(n, cur_level)
+
+ if uses_dirs:
+ print("There are circular .may_include dependencies in here somewhere:",
+ uses_dirs)
+ sys.exit(1)
+
+def main(argv):
+ import argparse
+
+ progname = argv[0]
+ parser = argparse.ArgumentParser(prog=progname)
+ parser.add_argument("--toposort", action="store_true",
+ help="Print a topologically sorted list of modules")
+ parser.add_argument("--list-unused", action="store_true",
+ help="List unused lines in .may_include files.")
+ parser.add_argument("--list-advisories", action="store_true",
+ help="List advisories as well as forbidden includes")
+ parser.add_argument("--check-subsystem-order", action="store",
+ help="Check a list of subsystems for ordering")
+ parser.add_argument("topdir", default="src", nargs="?",
+ help="Top-level directory for the tor source")
+ args = parser.parse_args(argv[1:])
+
+ global TOPDIR
+ TOPDIR = args.topdir
+ run_check_includes(topdir=args.topdir,
+ log_sorted_levels=args.toposort,
+ list_unused=args.list_unused,
+ list_advisories=args.list_advisories,
+ check_subsystem_order=args.check_subsystem_order)
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/scripts/maint/practracker/metrics.py b/scripts/maint/practracker/metrics.py
new file mode 100644
index 0000000000..300a4501a9
--- /dev/null
+++ b/scripts/maint/practracker/metrics.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+
+# Implementation of various source code metrics.
+# These are currently ad-hoc string operations and regexps.
+# We might want to use a proper static analysis library in the future,
+# if we want to get more advanced metrics.
+
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import re
+
+def get_file_len(f):
+ """Get file length of file"""
+ i = -1
+ for i, l in enumerate(f):
+ pass
+ return i + 1
+
+def get_include_count(f):
+ """Get number of #include statements in the file"""
+ include_count = 0
+ for line in f:
+ if re.match(r'\s*#\s*include', line):
+ include_count += 1
+ return include_count
+
+def get_function_lines(f):
+ """
+ Return iterator which iterates over functions and returns (function name, function lines)
+ """
+
+ # Skip lines that look like they are defining functions with these
+ # names: they aren't real function definitions.
+ REGEXP_CONFUSE_TERMS = {"MOCK_IMPL", "MOCK_DECL", "HANDLE_DECL",
+ "ENABLE_GCC_WARNINGS", "ENABLE_GCC_WARNING",
+ "DUMMY_TYPECHECK_INSTANCE",
+ "DISABLE_GCC_WARNING", "DISABLE_GCC_WARNINGS"}
+
+ in_function = False
+ found_openbrace = False
+ for lineno, line in enumerate(f):
+ if not in_function:
+ # find the start of a function
+ m = re.match(r'^([a-zA-Z_][a-zA-Z_0-9]*),?\(', line)
+ if m:
+ func_name = m.group(1)
+ if func_name in REGEXP_CONFUSE_TERMS:
+ continue
+ func_start = lineno
+ in_function = True
+ elif not found_openbrace and line.startswith("{"):
+ found_openbrace = True
+ func_start = lineno
+ else:
+ # Find the end of a function
+ if line.startswith("}"):
+ n_lines = lineno - func_start + 1
+ in_function = False
+ found_openbrace = False
+ yield (func_name, n_lines)
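+
+# Rough sketch of the counting rule (hypothetical input): lines are
+# counted from the opening "{" through the closing "}" inclusive, so
+#
+#     demo(void)
+#     {
+#       return 0;
+#     }
+#
+# yields ("demo", 3).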
diff --git a/scripts/maint/practracker/practracker.py b/scripts/maint/practracker/practracker.py
new file mode 100755
index 0000000000..76ffd64cfb
--- /dev/null
+++ b/scripts/maint/practracker/practracker.py
@@ -0,0 +1,320 @@
+#!/usr/bin/env python
+
+"""
+Best-practices tracker for Tor source code.
+
+Go through the various .c files and collect metrics about them. If the metrics
+violate some of our best practices and they are not found in the optional
+exceptions file, then log a problem about them.
+
+We currently compute metrics about file size, function size, number of
+includes, and dependency violations, for C source files and headers.
+
+practracker.py should be run with its final argument pointing to the Tor
+top-level source directory, like this:
+  $ python3 ./scripts/maint/practracker/practracker.py .
+
+To regenerate the exceptions file so that it allows all current
+problems in the Tor source, use the --regen flag:
+  $ python3 ./scripts/maint/practracker/practracker.py --regen .
+"""
+
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import codecs, os, sys
+
+import metrics
+import util
+import problem
+import includes
+import shutil
+
+# The filename of the exceptions file (it should be placed in the practracker directory)
+EXCEPTIONS_FNAME = "./exceptions.txt"
+
+# Recommended file size
+MAX_FILE_SIZE = 3000 # lines
+# Recommended function size
+MAX_FUNCTION_SIZE = 100 # lines
+# Recommended number of #includes
+MAX_INCLUDE_COUNT = 50
+# Recommended file size for headers
+MAX_H_FILE_SIZE = 500
+# Recommended include count for headers
+MAX_H_INCLUDE_COUNT = 15
+# Recommended number of dependency violations
+MAX_DEP_VIOLATIONS = 0
+
+# Map from problem type to functions that adjust for tolerance
+TOLERANCE_FNS = {
+ 'include-count': lambda n: int(n*1.1),
+ 'function-size': lambda n: int(n*1.1),
+ 'file-size': lambda n: int(n*1.02),
+ 'dependency-violation': lambda n: (n+2)
+}
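+
+# For example, with the 'function-size' tolerance above, an exception of
+# 100 lines is stretched to int(100*1.1) == 110: a function measuring
+# 101-110 lines only produces a warning, while 111 or more is an error.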
+
+#######################################################
+
+# The Tor source code topdir
+TOR_TOPDIR = None
+
+#######################################################
+
+def open_file(fname):
+ return codecs.open(fname, 'r', encoding='utf-8')
+
+def consider_file_size(fname, f):
+ """Consider the size of 'f' and yield an FileSizeItem for it.
+ """
+ file_size = metrics.get_file_len(f)
+ yield problem.FileSizeItem(fname, file_size)
+
+def consider_includes(fname, f):
+ """Consider the #include count in for 'f' and yield an IncludeCountItem
+ for it.
+ """
+ include_count = metrics.get_include_count(f)
+
+ yield problem.IncludeCountItem(fname, include_count)
+
+def consider_function_size(fname, f):
+ """yield a FunctionSizeItem for every function in f.
+ """
+
+ for name, lines in metrics.get_function_lines(f):
+ canonical_function_name = "%s:%s()" % (fname, name)
+ yield problem.FunctionSizeItem(canonical_function_name, lines)
+
+def consider_include_violations(fname, real_fname, f):
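+    """Yield a DependencyViolationItem for 'f' if it contains any
+    forbidden includes, according to the include rules for its
+    directory."""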
+ n = 0
+ for item in includes.consider_include_rules(real_fname, f):
+ n += 1
+ if n:
+ yield problem.DependencyViolationItem(fname, n)
+
+
+#######################################################
+
+def consider_all_metrics(files_list):
+ """Consider metrics for all files, and yield a sequence of problem.Item
+    objects for those issues."""
+ for fname in files_list:
+ with open_file(fname) as f:
+ for item in consider_metrics_for_file(fname, f):
+ yield item
+
+def consider_metrics_for_file(fname, f):
+ """
+ Yield a sequence of problem.Item objects for all of the metrics in
+ 'f'.
+ """
+ real_fname = fname
+ # Strip the useless part of the path
+ if fname.startswith(TOR_TOPDIR):
+ fname = fname[len(TOR_TOPDIR):]
+
+ # Get file length
+ for item in consider_file_size(fname, f):
+ yield item
+
+ # Consider number of #includes
+ f.seek(0)
+ for item in consider_includes(fname, f):
+ yield item
+
+ # Get function length
+ f.seek(0)
+ for item in consider_function_size(fname, f):
+ yield item
+
+ # Check for "upward" includes
+ f.seek(0)
+ for item in consider_include_violations(fname, real_fname, f):
+ yield item
+
+HEADER="""\
+# Welcome to the exceptions file for Tor's best-practices tracker!
+#
+# Each line of this file represents a single violation of Tor's best
+# practices -- typically, a violation that we had before practracker.py
+# first existed.
+#
+# There are four kinds of problems that we recognize right now:
+# function-size -- a function of more than {MAX_FUNCTION_SIZE} lines.
+# file-size -- a .c file of more than {MAX_FILE_SIZE} lines, or a .h
+# file with more than {MAX_H_FILE_SIZE} lines.
+# include-count -- a .c file with more than {MAX_INCLUDE_COUNT} #includes,
+# or a .h file with more than {MAX_H_INCLUDE_COUNT} #includes.
+# dependency-violation -- a file includes a header that it should
+# not, according to an advisory .may_include file.
+#
+# Each line below represents a single exception that practracker should
+# _ignore_. Each line has four parts:
+# 1. The word "problem".
+# 2. The kind of problem.
+# 3. The location of the problem: either a filename, or a
+# filename:functionname pair.
+# 4. The magnitude of the problem to ignore.
+#
+# So for example, consider this line:
+# problem file-size /src/core/or/connection_or.c 3200
+#
+# It tells practracker to allow the mentioned file to be up to 3200 lines
+# long, even though ordinarily it would warn about any file with more than
+# {MAX_FILE_SIZE} lines.
+#
+# You can either edit this file by hand, or regenerate it completely by
+# running `make practracker-regen`.
+#
+# Remember: It is better to fix the problem than to add a new exception!
+
+""".format(**globals())
+
+def main(argv):
+ import argparse
+
+ progname = argv[0]
+ parser = argparse.ArgumentParser(prog=progname)
+ parser.add_argument("--regen", action="store_true",
+ help="Regenerate the exceptions file")
+ parser.add_argument("--list-overbroad", action="store_true",
+ help="List over-broad exceptions")
+ parser.add_argument("--regen-overbroad", action="store_true",
+ help="Regenerate the exceptions file, "
+ "removing over-broad exceptions.")
+ parser.add_argument("--exceptions",
+ help="Override the location for the exceptions file")
+ parser.add_argument("--strict", action="store_true",
+ help="Make all warnings into errors")
+ parser.add_argument("--terse", action="store_true",
+ help="Do not emit helpful instructions.")
+ parser.add_argument("--max-h-file-size", default=MAX_H_FILE_SIZE,
+ help="Maximum lines per .h file")
+ parser.add_argument("--max-h-include-count", default=MAX_H_INCLUDE_COUNT,
+ help="Maximum includes per .h file")
+ parser.add_argument("--max-file-size", default=MAX_FILE_SIZE,
+ help="Maximum lines per .c file")
+ parser.add_argument("--max-include-count", default=MAX_INCLUDE_COUNT,
+ help="Maximum includes per .c file")
+ parser.add_argument("--max-function-size", default=MAX_FUNCTION_SIZE,
+ help="Maximum lines per function")
+ parser.add_argument("--max-dependency-violations", default=MAX_DEP_VIOLATIONS,
+ help="Maximum number of dependency violations to allow")
+ parser.add_argument("--include-dir", action="append",
+ default=["src"],
+ help="A directory (under topdir) to search for source")
+ parser.add_argument("topdir", default=".", nargs="?",
+ help="Top-level directory for the tor source")
+ args = parser.parse_args(argv[1:])
+
+ global TOR_TOPDIR
+ TOR_TOPDIR = args.topdir
+ if args.exceptions:
+ exceptions_file = args.exceptions
+ else:
+ exceptions_file = os.path.join(TOR_TOPDIR, "scripts/maint/practracker", EXCEPTIONS_FNAME)
+
+    # 0) Configure our thresholds for what actually counts as a problem.
+ filt = problem.ProblemFilter()
+ filt.addThreshold(problem.FileSizeItem("*.c", int(args.max_file_size)))
+ filt.addThreshold(problem.IncludeCountItem("*.c", int(args.max_include_count)))
+ filt.addThreshold(problem.FileSizeItem("*.h", int(args.max_h_file_size)))
+ filt.addThreshold(problem.IncludeCountItem("*.h", int(args.max_h_include_count)))
+ filt.addThreshold(problem.FunctionSizeItem("*.c", int(args.max_function_size)))
+ filt.addThreshold(problem.DependencyViolationItem("*.c", int(args.max_dependency_violations)))
+ filt.addThreshold(problem.DependencyViolationItem("*.h", int(args.max_dependency_violations)))
+
+ if args.list_overbroad + args.regen + args.regen_overbroad > 1:
+ print("Cannot use more than one of --regen, --list-overbroad, and "
+ "--regen-overbroad.",
+ file=sys.stderr)
+ sys.exit(1)
+
+ # 1) Get all the .c files we care about
+ files_list = util.get_tor_c_files(TOR_TOPDIR, args.include_dir)
+
+ # 2) Initialize problem vault and load an optional exceptions file so that
+ # we don't warn about the past
+ if args.regen:
+ tmpname = exceptions_file + ".tmp"
+ tmpfile = open(tmpname, "w")
+ problem_file = tmpfile
+ problem_file.write(HEADER)
+        vault = problem.ProblemVault()
+ else:
+        vault = problem.ProblemVault(exceptions_file)
+ problem_file = sys.stdout
+
+ if args.list_overbroad or args.regen_overbroad:
+ # If we're looking for overbroad exceptions, don't list problems
+ # immediately to the problem file.
+ problem_file = util.NullFile()
+
+ # 2.1) Adjust the exceptions so that we warn only about small problems,
+ # and produce errors on big ones.
+ if not (args.regen or args.list_overbroad or args.regen_overbroad or
+ args.strict):
+        vault.set_tolerances(TOLERANCE_FNS)
+
+ # 3) Go through all the files and report problems if they are not exceptions
+ found_new_issues = 0
+ for item in filt.filter(consider_all_metrics(files_list)):
+        status = vault.register_problem(item)
+ if status == problem.STATUS_ERR:
+ print(item, file=problem_file)
+ found_new_issues += 1
+ elif status == problem.STATUS_WARN:
+ # warnings always go to stdout.
+ print("(warning) {}".format(item))
+
+ if args.regen:
+ tmpfile.close()
+ shutil.move(tmpname, exceptions_file)
+ sys.exit(0)
+
+ if args.regen_overbroad:
+ tmpname = exceptions_file + ".tmp"
+ tmpfile = open(tmpname, "w")
+ tmpfile.write(HEADER)
+        for item in vault.list_exceptions_without_overbroad():
+ print(item, file=tmpfile)
+ tmpfile.close()
+ shutil.move(tmpname, exceptions_file)
+ sys.exit(0)
+
+    # If new issues were found, try to give the developer some advice
+    # on how to resolve them.
+ if found_new_issues and not args.regen and not args.terse:
+ new_issues_str = """\
+FAILURE: practracker found {} new problem(s) in the code: see warnings above.
+
+Please fix the problems if you can, and update the exceptions file
+({}) if you can't.
+
+See doc/HACKING/HelpfulTools.md for more information on using practracker.
+
+You can disable this message by setting the TOR_DISABLE_PRACTRACKER environment
+variable.
+""".format(found_new_issues, exceptions_file)
+ print(new_issues_str)
+
+ if args.list_overbroad:
+ def k_fn(tup):
+ return tup[0].key()
+        for (ex, p) in sorted(vault.list_overbroad_exceptions(), key=k_fn):
+ if p is None:
+ print(ex, "->", 0)
+ else:
+ print(ex, "->", p.metric_value)
+
+
+ sys.exit(found_new_issues)
+
+if __name__ == '__main__':
+ if os.environ.get("TOR_DISABLE_PRACTRACKER"):
+ print("TOR_DISABLE_PRACTRACKER is set, skipping practracker tests.",
+ file=sys.stderr)
+ sys.exit(0)
+ main(sys.argv)
diff --git a/scripts/maint/practracker/practracker_tests.py b/scripts/maint/practracker/practracker_tests.py
new file mode 100755
index 0000000000..e03c9e05ae
--- /dev/null
+++ b/scripts/maint/practracker/practracker_tests.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+"""Some simple tests for practracker metrics"""
+
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import unittest
+
+try:
+ # python 2 names the module this way...
+ from StringIO import StringIO
+except ImportError:
+ # python 3 names the module this way.
+ from io import StringIO
+
+import metrics
+
+function_file = """static void
+fun(directory_request_t *req, const char *resource)
+{
+ time_t if_modified_since = 0;
+ uint8_t or_diff_from[DIGEST256_LEN];
+}
+
+static void
+fun(directory_request_t *req,
+ const char *resource)
+{
+ time_t if_modified_since = 0;
+ uint8_t or_diff_from[DIGEST256_LEN];
+}
+
+MOCK_IMPL(void,
+fun,(
+ uint8_t dir_purpose,
+ uint8_t router_purpose,
+ const char *resource,
+ int pds_flags,
+ download_want_authority_t want_authority))
+{
+ const routerstatus_t *rs = NULL;
+ const or_options_t *options = get_options();
+}
+"""
+
+class TestFunctionLength(unittest.TestCase):
+ def test_function_length(self):
+ funcs = StringIO(function_file)
+        # All three functions in function_file are named "fun", and each
+        # one spans 4 lines, counted from its "{" to its "}" inclusive.
+ for name, lines in metrics.get_function_lines(funcs):
+ self.assertEqual(name, "fun")
+
+ funcs.seek(0)
+
+ for name, lines in metrics.get_function_lines(funcs):
+ self.assertEqual(lines, 4)
+
+class TestIncludeCount(unittest.TestCase):
+ def test_include_count(self):
+ f = StringIO("""
+ # include <abc.h>
+ # include "def.h"
+#include "ghi.h"
+\t#\t include "jkl.h"
+""")
+ self.assertEqual(metrics.get_include_count(f),4)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/scripts/maint/practracker/problem.py b/scripts/maint/practracker/problem.py
new file mode 100644
index 0000000000..a3255dcc80
--- /dev/null
+++ b/scripts/maint/practracker/problem.py
@@ -0,0 +1,266 @@
+"""
+In this file we define a ProblemVault class where we store all the
+exceptions and all the problems we find with the code.
+
+The ProblemVault is capable of registering problems and also figuring out if a
+problem is worse than a registered exception so that it only warns when things
+get worse.
+"""
+
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os.path
+import re
+import sys
+
+STATUS_ERR = 2
+STATUS_WARN = 1
+STATUS_OK = 0
+
+class ProblemVault(object):
+ """
+    Vault where we store the various new problems we
+ found in the code, and also the old problems we read from the exception
+ file.
+ """
+ def __init__(self, exception_fname=None):
+ # Exception dictionary: { problem.key() : Problem object }
+ self.exceptions = {}
+ # Exception list: list of Problem objects, in the order added.
+ self.exception_list = []
+ # Exception dictionary: maps key to the problem it was used to
+ # suppress.
+ self.used_exception_for = {}
+
+        if exception_fname is None:
+ return
+
+ try:
+ with open(exception_fname, 'r') as exception_f:
+ self.register_exceptions(exception_f)
+ except IOError:
+ print("No exception file provided", file=sys.stderr)
+
+ def register_exceptions(self, exception_file):
+ # Register exceptions
+ for lineno, line in enumerate(exception_file, 1):
+ try:
+ problem = get_old_problem_from_exception_str(line)
+ except ValueError as v:
+ print("Exception file line {} not recognized: {}"
+ .format(lineno,v),
+ file=sys.stderr)
+ continue
+
+ if problem is None:
+ continue
+
+            # Fail if we see duplicate exceptions: there is really no
+            # reason to have them.
+            if problem.key() in self.exceptions:
+                print("Duplicate exception lines found in exception file:"
+                      "\n\t{}\n\t{}\nAborting..."
+                      .format(problem, self.exceptions[problem.key()]),
+                      file=sys.stderr)
+ sys.exit(1)
+
+ self.exceptions[problem.key()] = problem
+ self.exception_list.append(problem)
+ #print "Registering exception: %s" % problem
+
+ def register_problem(self, problem):
+ """
+        Register this problem with the vault. Return a true value if it is
+        a new problem or worsens an already-registered problem: STATUS_ERR
+        indicates a hard violation, and STATUS_WARN indicates a warning
+        within the tolerance band. Return STATUS_OK otherwise.
+ """
+        # This is a new problem; report it as an error.
+ if problem.key() not in self.exceptions:
+ return STATUS_ERR
+
+ # If it's an old problem, we don't warn if the situation got better
+ # (e.g. we went from 4k LoC to 3k LoC), but we do warn if the
+ # situation worsened (e.g. we went from 60 includes to 80).
+ status = problem.is_worse_than(self.exceptions[problem.key()])
+
+ # Remember that we used this exception, so that we can later
+ # determine whether the exception was overbroad.
+ self.used_exception_for[problem.key()] = problem
+
+ return status
+
+ def list_overbroad_exceptions(self):
+ """Return an iterator of tuples containing (ex,prob) where ex is an
+ exceptions in this vault that are stricter than it needs to be, and
+ prob is the worst problem (if any) that it covered.
+ """
+ for k in self.exceptions:
+ e = self.exceptions[k]
+ p = self.used_exception_for.get(k)
+ if p is None or e.is_worse_than(p):
+ yield (e, p)
+
+ def list_exceptions_without_overbroad(self):
+ """Return an iterator of new problems, such that overbroad
+ exceptions are replaced with minimally broad versions, or removed.
+ """
+ for e in self.exception_list:
+ p = self.used_exception_for.get(e.key())
+ if p is None:
+ # This exception wasn't needed at all.
+ continue
+ if e.is_worse_than(p):
+ # The exception is worse than the problem we found.
+ # Yield the problem as the new exception value.
+ yield p
+ else:
+ # The problem is as bad as the exception, or worse.
+ # Yield the exception.
+ yield e
+
+ def set_tolerances(self, fns):
+ """Adjust the tolerances for the exceptions in this vault. Takes
+        a map from problem type to a function that maps each permitted
+        value to its new maximum value."""
+ for k in self.exceptions:
+ ex = self.exceptions[k]
+ fn = fns.get(ex.problem_type)
+ if fn is not None:
+ ex.metric_value = fn(ex.metric_value)
+
+class ProblemFilter(object):
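+    """Filter for a stream of Items: keeps only the items that exceed
+    the thresholds registered with addThreshold()."""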
+ def __init__(self):
+ self.thresholds = dict()
+
+ def addThreshold(self, item):
+ self.thresholds[(item.get_type(),item.get_file_type())] = item
+
+ def matches(self, item):
+ key = (item.get_type(), item.get_file_type())
+ filt = self.thresholds.get(key, None)
+ if filt is None:
+ return False
+ return item.is_worse_than(filt)
+
+ def filter(self, sequence):
+ for item in iter(sequence):
+ if self.matches(item):
+ yield item
+
+class Item(object):
+ """
+ A generic measurement about some aspect of our source code. See
+ the subclasses below for the specific problems we are trying to tackle.
+ """
+ def __init__(self, problem_type, problem_location, metric_value):
+ self.problem_location = problem_location
+ self.metric_value = int(metric_value)
+ self.warning_threshold = self.metric_value
+ self.problem_type = problem_type
+
+ def is_worse_than(self, other_problem):
+ """Return STATUS_ERR if this is a worse problem than other_problem.
+ Return STATUS_WARN if it is a little worse, but falls within the
+ warning threshold. Return STATUS_OK if this problem is not
+ at all worse than other_problem.
+ """
+ if self.metric_value > other_problem.metric_value:
+ return STATUS_ERR
+ elif self.metric_value > other_problem.warning_threshold:
+ return STATUS_WARN
+ else:
+ return STATUS_OK
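+        # (For freshly-constructed Items, warning_threshold ==
+        # metric_value, so only set_tolerances() opens up a STATUS_WARN
+        # band: e.g. an exception stretched from 100 to 110 makes a
+        # 105-line function a warning and a 111-line function an error.)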
+
+ def key(self):
+ """Generate a unique key that describes this problem that can be used as a dictionary key"""
+ # Item location is a filesystem path, so we need to normalize this
+ # across platforms otherwise same paths are not gonna match.
+ canonical_location = os.path.normcase(self.problem_location)
+ return "%s:%s" % (canonical_location, self.problem_type)
+
+ def __str__(self):
+ return "problem %s %s %s" % (self.problem_type, self.problem_location, self.metric_value)
+
+ def get_type(self):
+ return self.problem_type
+
+ def get_file_type(self):
+ if self.problem_location.endswith(".h"):
+ return "*.h"
+ else:
+ return "*.c"
+
+class FileSizeItem(Item):
+ """
+ Denotes a problem with the size of a .c file.
+
+ The 'problem_location' is the filesystem path of the .c file, and the
+ 'metric_value' is the number of lines in the .c file.
+ """
+ def __init__(self, problem_location, metric_value):
+ super(FileSizeItem, self).__init__("file-size", problem_location, metric_value)
+
+class IncludeCountItem(Item):
+ """
+ Denotes a problem with the number of #includes in a .c file.
+
+ The 'problem_location' is the filesystem path of the .c file, and the
+ 'metric_value' is the number of #includes in the .c file.
+ """
+ def __init__(self, problem_location, metric_value):
+ super(IncludeCountItem, self).__init__("include-count", problem_location, metric_value)
+
+class FunctionSizeItem(Item):
+ """
+ Denotes a problem with a size of a function in a .c file.
+
+ The 'problem_location' is "<path>:<function>()" where <path> is the
+ filesystem path of the .c file and <function> is the name of the offending
+ function.
+
+ The 'metric_value' is the size of the offending function in lines.
+ """
+ def __init__(self, problem_location, metric_value):
+ super(FunctionSizeItem, self).__init__("function-size", problem_location, metric_value)
+
+class DependencyViolationItem(Item):
+ """
+ Denotes a dependency violation in a .c or .h file. A dependency violation
+ occurs when a file includes a file from some module that is not listed
+ in its .may_include file.
+
+ The 'problem_location' is the file that contains the problem.
+
+ The 'metric_value' is the number of forbidden includes.
+ """
+ def __init__(self, problem_location, metric_value):
+ super(DependencyViolationItem, self).__init__("dependency-violation",
+ problem_location,
+ metric_value)
+
+comment_re = re.compile(r'#.*$')
+
+def get_old_problem_from_exception_str(exception_str):
+ orig_str = exception_str
+ exception_str = comment_re.sub("", exception_str)
+ fields = exception_str.split()
+ if len(fields) == 0:
+ # empty line or comment
+ return None
+ elif len(fields) == 4:
+ # valid line
+ _, problem_type, problem_location, metric_value = fields
+ else:
+ raise ValueError("Misformatted line {!r}".format(orig_str))
+
+ if problem_type == "file-size":
+ return FileSizeItem(problem_location, metric_value)
+ elif problem_type == "include-count":
+ return IncludeCountItem(problem_location, metric_value)
+ elif problem_type == "function-size":
+ return FunctionSizeItem(problem_location, metric_value)
+ elif problem_type == "dependency-violation":
+ return DependencyViolationItem(problem_location, metric_value)
+ else:
+ raise ValueError("Unknown exception type {!r}".format(orig_str))
diff --git a/scripts/maint/practracker/test_practracker.sh b/scripts/maint/practracker/test_practracker.sh
new file mode 100755
index 0000000000..e29b9106de
--- /dev/null
+++ b/scripts/maint/practracker/test_practracker.sh
@@ -0,0 +1,96 @@
+#!/bin/sh
+
+# Fail if any subprocess fails unexpectedly
+set -e
+
+umask 077
+unset TOR_DISABLE_PRACTRACKER
+
+TMPDIR=""
+clean () {
+ if [ -n "$TMPDIR" ] && [ -d "$TMPDIR" ]; then
+ rm -rf "$TMPDIR"
+ fi
+}
+trap clean EXIT HUP INT TERM
+
+if test "${PRACTRACKER_DIR}" = "" ||
+ test ! -e "${PRACTRACKER_DIR}/practracker.py" ; then
+ PRACTRACKER_DIR=$(dirname "$0")
+fi
+
+# Change to the tor directory, and canonicalise PRACTRACKER_DIR,
+# so paths in practracker output are consistent, even in out-of-tree builds
+cd "${PRACTRACKER_DIR}"/../../..
+PRACTRACKER_DIR="scripts/maint/practracker"
+
+TMPDIR="$(mktemp -d -t pracktracker.test.XXXXXX)"
+if test -z "${TMPDIR}" || test ! -d "${TMPDIR}" ; then
+ echo >&2 "mktemp failed."
+    exit 1
+fi
+
+DATA="${PRACTRACKER_DIR}/testdata"
+
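+# Run practracker with every threshold forced to zero, so that each
+# metric in the testdata counts as a problem and the result is driven
+# entirely by the exceptions file passed in "$@".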
+run_practracker() {
+ "${PYTHON:-python}" "${PRACTRACKER_DIR}/practracker.py" \
+ --include-dir "" \
+ --max-file-size=0 \
+ --max-function-size=0 \
+ --max-h-file-size=0 \
+ --max-h-include-count=0 \
+ --max-include-count=0 \
+ --terse \
+ "${DATA}/" "$@" || echo "practracker exit status: $?"
+}
+compare() {
+    # We can't use cmp, because we need diff's -b option to cope with
+    # whitespace differences on Windows.
+ diff -b -u "$@" > "${TMPDIR}/test-diff" || true
+ if test -z "$(cat "${TMPDIR}"/test-diff)"; then
+ echo "OK"
+ else
+ cat "${TMPDIR}/test-diff"
+ echo "FAILED"
+ exit 1
+ fi
+}
+
+echo "unit tests:"
+
+"${PYTHON:-python}" "${PRACTRACKER_DIR}/practracker_tests.py"
+
+echo "ex0:"
+
+run_practracker --exceptions "${DATA}/ex0.txt" \
+ > "${TMPDIR}/ex0-received.txt" 2>&1
+
+compare "${TMPDIR}/ex0-received.txt" \
+ "${DATA}/ex0-expected.txt"
+
+echo "ex1:"
+
+run_practracker --exceptions "${DATA}/ex1.txt" \
+ > "${TMPDIR}/ex1-received.txt" 2>&1
+
+compare "${TMPDIR}/ex1-received.txt" \
+ "${DATA}/ex1-expected.txt"
+
+echo "ex1.overbroad:"
+
+run_practracker --exceptions "${DATA}/ex1.txt" --list-overbroad \
+ > "${TMPDIR}/ex1-overbroad-received.txt" 2>&1
+
+compare "${TMPDIR}/ex1-overbroad-received.txt" \
+ "${DATA}/ex1-overbroad-expected.txt"
+
+echo "ex1.regen:"
+
+cp "${DATA}/ex1.txt" "${TMPDIR}/ex1-copy.txt"
+run_practracker --exceptions "${TMPDIR}/ex1-copy.txt" --regen >/dev/null 2>&1
+compare "${TMPDIR}/ex1-copy.txt" "${DATA}/ex1-regen-expected.txt"
+
+echo "ex1.regen_overbroad:"
+
+cp "${DATA}/ex1.txt" "${TMPDIR}/ex1-copy.txt"
+run_practracker --exceptions "${TMPDIR}/ex1-copy.txt" --regen-overbroad >/dev/null 2>&1
+compare "${TMPDIR}/ex1-copy.txt" "${DATA}/ex1-regen-overbroad-expected.txt"
diff --git a/scripts/maint/practracker/testdata/.may_include b/scripts/maint/practracker/testdata/.may_include
new file mode 100644
index 0000000000..8542a35807
--- /dev/null
+++ b/scripts/maint/practracker/testdata/.may_include
@@ -0,0 +1,4 @@
+!advisory
+
+permitted.h
+ext/good.c
diff --git a/scripts/maint/practracker/testdata/a.c b/scripts/maint/practracker/testdata/a.c
new file mode 100644
index 0000000000..3c338ab40d
--- /dev/null
+++ b/scripts/maint/practracker/testdata/a.c
@@ -0,0 +1,41 @@
+
+#include "one.h"
+#include "two.h"
+#incldue "three.h"
+
+# include "permitted.h"
+
+#include "ext/good.c"
+#include "bad.c"
+
+int
+i_am_a_function(void)
+{
+ call();
+ call();
+ /* comment
+
+ another */
+
+ return 3;
+}
+
+# include "five.h"
+
+long
+another_function(long x,
+ long y)
+{
+ int abcd;
+
+ abcd = x+y;
+ abcd *= abcd;
+
+ /* comment here */
+
+ return abcd +
+ abcd +
+ abcd;
+}
+
+/* And a comment to grow! */
diff --git a/scripts/maint/practracker/testdata/b.c b/scripts/maint/practracker/testdata/b.c
new file mode 100644
index 0000000000..bef277aaae
--- /dev/null
+++ b/scripts/maint/practracker/testdata/b.c
@@ -0,0 +1,15 @@
+
+MOCK_IMPL(int,
+foo,(void))
+{
+ // blah1
+ return 0;
+}
+
+MOCK_IMPL(int,
+bar,( long z))
+{
+ // blah2
+
+ return (int)(z+2);
+}
diff --git a/scripts/maint/practracker/testdata/ex.txt b/scripts/maint/practracker/testdata/ex.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/scripts/maint/practracker/testdata/ex.txt
diff --git a/scripts/maint/practracker/testdata/ex0-expected.txt b/scripts/maint/practracker/testdata/ex0-expected.txt
new file mode 100644
index 0000000000..c9fb83bac3
--- /dev/null
+++ b/scripts/maint/practracker/testdata/ex0-expected.txt
@@ -0,0 +1,13 @@
+Unusual pattern permitted.h in scripts/maint/practracker/testdata
+problem file-size a.c 41
+problem include-count a.c 6
+problem function-size a.c:i_am_a_function() 9
+problem function-size a.c:another_function() 12
+problem dependency-violation a.c 4
+problem file-size b.c 15
+problem function-size b.c:foo() 4
+problem function-size b.c:bar() 5
+problem file-size header.h 8
+problem include-count header.h 4
+problem dependency-violation header.h 3
+practracker exit status: 11
diff --git a/scripts/maint/practracker/testdata/ex0.txt b/scripts/maint/practracker/testdata/ex0.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/scripts/maint/practracker/testdata/ex0.txt
diff --git a/scripts/maint/practracker/testdata/ex1-expected.txt b/scripts/maint/practracker/testdata/ex1-expected.txt
new file mode 100644
index 0000000000..2713338ae4
--- /dev/null
+++ b/scripts/maint/practracker/testdata/ex1-expected.txt
@@ -0,0 +1,5 @@
+Unusual pattern permitted.h in scripts/maint/practracker/testdata
+problem function-size a.c:i_am_a_function() 9
+(warning) problem function-size a.c:another_function() 12
+problem function-size b.c:foo() 4
+practracker exit status: 2
diff --git a/scripts/maint/practracker/testdata/ex1-overbroad-expected.txt b/scripts/maint/practracker/testdata/ex1-overbroad-expected.txt
new file mode 100644
index 0000000000..5ca480dc04
--- /dev/null
+++ b/scripts/maint/practracker/testdata/ex1-overbroad-expected.txt
@@ -0,0 +1,4 @@
+Unusual pattern permitted.h in scripts/maint/practracker/testdata
+problem file-size a.c 45 -> 41
+problem file-size z.c 100 -> 0
+practracker exit status: 3
diff --git a/scripts/maint/practracker/testdata/ex1-regen-expected.txt b/scripts/maint/practracker/testdata/ex1-regen-expected.txt
new file mode 100644
index 0000000000..bdf3681edf
--- /dev/null
+++ b/scripts/maint/practracker/testdata/ex1-regen-expected.txt
@@ -0,0 +1,46 @@
+# Welcome to the exceptions file for Tor's best-practices tracker!
+#
+# Each line of this file represents a single violation of Tor's best
+# practices -- typically, a violation that we had before practracker.py
+# first existed.
+#
+# There are four kinds of problems that we recognize right now:
+# function-size -- a function of more than 100 lines.
+# file-size -- a .c file of more than 3000 lines, or a .h
+# file with more than 500 lines.
+# include-count -- a .c file with more than 50 #includes,
+# or a .h file with more than 15 #includes.
+# dependency-violation -- a file includes a header that it should
+# not, according to an advisory .may_include file.
+#
+# Each line below represents a single exception that practracker should
+# _ignore_. Each line has four parts:
+# 1. The word "problem".
+# 2. The kind of problem.
+# 3. The location of the problem: either a filename, or a
+# filename:functionname pair.
+# 4. The magnitude of the problem to ignore.
+#
+# So for example, consider this line:
+# problem file-size /src/core/or/connection_or.c 3200
+#
+# It tells practracker to allow the mentioned file to be up to 3200 lines
+# long, even though ordinarily it would warn about any file with more than
+# 3000 lines.
+#
+# You can either edit this file by hand, or regenerate it completely by
+# running `make practracker-regen`.
+#
+# Remember: It is better to fix the problem than to add a new exception!
+
+problem file-size a.c 41
+problem include-count a.c 6
+problem function-size a.c:i_am_a_function() 9
+problem function-size a.c:another_function() 12
+problem dependency-violation a.c 4
+problem file-size b.c 15
+problem function-size b.c:foo() 4
+problem function-size b.c:bar() 5
+problem file-size header.h 8
+problem include-count header.h 4
+problem dependency-violation header.h 3
diff --git a/scripts/maint/practracker/testdata/ex1-regen-overbroad-expected.txt b/scripts/maint/practracker/testdata/ex1-regen-overbroad-expected.txt
new file mode 100644
index 0000000000..4521029b10
--- /dev/null
+++ b/scripts/maint/practracker/testdata/ex1-regen-overbroad-expected.txt
@@ -0,0 +1,45 @@
+# Welcome to the exceptions file for Tor's best-practices tracker!
+#
+# Each line of this file represents a single violation of Tor's best
+# practices -- typically, a violation that we had before practracker.py
+# first existed.
+#
+# There are four kinds of problems that we recognize right now:
+# function-size -- a function of more than 100 lines.
+# file-size -- a .c file of more than 3000 lines, or a .h
+# file with more than 500 lines.
+# include-count -- a .c file with more than 50 #includes,
+# or a .h file with more than 15 #includes.
+# dependency-violation -- a file includes a header that it should
+# not, according to an advisory .may_include file.
+#
+# Each line below represents a single exception that practracker should
+# _ignore_. Each line has four parts:
+# 1. The word "problem".
+# 2. The kind of problem.
+# 3. The location of the problem: either a filename, or a
+# filename:functionname pair.
+# 4. The magnitude of the problem to ignore.
+#
+# So for example, consider this line:
+# problem file-size /src/core/or/connection_or.c 3200
+#
+# It tells practracker to allow the mentioned file to be up to 3200 lines
+# long, even though ordinarily it would warn about any file with more than
+# 3000 lines.
+#
+# You can either edit this file by hand, or regenerate it completely by
+# running `make practracker-regen`.
+#
+# Remember: It is better to fix the problem than to add a new exception!
+
+problem file-size a.c 41
+problem include-count a.c 6
+problem function-size a.c:i_am_a_function() 8
+problem function-size a.c:another_function() 11
+problem file-size b.c 15
+problem function-size b.c:bar() 5
+problem dependency-violation a.c 4
+problem dependency-violation header.h 3
+problem file-size header.h 8
+problem include-count header.h 4
diff --git a/scripts/maint/practracker/testdata/ex1.txt b/scripts/maint/practracker/testdata/ex1.txt
new file mode 100644
index 0000000000..af8de03291
--- /dev/null
+++ b/scripts/maint/practracker/testdata/ex1.txt
@@ -0,0 +1,18 @@
+
+problem file-size a.c 45
+problem include-count a.c 6
+# this problem will produce an error
+problem function-size a.c:i_am_a_function() 8
+# this problem will produce a warning
+problem function-size a.c:another_function() 11
+problem file-size b.c 15
+# This is removed, and so will produce an error.
+# problem function-size b.c:foo() 4
+# This exception isn't used.
+problem file-size z.c 100
+
+problem function-size b.c:bar() 5
+problem dependency-violation a.c 4
+problem dependency-violation header.h 3
+problem file-size header.h 8
+problem include-count header.h 4
diff --git a/scripts/maint/practracker/testdata/header.h b/scripts/maint/practracker/testdata/header.h
new file mode 100644
index 0000000000..1183f5db9a
--- /dev/null
+++ b/scripts/maint/practracker/testdata/header.h
@@ -0,0 +1,8 @@
+
+// some forbidden includes
+#include "foo.h"
+#include "quux.h"
+#include "quup.h"
+
+// a permitted include
+#include "permitted.h"
diff --git a/scripts/maint/practracker/testdata/not_c_file b/scripts/maint/practracker/testdata/not_c_file
new file mode 100644
index 0000000000..e150962c02
--- /dev/null
+++ b/scripts/maint/practracker/testdata/not_c_file
@@ -0,0 +1,2 @@
+
+This isn't a C file, so practracker shouldn't care about it.
diff --git a/scripts/maint/practracker/util.py b/scripts/maint/practracker/util.py
new file mode 100644
index 0000000000..c52ca2fbbf
--- /dev/null
+++ b/scripts/maint/practracker/util.py
@@ -0,0 +1,61 @@
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+
+# We don't want to run metrics for unittests, automatically-generated C files,
+# external libraries or git leftovers.
+EXCLUDE_SOURCE_DIRS = {"src/test/", "src/trunnel/", "src/rust/",
+ "src/ext/" }
+
+EXCLUDE_FILES = {"orconfig.h"}
+
+def _norm(p):
+ return os.path.normcase(os.path.normpath(p))
+
+def get_tor_c_files(tor_topdir, include_dirs=None):
+ """
+    Return a list of the .c and .h filenames we want to get metrics for.
+ """
+ files_list = []
+ exclude_dirs = { _norm(os.path.join(tor_topdir, p)) for p in EXCLUDE_SOURCE_DIRS }
+
+ if include_dirs is None:
+ topdirs = [ tor_topdir ]
+ else:
+ topdirs = [ os.path.join(tor_topdir, inc) for inc in include_dirs ]
+
+ for topdir in topdirs:
+ for root, directories, filenames in os.walk(topdir):
+ # Remove all the directories that are excluded.
+ directories[:] = [ d for d in directories
+ if _norm(os.path.join(root,d)) not in exclude_dirs ]
+ directories.sort()
+ filenames.sort()
+ for filename in filenames:
+ # We only care about .c and .h files
+ if not (filename.endswith(".c") or filename.endswith(".h")):
+ continue
+ if filename in EXCLUDE_FILES:
+ continue
+ # Avoid editor temporary files
+ bname = os.path.basename(filename)
+ if bname.startswith("."):
+ continue
+ if bname.startswith("#"):
+ continue
+
+ full_path = os.path.join(root,filename)
+
+ files_list.append(full_path)
+
+ return files_list
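+
+# e.g. get_tor_c_files("tor/", ["src/lib"]) -- a hypothetical call --
+# walks only tor/src/lib, skips the EXCLUDE_SOURCE_DIRS trees and editor
+# temporaries, and returns paths like "tor/src/lib/log/util_bug.c".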
+
+class NullFile:
+ """A file-like object that we can us to suppress output."""
+ def __init__(self):
+ pass
+ def write(self, s):
+ pass
diff --git a/scripts/maint/rectify_include_paths.py b/scripts/maint/rectify_include_paths.py
index 401fadae6d..6c7b252535 100755
--- a/scripts/maint/rectify_include_paths.py
+++ b/scripts/maint/rectify_include_paths.py
@@ -1,8 +1,17 @@
-#!/usr/bin/python3
+#!/usr/bin/env python
+
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
import os
import os.path
import re
+import sys
+
+def warn(msg):
+ sys.stderr.write("WARNING: %s\n"%msg)
# Find all the include files, map them to their real names.
@@ -11,6 +20,8 @@ def exclude(paths, dirnames):
if p in dirnames:
dirnames.remove(p)
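+# Sentinel stored in the include map for header names that appear in
+# more than one directory; fix_includes() leaves such includes alone.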
+DUPLICATE = object()
+
def get_include_map():
includes = { }
@@ -18,8 +29,17 @@ def get_include_map():
exclude(["ext", "win32"], dirnames)
for fname in fnames:
+ # Avoid editor temporary files
+ if fname.startswith("."):
+ continue
+ if fname.startswith("#"):
+ continue
+
if fname.endswith(".h"):
- assert fname not in includes
+ if fname in includes:
+ warn("Multiple headers named %s"%fname)
+ includes[fname] = DUPLICATE
+ continue
include = os.path.join(dirpath, fname)
assert include.startswith("src/")
includes[fname] = include[4:]
@@ -37,7 +57,7 @@ def fix_includes(inp, out, mapping):
if m:
include,hdr,rest = m.groups()
basehdr = get_base_header_name(hdr)
- if basehdr in mapping:
+ if basehdr in mapping and mapping[basehdr] is not DUPLICATE:
out.write('{}{}{}\n'.format(include,mapping[basehdr],rest))
continue
@@ -49,6 +69,12 @@ for dirpath,dirnames,fnames in os.walk("src"):
exclude(["trunnel"], dirnames)
for fname in fnames:
+ # Avoid editor temporary files
+ if fname.startswith("."):
+ continue
+ if fname.startswith("#"):
+ continue
+
if fname.endswith(".c") or fname.endswith(".h"):
fname = os.path.join(dirpath, fname)
tmpfile = fname+".tmp"
diff --git a/scripts/maint/redox.py b/scripts/maint/redox.py
index 203cce0107..12b02c8a44 100755
--- a/scripts/maint/redox.py
+++ b/scripts/maint/redox.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2008-2019, The Tor Project, Inc.
# See LICENSE for licensing information.
@@ -29,6 +29,19 @@
# "mv fname.c.newdoc fname.c". Otherwise, you'll need to merge
# the parts you like by hand.
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import re
+import sys
+
+try:
+ xrange # Python 2
+except NameError:
+ xrange = range # Python 3
+
# Which files should we ignore warnings from? Mostly, these are external
# files that we've snarfed in from somebody else, whose C we do not intend
# to document for them.
@@ -52,9 +65,6 @@ ADD_DOCDOCS_TO_TYPES += [ 'variable', ]
# ====================
# The rest of this should not need hacking.
-import re
-import sys
-
KINDS = [ "type", "field", "typedef", "define", "function", "variable",
"enumeration" ]
@@ -73,7 +83,7 @@ def parsething(thing):
else:
m = THING_RE.match(thing)
if not m:
- print thing, "???? Format didn't match."
+ print(thing, "???? Format didn't match.")
return None, None
else:
name, tp, parent = m.groups()
@@ -150,7 +160,7 @@ def checkf(fn, errs):
"""
for skip in SKIP_FILES:
if fn.endswith(skip):
- print "Skipping",fn
+ print("Skipping",fn)
return
comments = []
@@ -169,8 +179,8 @@ def checkf(fn, errs):
ln = findline(lines, line, name)
if ln == None:
- print "Couldn't find the definition of %s allegedly on %s of %s"%(
- name, line, fn)
+ print("Couldn't find the definition of %s allegedly on %s of %s"%(
+ name, line, fn))
else:
if hasdocdoc(lines, line, kind):
# print "Has a DOCDOC"
@@ -215,12 +225,12 @@ def applyComments(fn, entries):
outf.write(line)
outf.close()
- print "Added %s DOCDOCs to %s" %(N, fn)
+ print("Added %s DOCDOCs to %s" %(N, fn))
e = read()
for fn, errs in e.iteritems():
- print `(fn, errs)`
+ print(repr((fn, errs)))
comments = checkf(fn, errs)
if comments:
applyComments(fn, comments)
diff --git a/scripts/maint/rename_c_identifier.py b/scripts/maint/rename_c_identifier.py
new file mode 100755
index 0000000000..77802e10f3
--- /dev/null
+++ b/scripts/maint/rename_c_identifier.py
@@ -0,0 +1,267 @@
+#!/usr/bin/env python3
+#
+# Copyright (c) 2001 Matej Pfajfar.
+# Copyright (c) 2001-2004, Roger Dingledine.
+# Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+# Copyright (c) 2007-2019, The Tor Project, Inc.
+# See LICENSE for licensing information
+
+"""
+Helpful script to replace one or more C identifiers, and optionally
+generate a commit message explaining what happened.
+"""
+
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import argparse
+import fileinput
+import os
+import re
+import shlex
+import subprocess
+import sys
+import tempfile
+
+TOPDIR = "src"
+
+
+def is_c_file(fn):
+ """
+ Return true iff fn is the name of a C file.
+
+ >>> is_c_file("a/b/module.c")
+ True
+ >>> is_c_file("a/b/module.h")
+ True
+ >>> is_c_file("a/b/module.c~")
+ False
+ >>> is_c_file("a/b/.module.c")
+ False
+ >>> is_c_file("a/b/module.cpp")
+ False
+ """
+ fn = os.path.split(fn)[1]
+ # Avoid editor temporary files
+ if fn.startswith(".") or fn.startswith("#"):
+ return False
+ ext = os.path.splitext(fn)[1]
+ return ext in {".c", ".h", ".i", ".inc"}
+
+
+def list_c_files(topdir=TOPDIR):
+ """
+ Use git to list all the C files under version control.
+
+ >>> lst = list(list_c_files())
+ >>> "src/core/mainloop/mainloop.c" in lst
+ True
+ >>> "src/core/mainloop/twiddledeedoo.c" in lst
+ False
+ >>> "micro-revision.i" in lst
+ False
+ """
+ proc = subprocess.Popen(
+ ["git", "ls-tree", "--name-only", "-r", "HEAD", topdir],
+ stdout=subprocess.PIPE,
+ encoding="utf-8")
+ for line in proc.stdout.readlines():
+ line = line.strip()
+ if is_c_file(line):
+ yield line
+
+
+class Rewriter:
+ """
+ A rewriter applies a series of word-by-word replacements, in
+ sequence. Replacements only happen at "word boundaries",
+ as determined by the \\b regular expression marker.
+
+ ("A word is defined as a sequence of alphanumeric or underscore
+ characters", according to the documentation.)
+
+ >>> R = Rewriter([("magic", "secret"), ("words", "codes")])
+ >>> R.apply("The magic words are rambunctious bluejay")
+ 'The secret codes are rambunctious bluejay'
+ >>> R.apply("The magical words are rambunctious bluejay")
+ 'The magical codes are rambunctious bluejay'
+ >>> R.get_count()
+ 3
+
+ """
+
+ def __init__(self, replacements):
+ """Make a new Rewriter. Takes a sequence of pairs of
+ (from_id, to_id), where from_id is an identifier to replace,
+ and to_id is its replacement.
+ """
+ self._patterns = []
+ for id1, id2 in replacements:
+ pat = re.compile(r"\b{}\b".format(re.escape(id1)))
+ self._patterns.append((pat, id2))
+
+ self._count = 0
+
+ def apply(self, line):
+ """Return `line` as transformed by this rewriter."""
+ for pat, ident in self._patterns:
+ line, count = pat.subn(ident, line)
+ self._count += count
+ return line
+
+ def get_count(self):
+ """Return the number of identifiers that this rewriter has
+ rewritten."""
+ return self._count
+
+
+def rewrite_files(files, rewriter):
+ """
+ Apply `rewriter` to every file in `files`, replacing those files
+ with their rewritten contents.
+ """
+ for line in fileinput.input(files, inplace=True):
+ sys.stdout.write(rewriter.apply(line))
+
+
+def make_commit_msg(pairs, no_verify):
+ """Return a commit message to explain what was replaced by the provided
+ arguments.
+ """
+ script = ["./scripts/maint/rename_c_identifier.py"]
+ for id1, id2 in pairs:
+ qid1 = shlex.quote(id1)
+ qid2 = shlex.quote(id2)
+ script.append(" {} {}".format(qid1, qid2))
+ script = " \\\n".join(script)
+
+ if len(pairs) == 1:
+ line1 = "Rename {} to {}".format(*pairs[0])
+ else:
+ line1 = "Replace several C identifiers."
+
+ msg = """\
+{}
+
+This is an automated commit, generated by this command:
+
+{}
+""".format(line1, script)
+
+ if no_verify:
+ msg += """
+It was generated with --no-verify, so it probably breaks some commit hooks.
+The committer should be sure to fix them up in a subsequent commit.
+"""
+
+ return msg
+
+
+def commit(pairs, no_verify=False):
+ """Try to commit the current git state, generating the commit message as
+ appropriate. If `no_verify` is True, pass the --no-verify argument to
+ git commit.
+ """
+ args = []
+ if no_verify:
+ args.append("--no-verify")
+
+ # We create the temporary file with delete=False: otherwise we would not
+ # be guaranteed to be able to hand the file to git for it to open. That
+ # means we have to delete the file ourselves, hence the try/finally.
+ fname = None
+ try:
+ with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
+ fname = f.name
+ f.write(make_commit_msg(pairs, no_verify))
+ s = subprocess.run(["git", "commit", "-a", "-F", fname, "--edit"]+args)
+ if s.returncode != 0 and not no_verify:
+ print('"git commit" failed. Maybe retry with --no-verify?',
+ file=sys.stderr)
+ revert_changes()
+ return False
+ finally:
+ # fname is still None if NamedTemporaryFile itself failed.
+ if fname is not None:
+ os.unlink(fname)
+
+ return True
+
+
+def any_uncommitted_changes():
+ """Return True if git says there are any uncommitted changes in the current
+ working tree; False otherwise.
+ """
+ s = subprocess.run(["git", "diff-index", "--quiet", "HEAD"])
+ return s.returncode != 0
+
+
+DESC = "Replace one identifier with another throughout our source."
+EXAMPLES = """\
+Examples:
+
+ rename_c_identifier.py set_ctrl_id set_controller_id
+ (Replaces every occurrence of "set_ctrl_id" with "set_controller_id".)
+
+ rename_c_identifier.py --commit set_ctrl_id set_controller_id
+ (As above, but also generate a git commit with an appropriate message.)
+
+ rename_c_identifier.py a b c d
+ (Replace "a" with "b", and "c" with "d".)"""
+
+
+def revert_changes():
+ """Tell git to revert all the changes in the current working tree.
+ """
+ print('Reverting changes.', file=sys.stderr)
+ subprocess.run(["git", "checkout", "--quiet", TOPDIR])
+
+
+def main(argv):
+ parser = argparse.ArgumentParser(description=DESC, epilog=EXAMPLES,
+ # prevent re-wrapping the examples
+ formatter_class=argparse.RawDescriptionHelpFormatter)
+
+ parser.add_argument("--commit", action='store_true',
+ help="Generate a Git commit.")
+ parser.add_argument("--no-verify", action='store_true',
+ help="Tell Git not to run its pre-commit hooks.")
+ parser.add_argument("from_id", type=str, help="Original identifier")
+ parser.add_argument("to_id", type=str, help="New identifier")
+ parser.add_argument("more", type=str, nargs=argparse.REMAINDER,
+ help="Additional identifier pairs")
+
+ args = parser.parse_args(argv[1:])
+
+ if len(args.more) % 2 != 0:
+ print("I require an even number of identifiers.", file=sys.stderr)
+ return 1
+
+ if any_uncommitted_changes():
+ print("Uncommitted changes found. Not running.", file=sys.stderr)
+ return 1
+
+ pairs = []
+ print("renaming {} to {}".format(args.from_id, args.to_id), file=sys.stderr)
+ pairs.append((args.from_id, args.to_id))
+ for idx in range(0, len(args.more), 2):
+ id1 = args.more[idx]
+ id2 = args.more[idx+1]
+ print("renaming {} to {}".format(id1, id2))
+ pairs.append((id1, id2))
+
+ rewriter = Rewriter(pairs)
+
+ rewrite_files(list_c_files(), rewriter)
+
+ print("Replaced {} identifiers".format(rewriter.get_count()),
+ file=sys.stderr)
+
+ if args.commit:
+ commit(pairs, args.no_verify)
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
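
A minimal sketch of the word-boundary semantics above, as a hypothetical
interactive session; it assumes the module is importable as
rename_c_identifier:

    >>> from rename_c_identifier import Rewriter
    >>> r = Rewriter([("conn", "connection")])
    >>> r.apply("conn = get_conn(); connect(conn_p);")
    'connection = get_conn(); connect(conn_p);'
    >>> r.get_count()
    1

Only the standalone "conn" is rewritten: "get_conn", "connect", and
"conn_p" all fail the \b boundary test, since "_" and adjacent letters
count as word characters.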
diff --git a/scripts/maint/run_calltool.sh b/scripts/maint/run_calltool.sh
index efb8706fea..b0268322f4 100755
--- a/scripts/maint/run_calltool.sh
+++ b/scripts/maint/run_calltool.sh
@@ -15,10 +15,10 @@ SUBITEMS="fn_graph fn_invgraph fn_scc fn_scc_weaklinks module_graph module_invgr
for calculation in $SUBITEMS; do
echo "======== $calculation"
- python -m calltool $calculation > callgraph/$calculation
+ python -m calltool "$calculation" > callgraph/"$calculation"
done
-echo <<EOF > callgraph/README
+cat <<EOF > callgraph/README
This directory holds output from calltool, as run on Tor. For more
information about each of these files, see the NOTES and README files in
the calltool distribution.
diff --git a/scripts/maint/run_check_subsystem_order.sh b/scripts/maint/run_check_subsystem_order.sh
new file mode 100755
index 0000000000..8e98f1e49c
--- /dev/null
+++ b/scripts/maint/run_check_subsystem_order.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -e
+
+TOR="${abs_top_builddir:-.}/src/app/tor"
+
+INCLUDES_PY="${abs_top_srcdir:-.}/scripts/maint/practracker/includes.py"
+
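+# Exit code 77 is the automake test harness convention for "skipped".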
+if ! test -x "${INCLUDES_PY}" ; then
+ echo "skip"
+ exit 77
+fi
+
+"${TOR}" --dbg-dump-subsystem-list | \
+ "${PYTHON:-python}" \
+ "${INCLUDES_PY}" --check-subsystem-order - "${abs_top_srcdir}/src"
+
+echo ok
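
For reference, a rough Python rendering of the same pipeline; this is a
sketch under the script's own assumptions about the tor and includes.py
paths, not part of the patch:

    import subprocess, sys

    # Capture the subsystem list that the tor binary dumps...
    tor = subprocess.run(["./src/app/tor", "--dbg-dump-subsystem-list"],
                         stdout=subprocess.PIPE, check=True)
    # ...and feed it to includes.py on stdin ("-") for the order check.
    check = subprocess.run([sys.executable,
                            "scripts/maint/practracker/includes.py",
                            "--check-subsystem-order", "-", "src"],
                           input=tor.stdout)
    sys.exit(check.returncode)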
diff --git a/scripts/maint/sortChanges.py b/scripts/maint/sortChanges.py
index 986b94b025..5f6324e387 100755
--- a/scripts/maint/sortChanges.py
+++ b/scripts/maint/sortChanges.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
# Copyright (c) 2014-2019, The Tor Project, Inc.
# See LICENSE for licensing information
@@ -7,6 +7,11 @@
changelog.
"""
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
import re
import sys
@@ -43,7 +48,7 @@ REPLACEMENTS = {
def score(s,fname=None):
m = re.match(r'^ +o ([^\n]*)\n(.*)', s, re.M|re.S)
if not m:
- print >>sys.stderr, "Can't score %r from %s"%(s,fname)
+ print("Can't score %r from %s"%(s,fname), file=sys.stderr)
heading = m.group(1)
heading = REPLACEMENTS.get(heading, heading)
lw = m.group(1).lower()
@@ -100,9 +105,9 @@ changes.sort()
last_lw = "this is not a header"
for _, lw, header, rest in changes:
if lw == last_lw:
- print rest,
+ print(rest, end="")
else:
- print
- print " o",header
- print rest,
+ print()
+ print(" o",header)
+ print(rest, end="")
last_lw = lw
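
The hunks above apply the standard Python 2 to Python 3 print migration;
the correspondences used here, shown for illustration:

    print rest,              ->  print(rest, end="")          # no newline
    print                    ->  print()                      # blank line
    print >>sys.stderr, msg  ->  print(msg, file=sys.stderr)  # to stderr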
diff --git a/scripts/maint/updateCopyright.pl b/scripts/maint/updateCopyright.pl
index bd24377d38..6800032f87 100755
--- a/scripts/maint/updateCopyright.pl
+++ b/scripts/maint/updateCopyright.pl
@@ -1,7 +1,9 @@
#!/usr/bin/perl -i -w -p
-$NEWYEAR=2018;
-s/Copyright(.*) (201[^8]), The Tor Project/Copyright$1 $2-${NEWYEAR}, The Tor Project/;
+@now = gmtime();
+$NEWYEAR=$now[5]+1900;
+
+s/Copyright([^-]*) (20[^-]*), The Tor Project/Copyright$1 $2-${NEWYEAR}, The Tor Project/;
s/Copyright(.*)-(20..), The Tor Project/Copyright$1-${NEWYEAR}, The Tor Project/;
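
In the Perl above, gmtime() field 5 is the year minus 1900, hence the
"+1900". A rough Python rendering of the two substitutions, for
illustration only:

    import re, time

    year = time.gmtime().tm_year   # already an absolute year in Python
    line = "Copyright (c) 2007-2019, The Tor Project, Inc."
    # First rule: turn a single year into a range ending this year.
    line = re.sub(r"Copyright([^-]*) (20[^-]*), The Tor Project",
                  r"Copyright\1 \2-%d, The Tor Project" % year, line)
    # Second rule: bump the end year of an existing range.
    line = re.sub(r"Copyright(.*)-(20..), The Tor Project",
                  r"Copyright\1-%d, The Tor Project" % year, line)
    # line now ends in "2007-<current year>, The Tor Project, Inc."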
diff --git a/scripts/maint/updateFallbackDirs.py b/scripts/maint/updateFallbackDirs.py
deleted file mode 100755
index 0ea3992d8f..0000000000
--- a/scripts/maint/updateFallbackDirs.py
+++ /dev/null
@@ -1,2216 +0,0 @@
-#!/usr/bin/env python
-
-# Usage:
-#
-# Regenerate the list:
-# scripts/maint/updateFallbackDirs.py > src/app/config/fallback_dirs.inc 2> fallback_dirs.log
-#
-# Check the existing list:
-# scripts/maint/updateFallbackDirs.py check_existing > fallback_dirs.inc.ok 2> fallback_dirs.log
-# mv fallback_dirs.inc.ok src/app/config/fallback_dirs.inc
-#
-# This script should be run from a stable, reliable network connection,
-# with no other network activity (and not over tor).
-# If this is not possible, please disable:
-# PERFORM_IPV4_DIRPORT_CHECKS and PERFORM_IPV6_DIRPORT_CHECKS
-#
-# Needs dateutil, stem, and potentially other python packages.
-# Optionally uses ipaddress (python 3 builtin) or py2-ipaddress (package)
-# for netblock analysis.
-#
-# Then read the logs to make sure the fallbacks aren't dominated by a single
-# netblock or port.
-
-# Script by weasel, April 2015
-# Portions by gsathya & karsten, 2013
-# https://trac.torproject.org/projects/tor/attachment/ticket/8374/dir_list.2.py
-# Modifications by teor, 2015
-
-import StringIO
-import string
-import re
-import datetime
-import gzip
-import os.path
-import json
-import math
-import sys
-import urllib
-import urllib2
-import hashlib
-import dateutil.parser
-# bson_lazy provides bson
-#from bson import json_util
-import copy
-import re
-
-from stem.descriptor import DocumentHandler
-from stem.descriptor.remote import get_consensus, get_server_descriptors, MAX_FINGERPRINTS
-
-import logging
-logging.root.name = ''
-
-HAVE_IPADDRESS = False
-try:
- # python 3 builtin, or install package py2-ipaddress
- # there are several ipaddress implementations for python 2
- # with slightly different semantics with str typed text
- # fortunately, all our IP addresses are in unicode
- import ipaddress
- HAVE_IPADDRESS = True
-except ImportError:
- # if this happens, we avoid doing netblock analysis
- logging.warning('Unable to import ipaddress, please install py2-ipaddress.' +
- ' A fallback list will be created, but optional netblock' +
- ' analysis will not be performed.')
-
-## Top-Level Configuration
-
-# We use semantic versioning: https://semver.org
-# In particular:
-# * major changes include removing a mandatory field, or anything else that
-# would break an appropriately tolerant parser,
-# * minor changes include adding a field,
-# * patch changes include changing header comments or other unstructured
-# content
-FALLBACK_FORMAT_VERSION = '2.0.0'
-SECTION_SEPARATOR_BASE = '====='
-SECTION_SEPARATOR_COMMENT = '/* ' + SECTION_SEPARATOR_BASE + ' */'
-
-# Output all candidate fallbacks, or only output selected fallbacks?
-OUTPUT_CANDIDATES = False
-
-# Perform DirPort checks over IPv4?
-# Change this to False if IPv4 doesn't work for you, or if you don't want to
-# download a consensus for each fallback
-# Don't check ~1000 candidates when OUTPUT_CANDIDATES is True
-PERFORM_IPV4_DIRPORT_CHECKS = False if OUTPUT_CANDIDATES else True
-
-# Perform DirPort checks over IPv6?
-# If you know IPv6 works for you, set this to True
-# This will exclude IPv6 relays without an IPv6 DirPort configured
-# So it's best left at False until #18394 is implemented
-# Don't check ~1000 candidates when OUTPUT_CANDIDATES is True
-PERFORM_IPV6_DIRPORT_CHECKS = False if OUTPUT_CANDIDATES else False
-
-# Must relays be running now?
-MUST_BE_RUNNING_NOW = (PERFORM_IPV4_DIRPORT_CHECKS
- or PERFORM_IPV6_DIRPORT_CHECKS)
-
-# Clients have been using microdesc consensuses by default for a while now
-DOWNLOAD_MICRODESC_CONSENSUS = True
-
-# If a relay delivers an expired consensus, if it expired less than this many
-# seconds ago, we still allow the relay. This should never be less than -90,
-# as all directory mirrors should have downloaded a consensus 90 minutes
-# before it expires. It should never be more than 24 hours, because clients
-# reject consensuses that are older than REASONABLY_LIVE_TIME.
-# For the consensus expiry check to be accurate, the machine running this
-# script needs an accurate clock.
-#
-# Relays on 0.3.0 and later return a 404 when they are about to serve an
-# expired consensus. This makes them fail the download check.
-# We use a tolerance of 0, so that 0.2.x series relays also fail the download
-# check if they serve an expired consensus.
-CONSENSUS_EXPIRY_TOLERANCE = 0
-
-# Output fallback name, flags, bandwidth, and ContactInfo in a C comment?
-OUTPUT_COMMENTS = True if OUTPUT_CANDIDATES else False
-
-# Output matching ContactInfo in fallbacks list?
-# Useful if you're trying to contact operators
-CONTACT_COUNT = True if OUTPUT_CANDIDATES else False
-
-# How the list should be sorted:
-# fingerprint: is useful for stable diffs of fallback lists
-# measured_bandwidth: is useful when pruning the list based on bandwidth
-# contact: is useful for contacting operators once the list has been pruned
-OUTPUT_SORT_FIELD = 'contact' if OUTPUT_CANDIDATES else 'fingerprint'
-
-## OnionOO Settings
-
-ONIONOO = 'https://onionoo.torproject.org/'
-#ONIONOO = 'https://onionoo.thecthulhu.com/'
-
-# Don't bother going out to the Internet, just use the files available locally,
-# even if they're very old
-LOCAL_FILES_ONLY = False
-
-## Whitelist / Blacklist Filter Settings
-
-# The whitelist contains entries that are included if all attributes match
-# (IPv4, dirport, orport, id, and optionally IPv6 and IPv6 orport)
-
-# What happens to entries not in whitelist?
-# When True, they are included, when False, they are excluded
-INCLUDE_UNLISTED_ENTRIES = True if OUTPUT_CANDIDATES else False
-
-WHITELIST_FILE_NAME = 'scripts/maint/fallback.whitelist'
-FALLBACK_FILE_NAME = 'src/app/config/fallback_dirs.inc'
-
-# The number of bytes we'll read from a filter file before giving up
-MAX_LIST_FILE_SIZE = 1024 * 1024
-
-## Eligibility Settings
-
-# Require fallbacks to have the same address and port for a set amount of time
-# We used to have this at 1 week, but that caused many fallback failures, which
-# meant that we had to rebuild the list more often. We want fallbacks to be
-# stable for 2 years, so we set it to a few months.
-#
-# If a relay changes address or port, that's it, it's not useful any more,
-# because clients can't find it
-ADDRESS_AND_PORT_STABLE_DAYS = 90
-# We ignore relays that have been down for more than this period
-MAX_DOWNTIME_DAYS = 0 if MUST_BE_RUNNING_NOW else 7
-# FallbackDirs must have a time-weighted-fraction that is greater than or
-# equal to:
-# Mirrors that are down half the time are still useful half the time
-CUTOFF_RUNNING = .50
-CUTOFF_V2DIR = .50
-# Guard flags are removed for some time after a relay restarts, so we ignore
-# the guard flag.
-CUTOFF_GUARD = .00
-# FallbackDirs must have a time-weighted-fraction that is less than or equal
-# to:
-# .00 means no bad exits
-PERMITTED_BADEXIT = .00
-
-# older entries' weights are adjusted with ALPHA^(age in days)
-AGE_ALPHA = 0.99
-
-# this factor is used to scale OnionOO entries to [0,1]
-ONIONOO_SCALE_ONE = 999.
-
-## Fallback Count Limits
-
-# The target for these parameters is 20% of the guards in the network
-# This is around 200 as of October 2015
-_FB_POG = 0.2
-FALLBACK_PROPORTION_OF_GUARDS = None if OUTPUT_CANDIDATES else _FB_POG
-
-# Limit the number of fallbacks (eliminating lowest by advertised bandwidth)
-MAX_FALLBACK_COUNT = None if OUTPUT_CANDIDATES else 200
-# Emit a C #error if the number of fallbacks is less than expected
-MIN_FALLBACK_COUNT = 0 if OUTPUT_CANDIDATES else MAX_FALLBACK_COUNT*0.5
-
-# The maximum number of fallbacks on the same address, contact, or family
-#
-# With 150 fallbacks, this means each operator sees 5% of client bootstraps.
-# For comparison:
-# - We try to limit guard and exit operators to 5% of the network
-# - The directory authorities used to see 11% of client bootstraps each
-#
-# We also don't want too much of the list to go down if a single operator
-# has to move all their relays.
-MAX_FALLBACKS_PER_IP = 1
-MAX_FALLBACKS_PER_IPV4 = MAX_FALLBACKS_PER_IP
-MAX_FALLBACKS_PER_IPV6 = MAX_FALLBACKS_PER_IP
-MAX_FALLBACKS_PER_CONTACT = 7
-MAX_FALLBACKS_PER_FAMILY = 7
-
-## Fallback Bandwidth Requirements
-
-# Any fallback with the Exit flag has its bandwidth multiplied by this fraction
-# to make sure we aren't further overloading exits
-# (Set to 1.0, because we asked that only lightly loaded exits opt-in,
-# and the extra load really isn't that much for large relays.)
-EXIT_BANDWIDTH_FRACTION = 1.0
-
-# If a single fallback's bandwidth is too low, it's pointless adding it
-# We expect fallbacks to handle an extra 10 kilobytes per second of traffic
-# Make sure they can support fifty times the expected extra load
-#
-# We convert this to a consensus weight before applying the filter,
-# because all the bandwidth amounts are specified by the relay
-MIN_BANDWIDTH = 50.0 * 10.0 * 1024.0
-
-# Clients will time out after 30 seconds trying to download a consensus
-# So allow fallback directories half that to deliver a consensus
-# The exact download times might change based on the network connection
-# running this script, but only by a few seconds
-# There is also about a second of python overhead
-CONSENSUS_DOWNLOAD_SPEED_MAX = 15.0
-# If the relay fails a consensus check, retry the download
-# This avoids delisting a relay due to transient network conditions
-CONSENSUS_DOWNLOAD_RETRY = True
-
-## Parsing Functions
-
-def parse_ts(t):
- return datetime.datetime.strptime(t, "%Y-%m-%d %H:%M:%S")
-
-def remove_bad_chars(raw_string, bad_char_list):
- # Remove each character in the bad_char_list
- cleansed_string = raw_string
- for c in bad_char_list:
- cleansed_string = cleansed_string.replace(c, '')
- return cleansed_string
-
-def cleanse_unprintable(raw_string):
- # Remove all unprintable characters
- cleansed_string = ''
- for c in raw_string:
- if c in string.printable:
- cleansed_string += c
- return cleansed_string
-
-def cleanse_whitespace(raw_string):
- # Replace all whitespace characters with a space
- cleansed_string = raw_string
- for c in string.whitespace:
- cleansed_string = cleansed_string.replace(c, ' ')
- return cleansed_string
-
-def cleanse_c_multiline_comment(raw_string):
- cleansed_string = raw_string
- # Embedded newlines should be removed by tor/onionoo, but let's be paranoid
- cleansed_string = cleanse_whitespace(cleansed_string)
- # ContactInfo and Version can be arbitrary binary data
- cleansed_string = cleanse_unprintable(cleansed_string)
- # Prevent a malicious / unanticipated string from breaking out
- # of a C-style multiline comment
- # This removes '/*' and '*/' and '//'
- bad_char_list = '*/'
- # Prevent a malicious string from using C nulls
- bad_char_list += '\0'
- # Avoid confusing parsers by making sure there is only one comma per fallback
- bad_char_list += ','
- # Avoid confusing parsers by making sure there is only one equals per field
- bad_char_list += '='
- # Be safer by removing bad characters entirely
- cleansed_string = remove_bad_chars(cleansed_string, bad_char_list)
- # Some compilers may further process the content of comments
- # There isn't much we can do to cover every possible case
- # But comment-based directives are typically only advisory
- return cleansed_string
-
-def cleanse_c_string(raw_string):
- cleansed_string = raw_string
- # Embedded newlines should be removed by tor/onionoo, but let's be paranoid
- cleansed_string = cleanse_whitespace(cleansed_string)
- # ContactInfo and Version can be arbitrary binary data
- cleansed_string = cleanse_unprintable(cleansed_string)
- # Prevent a malicious address/fingerprint string from breaking out
- # of a C-style string
- bad_char_list = '"'
- # Prevent a malicious string from using escapes
- bad_char_list += '\\'
- # Prevent a malicious string from using C nulls
- bad_char_list += '\0'
- # Avoid confusing parsers by making sure there is only one comma per fallback
- bad_char_list += ','
- # Avoid confusing parsers by making sure there is only one equals per field
- bad_char_list += '='
- # Be safer by removing bad characters entirely
- cleansed_string = remove_bad_chars(cleansed_string, bad_char_list)
- # Some compilers may further process the content of strings
- # There isn't much we can do to cover every possible case
- # But this typically only results in changes to the string data
- return cleansed_string
-
-## OnionOO Source Functions
-
-# a dictionary of source metadata for each onionoo query we've made
-fetch_source = {}
-
-# register source metadata for 'what'
-# assumes we only retrieve one document for each 'what'
-def register_fetch_source(what, url, relays_published, version):
- fetch_source[what] = {}
- fetch_source[what]['url'] = url
- fetch_source[what]['relays_published'] = relays_published
- fetch_source[what]['version'] = version
-
-# list each registered source's 'what'
-def fetch_source_list():
- return sorted(fetch_source.keys())
-
-# given 'what', provide a multiline C comment describing the source
-def describe_fetch_source(what):
- desc = '/*'
- desc += '\n'
- desc += 'Onionoo Source: '
- desc += cleanse_c_multiline_comment(what)
- desc += ' Date: '
- desc += cleanse_c_multiline_comment(fetch_source[what]['relays_published'])
- desc += ' Version: '
- desc += cleanse_c_multiline_comment(fetch_source[what]['version'])
- desc += '\n'
- desc += 'URL: '
- desc += cleanse_c_multiline_comment(fetch_source[what]['url'])
- desc += '\n'
- desc += '*/'
- return desc
-
-## File Processing Functions
-
-def write_to_file(str, file_name, max_len):
- try:
- with open(file_name, 'w') as f:
- f.write(str[0:max_len])
- except EnvironmentError, error:
- logging.error('Writing file %s failed: %d: %s'%
- (file_name,
- error.errno,
- error.strerror)
- )
-
-def read_from_file(file_name, max_len):
- try:
- if os.path.isfile(file_name):
- with open(file_name, 'r') as f:
- return f.read(max_len)
- except EnvironmentError, error:
- logging.info('Loading file %s failed: %d: %s'%
- (file_name,
- error.errno,
- error.strerror)
- )
- return None
-
-def parse_fallback_file(file_name):
- file_data = read_from_file(file_name, MAX_LIST_FILE_SIZE)
- file_data = cleanse_unprintable(file_data)
- file_data = remove_bad_chars(file_data, '\n"\0')
- file_data = re.sub('/\*.*?\*/', '', file_data)
- file_data = file_data.replace(',', '\n')
- file_data = file_data.replace(' weight=10', '')
- return file_data
-
-def load_possibly_compressed_response_json(response):
- if response.info().get('Content-Encoding') == 'gzip':
- buf = StringIO.StringIO( response.read() )
- f = gzip.GzipFile(fileobj=buf)
- return json.load(f)
- else:
- return json.load(response)
-
-def load_json_from_file(json_file_name):
- # An exception here may be resolved by deleting the .last_modified
- # and .json files, and re-running the script
- try:
- with open(json_file_name, 'r') as f:
- return json.load(f)
- except EnvironmentError, error:
- raise Exception('Reading not-modified json file %s failed: %d: %s'%
- (json_file_name,
- error.errno,
- error.strerror)
- )
-
-## OnionOO Functions
-
-def datestr_to_datetime(datestr):
- # Parse datetimes like: Fri, 02 Oct 2015 13:34:14 GMT
- if datestr is not None:
- dt = dateutil.parser.parse(datestr)
- else:
- # Never modified - use start of epoch
- dt = datetime.datetime.utcfromtimestamp(0)
- # strip any timezone out (in case they're supported in future)
- dt = dt.replace(tzinfo=None)
- return dt
-
-def onionoo_fetch(what, **kwargs):
- params = kwargs
- params['type'] = 'relay'
- #params['limit'] = 10
- params['first_seen_days'] = '%d-'%(ADDRESS_AND_PORT_STABLE_DAYS)
- params['last_seen_days'] = '-%d'%(MAX_DOWNTIME_DAYS)
- params['flag'] = 'V2Dir'
- url = ONIONOO + what + '?' + urllib.urlencode(params)
-
- # Unfortunately, the URL is too long for some OS filenames,
- # but we still don't want to get files from different URLs mixed up
- base_file_name = what + '-' + hashlib.sha1(url).hexdigest()
-
- full_url_file_name = base_file_name + '.full_url'
- MAX_FULL_URL_LENGTH = 1024
-
- last_modified_file_name = base_file_name + '.last_modified'
- MAX_LAST_MODIFIED_LENGTH = 64
-
- json_file_name = base_file_name + '.json'
-
- if LOCAL_FILES_ONLY:
- # Read from the local file, don't write to anything
- response_json = load_json_from_file(json_file_name)
- else:
- # store the full URL to a file for debugging
- # no need to compare as long as you trust SHA-1
- write_to_file(url, full_url_file_name, MAX_FULL_URL_LENGTH)
-
- request = urllib2.Request(url)
- request.add_header('Accept-encoding', 'gzip')
-
- # load the last modified date from the file, if it exists
- last_mod_date = read_from_file(last_modified_file_name,
- MAX_LAST_MODIFIED_LENGTH)
- if last_mod_date is not None:
- request.add_header('If-modified-since', last_mod_date)
-
- # Parse last modified date
- last_mod = datestr_to_datetime(last_mod_date)
-
- # Not Modified and still recent enough to be useful
- # Onionoo / Globe used to use 6 hours, but we can afford a day
- required_freshness = datetime.datetime.utcnow()
- # strip any timezone out (to match dateutil.parser)
- required_freshness = required_freshness.replace(tzinfo=None)
- required_freshness -= datetime.timedelta(hours=24)
-
- # Make the OnionOO request
- response_code = 0
- try:
- response = urllib2.urlopen(request)
- response_code = response.getcode()
- except urllib2.HTTPError, error:
- response_code = error.code
- if response_code == 304: # not modified
- pass
- else:
- raise Exception("Could not get " + url + ": "
- + str(error.code) + ": " + error.reason)
-
- if response_code == 200: # OK
- last_mod = datestr_to_datetime(response.info().get('Last-Modified'))
-
- # Check for freshness
- if last_mod < required_freshness:
- if last_mod_date is not None:
- # This check sometimes fails transiently, retry the script if it does
- date_message = "Outdated data: last updated " + last_mod_date
- else:
- date_message = "No data: never downloaded "
- raise Exception(date_message + " from " + url)
-
- # Process the data
- if response_code == 200: # OK
-
- response_json = load_possibly_compressed_response_json(response)
-
- with open(json_file_name, 'w') as f:
- # use the most compact json representation to save space
- json.dump(response_json, f, separators=(',',':'))
-
- # store the last modified date in its own file
- if response.info().get('Last-modified') is not None:
- write_to_file(response.info().get('Last-Modified'),
- last_modified_file_name,
- MAX_LAST_MODIFIED_LENGTH)
-
- elif response_code == 304: # Not Modified
-
- response_json = load_json_from_file(json_file_name)
-
- else: # Unexpected HTTP response code not covered in the HTTPError above
- raise Exception("Unexpected HTTP response code to " + url + ": "
- + str(response_code))
-
- register_fetch_source(what,
- url,
- response_json['relays_published'],
- response_json['version'])
-
- return response_json
-
-def fetch(what, **kwargs):
- #x = onionoo_fetch(what, **kwargs)
- # don't use sort_keys, as the order of or_addresses is significant
- #print json.dumps(x, indent=4, separators=(',', ': '))
- #sys.exit(0)
-
- return onionoo_fetch(what, **kwargs)
-
-## Fallback Candidate Class
-
-class Candidate(object):
- CUTOFF_ADDRESS_AND_PORT_STABLE = (datetime.datetime.utcnow()
- - datetime.timedelta(ADDRESS_AND_PORT_STABLE_DAYS))
-
- def __init__(self, details):
- for f in ['fingerprint', 'nickname', 'last_changed_address_or_port',
- 'consensus_weight', 'or_addresses', 'dir_address']:
- if not f in details: raise Exception("Document has no %s field."%(f,))
-
- if not 'contact' in details:
- details['contact'] = None
- if not 'flags' in details or details['flags'] is None:
- details['flags'] = []
- if (not 'advertised_bandwidth' in details
- or details['advertised_bandwidth'] is None):
- # relays without advertised bandwidth have it calculated from their
- # consensus weight
- details['advertised_bandwidth'] = 0
- if (not 'effective_family' in details
- or details['effective_family'] is None):
- details['effective_family'] = []
- if not 'platform' in details:
- details['platform'] = None
- details['last_changed_address_or_port'] = parse_ts(
- details['last_changed_address_or_port'])
- self._data = details
- self._stable_sort_or_addresses()
-
- self._fpr = self._data['fingerprint']
- self._running = self._guard = self._v2dir = 0.
- self._split_dirport()
- self._compute_orport()
- if self.orport is None:
- raise Exception("Failed to get an orport for %s."%(self._fpr,))
- self._compute_ipv6addr()
- if not self.has_ipv6():
- logging.debug("Failed to get an ipv6 address for %s."%(self._fpr,))
- self._compute_version()
- self._extra_info_cache = None
-
- def _stable_sort_or_addresses(self):
- # replace self._data['or_addresses'] with a stable ordering,
- # sorting the secondary addresses in string order
- # leave the received order in self._data['or_addresses_raw']
- self._data['or_addresses_raw'] = self._data['or_addresses']
- or_address_primary = self._data['or_addresses'][:1]
- # subsequent entries in the or_addresses array are in an arbitrary order
- # so we stabilise the addresses by sorting them in string order
- or_addresses_secondaries_stable = sorted(self._data['or_addresses'][1:])
- or_addresses_stable = or_address_primary + or_addresses_secondaries_stable
- self._data['or_addresses'] = or_addresses_stable
-
- def get_fingerprint(self):
- return self._fpr
-
- # is_valid_ipv[46]_address by gsathya, karsten, 2013
- @staticmethod
- def is_valid_ipv4_address(address):
- if not isinstance(address, (str, unicode)):
- return False
-
- # check if there are four period separated values
- if address.count(".") != 3:
- return False
-
- # checks that each value in the octet are decimal values between 0-255
- for entry in address.split("."):
- if not entry.isdigit() or int(entry) < 0 or int(entry) > 255:
- return False
- elif entry[0] == "0" and len(entry) > 1:
- return False # leading zeros, for instance in "1.2.3.001"
-
- return True
-
- @staticmethod
- def is_valid_ipv6_address(address):
- if not isinstance(address, (str, unicode)):
- return False
-
- # remove brackets
- address = address[1:-1]
-
- # addresses are made up of eight colon separated groups of four hex digits
- # with leading zeros being optional
- # https://en.wikipedia.org/wiki/IPv6#Address_format
-
- colon_count = address.count(":")
-
- if colon_count > 7:
- return False # too many groups
- elif colon_count != 7 and not "::" in address:
- return False # not enough groups and none are collapsed
- elif address.count("::") > 1 or ":::" in address:
- return False # multiple groupings of zeros can't be collapsed
-
- found_ipv4_on_previous_entry = False
- for entry in address.split(":"):
- # If an IPv6 address has an embedded IPv4 address,
- # it must be the last entry
- if found_ipv4_on_previous_entry:
- return False
- if not re.match("^[0-9a-fA-f]{0,4}$", entry):
- if not Candidate.is_valid_ipv4_address(entry):
- return False
- else:
- found_ipv4_on_previous_entry = True
-
- return True
-
- def _split_dirport(self):
- # Split the dir_address into dirip and dirport
- (self.dirip, _dirport) = self._data['dir_address'].split(':', 2)
- self.dirport = int(_dirport)
-
- def _compute_orport(self):
- # Choose the first ORPort that's on the same IPv4 address as the DirPort.
- # In rare circumstances, this might not be the primary ORPort address.
- # However, _stable_sort_or_addresses() ensures we choose the same one
- # every time, even if onionoo changes the order of the secondaries.
- self._split_dirport()
- self.orport = None
- for i in self._data['or_addresses']:
- if i != self._data['or_addresses'][0]:
- logging.debug('Secondary IPv4 Address Used for %s: %s'%(self._fpr, i))
- (ipaddr, port) = i.rsplit(':', 1)
- if (ipaddr == self.dirip) and Candidate.is_valid_ipv4_address(ipaddr):
- self.orport = int(port)
- return
-
- def _compute_ipv6addr(self):
- # Choose the first IPv6 address that uses the same port as the ORPort
- # Or, choose the first IPv6 address in the list
- # _stable_sort_or_addresses() ensures we choose the same IPv6 address
- # every time, even if onionoo changes the order of the secondaries.
- self.ipv6addr = None
- self.ipv6orport = None
- # Choose the first IPv6 address that uses the same port as the ORPort
- for i in self._data['or_addresses']:
- (ipaddr, port) = i.rsplit(':', 1)
- if (port == self.orport) and Candidate.is_valid_ipv6_address(ipaddr):
- self.ipv6addr = ipaddr
- self.ipv6orport = int(port)
- return
- # Choose the first IPv6 address in the list
- for i in self._data['or_addresses']:
- (ipaddr, port) = i.rsplit(':', 1)
- if Candidate.is_valid_ipv6_address(ipaddr):
- self.ipv6addr = ipaddr
- self.ipv6orport = int(port)
- return
-
- def _compute_version(self):
- # parse the version out of the platform string
- # The platform looks like: "Tor 0.2.7.6 on Linux"
- self._data['version'] = None
- if self._data['platform'] is None:
- return
- # be tolerant of weird whitespacing, use a whitespace split
- tokens = self._data['platform'].split()
- for token in tokens:
- vnums = token.split('.')
- # if it's at least a.b.c.d, with potentially an -alpha-dev, -alpha, -rc
- if (len(vnums) >= 4 and vnums[0].isdigit() and vnums[1].isdigit() and
- vnums[2].isdigit()):
- self._data['version'] = token
- return
-
- # From #20509
- # bug #20499 affects versions from 0.2.9.1-alpha-dev to 0.2.9.4-alpha-dev
- # and version 0.3.0.0-alpha-dev
- # Exhaustive lists are hard to get wrong
- STALE_CONSENSUS_VERSIONS = ['0.2.9.1-alpha-dev',
- '0.2.9.2-alpha',
- '0.2.9.2-alpha-dev',
- '0.2.9.3-alpha',
- '0.2.9.3-alpha-dev',
- '0.2.9.4-alpha',
- '0.2.9.4-alpha-dev',
- '0.3.0.0-alpha-dev'
- ]
-
- def is_valid_version(self):
- # call _compute_version before calling this
- # is the version of the relay a version we want as a fallback?
- # checks both recommended versions and bug #20499 / #20509
- #
- # if the relay doesn't have a recommended version field, exclude the relay
- if not self._data.has_key('recommended_version'):
- log_excluded('%s not a candidate: no recommended_version field',
- self._fpr)
- return False
- if not self._data['recommended_version']:
- log_excluded('%s not a candidate: version not recommended', self._fpr)
- return False
- # if the relay doesn't have version field, exclude the relay
- if not self._data.has_key('version'):
- log_excluded('%s not a candidate: no version field', self._fpr)
- return False
- if self._data['version'] in Candidate.STALE_CONSENSUS_VERSIONS:
- logging.warning('%s not a candidate: version delivers stale consensuses',
- self._fpr)
- return False
- return True
-
- @staticmethod
- def _extract_generic_history(history, which='unknown'):
- # given a tree like this:
- # {
- # "1_month": {
- # "count": 187,
- # "factor": 0.001001001001001001,
- # "first": "2015-02-27 06:00:00",
- # "interval": 14400,
- # "last": "2015-03-30 06:00:00",
- # "values": [
- # 999,
- # 999
- # ]
- # },
- # "1_week": {
- # "count": 169,
- # "factor": 0.001001001001001001,
- # "first": "2015-03-23 07:30:00",
- # "interval": 3600,
- # "last": "2015-03-30 07:30:00",
- # "values": [ ...]
- # },
- # "1_year": {
- # "count": 177,
- # "factor": 0.001001001001001001,
- # "first": "2014-04-11 00:00:00",
- # "interval": 172800,
- # "last": "2015-03-29 00:00:00",
- # "values": [ ...]
- # },
- # "3_months": {
- # "count": 185,
- # "factor": 0.001001001001001001,
- # "first": "2014-12-28 06:00:00",
- # "interval": 43200,
- # "last": "2015-03-30 06:00:00",
- # "values": [ ...]
- # }
- # },
- # extract exactly one piece of data per time interval,
- # using smaller intervals where available.
- #
- # returns list of (age, length, value) dictionaries.
-
- generic_history = []
-
- periods = history.keys()
- periods.sort(key = lambda x: history[x]['interval'])
- now = datetime.datetime.utcnow()
- newest = now
- for p in periods:
- h = history[p]
- interval = datetime.timedelta(seconds = h['interval'])
- this_ts = parse_ts(h['last'])
-
- if (len(h['values']) != h['count']):
- logging.warning('Inconsistent value count in %s document for %s'
- %(p, which))
- for v in reversed(h['values']):
- if (this_ts <= newest):
- agt1 = now - this_ts
- agt2 = interval
- agetmp1 = (agt1.microseconds + (agt1.seconds + agt1.days * 24 * 3600)
- * 10**6) / 10**6
- agetmp2 = (agt2.microseconds + (agt2.seconds + agt2.days * 24 * 3600)
- * 10**6) / 10**6
- generic_history.append(
- { 'age': agetmp1,
- 'length': agetmp2,
- 'value': v
- })
- newest = this_ts
- this_ts -= interval
-
- if (this_ts + interval != parse_ts(h['first'])):
- logging.warning('Inconsistent time information in %s document for %s'
- %(p, which))
-
- #print json.dumps(generic_history, sort_keys=True,
- # indent=4, separators=(',', ': '))
- return generic_history
-
- @staticmethod
- def _avg_generic_history(generic_history):
- a = []
- for i in generic_history:
- if i['age'] > (ADDRESS_AND_PORT_STABLE_DAYS * 24 * 3600):
- continue
- if (i['length'] is not None
- and i['age'] is not None
- and i['value'] is not None):
- w = i['length'] * math.pow(AGE_ALPHA, i['age']/(3600*24))
- a.append( (i['value'] * w, w) )
-
- sv = math.fsum(map(lambda x: x[0], a))
- sw = math.fsum(map(lambda x: x[1], a))
-
- if sw == 0.0:
- svw = 0.0
- else:
- svw = sv/sw
- return svw
-
- def _add_generic_history(self, history):
- periods = r['read_history'].keys()
- periods.sort(key = lambda x: r['read_history'][x]['interval'] )
-
- print periods
-
- def add_running_history(self, history):
- pass
-
- def add_uptime(self, uptime):
- logging.debug('Adding uptime %s.'%(self._fpr,))
-
- # flags we care about: Running, V2Dir, Guard
- if not 'flags' in uptime:
- logging.debug('No flags in document for %s.'%(self._fpr,))
- return
-
- for f in ['Running', 'Guard', 'V2Dir']:
- if not f in uptime['flags']:
- logging.debug('No %s in flags for %s.'%(f, self._fpr,))
- return
-
- running = self._extract_generic_history(uptime['flags']['Running'],
- '%s-Running'%(self._fpr))
- guard = self._extract_generic_history(uptime['flags']['Guard'],
- '%s-Guard'%(self._fpr))
- v2dir = self._extract_generic_history(uptime['flags']['V2Dir'],
- '%s-V2Dir'%(self._fpr))
- if 'BadExit' in uptime['flags']:
- badexit = self._extract_generic_history(uptime['flags']['BadExit'],
- '%s-BadExit'%(self._fpr))
-
- self._running = self._avg_generic_history(running) / ONIONOO_SCALE_ONE
- self._guard = self._avg_generic_history(guard) / ONIONOO_SCALE_ONE
- self._v2dir = self._avg_generic_history(v2dir) / ONIONOO_SCALE_ONE
- self._badexit = None
- if 'BadExit' in uptime['flags']:
- self._badexit = self._avg_generic_history(badexit) / ONIONOO_SCALE_ONE
-
- def is_candidate(self):
- try:
- if (MUST_BE_RUNNING_NOW and not self.is_running()):
- log_excluded('%s not a candidate: not running now, unable to check ' +
- 'DirPort consensus download', self._fpr)
- return False
- if (self._data['last_changed_address_or_port'] >
- self.CUTOFF_ADDRESS_AND_PORT_STABLE):
- log_excluded('%s not a candidate: changed address/port recently (%s)',
- self._fpr, self._data['last_changed_address_or_port'])
- return False
- if self._running < CUTOFF_RUNNING:
- log_excluded('%s not a candidate: running avg too low (%lf)',
- self._fpr, self._running)
- return False
- if self._v2dir < CUTOFF_V2DIR:
- log_excluded('%s not a candidate: v2dir avg too low (%lf)',
- self._fpr, self._v2dir)
- return False
- if self._badexit is not None and self._badexit > PERMITTED_BADEXIT:
- log_excluded('%s not a candidate: badexit avg too high (%lf)',
- self._fpr, self._badexit)
- return False
- # this function logs a message depending on which check fails
- if not self.is_valid_version():
- return False
- if self._guard < CUTOFF_GUARD:
- log_excluded('%s not a candidate: guard avg too low (%lf)',
- self._fpr, self._guard)
- return False
- if (not self._data.has_key('consensus_weight')
- or self._data['consensus_weight'] < 1):
- log_excluded('%s not a candidate: consensus weight invalid', self._fpr)
- return False
- except BaseException as e:
- logging.warning("Exception %s when checking if fallback is a candidate",
- str(e))
- return False
- return True
-
- def is_in_whitelist(self, relaylist):
- """ A fallback matches if each key in the whitelist line matches:
- ipv4
- dirport
- orport
- id
- ipv6 address and port (if present)
- If the fallback has an ipv6 key, the whitelist line must also have
- it, and vice versa, otherwise they don't match. """
- ipv6 = None
- if self.has_ipv6():
- ipv6 = '%s:%d'%(self.ipv6addr, self.ipv6orport)
- for entry in relaylist:
- if entry['id'] != self._fpr:
- # can't log here unless we match an IP and port, because every relay's
- # fingerprint is compared to every entry's fingerprint
- if entry['ipv4'] == self.dirip and int(entry['orport']) == self.orport:
- logging.warning('%s excluded: has OR %s:%d changed fingerprint to ' +
- '%s?', entry['id'], self.dirip, self.orport,
- self._fpr)
- if self.has_ipv6() and entry.has_key('ipv6') and entry['ipv6'] == ipv6:
- logging.warning('%s excluded: has OR %s changed fingerprint to ' +
- '%s?', entry['id'], ipv6, self._fpr)
- continue
- if entry['ipv4'] != self.dirip:
- logging.warning('%s excluded: has it changed IPv4 from %s to %s?',
- self._fpr, entry['ipv4'], self.dirip)
- continue
- if int(entry['dirport']) != self.dirport:
- logging.warning('%s excluded: has it changed DirPort from %s:%d to ' +
- '%s:%d?', self._fpr, self.dirip, int(entry['dirport']),
- self.dirip, self.dirport)
- continue
- if int(entry['orport']) != self.orport:
- logging.warning('%s excluded: has it changed ORPort from %s:%d to ' +
- '%s:%d?', self._fpr, self.dirip, int(entry['orport']),
- self.dirip, self.orport)
- continue
- if entry.has_key('ipv6') and self.has_ipv6():
- # if both entry and fallback have an ipv6 address, compare them
- if entry['ipv6'] != ipv6:
- logging.warning('%s excluded: has it changed IPv6 ORPort from %s ' +
- 'to %s?', self._fpr, entry['ipv6'], ipv6)
- continue
- # if the fallback has an IPv6 address but the whitelist entry
- # doesn't, or vice versa, the whitelist entry doesn't match
- elif entry.has_key('ipv6') and not self.has_ipv6():
- logging.warning('%s excluded: has it lost its former IPv6 address %s?',
- self._fpr, entry['ipv6'])
- continue
- elif not entry.has_key('ipv6') and self.has_ipv6():
- logging.warning('%s excluded: has it gained an IPv6 address %s?',
- self._fpr, ipv6)
- continue
- return True
- return False
-
- def cw_to_bw_factor(self):
- # any relays with a missing or zero consensus weight are not candidates
- # any relays with a missing advertised bandwidth have it set to zero
- return self._data['advertised_bandwidth'] / self._data['consensus_weight']
-
- # since advertised_bandwidth is reported by the relay, it can be gamed
- # to avoid this, use the median consensus weight to bandwidth factor to
- # estimate this relay's measured bandwidth, and make that the upper limit
- def measured_bandwidth(self, median_cw_to_bw_factor):
- cw_to_bw= median_cw_to_bw_factor
- # Reduce exit bandwidth to make sure we're not overloading them
- if self.is_exit():
- cw_to_bw *= EXIT_BANDWIDTH_FRACTION
- measured_bandwidth = self._data['consensus_weight'] * cw_to_bw
- if self._data['advertised_bandwidth'] != 0:
- # limit advertised bandwidth (if available) to measured bandwidth
- return min(measured_bandwidth, self._data['advertised_bandwidth'])
- else:
- return measured_bandwidth
-
- def set_measured_bandwidth(self, median_cw_to_bw_factor):
- self._data['measured_bandwidth'] = self.measured_bandwidth(
- median_cw_to_bw_factor)
-
- def is_exit(self):
- return 'Exit' in self._data['flags']
-
- def is_guard(self):
- return 'Guard' in self._data['flags']
-
- def is_running(self):
- return 'Running' in self._data['flags']
-
- # does this fallback have an IPv6 address and orport?
- def has_ipv6(self):
- return self.ipv6addr is not None and self.ipv6orport is not None
-
- # strip leading and trailing brackets from an IPv6 address
- # safe to use on non-bracketed IPv6 and on IPv4 addresses
- # also convert to unicode, and make None appear as ''
- @staticmethod
- def strip_ipv6_brackets(ip):
- if ip is None:
- return unicode('')
- if len(ip) < 2:
- return unicode(ip)
- if ip[0] == '[' and ip[-1] == ']':
- return unicode(ip[1:-1])
- return unicode(ip)
-
- # are ip_a and ip_b in the same netblock?
- # mask_bits is the size of the netblock
- # takes both IPv4 and IPv6 addresses
- # the versions of ip_a and ip_b must be the same
- # the mask must be valid for the IP version
- @staticmethod
- def netblocks_equal(ip_a, ip_b, mask_bits):
- if ip_a is None or ip_b is None:
- return False
- ip_a = Candidate.strip_ipv6_brackets(ip_a)
- ip_b = Candidate.strip_ipv6_brackets(ip_b)
- a = ipaddress.ip_address(ip_a)
- b = ipaddress.ip_address(ip_b)
- if a.version != b.version:
- raise Exception('Mismatching IP versions in %s and %s'%(ip_a, ip_b))
- if mask_bits > a.max_prefixlen:
- logging.error('Bad IP mask %d for %s and %s'%(mask_bits, ip_a, ip_b))
- mask_bits = a.max_prefixlen
- if mask_bits < 0:
- logging.error('Bad IP mask %d for %s and %s'%(mask_bits, ip_a, ip_b))
- mask_bits = 0
- a_net = ipaddress.ip_network('%s/%d'%(ip_a, mask_bits), strict=False)
- return b in a_net
-
- # is this fallback's IPv4 address (dirip) in the same netblock as other's
- # IPv4 address?
- # mask_bits is the size of the netblock
- def ipv4_netblocks_equal(self, other, mask_bits):
- return Candidate.netblocks_equal(self.dirip, other.dirip, mask_bits)
-
- # is this fallback's IPv6 address (ipv6addr) in the same netblock as
- # other's IPv6 address?
- # Returns False if either fallback has no IPv6 address
- # mask_bits is the size of the netblock
- def ipv6_netblocks_equal(self, other, mask_bits):
- if not self.has_ipv6() or not other.has_ipv6():
- return False
- return Candidate.netblocks_equal(self.ipv6addr, other.ipv6addr, mask_bits)
-
- # is this fallback's IPv4 DirPort the same as other's IPv4 DirPort?
- def dirport_equal(self, other):
- return self.dirport == other.dirport
-
- # is this fallback's IPv4 ORPort the same as other's IPv4 ORPort?
- def ipv4_orport_equal(self, other):
- return self.orport == other.orport
-
- # is this fallback's IPv6 ORPort the same as other's IPv6 ORPort?
- # Returns False if either fallback has no IPv6 address
- def ipv6_orport_equal(self, other):
- if not self.has_ipv6() or not other.has_ipv6():
- return False
- return self.ipv6orport == other.ipv6orport
-
- # does this fallback have the same DirPort, IPv4 ORPort, or
- # IPv6 ORPort as other?
- # Ignores IPv6 ORPort if either fallback has no IPv6 address
- def port_equal(self, other):
- return (self.dirport_equal(other) or self.ipv4_orport_equal(other)
- or self.ipv6_orport_equal(other))
-
- # return a list containing IPv4 ORPort, DirPort, and IPv6 ORPort (if present)
- def port_list(self):
- ports = [self.dirport, self.orport]
- if self.has_ipv6() and not self.ipv6orport in ports:
- ports.append(self.ipv6orport)
- return ports
-
- # does this fallback share a port with other, regardless of whether the
- # port types match?
- # For example, if self's IPv4 ORPort is 80 and other's DirPort is 80,
- # return True
- def port_shared(self, other):
- for p in self.port_list():
- if p in other.port_list():
- return True
- return False
-
- # log how long it takes to download a consensus from dirip:dirport
- # returns True if the download failed, False if it succeeded within max_time
- @staticmethod
- def fallback_consensus_download_speed(dirip, dirport, nickname, fingerprint,
- max_time):
- download_failed = False
- # some directory mirrors respond to requests in ways that hang python
- # sockets, which is why we log this line here
- logging.info('Initiating %sconsensus download from %s (%s:%d) %s.',
- 'microdesc ' if DOWNLOAD_MICRODESC_CONSENSUS else '',
- nickname, dirip, dirport, fingerprint)
- # there appears to be about 1 second of overhead when comparing stem's
- # internal trace time and the elapsed time calculated here
- TIMEOUT_SLOP = 1.0
- start = datetime.datetime.utcnow()
- try:
- consensus = get_consensus(
- endpoints = [(dirip, dirport)],
- timeout = (max_time + TIMEOUT_SLOP),
- validate = True,
- retries = 0,
- fall_back_to_authority = False,
- document_handler = DocumentHandler.BARE_DOCUMENT,
- microdescriptor = DOWNLOAD_MICRODESC_CONSENSUS
- ).run()[0]
- end = datetime.datetime.utcnow()
- time_since_expiry = (end - consensus.valid_until).total_seconds()
- except Exception, stem_error:
- end = datetime.datetime.utcnow()
- log_excluded('Unable to retrieve a consensus from %s: %s', nickname,
- stem_error)
- status = 'error: "%s"' % (stem_error)
- level = logging.WARNING
- download_failed = True
- elapsed = (end - start).total_seconds()
- if download_failed:
- # keep the error failure status, and avoid using the variables
- pass
- elif elapsed > max_time:
- status = 'too slow'
- level = logging.WARNING
- download_failed = True
- elif (time_since_expiry > 0):
- status = 'outdated consensus, expired %ds ago'%(int(time_since_expiry))
- if time_since_expiry <= CONSENSUS_EXPIRY_TOLERANCE:
- status += ', tolerating up to %ds'%(CONSENSUS_EXPIRY_TOLERANCE)
- level = logging.INFO
- else:
- status += ', invalid'
- level = logging.WARNING
- download_failed = True
- else:
- status = 'ok'
- level = logging.DEBUG
- logging.log(level, 'Consensus download: %0.1fs %s from %s (%s:%d) %s, ' +
- 'max download time %0.1fs.', elapsed, status, nickname,
- dirip, dirport, fingerprint, max_time)
- return download_failed
-
- # does this fallback download the consensus fast enough?
- def check_fallback_download_consensus(self):
- # include the relay if we're not doing a check, or we can't check (IPv6)
- ipv4_failed = False
- ipv6_failed = False
- if PERFORM_IPV4_DIRPORT_CHECKS:
- ipv4_failed = Candidate.fallback_consensus_download_speed(self.dirip,
- self.dirport,
- self._data['nickname'],
- self._fpr,
- CONSENSUS_DOWNLOAD_SPEED_MAX)
- if self.has_ipv6() and PERFORM_IPV6_DIRPORT_CHECKS:
- # Clients assume the IPv6 DirPort is the same as the IPv4 DirPort
- ipv6_failed = Candidate.fallback_consensus_download_speed(self.ipv6addr,
- self.dirport,
- self._data['nickname'],
- self._fpr,
- CONSENSUS_DOWNLOAD_SPEED_MAX)
- return ((not ipv4_failed) and (not ipv6_failed))
-
- # if this fallback has not passed a download check, try it again,
- # and record the result, available in get_fallback_download_consensus
- def try_fallback_download_consensus(self):
- if not self.get_fallback_download_consensus():
- self._data['download_check'] = self.check_fallback_download_consensus()
-
- # did this fallback pass the download check?
- def get_fallback_download_consensus(self):
- # if we're not performing checks, return True
- if not PERFORM_IPV4_DIRPORT_CHECKS and not PERFORM_IPV6_DIRPORT_CHECKS:
- return True
- # if we are performing checks, but haven't done one, return False
- if not self._data.has_key('download_check'):
- return False
- return self._data['download_check']
-
- # output an optional header comment and info for this fallback
- # try_fallback_download_consensus before calling this
- def fallbackdir_line(self, fallbacks, prefilter_fallbacks):
- s = ''
- if OUTPUT_COMMENTS:
- s += self.fallbackdir_comment(fallbacks, prefilter_fallbacks)
- # if the download speed is ok, output a C string
- # if it's not, but we OUTPUT_COMMENTS, output a commented-out C string
- if self.get_fallback_download_consensus() or OUTPUT_COMMENTS:
- s += self.fallbackdir_info(self.get_fallback_download_consensus())
- return s
-
- # output a header comment for this fallback
- def fallbackdir_comment(self, fallbacks, prefilter_fallbacks):
- # /*
- # nickname
- # flags
- # adjusted bandwidth, consensus weight
- # [contact]
- # [identical contact counts]
- # */
- # Multiline C comment
- s = '/*'
- s += '\n'
- s += cleanse_c_multiline_comment(self._data['nickname'])
- s += '\n'
- s += 'Flags: '
- s += cleanse_c_multiline_comment(' '.join(sorted(self._data['flags'])))
- s += '\n'
- # this is an adjusted bandwidth, see calculate_measured_bandwidth()
- bandwidth = self._data['measured_bandwidth']
- weight = self._data['consensus_weight']
- s += 'Bandwidth: %.1f MByte/s, Consensus Weight: %d'%(
- bandwidth/(1024.0*1024.0),
- weight)
- s += '\n'
- if self._data['contact'] is not None:
- s += cleanse_c_multiline_comment(self._data['contact'])
- if CONTACT_COUNT:
- fallback_count = len([f for f in fallbacks
- if f._data['contact'] == self._data['contact']])
- if fallback_count > 1:
- s += '\n'
- s += '%d identical contacts listed' % (fallback_count)
-
- # output the fallback info C string for this fallback
- # this is the text that would go after FallbackDir in a torrc
- # if this relay failed the download test and we OUTPUT_COMMENTS,
- # comment-out the returned string
- def fallbackdir_info(self, dl_speed_ok):
- # "address:dirport orport=port id=fingerprint"
- # (insert additional madatory fields here)
- # "[ipv6=addr:orport]"
- # (insert additional optional fields here)
- # /* nickname=name */
- # /* extrainfo={0,1} */
- # (insert additional comment fields here)
- # /* ===== */
- # ,
- #
- # Do we want a C string, or a commented-out string?
- c_string = dl_speed_ok
- comment_string = not dl_speed_ok and OUTPUT_COMMENTS
- # If we don't want either kind of string, bail
- if not c_string and not comment_string:
- return ''
- s = ''
- # Comment out the fallback directory entry if it's too slow
- # See the debug output for which address and port is failing
- if comment_string:
- s += '/* Consensus download failed or was too slow:\n'
- # Multi-Line C string with trailing comma (part of a string list)
- # This makes it easier to diff the file, and remove IPv6 lines using grep
- # Integers don't need escaping
- s += '"%s orport=%d id=%s"'%(
- cleanse_c_string(self._data['dir_address']),
- self.orport,
- cleanse_c_string(self._fpr))
- s += '\n'
- # (insert additional madatory fields here)
- if self.has_ipv6():
- s += '" ipv6=%s:%d"'%(cleanse_c_string(self.ipv6addr), self.ipv6orport)
- s += '\n'
- # (insert additional optional fields here)
- if not comment_string:
- s += '/* '
- s += 'nickname=%s'%(cleanse_c_string(self._data['nickname']))
- if not comment_string:
- s += ' */'
- s += '\n'
- # if we know that the fallback is an extrainfo cache, flag it
- # and if we don't know, assume it is not
- if not comment_string:
- s += '/* '
- s += 'extrainfo=%d'%(1 if self._extra_info_cache else 0)
- if not comment_string:
- s += ' */'
- s += '\n'
- # (insert additional comment fields here)
- # The terminator and comma must be the last line in each fallback entry
- if not comment_string:
- s += '/* '
- s += SECTION_SEPARATOR_BASE
- if not comment_string:
- s += ' */'
- s += '\n'
- s += ','
- if comment_string:
- s += '\n'
- s += '*/'
- return s
-
-## Fallback Candidate List Class
-
-class CandidateList(dict):
- def __init__(self):
- pass
-
- def _add_relay(self, details):
- if not 'dir_address' in details: return
- c = Candidate(details)
- self[ c.get_fingerprint() ] = c
-
- def _add_uptime(self, uptime):
- try:
- fpr = uptime['fingerprint']
- except KeyError:
- raise Exception("Document has no fingerprint field.")
-
- try:
- c = self[fpr]
- except KeyError:
- logging.debug('Got unknown relay %s in uptime document.'%(fpr,))
- return
-
- c.add_uptime(uptime)
-
- def _add_details(self):
- logging.debug('Loading details document.')
- d = fetch('details',
- fields=('fingerprint,nickname,contact,last_changed_address_or_port,' +
- 'consensus_weight,advertised_bandwidth,or_addresses,' +
- 'dir_address,recommended_version,flags,effective_family,' +
- 'platform'))
- logging.debug('Loading details document done.')
-
- if not 'relays' in d: raise Exception("No relays found in document.")
-
- for r in d['relays']: self._add_relay(r)
-
- def _add_uptimes(self):
- logging.debug('Loading uptime document.')
- d = fetch('uptime')
- logging.debug('Loading uptime document done.')
-
- if not 'relays' in d: raise Exception("No relays found in document.")
- for r in d['relays']: self._add_uptime(r)
-
- def add_relays(self):
- self._add_details()
- self._add_uptimes()
-
- def count_guards(self):
- guard_count = 0
- for fpr in self.keys():
- if self[fpr].is_guard():
- guard_count += 1
- return guard_count
-
- # Find fallbacks that fit the uptime, stability, and flags criteria,
- # and make an array of them in self.fallbacks
- def compute_fallbacks(self):
- self.fallbacks = map(lambda x: self[x],
- filter(lambda x: self[x].is_candidate(),
- self.keys()))
-
- # sort fallbacks by their consensus weight to advertised bandwidth factor,
- # lowest to highest
- # used to find the median cw_to_bw_factor()
- def sort_fallbacks_by_cw_to_bw_factor(self):
- self.fallbacks.sort(key=lambda f: f.cw_to_bw_factor())
-
- # sort fallbacks by their measured bandwidth, highest to lowest
- # calculate_measured_bandwidth before calling this
- # this is useful for reviewing candidates in priority order
- def sort_fallbacks_by_measured_bandwidth(self):
- self.fallbacks.sort(key=lambda f: f._data['measured_bandwidth'],
- reverse=True)
-
- # sort fallbacks by the data field data_field, lowest to highest
- def sort_fallbacks_by(self, data_field):
- self.fallbacks.sort(key=lambda f: f._data[data_field])
-
- @staticmethod
- def load_relaylist(file_obj):
- """ Read each line in the file, and parse it like a FallbackDir line:
- an IPv4 address and optional port:
- <IPv4 address>:<port>
- which are parsed into dictionary entries:
- ipv4=<IPv4 address>
- dirport=<port>
- followed by a series of key=value entries:
- orport=<port>
- id=<fingerprint>
- ipv6=<IPv6 address>:<IPv6 orport>
-        each line's key/value pairs are placed in a dictionary,
- (of string -> string key/value pairs),
- and these dictionaries are placed in an array.
- comments start with # and are ignored """
- file_data = file_obj['data']
- file_name = file_obj['name']
- relaylist = []
- if file_data is None:
- return relaylist
- for line in file_data.split('\n'):
- relay_entry = {}
- # ignore comments
- line_comment_split = line.split('#')
- line = line_comment_split[0]
- # cleanup whitespace
- line = cleanse_whitespace(line)
- line = line.strip()
- if len(line) == 0:
- continue
- for item in line.split(' '):
- item = item.strip()
- if len(item) == 0:
- continue
- key_value_split = item.split('=')
- kvl = len(key_value_split)
- if kvl < 1 or kvl > 2:
- print '#error Bad %s item: %s, format is key=value.'%(
- file_name, item)
- if kvl == 1:
- # assume that entries without a key are the ipv4 address,
- # perhaps with a dirport
- ipv4_maybe_dirport = key_value_split[0]
- ipv4_maybe_dirport_split = ipv4_maybe_dirport.split(':')
- dirl = len(ipv4_maybe_dirport_split)
- if dirl < 1 or dirl > 2:
- print '#error Bad %s IPv4 item: %s, format is ipv4:port.'%(
- file_name, item)
- if dirl >= 1:
- relay_entry['ipv4'] = ipv4_maybe_dirport_split[0]
- if dirl == 2:
- relay_entry['dirport'] = ipv4_maybe_dirport_split[1]
- elif kvl == 2:
- relay_entry[key_value_split[0]] = key_value_split[1]
- relaylist.append(relay_entry)
- return relaylist
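
Since the docstring above fully specifies the line format, the parsing rules can be illustrated standalone. This is a simplified sketch, not the method's own code, and the sample line is made up:

    def parse_relay_line(line):
        entry = {}
        line = line.split('#')[0].strip()       # comments start with #
        for item in line.split():
            if '=' in item:                     # key=value entries
                key, value = item.split('=', 1)
                entry[key] = value
            else:                               # bare item: ipv4[:dirport]
                addr_port = item.split(':')
                entry['ipv4'] = addr_port[0]
                if len(addr_port) == 2:
                    entry['dirport'] = addr_port[1]
        return entry

    print(parse_relay_line('203.0.113.1:80 orport=443 id=ABCD1234 # example'))
    # {'ipv4': '203.0.113.1', 'dirport': '80', 'orport': '443', 'id': 'ABCD1234'}
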
-
- # apply the fallback whitelist
- def apply_filter_lists(self, whitelist_obj):
- excluded_count = 0
- logging.debug('Applying whitelist')
- # parse the whitelist
- whitelist = self.load_relaylist(whitelist_obj)
- filtered_fallbacks = []
- for f in self.fallbacks:
- in_whitelist = f.is_in_whitelist(whitelist)
- if in_whitelist:
- # include
- filtered_fallbacks.append(f)
- elif INCLUDE_UNLISTED_ENTRIES:
- # include
- filtered_fallbacks.append(f)
- else:
- # exclude
- excluded_count += 1
- log_excluded('Excluding %s: not in whitelist.',
- f._fpr)
- self.fallbacks = filtered_fallbacks
- return excluded_count
-
- @staticmethod
- def summarise_filters(initial_count, excluded_count):
- return '/* Whitelist excluded %d of %d candidates. */'%(
- excluded_count, initial_count)
-
- # calculate each fallback's measured bandwidth based on the median
- # consensus weight to advertised bandwidth ratio
- def calculate_measured_bandwidth(self):
- self.sort_fallbacks_by_cw_to_bw_factor()
- median_fallback = self.fallback_median(True)
- if median_fallback is not None:
- median_cw_to_bw_factor = median_fallback.cw_to_bw_factor()
- else:
- # this will never be used, because there are no fallbacks
- median_cw_to_bw_factor = None
- for f in self.fallbacks:
- f.set_measured_bandwidth(median_cw_to_bw_factor)
-
- # remove relays with low measured bandwidth from the fallback list
- # calculate_measured_bandwidth for each relay before calling this
- def remove_low_bandwidth_relays(self):
- if MIN_BANDWIDTH is None:
- return
- above_min_bw_fallbacks = []
- for f in self.fallbacks:
- if f._data['measured_bandwidth'] >= MIN_BANDWIDTH:
- above_min_bw_fallbacks.append(f)
- else:
- # the bandwidth we log here is limited by the relay's consensus weight
-        # as well as its advertised bandwidth. See set_measured_bandwidth
- # for details
- log_excluded('%s not a candidate: bandwidth %.1fMByte/s too low, ' +
- 'must be at least %.1fMByte/s', f._fpr,
- f._data['measured_bandwidth']/(1024.0*1024.0),
- MIN_BANDWIDTH/(1024.0*1024.0))
- self.fallbacks = above_min_bw_fallbacks
-
- # the minimum fallback in the list
- # call one of the sort_fallbacks_* functions before calling this
- def fallback_min(self):
- if len(self.fallbacks) > 0:
- return self.fallbacks[-1]
- else:
- return None
-
- # the median fallback in the list
- # call one of the sort_fallbacks_* functions before calling this
- def fallback_median(self, require_advertised_bandwidth):
-    # use the low-median when there is an even number of fallbacks,
- # for consistency with the bandwidth authorities
- if len(self.fallbacks) > 0:
- median_position = (len(self.fallbacks) - 1) / 2
- if not require_advertised_bandwidth:
- return self.fallbacks[median_position]
- # if we need advertised_bandwidth but this relay doesn't have it,
- # move to a fallback with greater consensus weight until we find one
- while not self.fallbacks[median_position]._data['advertised_bandwidth']:
- median_position += 1
- if median_position >= len(self.fallbacks):
- return None
- return self.fallbacks[median_position]
- else:
- return None
-
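
The low-median choice is easiest to see with a made-up even-length list; '//' is used below so the arithmetic also holds outside the script's Python 2, where plain '/' already floors:

    fallbacks = ['A', 'B', 'C', 'D']             # already sorted
    median_position = (len(fallbacks) - 1) // 2  # == 1
    print(fallbacks[median_position])            # 'B', the lower middle entry
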
- # the maximum fallback in the list
- # call one of the sort_fallbacks_* functions before calling this
- def fallback_max(self):
- if len(self.fallbacks) > 0:
- return self.fallbacks[0]
- else:
- return None
-
- # return a new bag suitable for storing attributes
- @staticmethod
- def attribute_new():
- return dict()
-
- # get the count of attribute in attribute_bag
- # if attribute is None or the empty string, return 0
- @staticmethod
- def attribute_count(attribute, attribute_bag):
- if attribute is None or attribute == '':
- return 0
- if attribute not in attribute_bag:
- return 0
- return attribute_bag[attribute]
-
- # does attribute_bag contain more than max_count instances of attribute?
- # if so, return False
- # if not, return True
- # if attribute is None or the empty string, or max_count is invalid,
- # always return True
- @staticmethod
- def attribute_allow(attribute, attribute_bag, max_count=1):
- if attribute is None or attribute == '' or max_count <= 0:
- return True
- elif CandidateList.attribute_count(attribute, attribute_bag) >= max_count:
- return False
- else:
- return True
-
- # add attribute to attribute_bag, incrementing the count if it is already
- # present
- # if attribute is None or the empty string, or count is invalid,
- # do nothing
- @staticmethod
- def attribute_add(attribute, attribute_bag, count=1):
- if attribute is None or attribute == '' or count <= 0:
-      return
- attribute_bag.setdefault(attribute, 0)
- attribute_bag[attribute] += count
-
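
A short usage sketch of the attribute-bag helpers above, assuming the CandidateList class is in scope; the addresses are made up:

    bag = CandidateList.attribute_new()                     # {}
    CandidateList.attribute_add('198.51.100.7', bag)        # {'198.51.100.7': 1}
    CandidateList.attribute_allow('198.51.100.7', bag, 1)   # False: at the limit
    CandidateList.attribute_allow('198.51.100.8', bag, 1)   # True: unseen address
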
- # make sure there are only MAX_FALLBACKS_PER_IP fallbacks per IPv4 address,
- # and per IPv6 address
- # there is only one IPv4 address on each fallback: the IPv4 DirPort address
- # (we choose the IPv4 ORPort which is on the same IPv4 as the DirPort)
- # there is at most one IPv6 address on each fallback: the IPv6 ORPort address
- # we try to match the IPv4 ORPort, but will use any IPv6 address if needed
- # (clients only use the IPv6 ORPort)
- # if there is no IPv6 address, only the IPv4 address is checked
- # return the number of candidates we excluded
- def limit_fallbacks_same_ip(self):
- ip_limit_fallbacks = []
- ip_list = CandidateList.attribute_new()
- for f in self.fallbacks:
- if (CandidateList.attribute_allow(f.dirip, ip_list,
- MAX_FALLBACKS_PER_IPV4)
- and CandidateList.attribute_allow(f.ipv6addr, ip_list,
- MAX_FALLBACKS_PER_IPV6)):
- ip_limit_fallbacks.append(f)
- CandidateList.attribute_add(f.dirip, ip_list)
- if f.has_ipv6():
- CandidateList.attribute_add(f.ipv6addr, ip_list)
- elif not CandidateList.attribute_allow(f.dirip, ip_list,
- MAX_FALLBACKS_PER_IPV4):
- log_excluded('Eliminated %s: already have %d fallback(s) on IPv4 %s'
- %(f._fpr, CandidateList.attribute_count(f.dirip, ip_list),
- f.dirip))
- elif (f.has_ipv6() and
- not CandidateList.attribute_allow(f.ipv6addr, ip_list,
- MAX_FALLBACKS_PER_IPV6)):
- log_excluded('Eliminated %s: already have %d fallback(s) on IPv6 %s'
- %(f._fpr, CandidateList.attribute_count(f.ipv6addr,
- ip_list),
- f.ipv6addr))
- original_count = len(self.fallbacks)
- self.fallbacks = ip_limit_fallbacks
- return original_count - len(self.fallbacks)
-
- # make sure there are only MAX_FALLBACKS_PER_CONTACT fallbacks for each
- # ContactInfo
- # if there is no ContactInfo, allow the fallback
- # this check can be gamed by providing no ContactInfo, or by setting the
- # ContactInfo to match another fallback
- # However, given the likelihood that relays with the same ContactInfo will
- # go down at similar times, its usefulness outweighs the risk
- def limit_fallbacks_same_contact(self):
- contact_limit_fallbacks = []
- contact_list = CandidateList.attribute_new()
- for f in self.fallbacks:
- if CandidateList.attribute_allow(f._data['contact'], contact_list,
- MAX_FALLBACKS_PER_CONTACT):
- contact_limit_fallbacks.append(f)
- CandidateList.attribute_add(f._data['contact'], contact_list)
- else:
- log_excluded(
- 'Eliminated %s: already have %d fallback(s) on ContactInfo %s'
- %(f._fpr, CandidateList.attribute_count(f._data['contact'],
- contact_list),
- f._data['contact']))
- original_count = len(self.fallbacks)
- self.fallbacks = contact_limit_fallbacks
- return original_count - len(self.fallbacks)
-
- # make sure there are only MAX_FALLBACKS_PER_FAMILY fallbacks per effective
- # family
- # if there is no family, allow the fallback
- # we use effective family, which ensures mutual family declarations
- # but the check can be gamed by not declaring a family at all
- # if any indirect families exist, the result depends on the order in which
- # fallbacks are sorted in the list
- def limit_fallbacks_same_family(self):
- family_limit_fallbacks = []
- fingerprint_list = CandidateList.attribute_new()
- for f in self.fallbacks:
- if CandidateList.attribute_allow(f._fpr, fingerprint_list,
- MAX_FALLBACKS_PER_FAMILY):
- family_limit_fallbacks.append(f)
- CandidateList.attribute_add(f._fpr, fingerprint_list)
- for family_fingerprint in f._data['effective_family']:
- CandidateList.attribute_add(family_fingerprint, fingerprint_list)
- else:
-        # we already have a fallback whose effective family includes
-        # this relay
- log_excluded(
- 'Eliminated %s: already have %d fallback(s) in effective family'
- %(f._fpr, CandidateList.attribute_count(f._fpr, fingerprint_list)))
- original_count = len(self.fallbacks)
- self.fallbacks = family_limit_fallbacks
- return original_count - len(self.fallbacks)
-
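
The order-dependence noted above is concrete. A sketch with MAX_FALLBACKS_PER_FAMILY assumed to be 1 and made-up 40-character fingerprints: keeping the first family member charges every declared relative, so whichever relay is visited first wins.

    bag = CandidateList.attribute_new()
    CandidateList.attribute_add('X' * 40, bag)              # keep relay X...
    CandidateList.attribute_add('Y' * 40, bag)              # ...charging its family
    print(CandidateList.attribute_allow('Y' * 40, bag, 1))  # False: Y is rejected
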
- # try once to get the descriptors for fingerprint_list using stem
- # returns an empty list on exception
- @staticmethod
- def get_fallback_descriptors_once(fingerprint_list):
- desc_list = get_server_descriptors(fingerprints=fingerprint_list).run(suppress=True)
- return desc_list
-
- # try up to max_retries times to get the descriptors for fingerprint_list
- # using stem. Stops retrying when all descriptors have been retrieved.
- # returns a list containing the descriptors that were retrieved
- @staticmethod
- def get_fallback_descriptors(fingerprint_list, max_retries=5):
- # we can't use stem's retries=, because we want to support more than 96
- # descriptors
- #
- # add an attempt for every MAX_FINGERPRINTS (or part thereof) in the list
- max_retries += (len(fingerprint_list) + MAX_FINGERPRINTS - 1) / MAX_FINGERPRINTS
- remaining_list = fingerprint_list
- desc_list = []
- for _ in xrange(max_retries):
- if len(remaining_list) == 0:
- break
- new_desc_list = CandidateList.get_fallback_descriptors_once(remaining_list[0:MAX_FINGERPRINTS])
- for d in new_desc_list:
- try:
- remaining_list.remove(d.fingerprint)
- except ValueError:
- # warn and ignore if a directory mirror returned a bad descriptor
- logging.warning("Directory mirror returned unwanted descriptor %s, ignoring",
- d.fingerprint)
- continue
- desc_list.append(d)
- return desc_list
-
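
The retry budget above is ceiling division in disguise. With a made-up request size, and MAX_FINGERPRINTS assumed to be stem's 96-descriptor request limit mentioned in the comment:

    MAX_FINGERPRINTS = 96
    requested = 200
    extra_attempts = (requested + MAX_FINGERPRINTS - 1) // MAX_FINGERPRINTS
    print(extra_attempts)  # 3: one attempt per batch of up to 96 fingerprints
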
- # find the fallbacks that cache extra-info documents
- # Onionoo doesn't know this, so we have to use stem
- def mark_extra_info_caches(self):
- fingerprint_list = [ f._fpr for f in self.fallbacks ]
- logging.info("Downloading fallback descriptors to find extra-info caches")
- desc_list = CandidateList.get_fallback_descriptors(fingerprint_list)
- for d in desc_list:
- self[d.fingerprint]._extra_info_cache = d.extra_info_cache
- missing_descriptor_list = [ f._fpr for f in self.fallbacks
- if f._extra_info_cache is None ]
- for f in missing_descriptor_list:
- logging.warning("No descriptor for {}. Assuming extrainfo=0.".format(f))
-
- # try a download check on each fallback candidate in order
- # stop after max_count successful downloads
- # but don't remove any candidates from the array
- def try_download_consensus_checks(self, max_count):
- dl_ok_count = 0
- for f in self.fallbacks:
- f.try_fallback_download_consensus()
- if f.get_fallback_download_consensus():
- # this fallback downloaded a consensus ok
- dl_ok_count += 1
- if dl_ok_count >= max_count:
- # we have enough fallbacks
- return
-
- # put max_count successful candidates in the fallbacks array:
- # - perform download checks on each fallback candidate
- # - retry failed candidates if CONSENSUS_DOWNLOAD_RETRY is set
- # - eliminate failed candidates
- # - if there are more than max_count candidates, eliminate lowest bandwidth
- # - if there are fewer than max_count candidates, leave only successful
- # Return the number of fallbacks that failed the consensus check
- def perform_download_consensus_checks(self, max_count):
- self.sort_fallbacks_by_measured_bandwidth()
- self.try_download_consensus_checks(max_count)
- if CONSENSUS_DOWNLOAD_RETRY:
- # try unsuccessful candidates again
- # we could end up with more than max_count successful candidates here
- self.try_download_consensus_checks(max_count)
- # now we have at least max_count successful candidates,
- # or we've tried them all
- original_count = len(self.fallbacks)
- self.fallbacks = filter(lambda x: x.get_fallback_download_consensus(),
- self.fallbacks)
-    # some of these failed the check; others skipped it, because we
-    # already had enough successful downloads
- failed_count = original_count - len(self.fallbacks)
- self.fallbacks = self.fallbacks[:max_count]
- return failed_count
-
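
The final selection above in miniature, with made-up candidates and max_count = 2:

    candidates = [('A', True), ('B', False), ('C', True), ('D', True)]
    ok = [name for name, dl_ok in candidates if dl_ok]  # drop failures: A, C, D
    print(ok[:2])                                       # trim excess: ['A', 'C']
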
- # return a string that describes a/b as a percentage
- @staticmethod
- def describe_percentage(a, b):
- if b != 0:
- return '%d/%d = %.0f%%'%(a, b, (a*100.0)/b)
- else:
- # technically, 0/0 is undefined, but 0.0% is a sensible result
- return '%d/%d = %.0f%%'%(a, b, 0.0)
-
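
Condensed into one expression for illustration (equivalent to the method above; the counts are made up):

    def describe_percentage(a, b):
        return '%d/%d = %.0f%%' % (a, b, (a * 100.0) / b if b else 0.0)

    print(describe_percentage(2, 3))  # 2/3 = 67%
    print(describe_percentage(0, 0))  # 0/0 = 0%
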
- # return a dictionary of lists of fallbacks by IPv4 netblock
- # the dictionary is keyed by the fingerprint of an arbitrary fallback
- # in each netblock
- # mask_bits is the size of the netblock
- def fallbacks_by_ipv4_netblock(self, mask_bits):
- netblocks = {}
- for f in self.fallbacks:
- found_netblock = False
- for b in netblocks.keys():
- # we found an existing netblock containing this fallback
- if f.ipv4_netblocks_equal(self[b], mask_bits):
- # add it to the list
- netblocks[b].append(f)
- found_netblock = True
- break
- # make a new netblock based on this fallback's fingerprint
- if not found_netblock:
- netblocks[f._fpr] = [f]
- return netblocks
-
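
The per-netblock comparison that ipv4_netblocks_equal() implies (its body is not shown here) can be sketched with the ipaddress module the script optionally imports; the addresses are made up:

    import ipaddress

    mask_bits = 16
    a = ipaddress.ip_address(u'198.51.100.7')
    b = ipaddress.ip_address(u'198.51.113.9')
    net = ipaddress.ip_network(u'%s/%d' % (a, mask_bits), strict=False)
    print(b in net)  # True: both addresses fall in 198.51.0.0/16
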
- # return a dictionary of lists of fallbacks by IPv6 netblock
- # where mask_bits is the size of the netblock
- def fallbacks_by_ipv6_netblock(self, mask_bits):
- netblocks = {}
- for f in self.fallbacks:
- # skip fallbacks without IPv6 addresses
- if not f.has_ipv6():
- continue
- found_netblock = False
- for b in netblocks.keys():
- # we found an existing netblock containing this fallback
- if f.ipv6_netblocks_equal(self[b], mask_bits):
- # add it to the list
- netblocks[b].append(f)
- found_netblock = True
- break
- # make a new netblock based on this fallback's fingerprint
- if not found_netblock:
- netblocks[f._fpr] = [f]
- return netblocks
-
- # log a message about the proportion of fallbacks in each IPv4 netblock,
- # where mask_bits is the size of the netblock
- def describe_fallback_ipv4_netblock_mask(self, mask_bits):
- fallback_count = len(self.fallbacks)
- shared_netblock_fallback_count = 0
- most_frequent_netblock = None
- netblocks = self.fallbacks_by_ipv4_netblock(mask_bits)
- for b in netblocks.keys():
- if len(netblocks[b]) > 1:
- # how many fallbacks are in a netblock with other fallbacks?
- shared_netblock_fallback_count += len(netblocks[b])
- # what's the netblock with the most fallbacks?
- if (most_frequent_netblock is None
- or len(netblocks[b]) > len(netblocks[most_frequent_netblock])):
- most_frequent_netblock = b
- logging.debug('Fallback IPv4 addresses in the same /%d:'%(mask_bits))
- for f in netblocks[b]:
- logging.debug('%s - %s', f.dirip, f._fpr)
- if most_frequent_netblock is not None:
- logging.warning('There are %s fallbacks in the IPv4 /%d containing %s'%(
- CandidateList.describe_percentage(
- len(netblocks[most_frequent_netblock]),
- fallback_count),
- mask_bits,
- self[most_frequent_netblock].dirip))
- if shared_netblock_fallback_count > 0:
- logging.warning(('%s of fallbacks are in an IPv4 /%d with other ' +
- 'fallbacks')%(CandidateList.describe_percentage(
- shared_netblock_fallback_count,
- fallback_count),
- mask_bits))
-
- # log a message about the proportion of fallbacks in each IPv6 netblock,
- # where mask_bits is the size of the netblock
- def describe_fallback_ipv6_netblock_mask(self, mask_bits):
- fallback_count = len(self.fallbacks_with_ipv6())
- shared_netblock_fallback_count = 0
- most_frequent_netblock = None
- netblocks = self.fallbacks_by_ipv6_netblock(mask_bits)
- for b in netblocks.keys():
- if len(netblocks[b]) > 1:
- # how many fallbacks are in a netblock with other fallbacks?
- shared_netblock_fallback_count += len(netblocks[b])
- # what's the netblock with the most fallbacks?
- if (most_frequent_netblock is None
- or len(netblocks[b]) > len(netblocks[most_frequent_netblock])):
- most_frequent_netblock = b
- logging.debug('Fallback IPv6 addresses in the same /%d:'%(mask_bits))
- for f in netblocks[b]:
- logging.debug('%s - %s', f.ipv6addr, f._fpr)
- if most_frequent_netblock is not None:
- logging.warning('There are %s fallbacks in the IPv6 /%d containing %s'%(
- CandidateList.describe_percentage(
- len(netblocks[most_frequent_netblock]),
- fallback_count),
- mask_bits,
- self[most_frequent_netblock].ipv6addr))
- if shared_netblock_fallback_count > 0:
- logging.warning(('%s of fallbacks are in an IPv6 /%d with other ' +
- 'fallbacks')%(CandidateList.describe_percentage(
- shared_netblock_fallback_count,
- fallback_count),
- mask_bits))
-
- # log a message about the proportion of fallbacks in each IPv4 /8, /16,
- # and /24
- def describe_fallback_ipv4_netblocks(self):
- # this doesn't actually tell us anything useful
- #self.describe_fallback_ipv4_netblock_mask(8)
- self.describe_fallback_ipv4_netblock_mask(16)
- #self.describe_fallback_ipv4_netblock_mask(24)
-
- # log a message about the proportion of fallbacks in each IPv6 /12 (RIR),
- # /23 (smaller RIR blocks), /32 (LIR), /48 (Customer), and /64 (Host)
- # https://www.iana.org/assignments/ipv6-unicast-address-assignments/
- def describe_fallback_ipv6_netblocks(self):
- # these don't actually tell us anything useful
- #self.describe_fallback_ipv6_netblock_mask(12)
- #self.describe_fallback_ipv6_netblock_mask(23)
- self.describe_fallback_ipv6_netblock_mask(32)
- #self.describe_fallback_ipv6_netblock_mask(48)
- self.describe_fallback_ipv6_netblock_mask(64)
-
- # log a message about the proportion of fallbacks in each IPv4 and IPv6
- # netblock
- def describe_fallback_netblocks(self):
- self.describe_fallback_ipv4_netblocks()
- self.describe_fallback_ipv6_netblocks()
-
- # return a list of fallbacks which are on the IPv4 ORPort port
- def fallbacks_on_ipv4_orport(self, port):
- return filter(lambda x: x.orport == port, self.fallbacks)
-
- # return a list of fallbacks which are on the IPv6 ORPort port
- def fallbacks_on_ipv6_orport(self, port):
- return filter(lambda x: x.ipv6orport == port, self.fallbacks_with_ipv6())
-
- # return a list of fallbacks which are on the DirPort port
- def fallbacks_on_dirport(self, port):
- return filter(lambda x: x.dirport == port, self.fallbacks)
-
- # log a message about the proportion of fallbacks on IPv4 ORPort port
- # and return that count
- def describe_fallback_ipv4_orport(self, port):
- port_count = len(self.fallbacks_on_ipv4_orport(port))
- fallback_count = len(self.fallbacks)
- logging.warning('%s of fallbacks are on IPv4 ORPort %d'%(
- CandidateList.describe_percentage(port_count,
- fallback_count),
- port))
- return port_count
-
- # log a message about the proportion of IPv6 fallbacks on IPv6 ORPort port
- # and return that count
- def describe_fallback_ipv6_orport(self, port):
- port_count = len(self.fallbacks_on_ipv6_orport(port))
- fallback_count = len(self.fallbacks_with_ipv6())
- logging.warning('%s of IPv6 fallbacks are on IPv6 ORPort %d'%(
- CandidateList.describe_percentage(port_count,
- fallback_count),
- port))
- return port_count
-
- # log a message about the proportion of fallbacks on DirPort port
- # and return that count
- def describe_fallback_dirport(self, port):
- port_count = len(self.fallbacks_on_dirport(port))
- fallback_count = len(self.fallbacks)
- logging.warning('%s of fallbacks are on DirPort %d'%(
- CandidateList.describe_percentage(port_count,
- fallback_count),
- port))
- return port_count
-
- # log a message about the proportion of fallbacks on each dirport,
- # each IPv4 orport, and each IPv6 orport
- def describe_fallback_ports(self):
- fallback_count = len(self.fallbacks)
- ipv4_or_count = fallback_count
- ipv4_or_count -= self.describe_fallback_ipv4_orport(443)
- ipv4_or_count -= self.describe_fallback_ipv4_orport(9001)
- logging.warning('%s of fallbacks are on other IPv4 ORPorts'%(
- CandidateList.describe_percentage(ipv4_or_count,
- fallback_count)))
- ipv6_fallback_count = len(self.fallbacks_with_ipv6())
- ipv6_or_count = ipv6_fallback_count
- ipv6_or_count -= self.describe_fallback_ipv6_orport(443)
- ipv6_or_count -= self.describe_fallback_ipv6_orport(9001)
- logging.warning('%s of IPv6 fallbacks are on other IPv6 ORPorts'%(
- CandidateList.describe_percentage(ipv6_or_count,
- ipv6_fallback_count)))
- dir_count = fallback_count
- dir_count -= self.describe_fallback_dirport(80)
- dir_count -= self.describe_fallback_dirport(9030)
- logging.warning('%s of fallbacks are on other DirPorts'%(
- CandidateList.describe_percentage(dir_count,
- fallback_count)))
-
- # return a list of fallbacks which cache extra-info documents
- def fallbacks_with_extra_info_cache(self):
- return filter(lambda x: x._extra_info_cache, self.fallbacks)
-
- # log a message about the proportion of fallbacks that cache extra-info docs
- def describe_fallback_extra_info_caches(self):
-    extra_info_fallback_count = len(self.fallbacks_with_extra_info_cache())
- fallback_count = len(self.fallbacks)
- logging.warning('%s of fallbacks cache extra-info documents'%(
-                    CandidateList.describe_percentage(extra_info_fallback_count,
- fallback_count)))
-
- # return a list of fallbacks which have the Exit flag
- def fallbacks_with_exit(self):
- return filter(lambda x: x.is_exit(), self.fallbacks)
-
- # log a message about the proportion of fallbacks with an Exit flag
- def describe_fallback_exit_flag(self):
-    exit_fallback_count = len(self.fallbacks_with_exit())
- fallback_count = len(self.fallbacks)
- logging.warning('%s of fallbacks have the Exit flag'%(
-                    CandidateList.describe_percentage(exit_fallback_count,
- fallback_count)))
-
- # return a list of fallbacks which have an IPv6 address
- def fallbacks_with_ipv6(self):
- return filter(lambda x: x.has_ipv6(), self.fallbacks)
-
- # log a message about the proportion of fallbacks on IPv6
- def describe_fallback_ip_family(self):
-    ipv6_fallback_count = len(self.fallbacks_with_ipv6())
- fallback_count = len(self.fallbacks)
- logging.warning('%s of fallbacks are on IPv6'%(
-                    CandidateList.describe_percentage(ipv6_fallback_count,
- fallback_count)))
-
- def summarise_fallbacks(self, eligible_count, operator_count, failed_count,
- guard_count, target_count):
- s = ''
- # Report:
- # whether we checked consensus download times
- # the number of fallback directories (and limits/exclusions, if relevant)
- # min & max fallback bandwidths
- # #error if below minimum count
- if PERFORM_IPV4_DIRPORT_CHECKS or PERFORM_IPV6_DIRPORT_CHECKS:
- s += '/* Checked %s%s%s DirPorts served a consensus within %.1fs. */'%(
- 'IPv4' if PERFORM_IPV4_DIRPORT_CHECKS else '',
- ' and ' if (PERFORM_IPV4_DIRPORT_CHECKS
- and PERFORM_IPV6_DIRPORT_CHECKS) else '',
- 'IPv6' if PERFORM_IPV6_DIRPORT_CHECKS else '',
- CONSENSUS_DOWNLOAD_SPEED_MAX)
- else:
- s += '/* Did not check IPv4 or IPv6 DirPort consensus downloads. */'
- s += '\n'
- # Multiline C comment with #error if things go bad
- s += '/*'
- s += '\n'
- # Integers don't need escaping in C comments
- fallback_count = len(self.fallbacks)
- if FALLBACK_PROPORTION_OF_GUARDS is None:
- fallback_proportion = ''
- else:
- fallback_proportion = ', Target %d (%d * %.2f)'%(target_count,
- guard_count,
- FALLBACK_PROPORTION_OF_GUARDS)
- s += 'Final Count: %d (Eligible %d%s'%(fallback_count, eligible_count,
- fallback_proportion)
- if MAX_FALLBACK_COUNT is not None:
- s += ', Max %d'%(MAX_FALLBACK_COUNT)
- s += ')\n'
- if eligible_count != fallback_count:
- removed_count = eligible_count - fallback_count
- excess_to_target_or_max = (eligible_count - operator_count - failed_count
- - fallback_count)
-    # 'Failed' candidates failed the check; 'Skipped' ones were not
-    # checked, because we already had enough successful downloads
- s += ('Excluded: %d (Same Operator %d, Failed/Skipped Download %d, ' +
- 'Excess %d)')%(removed_count, operator_count, failed_count,
- excess_to_target_or_max)
- s += '\n'
- min_fb = self.fallback_min()
- min_bw = min_fb._data['measured_bandwidth']
- max_fb = self.fallback_max()
- max_bw = max_fb._data['measured_bandwidth']
- s += 'Bandwidth Range: %.1f - %.1f MByte/s'%(min_bw/(1024.0*1024.0),
- max_bw/(1024.0*1024.0))
- s += '\n'
- s += '*/'
- if fallback_count < MIN_FALLBACK_COUNT:
- # We must have a minimum number of fallbacks so they are always
- # reachable, and are in diverse locations
- s += '\n'
- s += '#error Fallback Count %d is too low. '%(fallback_count)
- s += 'Must be at least %d for diversity. '%(MIN_FALLBACK_COUNT)
- s += 'Try adding entries to the whitelist, '
- s += 'or setting INCLUDE_UNLISTED_ENTRIES = True.'
- return s
-
-def process_existing():
- logging.basicConfig(level=logging.INFO)
- logging.getLogger('stem').setLevel(logging.INFO)
- whitelist = {'data': parse_fallback_file(FALLBACK_FILE_NAME),
- 'name': FALLBACK_FILE_NAME}
- list_fallbacks(whitelist)
-
-def process_default():
- logging.basicConfig(level=logging.WARNING)
- logging.getLogger('stem').setLevel(logging.WARNING)
- whitelist = {'data': read_from_file(WHITELIST_FILE_NAME, MAX_LIST_FILE_SIZE),
- 'name': WHITELIST_FILE_NAME}
- list_fallbacks(whitelist)
-
-## Main Function
-def main():
- if get_command() == 'check_existing':
- process_existing()
- else:
- process_default()
-
-def get_command():
- if len(sys.argv) == 2:
- return sys.argv[1]
- else:
- return None
-
-def log_excluded(msg, *args):
- if get_command() == 'check_existing':
- logging.warning(msg, *args)
- else:
- logging.info(msg, *args)
-
-def list_fallbacks(whitelist):
- """ Fetches required onionoo documents and evaluates the
- fallback directory criteria for each of the relays """
-
- print "/* type=fallback */"
- print ("/* version={} */"
- .format(cleanse_c_multiline_comment(FALLBACK_FORMAT_VERSION)))
- now = datetime.datetime.utcnow()
- timestamp = now.strftime('%Y%m%d%H%M%S')
- print ("/* timestamp={} */"
- .format(cleanse_c_multiline_comment(timestamp)))
- # end the header with a separator, to make it easier for parsers
- print SECTION_SEPARATOR_COMMENT
-
- logging.warning('Downloading and parsing Onionoo data. ' +
- 'This may take some time.')
- # find relays that could be fallbacks
- candidates = CandidateList()
- candidates.add_relays()
-
- # work out how many fallbacks we want
- guard_count = candidates.count_guards()
- if FALLBACK_PROPORTION_OF_GUARDS is None:
- target_count = guard_count
- else:
- target_count = int(guard_count * FALLBACK_PROPORTION_OF_GUARDS)
- # the maximum number of fallbacks is the least of:
- # - the target fallback count (FALLBACK_PROPORTION_OF_GUARDS * guard count)
- # - the maximum fallback count (MAX_FALLBACK_COUNT)
- if MAX_FALLBACK_COUNT is None:
- max_count = target_count
- else:
- max_count = min(target_count, MAX_FALLBACK_COUNT)
-
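
With made-up numbers, say 3000 guards, FALLBACK_PROPORTION_OF_GUARDS = 0.2, and MAX_FALLBACK_COUNT = 200:

    guard_count = 3000
    target_count = int(guard_count * 0.2)  # 600
    max_count = min(target_count, 200)     # 200: the cap wins here
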
- candidates.compute_fallbacks()
- prefilter_fallbacks = copy.copy(candidates.fallbacks)
-
- # filter with the whitelist
- # if a relay has changed IPv4 address or ports recently, it will be excluded
- # as ineligible before we call apply_filter_lists, and so there will be no
- # warning that the details have changed from those in the whitelist.
- # instead, there will be an info-level log during the eligibility check.
- initial_count = len(candidates.fallbacks)
- excluded_count = candidates.apply_filter_lists(whitelist)
- print candidates.summarise_filters(initial_count, excluded_count)
- eligible_count = len(candidates.fallbacks)
-
- # calculate the measured bandwidth of each relay,
- # then remove low-bandwidth relays
- candidates.calculate_measured_bandwidth()
- candidates.remove_low_bandwidth_relays()
-
- # print the raw fallback list
- #for x in candidates.fallbacks:
- # print x.fallbackdir_line(True)
- # print json.dumps(candidates[x]._data, sort_keys=True, indent=4,
- # separators=(',', ': '), default=json_util.default)
-
- # impose mandatory conditions here, like one per contact, family, IP
- # in measured bandwidth order
- candidates.sort_fallbacks_by_measured_bandwidth()
- operator_count = 0
- # only impose these limits on the final list - operators can nominate
- # multiple candidate fallbacks, and then we choose the best set
- if not OUTPUT_CANDIDATES:
- operator_count += candidates.limit_fallbacks_same_ip()
- operator_count += candidates.limit_fallbacks_same_contact()
- operator_count += candidates.limit_fallbacks_same_family()
-
- # check if each candidate can serve a consensus
- # there's a small risk we've eliminated relays from the same operator that
- # can serve a consensus, in favour of one that can't
- # but given it takes up to 15 seconds to check each consensus download,
- # the risk is worth it
- if PERFORM_IPV4_DIRPORT_CHECKS or PERFORM_IPV6_DIRPORT_CHECKS:
- logging.warning('Checking consensus download speeds. ' +
- 'This may take some time.')
- failed_count = candidates.perform_download_consensus_checks(max_count)
-
- # work out which fallbacks cache extra-infos
- candidates.mark_extra_info_caches()
-
- # analyse and log interesting diversity metrics
- # like netblock, ports, exit, IPv4-only
- # (we can't easily analyse AS, and it's hard to accurately analyse country)
- candidates.describe_fallback_ip_family()
- # if we can't import the ipaddress module, we can't do netblock analysis
- if HAVE_IPADDRESS:
- candidates.describe_fallback_netblocks()
- candidates.describe_fallback_ports()
- candidates.describe_fallback_extra_info_caches()
- candidates.describe_fallback_exit_flag()
-
- # output C comments summarising the fallback selection process
- if len(candidates.fallbacks) > 0:
- print candidates.summarise_fallbacks(eligible_count, operator_count,
- failed_count, guard_count,
- target_count)
- else:
- print '/* No Fallbacks met criteria */'
-
- # output C comments specifying the OnionOO data used to create the list
- for s in fetch_source_list():
- print describe_fetch_source(s)
-
- # start the list with a separator, to make it easy for parsers
- print SECTION_SEPARATOR_COMMENT
-
- # sort the list differently depending on why we've created it:
- # if we're outputting the final fallback list, sort by fingerprint
- # this makes diffs much more stable
- # otherwise, if we're trying to find a bandwidth cutoff, or we want to
- # contact operators in priority order, sort by bandwidth (not yet
- # implemented)
- # otherwise, if we're contacting operators, sort by contact
- candidates.sort_fallbacks_by(OUTPUT_SORT_FIELD)
-
- for x in candidates.fallbacks:
- print x.fallbackdir_line(candidates.fallbacks, prefilter_fallbacks)
-
-if __name__ == "__main__":
- main()
diff --git a/scripts/maint/updateRustDependencies.sh b/scripts/maint/updateRustDependencies.sh
index a5a92579d3..6d0587351f 100755
--- a/scripts/maint/updateRustDependencies.sh
+++ b/scripts/maint/updateRustDependencies.sh
@@ -20,26 +20,26 @@
set -e
-HERE=`dirname $(realpath $0)`
-TOPLEVEL=`dirname $(dirname $HERE)`
+HERE=$(dirname "$(realpath "$0")")
+TOPLEVEL=$(dirname "$(dirname "$HERE")")
TOML="$TOPLEVEL/src/rust/Cargo.toml"
VENDORED="$TOPLEVEL/src/ext/rust/crates"
-CARGO=`which cargo`
+CARGO=$(command -v cargo)
if ! test -f "$TOML" ; then
- printf "Error: Couldn't find workspace Cargo.toml in expected location: %s\n" "$TOML"
+ printf "Error: Couldn't find workspace Cargo.toml in expected location: %s\\n" "$TOML"
fi
if ! test -d "$VENDORED" ; then
- printf "Error: Couldn't find directory for Rust dependencies! Expected location: %s\n" "$VENDORED"
+ printf "Error: Couldn't find directory for Rust dependencies! Expected location: %s\\n" "$VENDORED"
fi
if test -z "$CARGO" ; then
- printf "Error: cargo must be installed and in your \$PATH\n"
+ printf "Error: cargo must be installed and in your \$PATH\\n"
fi
-if test -z `cargo --list | grep vendor` ; then
- printf "Error: cargo-vendor not installed\n"
+if test -z "$(cargo --list | grep vendor)" ; then
+ printf "Error: cargo-vendor not installed\\n"
fi
-$CARGO vendor -v --locked --explicit-version --no-delete --sync $TOML $VENDORED
+$CARGO vendor -v --locked --explicit-version --no-delete --sync "$TOML" "$VENDORED"
diff --git a/scripts/maint/updateVersions.pl.in b/scripts/maint/updateVersions.pl.in
deleted file mode 100755
index 65c51a1f2d..0000000000
--- a/scripts/maint/updateVersions.pl.in
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/perl -w
-
-$CONFIGURE_IN = '@abs_top_srcdir@/configure.ac';
-$ORCONFIG_H = '@abs_top_srcdir@/src/win32/orconfig.h';
-$TOR_NSI = '@abs_top_srcdir@/contrib/win32build/tor-mingw.nsi.in';
-
-$quiet = 1;
-
-sub demand {
- my $fn = shift;
- die "Missing file $fn" unless (-f $fn);
-}
-
-demand($CONFIGURE_IN);
-demand($ORCONFIG_H);
-demand($TOR_NSI);
-
-# extract version from configure.ac
-
-open(F, $CONFIGURE_IN) or die "$!";
-$version = undef;
-while (<F>) {
- if (/AC_INIT\(\[tor\],\s*\[([^\]]*)\]\)/) {
- $version = $1;
- last;
- }
-}
-die "No version found" unless $version;
-print "Tor version is $version\n" unless $quiet;
-close F;
-
-sub correctversion {
- my ($fn, $defchar) = @_;
- undef $/;
- open(F, $fn) or die "$!";
- my $s = <F>;
- close F;
- if ($s =~ /^$defchar(?:)define\s+VERSION\s+\"([^\"]+)\"/m) {
- $oldver = $1;
- if ($oldver ne $version) {
- print "Version mismatch in $fn: It thinks that the version is $oldver. I think it's $version. Fixing.\n";
- $line = $defchar . "define VERSION \"$version\"";
- open(F, ">$fn.bak");
- print F $s;
- close F;
- $s =~ s/^$defchar(?:)define\s+VERSION.*?$/$line/m;
- open(F, ">$fn");
- print F $s;
- close F;
- } else {
- print "$fn has the correct version. Good.\n" unless $quiet;
- }
- } else {
- print "Didn't find a version line in $fn -- uh oh.\n";
- }
-}
-
-correctversion($TOR_NSI, "!");
-correctversion($ORCONFIG_H, "#");
diff --git a/scripts/maint/update_versions.py b/scripts/maint/update_versions.py
new file mode 100755
index 0000000000..07de1c343a
--- /dev/null
+++ b/scripts/maint/update_versions.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python
+
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import io
+import os
+import re
+import sys
+import time
+
+def P(path):
+ """
+    Return 'path' interpreted relative to the abs_top_srcdir environment
+    variable.
+ """
+ return os.path.join(
+ os.environ.get('abs_top_srcdir', "."),
+ path)
+
+def warn(msg):
+ """
+    Print a warning message.
+ """
+ print("WARNING: {}".format(msg), file=sys.stderr)
+
+def find_version(infile):
+ """
+ Given an open file (or some other iterator of lines) holding a
+ configure.ac file, find the current version line.
+ """
+ for line in infile:
+ m = re.search(r'AC_INIT\(\[tor\],\s*\[([^\]]*)\]\)', line)
+ if m:
+ return m.group(1)
+
+ return None
+
+def update_version_in(infile, outfile, regex, versionline):
+ """
+ Copy every line from infile to outfile. If any line matches 'regex',
+ replace it with 'versionline'. Return True if any line was changed;
+    False otherwise.
+
+ 'versionline' is either a string -- in which case it is used literally,
+ or a function that receives the output of 'regex.match'.
+ """
+ found = False
+ have_changed = False
+ for line in infile:
+ m = regex.match(line)
+ if m:
+ found = True
+ oldline = line
+ if type(versionline) == type(u""):
+ line = versionline
+ else:
+ line = versionline(m)
+ if not line.endswith("\n"):
+ line += "\n"
+ if oldline != line:
+ have_changed = True
+ outfile.write(line)
+
+ if not found:
+ warn("didn't find any version line to replace in {}".format(infile.name))
+
+ return have_changed
+
+def replace_on_change(fname, change):
+ """
+ If "change" is true, replace fname with fname.tmp. Otherwise,
+ delete fname.tmp. Log what we're doing to stderr.
+ """
+ if not change:
+ print("No change in {}".format(fname))
+ os.unlink(fname+".tmp")
+ else:
+ print("Updating {}".format(fname))
+ os.rename(fname+".tmp", fname)
+
+
+def update_file(fname,
+ regex,
+ versionline,
+ encoding="utf-8"):
+ """
+ Replace any line matching 'regex' in 'fname' with 'versionline'.
+ Do not modify 'fname' if there are no changes made. Use the
+ provided encoding to read and write.
+ """
+ with io.open(fname, "r", encoding=encoding) as f, \
+ io.open(fname+".tmp", "w", encoding=encoding) as outf:
+ have_changed = update_version_in(f, outf, regex, versionline)
+
+ replace_on_change(fname, have_changed)
+
+# Find out our version
+with open(P("configure.ac")) as f:
+ version = find_version(f)
+
+# If we have no version, we can't proceed.
+if version is None:
+    print("No version found in configure.ac", file=sys.stderr)
+ sys.exit(1)
+
+print("The version is {}".format(version))
+
+today = time.strftime("%Y-%m-%d", time.gmtime())
+
+# In configure.ac, we replace the definition of APPROX_RELEASE_DATE
+# with "{today} for {version}", but only if the version does not match
+# what is already there.
+def replace_fn(m):
+ if m.group(1) != version:
+ # The version changed -- we change the date.
+ return u'AC_DEFINE(APPROX_RELEASE_DATE, ["{}"], # for {}'.format(today, version)
+ else:
+ # No changes.
+ return m.group(0)
+update_file(P("configure.ac"),
+ re.compile(r'AC_DEFINE\(APPROX_RELEASE_DATE.* for (.*)'),
+ replace_fn)
+
+# In tor-mingw.nsi.in, we replace the definition of VERSION.
+update_file(P("contrib/win32build/tor-mingw.nsi.in"),
+ re.compile(r'!define VERSION .*'),
+ u'!define VERSION "{}"'.format(version),
+ encoding="iso-8859-1")
+
+# In src/win32/orconfig.h, we replace the definition of VERSION.
+update_file(P("src/win32/orconfig.h"),
+ re.compile(r'#define VERSION .*'),
+ u'#define VERSION "{}"'.format(version))
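
A hedged usage sketch of update_version_in() from the file above; the pattern and file contents are made up, and io.StringIO stands in for real files:

    import io
    import re

    src = io.StringIO(u'#define EXAMPLE_VERSION "1.2.2"\n')
    out = io.StringIO()
    changed = update_version_in(
        src, out,
        re.compile(r'#define EXAMPLE_VERSION .*'),
        u'#define EXAMPLE_VERSION "1.2.3"')
    print(changed)         # True: the line differed, so a rewrite is needed
    print(out.getvalue())  # #define EXAMPLE_VERSION "1.2.3"

update_file() wraps the same call between a read of fname and replace_on_change(), so the file is only rewritten when something actually changed.
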
diff --git a/scripts/test/appveyor-irc-notify.py b/scripts/test/appveyor-irc-notify.py
index cfe0afe7ae..598a68f47d 100644
--- a/scripts/test/appveyor-irc-notify.py
+++ b/scripts/test/appveyor-irc-notify.py
@@ -75,8 +75,10 @@ in Appveyor's YAML:
- "python scripts/test/appveyor-irc-notify.py irc.oftc.net:6697 tor-ci failure
"""
+# Future imports for Python 2.7, mandatory in 3.0
+from __future__ import division
from __future__ import print_function
-from __future__ import absolute_import
+from __future__ import unicode_literals
import os
import random
diff --git a/scripts/test/chutney-git-bisect.sh b/scripts/test/chutney-git-bisect.sh
index 8a3f2c70c8..dcf8ab1102 100755
--- a/scripts/test/chutney-git-bisect.sh
+++ b/scripts/test/chutney-git-bisect.sh
@@ -15,21 +15,21 @@
# Skips the test if <skip-flavour> fails (default no skip).
CHUTNEY_TRIES=3
-if [ ! -z "$1" ]; then
+if [ -n "$1" ]; then
CHUTNEY_TRIES="$1"
fi
-if [ ! -z "$2" ]; then
- cd "$2"
+if [ -n "$2" ]; then
+ cd "$2" || exit
fi
CHUTNEY_TEST_CMD="make test-network-all"
-if [ ! -z "$3" ]; then
+if [ -n "$3" ]; then
CHUTNEY_TEST_CMD="$CHUTNEY_PATH/tools/test-network.sh --flavour $3"
fi
CHUTNEY_SKIP_ON_FAIL_CMD="true"
-if [ ! -z "$4" ]; then
+if [ -n "$4" ]; then
CHUTNEY_SKIP_ON_FAIL_CMD="$CHUTNEY_PATH/tools/test-network.sh --flavour $4"
fi
@@ -54,9 +54,9 @@ while [ "$i" -le "$CHUTNEY_TRIES" ]; do
echo "test '$CHUTNEY_TEST_CMD' succeeded after $i/$CHUTNEY_TRIES attempts, good"
exit 0
fi
- i=$[$i+1]
+ i=$((i+1))
done
-i=$[$i-1]
+i=$((i-1))
echo "test '$CHUTNEY_TEST_CMD' failed $i/$CHUTNEY_TRIES attempts, bad"
exit 1
diff --git a/scripts/test/cov-diff b/scripts/test/cov-diff
index 6179dff63e..8751800966 100755
--- a/scripts/test/cov-diff
+++ b/scripts/test/cov-diff
@@ -7,15 +7,14 @@
DIRA="$1"
DIRB="$2"
-for B in $DIRB/*; do
- A=$DIRA/`basename $B`
- if [ -f $A ]; then
+for B in "$DIRB"/*; do
+ A=$DIRA/$(basename "$B")
+ if [ -f "$A" ]; then
perl -pe 's/^\s*\!*\d+(\*?):/ 1$1:/; s/^([^:]+:)[\d\s]+:/$1/; s/^ *-:(Runs|Programs):.*//;' "$A" > "$A.tmp"
else
cat /dev/null > "$A.tmp"
fi
perl -pe 's/^\s*\!*\d+(\*?):/ 1$1:/; s/^([^:]+:)[\d\s]+:/$1/; s/^ *-:(Runs|Programs):.*//;' "$B" > "$B.tmp"
diff -u "$A.tmp" "$B.tmp" |perl -pe 's/^((?:\+\+\+|---)(?:.*tmp))\s+.*/$1/;'
- rm "$A.tmp" "$B.tmp"
+ rm -f "$A.tmp" "$B.tmp"
done
-
diff --git a/scripts/test/cov-test-determinism.sh b/scripts/test/cov-test-determinism.sh
new file mode 100755
index 0000000000..3458f96968
--- /dev/null
+++ b/scripts/test/cov-test-determinism.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+# To use this script, build Tor with coverage enabled, and then say:
+# ./scripts/test/cov-test-determinism.sh run
+#
+# Let it run for a long time so it can run the tests over and over. It
+# will put their coverage outputs in coverage-raw/coverage-*/.
+#
+# Then say:
+# ./scripts/test/cov-test-determinism.sh check
+#
+# It will diff the other coverage outputs to the first one, and put their
+# diffs in coverage-raw/diff-coverage-*.
+
+run=0
+check=0
+
+if test "$1" = run; then
+ run=1
+elif test "$1" = check; then
+ check=1
+else
+ echo "First use 'run' with this script, then use 'check'."
+ exit 1
+fi
+
+if test "$run" = 1; then
+ # same seed as in travis.yml
+ TOR_TEST_RNG_SEED="636f766572616765"
+ export TOR_TEST_RNG_SEED
+ while true; do
+ make reset-gcov
+ CD=coverage-raw/coverage-$(date +%s)
+ make -j5 check
+ mkdir -p "$CD"
+ ./scripts/test/coverage "$CD"
+ done
+fi
+
+if test "$check" = 1; then
+ cd coverage-raw || exit 1
+
+ FIRST="$(find . -name "coverage-*" -type d | head -1)"
+ rm -f A
+ ln -sf "$FIRST" A
+ for dir in coverage-*; do
+ rm -f B
+ ln -sf "$dir" B
+ ../scripts/test/cov-diff A B > "diff-$dir"
+ done
+fi
diff --git a/scripts/test/coverage b/scripts/test/coverage
index b6e17abe25..f61c83bc72 100755
--- a/scripts/test/coverage
+++ b/scripts/test/coverage
@@ -8,9 +8,9 @@
dst=$1
for fn in src/core/*/*.c src/feature/*/*.c src/app/*/*.c src/lib/*/*.c; do
- BN=`basename $fn`
- DN=`dirname $fn`
- F=`echo $BN | sed -e 's/\.c$//;'`
+ BN=$(basename "$fn")
+ DN=$(dirname "$fn")
+ F=$(echo "$BN" | sed -e 's/\.c$//;')
GC="${BN}.gcov"
# Figure out the object file names
ONS=$(echo "${DN}"/*testing_a-"${F}".o)
@@ -20,18 +20,18 @@ for fn in src/core/*/*.c src/feature/*/*.c src/app/*/*.c src/lib/*/*.c; do
then
for on in $ONS; do
# We should have a gcno file
- GCNO=`echo $on | sed -e 's/\.o$/\.gcno/;'`
- if [ -e $GCNO ]
+ GCNO=$(echo "$on" | sed -e 's/\.o$/\.gcno/;')
+ if [ -e "$GCNO" ]
then
# No need to test for gcda, since gcov assumes no execution
# if it's absent
- rm -f $GC
- gcov -o $on $fn
- if [ -e $GC ]
+ rm -f "$GC"
+ gcov -o "$on" "$fn"
+ if [ -e "$GC" ]
then
if [ -d "$dst" ]
then
- mv $GC $dst/$GC
+ mv "$GC" "$dst"/"$GC"
fi
else
echo "gcov -o $on $fn didn't make a .gcov file"
diff --git a/scripts/test/scan-build.sh b/scripts/test/scan-build.sh
index 8d126cbcee..26e05ff101 100755
--- a/scripts/test/scan-build.sh
+++ b/scripts/test/scan-build.sh
@@ -33,6 +33,7 @@ CHECKERS="\
-enable-checker security.insecureAPI.strcpy \
"
+# shellcheck disable=SC2034
# These have high false-positive rates.
EXTRA_CHECKERS="\
-enable-checker alpha.security.ArrayBoundV2 \
@@ -40,6 +41,7 @@ EXTRA_CHECKERS="\
-enable-checker alpha.core.CastSize \
"
+# shellcheck disable=SC2034
# These don't seem to generate anything useful
NOISY_CHECKERS="\
-enable-checker alpha.clone.CloneChecker \
@@ -52,6 +54,7 @@ else
OUTPUTARG=""
fi
+# shellcheck disable=SC2086
scan-build \
$CHECKERS \
./configure
@@ -61,11 +64,13 @@ scan-build \
# Make this not get scanned for dead assignments, since it has lots of
# dead assignments we don't care about.
+# shellcheck disable=SC2086
scan-build \
$CHECKERS \
-disable-checker deadcode.DeadStores \
make -j5 -k ./src/ext/ed25519/ref10/libed25519_ref10.a
+# shellcheck disable=SC2086
scan-build \
$CHECKERS $OUTPUTARG \
make -j5 -k