110 files changed, 2871 insertions, 1373 deletions
diff --git a/.gitignore b/.gitignore index 7900141ceb..2f94254c53 100644 --- a/.gitignore +++ b/.gitignore @@ -132,6 +132,9 @@ uptime-*.json /src/common/libor.a /src/common/libor-testing.a /src/common/libor.lib +/src/common/libor-ctime.a +/src/common/libor-ctime-testing.a +/src/common/libor-ctime.lib /src/common/libor-crypto.a /src/common/libor-crypto-testing.a /src/common/libor-crypto.lib @@ -191,9 +194,6 @@ uptime-*.json /src/test/test-switch-id.exe /src/test/test-timers.exe /src/test/test_workqueue.exe -/src/test/test_zero_length_keys.sh -/src/test/test_ntor.sh -/src/test/test_bt.sh # /src/tools/ /src/tools/tor-checkkey @@ -1,6 +1,197 @@ Changes in version 0.2.9.1-alpha - 2016-??-?? + +Changes in version 0.2.8.3-alpha - 2016-05-26 + Tor 0.2.8.3-alpha resolves several bugs, most of them introduced over + the course of the 0.2.8 development cycle. It improves the behavior of + directory clients, fixes several crash bugs, fixes a gap in compiler + hardening, and allows the full integration test suite to run on + more platforms. + + o Major bugfixes (security, client, DNS proxy): + - Stop a crash that could occur when a client running with DNSPort + received a query with multiple address types, and the first + address type was not supported. Found and fixed by Scott Dial. + Fixes bug 18710; bugfix on 0.2.5.4-alpha. + + o Major bugfixes (security, compilation): + - Correctly detect compiler flags on systems where _FORTIFY_SOURCE + is predefined. Previously, our use of -D_FORTIFY_SOURCE would + cause a compiler warning, thereby making other checks fail, and + needlessly disabling compiler-hardening support. Fixes one case of + bug 18841; bugfix on 0.2.3.17-beta. Patch from "trudokal". + + o Major bugfixes (security, directory authorities): + - Fix a crash and out-of-bounds write during authority voting, when + the list of relays includes duplicate ed25519 identity keys. Fixes + bug 19032; bugfix on 0.2.8.2-alpha. + + o Major bugfixes (client, bootstrapping): + - Check if bootstrap consensus downloads are still needed when the + linked connection attaches. This prevents tor making unnecessary + begindir-style connections, which are the only directory + connections tor clients make since the fix for 18483 was merged. + - Fix some edge cases where consensus download connections may not + have been closed, even though they were not needed. Related to fix + for 18809. + - Make relays retry consensus downloads the correct number of times, + rather than the more aggressive client retry count. Fixes part of + ticket 18809. + - Stop downloading consensuses when we have a consensus, even if we + don't have all the certificates for it yet. Fixes bug 18809; + bugfix on 0.2.8.1-alpha. Patches by arma and teor. + + o Major bugfixes (directory mirrors): + - Decide whether to advertise begindir support in the the same way + we decide whether to advertise our DirPort. Allowing these + decisions to become out-of-sync led to surprising behavior like + advertising begindir support when hibernation made us not + advertise a DirPort. Resolves bug 18616; bugfix on 0.2.8.1-alpha. + Patch by teor. + + o Major bugfixes (IPv6 bridges, client): + - Actually use IPv6 addresses when selecting directory addresses for + IPv6 bridges. Fixes bug 18921; bugfix on 0.2.8.1-alpha. Patch + by "teor". + + o Major bugfixes (key management): + - If OpenSSL fails to generate an RSA key, do not retain a dangling + pointer to the previous (uninitialized) key value. 
The impact here + should be limited to a difficult-to-trigger crash, if OpenSSL is + running an engine that makes key generation failures possible, or + if OpenSSL runs out of memory. Fixes bug 19152; bugfix on + 0.2.1.10-alpha. Found by Yuan Jochen Kang, Suman Jana, and + Baishakhi Ray. + + o Major bugfixes (testing): + - Fix a bug that would block 'make test-network-all' on systems where + IPv6 packets were lost. Fixes bug 19008; bugfix on tor-0.2.7.3-rc. + - Avoid "WSANOTINITIALISED" warnings in the unit tests. Fixes bug 18668; + bugfix on 0.2.8.1-alpha. + + o Minor features (clients): + - Make clients, onion services, and bridge relays always use an + encrypted begindir connection for directory requests. Resolves + ticket 18483. Patch by "teor". + + o Minor features (fallback directory mirrors): + - Give each fallback the same weight for client selection; restrict + fallbacks to one per operator; report fallback directory detail + changes when rebuilding list; add new fallback directory mirrors + to the whitelist; update fallback directories based on the latest + OnionOO data; and any other minor simplifications and fixes. + Closes tasks 17158, 17905, 18749, bug 18689, and fixes part of bug + 18812 on 0.2.8.1-alpha; patch by "teor". + + o Minor features (geoip): + - Update geoip and geoip6 to the May 4 2016 Maxmind GeoLite2 + Country database. + + o Minor bugfixes (assert, portability): + - Fix an assertion failure in memarea.c on systems where "long" is + shorter than the size of a pointer. Fixes bug 18716; bugfix + on 0.2.1.1-alpha. + + o Minor bugfixes (bootstrap): + - Consistently use the consensus download schedule for authority + certificates. Fixes bug 18816; bugfix on 0.2.4.13-alpha. + + o Minor bugfixes (build): + - Remove a pair of redundant AM_CONDITIONAL declarations from + configure.ac. Fixes one final case of bug 17744; bugfix + on 0.2.8.2-alpha. + - Resolve warnings when building on systems that are concerned with + signed char. Fixes bug 18728; bugfix on 0.2.7.2-alpha + and 0.2.6.1-alpha. + - When libscrypt.h is found, but no libscrypt library can be linked, + treat libscrypt as absent. Fixes bug 19161; bugfix + on 0.2.6.1-alpha. + + o Minor bugfixes (client): + - Turn all TestingClientBootstrap* into non-testing torrc options. + This changes simply renames them by removing "Testing" in front of + them and they do not require TestingTorNetwork to be enabled + anymore. Fixes bug 18481; bugfix on 0.2.8.1-alpha. + - Make directory node selection more reliable, mainly for IPv6-only + clients and clients with few reachable addresses. Fixes bug 18929; + bugfix on 0.2.8.1-alpha. Patch by "teor". + + o Minor bugfixes (controller, microdescriptors): + - Make GETINFO dir/status-vote/current/consensus conform to the + control specification by returning "551 Could not open cached + consensus..." when not caching consensuses. Fixes bug 18920; + bugfix on 0.2.2.6-alpha. + + o Minor bugfixes (crypto, portability): + - The SHA3 and SHAKE routines now produce the correct output on Big + Endian systems. No code calls either algorithm yet, so this is + primarily a build fix. Fixes bug 18943; bugfix on 0.2.8.1-alpha. + - Tor now builds again with the recent OpenSSL 1.1 development + branch (tested against 1.1.0-pre4 and 1.1.0-pre5-dev). Closes + ticket 18286. 
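The key-management fix above (bug 19152) lands later in this diff, in src/common/crypto.c, by freeing the old RSA key and clearing the pointer before a new generation attempt. The following is only a rough sketch of that pattern, using an invented example_pk_t and example_pk_generate() rather than Tor's crypto_pk_t API: a failed RSA_generate_key_ex() then leaves the field NULL instead of dangling.

    /* Sketch only, not Tor's code: clear the stale key before regenerating,
     * and install the new key only on success. */
    #include <openssl/rsa.h>
    #include <openssl/bn.h>

    typedef struct { RSA *key; } example_pk_t;   /* stand-in for crypto_pk_t */

    static int
    example_pk_generate(example_pk_t *env, int bits)
    {
      if (env->key) {
        RSA_free(env->key);
        env->key = NULL;          /* never keep a pointer to the freed key */
      }
      BIGNUM *e = BN_new();
      RSA *r = RSA_new();
      int ok = e && r && BN_set_word(e, 65537) &&
               RSA_generate_key_ex(r, bits, e, NULL);
      if (ok)
        env->key = r;             /* only installed when generation worked */
      else if (r)
        RSA_free(r);
      if (e)
        BN_free(e);
      return ok ? 0 : -1;
    }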
+ + o Minor bugfixes (directories): + - When fetching extrainfo documents, compare their SHA256 digests + and Ed25519 signing key certificates with the routerinfo that led + us to fetch them, rather than with the most recent routerinfo. + Otherwise we generate many spurious warnings about mismatches. + Fixes bug 17150; bugfix on 0.2.7.2-alpha. + + o Minor bugfixes (logging): + - When we can't generate a signing key because OfflineMasterKey is + set, do not imply that we should have been able to load it. Fixes + bug 18133; bugfix on 0.2.7.2-alpha. + - Stop periodic_event_dispatch() from blasting twelve lines per + second at loglevel debug. Fixes bug 18729; fix on 0.2.8.1-alpha. + - When rejecting a misformed INTRODUCE2 cell, only log at + PROTOCOL_WARN severity. Fixes bug 18761; bugfix on 0.2.8.2-alpha. + + o Minor bugfixes (pluggable transports): + - Avoid reporting a spurious error when we decide that we don't need + to terminate a pluggable transport because it has already exited. + Fixes bug 18686; bugfix on 0.2.5.5-alpha. + + o Minor bugfixes (pointer arithmetic): + - Fix a bug in memarea_alloc() that could have resulted in remote + heap write access, if Tor had ever passed an unchecked size to + memarea_alloc(). Fortunately, all the sizes we pass to + memarea_alloc() are pre-checked to be less than 128 kilobytes. + Fixes bug 19150; bugfix on 0.2.1.1-alpha. Bug found by + Guido Vranken. + + o Minor bugfixes (relays): + - Consider more config options when relays decide whether to + regenerate their descriptor. Fixes more of bug 12538; bugfix + on 0.2.8.1-alpha. + - Resolve some edge cases where we might launch an ORPort + reachability check even when DisableNetwork is set. Noticed while + fixing bug 18616; bugfix on 0.2.3.9-alpha. + + o Minor bugfixes (statistics): + - We now include consensus downloads via IPv6 in our directory- + request statistics. Fixes bug 18460; bugfix on 0.2.3.14-alpha. + + o Minor bugfixes (testing): + - Allow directories in small networks to bootstrap by skipping + DirPort checks when the consensus has no exits. Fixes bug 19003; + bugfix on 0.2.8.1-alpha. Patch by teor. + - Fix a small memory leak that would occur when the + TestingEnableCellStatsEvent option was turned on. Fixes bug 18673; + bugfix on 0.2.5.2-alpha. + + o Minor bugfixes (time handling): + - When correcting a corrupt 'struct tm' value, fill in the tm_wday + field. Otherwise, our unit tests crash on Windows. Fixes bug + 18977; bugfix on 0.2.2.25-alpha. + + o Documentation: + - Document the contents of the 'datadir/keys' subdirectory in the + manual page. Closes ticket 17621. + - Stop recommending use of nicknames to identify relays in our + MapAddress documentation. Closes ticket 18312. + + Changes in version 0.2.8.2-alpha - 2016-03-28 Tor 0.2.8.2-alpha is the second alpha in its series. It fixes numerous bugs in earlier versions of Tor, including some that prevented @@ -271,6 +271,110 @@ src/ext/readpassphrase.[ch] are distributed under this license: Materiel Command, USAF, under agreement number F39502-99-1-0512. =============================================================================== +src/ext/mulodi4.c is distributed under this license: + + ========================================================================= + compiler_rt License + ========================================================================= + + The compiler_rt library is dual licensed under both the + University of Illinois "BSD-Like" license and the MIT license. 
+ As a user of this code you may choose to use it under either + license. As a contributor, you agree to allow your code to be + used under both. + + Full text of the relevant licenses is included below. + + ========================================================================= + + University of Illinois/NCSA + Open Source License + + Copyright (c) 2009-2016 by the contributors listed in CREDITS.TXT + + All rights reserved. + + Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal with the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following + conditions: + + * Redistributions of source code must retain the above + copyright notice, this list of conditions and the following + disclaimers. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimers in the documentation and/or other materials + provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois + at Urbana-Champaign, nor the names of its contributors may + be used to endorse or promote products derived from this + Software without specific prior written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS WITH THE SOFTWARE. + + ========================================================================= + + Copyright (c) 2009-2015 by the contributors listed in CREDITS.TXT + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ + ========================================================================= + Copyrights and Licenses for Third Party Software Distributed with LLVM: + ========================================================================= + + The LLVM software contains code written by third parties. Such + software will have its own individual LICENSE.TXT file in the + directory in which it appears. This file will describe the + copyrights, license, and restrictions which apply to that code. + + The disclaimer of warranty in the University of Illinois Open + Source License applies to all code in the LLVM Distribution, and + nothing in any of the other licenses gives permission to use the + names of the LLVM Team or the University of Illinois to endorse + or promote products derived from this Software. + +=============================================================================== If you got Tor as a static binary with OpenSSL included, then you should know: "This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/)" diff --git a/Makefile.am b/Makefile.am index 13ba00d4b5..a8aa3af40d 100644 --- a/Makefile.am +++ b/Makefile.am @@ -15,8 +15,8 @@ noinst_PROGRAMS= DISTCLEANFILES= bin_SCRIPTS= AM_CPPFLAGS= -AM_CFLAGS = @TOR_SYSTEMD_CFLAGS@ -SHELL = @SHELL@ +AM_CFLAGS=@TOR_SYSTEMD_CFLAGS@ @CFLAGS_BUGTRAP@ +SHELL=@SHELL@ if COVERAGE_ENABLED TESTING_TOR_BINARY=$(top_builddir)/src/or/tor-cov$(EXEEXT) diff --git a/acinclude.m4 b/acinclude.m4 index 7b1aab2f99..4b9f0953e9 100644 --- a/acinclude.m4 +++ b/acinclude.m4 @@ -42,10 +42,11 @@ AC_DEFUN([TOR_DEFINE_CODEPATH], AC_SUBST(TOR_LDFLAGS_$2) ]) -dnl 1:flags -dnl 2:also try to link (yes: non-empty string) -dnl will set yes or no in $tor_can_link_$1 (as modified by AS_VAR_PUSHDEF) -AC_DEFUN([TOR_CHECK_CFLAGS], [ +dnl 1: flags +dnl 2: try to link too if this is nonempty. +dnl 3: what to do on success compiling +dnl 4: what to do on failure compiling +AC_DEFUN([TOR_TRY_COMPILE_WITH_CFLAGS], [ AS_VAR_PUSHDEF([VAR],[tor_cv_cflags_$1]) AC_CACHE_CHECK([whether the compiler accepts $1], VAR, [ tor_saved_CFLAGS="$CFLAGS" @@ -63,12 +64,21 @@ AC_DEFUN([TOR_CHECK_CFLAGS], [ CFLAGS="$tor_saved_CFLAGS" ]) if test x$VAR = xyes; then - CFLAGS="$CFLAGS $1" + $3 + else + $4 fi AS_VAR_POPDEF([VAR]) ]) dnl 1:flags +dnl 2:also try to link (yes: non-empty string) +dnl will set yes or no in $tor_can_link_$1 (as modified by AS_VAR_PUSHDEF) +AC_DEFUN([TOR_CHECK_CFLAGS], [ + TOR_TRY_COMPILE_WITH_CFLAGS($1, $2, CFLAGS="$CFLAGS $1", /bin/true) +]) + +dnl 1:flags dnl 2:extra ldflags dnl 3:extra libraries AC_DEFUN([TOR_CHECK_LDFLAGS], [ diff --git a/changes/19044 b/changes/19044 new file mode 100644 index 0000000000..a7f938a248 --- /dev/null +++ b/changes/19044 @@ -0,0 +1,5 @@ + o Minor features (compilation): + - Our big list of extra GCC warnings is now enabled by default when + building with GCC (or with anything like Clang that claims to be + GCC-compatible). To make all warnings into fatal compilation errors, + pass --enable-fatal-warnings to configure. Closes ticket 19044. diff --git a/changes/bug17150 b/changes/bug17150 new file mode 100644 index 0000000000..686cc34296 --- /dev/null +++ b/changes/bug17150 @@ -0,0 +1,7 @@ + o Minor bugfixes (directory warnings): + - When fetching extrainfo documents, compare their SHA256 digests + and Ed25519 signing key certificates + with the routerinfo that led us to fetch them, rather than + with the most recent routerinfo. 
Otherwise we generate many + spurious warnings about mismatches. Fixes bug 17150; bugfix + on 0.2.7.2-alpha. diff --git a/changes/bug17744_redux b/changes/bug17744_redux deleted file mode 100644 index d61e17fec3..0000000000 --- a/changes/bug17744_redux +++ /dev/null @@ -1,5 +0,0 @@ - o Minor bugfixes (build): - - Remove a pair of redundant AM_CONDITIONAL declarations from - configure.ac. Fixes one final case of bug 17744; bugfix on - 0.2.8.2-alpha. - diff --git a/changes/bug17983 b/changes/bug17983 new file mode 100644 index 0000000000..db52a37615 --- /dev/null +++ b/changes/bug17983 @@ -0,0 +1,11 @@ + o Minor features (bug-finding): + - Tor now builds with -ftrapv by default on compilers that support it. + This option detects signed integer overflow, and turns it into a + hard-failure. We do not apply this option to code that needs to run + in constant time to avoid side-channels; instead, we use -fwrapv. + Closes ticket 17983. + - When --enable-expensive-hardening is selected, stop applying the clang/gcc + sanitizers to code that needs to run in constant-time to avoid side + channels: although we are aware of no introduced side-channels, we + are not able to prove that this is safe. Related to ticket 17983. + diff --git a/changes/bug18133 b/changes/bug18133 deleted file mode 100644 index 177d286495..0000000000 --- a/changes/bug18133 +++ /dev/null @@ -1,4 +0,0 @@ - o Minor bugfixes (logging): - - When we can't generate a signing key because OfflineMasterKey is set, - do not imply that we should have been able to load it. - Fixes bug 18133; bugfix on 0.2.7.2-alpha. diff --git a/changes/bug18286 b/changes/bug18286 deleted file mode 100644 index 6e9ae3de09..0000000000 --- a/changes/bug18286 +++ /dev/null @@ -1,4 +0,0 @@ - o Minor features (build): - - Tor now again builds with the recent OpenSSL 1.1 development branch - (tested against 1.1.0-pre4 and 1.1.0-pre5-dev). - diff --git a/changes/bug18312 b/changes/bug18312 deleted file mode 100644 index 7dcb3266bf..0000000000 --- a/changes/bug18312 +++ /dev/null @@ -1,4 +0,0 @@ - o Documentation: - - Stop recommending use of nicknames to identify relays in our - MapAddress documentation. Closes ticket 18312. - diff --git a/changes/bug18460 b/changes/bug18460 deleted file mode 100644 index 457e5dfc17..0000000000 --- a/changes/bug18460 +++ /dev/null @@ -1,4 +0,0 @@ - o Minor bugfixes (statistics): - - Include consensus downloads via IPv6 in directory-request statistics. - Fixes bug 18480; bugfix on 4741aa4 in 0.2.3.14-alpha. - diff --git a/changes/bug18481 b/changes/bug18481 deleted file mode 100644 index 7fd9e1edc0..0000000000 --- a/changes/bug18481 +++ /dev/null @@ -1,5 +0,0 @@ - o Minor bugfixes (client): - - Turn all TestingClientBootstrap* into non-testing torrc options. This - changes simply renames them by removing "Testing" in front of them and - they do not require TestingTorNetwork to be enabled anymore. Fixes - #18481; bugfix on tor-0.2.8.1-alpha. diff --git a/changes/bug18668 b/changes/bug18668 new file mode 100644 index 0000000000..4b186b5c05 --- /dev/null +++ b/changes/bug18668 @@ -0,0 +1,3 @@ + o Minor bugfixes (tests): + - Avoid "WSANOTINITIALISED" warnings in the unit tests. Fixes bug 18668; + bugfix on 0.2.8.1-alpha. diff --git a/changes/bug18673 b/changes/bug18673 deleted file mode 100644 index 5d6161718a..0000000000 --- a/changes/bug18673 +++ /dev/null @@ -1,4 +0,0 @@ - o Minor bugfixes (memory leak): - - Fix a small memory leak that would occur when the - TestingEnableCellStatsEvent option was turned on. 
Fixes bug 18673; - bugfix on 0.2.5.2-alpha. diff --git a/changes/bug18686 b/changes/bug18686 deleted file mode 100644 index 23547d211d..0000000000 --- a/changes/bug18686 +++ /dev/null @@ -1,5 +0,0 @@ - o Minor bugfixes (pluggable transports): - - Avoid reporting a spurious error when we decide that we don't - need to terminate a pluggable transport because it has already - exited. Fixes bug 18686; bugfix on 0.2.5.5-alpha. - diff --git a/changes/bug18710 b/changes/bug18710 deleted file mode 100644 index 269395563d..0000000000 --- a/changes/bug18710 +++ /dev/null @@ -1,6 +0,0 @@ - o Major bugfixes (DNS proxy): - - Stop a crash that could occur when a client running with DNSPort - received a query with multiple address types, where the first - address type was not supported. Found and fixed by Scott Dial. - Fixes bug 18710; bugfix on 0.2.5.4-alpha. - diff --git a/changes/bug18716 b/changes/bug18716 deleted file mode 100644 index b15a343f4c..0000000000 --- a/changes/bug18716 +++ /dev/null @@ -1,4 +0,0 @@ - o Minor bugfixes (assert, portability): - - Fix an assertion failure in memarea.c on systems where "long" is - shorter than the size of a pointer. - Fixes bug 18716; bugfix on 0.2.1.1-alpha diff --git a/changes/bug18728 b/changes/bug18728 deleted file mode 100644 index e181c17e65..0000000000 --- a/changes/bug18728 +++ /dev/null @@ -1,4 +0,0 @@ - o Minor bugfixes (build): - - Resolve warnings when building on systems that are concerned with - signed char. Fixes bug 18728; bugfix on 0.2.7.2-alpha and - 0.2.6.1-alpha. diff --git a/changes/bug18729 b/changes/bug18729 deleted file mode 100644 index d4312c0b76..0000000000 --- a/changes/bug18729 +++ /dev/null @@ -1,3 +0,0 @@ - o Minor logging changes: - - Stop blasting twelve lines per second from periodic_event_dispatch() - at loglevel debug. Resolves ticket 18729; fix on 0.2.8.1-alpha. diff --git a/changes/bug18761 b/changes/bug18761 deleted file mode 100644 index 78500a88ea..0000000000 --- a/changes/bug18761 +++ /dev/null @@ -1,3 +0,0 @@ - o Minor feature (logging): - - When rejecting a misformed INTRODUCE2 cell, only log at PROTOCOL_WARN - severity. Closes ticket 18761. diff --git a/changes/bug18809 b/changes/bug18809 new file mode 100644 index 0000000000..1e151874b7 --- /dev/null +++ b/changes/bug18809 @@ -0,0 +1,16 @@ + o Major bugfixes (bootstrap): + - Check if bootstrap consensus downloads are still needed + when the linked connection attaches. This prevents tor + making unnecessary begindir-style connections, which are + the only directory connections tor clients make since + the fix for 18483 was merged. + - Fix some edge cases where consensus download connections + may not have been closed, even though they were not needed. + Related to fix 18809. + - Make relays retry consensus downloads the correct number of + times, rather than the more aggressive client retry count. + Fixes part of ticket 18809. + - Stop downloading consensuses when we have a consensus, + even if we don't have all the certificates for it yet. + Fixes bug 18809; bugfix on 0.2.8.1-alpha. + Patches by arma and teor. diff --git a/changes/bug18815 b/changes/bug18815 new file mode 100644 index 0000000000..cb504b2a8e --- /dev/null +++ b/changes/bug18815 @@ -0,0 +1,3 @@ + o Minor features (performance): + - When fetching a consensus for the first time, use optimistic data. + This saves a round-trip during startup. Closes ticket 18815. 
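The bug 18716 entry above comes down to memarea.c doing pointer arithmetic in a type that cannot hold a pointer on every platform. The memarea patch itself is not part of this diff; as an illustration of the portability issue only, an alignment helper written with uintptr_t keeps working on LLP64 systems such as 64-bit Windows, where long is 32 bits wide but pointers are 64.

    /* Illustration only (not the memarea.c patch): round a pointer up to an
     * alignment boundary.  Casting through "unsigned long" would truncate
     * the address on LLP64 platforms; uintptr_t is guaranteed to be wide
     * enough for any object pointer. */
    #include <stdint.h>

    static char *
    align_up(char *ptr, uintptr_t alignment)  /* alignment: a power of two */
    {
      uintptr_t v = (uintptr_t)ptr;
      v = (v + alignment - 1) & ~(alignment - 1);
      return (char *)v;
    }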
diff --git a/changes/bug18816 b/changes/bug18816 deleted file mode 100644 index 7265f5ab3f..0000000000 --- a/changes/bug18816 +++ /dev/null @@ -1,4 +0,0 @@ - o Minor bugfix (bootstrap): - - Consistently use the consensus download schedule for - authority certificates. - Resolves ticket 18816; fix on fddb814fe in 0.2.4.13-alpha. diff --git a/changes/bug18840 b/changes/bug18840 new file mode 100644 index 0000000000..b8de1aae88 --- /dev/null +++ b/changes/bug18840 @@ -0,0 +1,4 @@ + o Minor bugfixes: + - Authorities now sort the "package" lines in their votes, for ease + of debugging. (They are already sorted in the consensus documents.) + Fixes bug 18840; bugfix on 0.2.6.3-alpha. diff --git a/changes/bug18841.1 b/changes/bug18841.1 deleted file mode 100644 index 205ee5a425..0000000000 --- a/changes/bug18841.1 +++ /dev/null @@ -1,7 +0,0 @@ - o Major bugfixes (compilation): - - Correctly detect compiler flags on systems where _FORTIFY_SOURCE - is predefined. Previously, our use of -D_FORTIFY_SOURCE would - cause a compiler warning, thereby making other checks fail. - Fixes one case of bug 18841; bugfix on 0.2.3.17-beta. Patch from - "trudokal". - diff --git a/changes/bug18895 b/changes/bug18895 new file mode 100644 index 0000000000..9ad857c546 --- /dev/null +++ b/changes/bug18895 @@ -0,0 +1,6 @@ + o Minor features (build): + - When building on a system without runtime support for some of the + runtime hardening options, try to log a useful warning at configuration + time, rather than an incomprehensible warning at link time. + If expensive hardening was requested, this warning becomes an error. + Closes ticket 18895. diff --git a/changes/bug18920 b/changes/bug18920 deleted file mode 100644 index 1babfd6656..0000000000 --- a/changes/bug18920 +++ /dev/null @@ -1,5 +0,0 @@ - o Minor bugfixes (controller, microdescriptors): - - Make GETINFO dir/status-vote/current/consensus conform to the control - specification by returning "551 Could not open cached consensus..." - when not caching consensuses. - Fixes bug 18920; bugfix on 0.2.2.6-alpha. diff --git a/changes/bug18921 b/changes/bug18921 deleted file mode 100644 index 934a604945..0000000000 --- a/changes/bug18921 +++ /dev/null @@ -1,4 +0,0 @@ - o Major bugfixes (IPv6 bridges): - - Fix directory address selection for IPv6 bridges. - Resolves #18921, bugfix on #17840 in 0.2.8.1-alpha. - Patch by "teor". diff --git a/changes/bug18929 b/changes/bug18929 deleted file mode 100644 index f79bacae8e..0000000000 --- a/changes/bug18929 +++ /dev/null @@ -1,5 +0,0 @@ - o Minor bugfixes (IPv6): - - Make directory node selection more reliable, mainly for - IPv6-only clients and clients with few reachable addresses. - Resolves #18929, bugfix on #17840 in 0.2.8.1-alpha. - Patch by "teor". diff --git a/changes/bug18934_2 b/changes/bug18934_2 new file mode 100644 index 0000000000..039feafb89 --- /dev/null +++ b/changes/bug18934_2 @@ -0,0 +1,4 @@ + o Minor bugfixes (testing): + - Disable ASAN's detection of segmentation faults while running + test_bt.sh, so that we can make sure that our own backtrace generation + code works. Fixes another aspect of bug 18934. Patch from "cypherpunks". diff --git a/changes/bug18943 b/changes/bug18943 deleted file mode 100644 index 53569f05cb..0000000000 --- a/changes/bug18943 +++ /dev/null @@ -1,6 +0,0 @@ - o Major bugfixes (crypto, portability): - - The SHA3 and SHAKE routines now produce the correct output on - Big Endian systems, unbreaking the unit tests. 
No code calls - either algorithm family yet, so this is primarily a build fix. - Closes ticket 18943. - diff --git a/changes/bug18956 b/changes/bug18956 new file mode 100644 index 0000000000..0cf10e9224 --- /dev/null +++ b/changes/bug18956 @@ -0,0 +1,5 @@ + o Minor bugfixes (memory leaks): + - Fix a small, uncommon memory leak that could occur when reading a + truncated ed25519 key file. Fixes bug 18956; bugfix on 0.2.6.1-alpha. + + diff --git a/changes/bug18963 b/changes/bug18963 new file mode 100644 index 0000000000..f122288b1d --- /dev/null +++ b/changes/bug18963 @@ -0,0 +1,5 @@ + o Minor bugfix (bootstrap): + - Remember the directory we fetched the consensus or previous + certificates from, and use it to fetch future authority + certificates. + Resolves ticket 18963; fix on #4483 in 0.2.8.1-alpha. diff --git a/changes/bug19008 b/changes/bug19008 deleted file mode 100644 index c51c98faa6..0000000000 --- a/changes/bug19008 +++ /dev/null @@ -1,3 +0,0 @@ - o Major bugfixes (testing): - - Fix a bug that would block 'make test-network-all' on systems - where IPv6 packets were lost. Fixes bug 19008; bugfix on tor-0.2.7.3-rc. diff --git a/changes/bug19032 b/changes/bug19032 deleted file mode 100644 index 93f17c2f91..0000000000 --- a/changes/bug19032 +++ /dev/null @@ -1,4 +0,0 @@ - o Major bugfixes (security, directory authorities): - - Fix a crash and out-of-bounds write during authority voting, when the - list of relays includes duplicate ed25519 identity keys. Fixes bug 19032; - bugfix on 0.2.8.2-alpha. diff --git a/changes/bug19066 b/changes/bug19066 new file mode 100644 index 0000000000..c3d1fc789a --- /dev/null +++ b/changes/bug19066 @@ -0,0 +1,5 @@ + o Minor bugfixes (directory authority): + - When parsing detached signature, make sure we use the length of the + digest algorithm instead of an hardcoded DIGEST256_LEN in order to + avoid comparing bytes out of bound with a smaller digest length such + as SHA1. Fixes #19066; bugfix on tor-0.2.2.6-alpha. diff --git a/changes/bug19079 b/changes/bug19079 new file mode 100644 index 0000000000..6cbc6cee3b --- /dev/null +++ b/changes/bug19079 @@ -0,0 +1,4 @@ + o Minor features (build): + - Detect and work around a libclang_rt problem that prevents clang from + finding __mulodi4() on some 32-bit platforms. This clang bug would keep + -ftrapv from linking on those systems. Closes ticket 19079. diff --git a/changes/bug19161 b/changes/bug19161 new file mode 100644 index 0000000000..78c2165308 --- /dev/null +++ b/changes/bug19161 @@ -0,0 +1,3 @@ + o Minor bugfixes (compilation): + - When libscrypt.h is found, but no libscrypt library can be linked, + treat libscrypt as absent. Fixes bug 19161; bugfix on 0.2.6.1-alpha. diff --git a/changes/fallbacks-201604 b/changes/fallbacks-201604 deleted file mode 100644 index d61615a6e8..0000000000 --- a/changes/fallbacks-201604 +++ /dev/null @@ -1,9 +0,0 @@ - o Minor enhancements (fallback directory mirrors): - - Give each fallback the same weight for client selection. - Restrict fallbacks to one per operator. - Report fallback directory detail changes when rebuilding list. - Add new fallback directory mirrors to the whitelist. - Update fallback directories based on the latest OnionOO data. - Many other minor simplifications and fixes. - Closes tasks 17158, 17905, 18749, bug 18689, and fixes part of - bug 18812 on tor 0.2.8.1-alpha; patch by "teor". 
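The bug 19066 entry above is about comparing a parsed digest against a hard-coded DIGEST256_LEN even when the detached signature uses a shorter algorithm such as SHA1, which reads past the end of the smaller digest. A hypothetical sketch of the idea (the length constants match Tor's, the helper names do not): bound the comparison by the algorithm's own digest length.

    /* Hypothetical sketch of the bug 19066 idea, not Tor's parsing code:
     * a 20-byte SHA1 digest must never be compared 32 bytes deep. */
    #include <string.h>
    #include <stddef.h>

    #define DIGEST_LEN    20   /* SHA1   */
    #define DIGEST256_LEN 32   /* SHA256 */

    typedef enum { DIGEST_SHA1, DIGEST_SHA256 } digest_algorithm_t;

    static size_t
    digest_len_for(digest_algorithm_t alg)
    {
      return (alg == DIGEST_SHA1) ? DIGEST_LEN : DIGEST256_LEN;
    }

    static int
    digests_match(const char *a, const char *b, digest_algorithm_t alg)
    {
      return memcmp(a, b, digest_len_for(alg)) == 0;
    }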
diff --git a/changes/feature18483 b/changes/feature18483 deleted file mode 100644 index b3c42e60fd..0000000000 --- a/changes/feature18483 +++ /dev/null @@ -1,4 +0,0 @@ - o Minor features (clients): - - Make clients, onion services, and bridge relays always - use an encrypted begindir connection for directory requests. - Resolves #18483. Patch by "teor". diff --git a/changes/feature19036 b/changes/feature19036 new file mode 100644 index 0000000000..98bcfca515 --- /dev/null +++ b/changes/feature19036 @@ -0,0 +1,4 @@ + o Minor features: + - Make directory authorities write the v3-status-votes file out + to disk earlier in the consensus process, so we have the votes + even if we abort the consensus process below. Resolves ticket 19036. diff --git a/changes/geoip-april2016 b/changes/geoip-april2016 deleted file mode 100644 index 4cd03e556b..0000000000 --- a/changes/geoip-april2016 +++ /dev/null @@ -1,4 +0,0 @@ - o Minor features: - - Update geoip and geoip6 to the April 5 2016 Maxmind GeoLite2 - Country database. - diff --git a/changes/geoip-may2016 b/changes/geoip-may2016 deleted file mode 100644 index 3fd42dce24..0000000000 --- a/changes/geoip-may2016 +++ /dev/null @@ -1,4 +0,0 @@ - o Minor features: - - Update geoip and geoip6 to the May 4 2016 Maxmind GeoLite2 - Country database. - diff --git a/changes/memarea_overflow b/changes/memarea_overflow new file mode 100644 index 0000000000..8fdc38cc09 --- /dev/null +++ b/changes/memarea_overflow @@ -0,0 +1,7 @@ + o Minor bugfixes (pointer arithmetic): + - Fix a bug in memarea_alloc() that could have resulted in remote heap + write access, if Tor had ever passed an unchecked size to + memarea_alloc(). Fortunately, all the sizes we pass to memarea_alloc() + are pre-checked to be less than 128 kilobytes. Fixes bug 19150; bugfix + on 0.2.1.1-alpha. Bug found by Guido Vranken. + diff --git a/changes/rsa_init_bug b/changes/rsa_init_bug new file mode 100644 index 0000000000..6b5fb4f2f9 --- /dev/null +++ b/changes/rsa_init_bug @@ -0,0 +1,7 @@ + o Major bugfixes (key management): + - If OpenSSL fails to generate an RSA key, do not retain a dangling pointer + to the previous (uninitialized) key value. The impact here should be + limited to a difficult-to-trigger crash, if OpenSSL is running an + engine that makes key generation failures possible, or if OpenSSL runs + out of memory. Fixes bug 19152; bugfix on 0.2.1.10-alpha. Found by + Yuan Jochen Kang, Suman Jana, and Baishakhi Ray. diff --git a/configure.ac b/configure.ac index b5e4b2bec5..26f85b5acf 100644 --- a/configure.ac +++ b/configure.ac @@ -137,9 +137,11 @@ case "$host" in esac AC_ARG_ENABLE(gcc-warnings, - AS_HELP_STRING(--enable-gcc-warnings, [enable verbose warnings])) + AS_HELP_STRING(--enable-gcc-warnings, [deprecated alias for enable-fatal-warnings])) +AC_ARG_ENABLE(fatal-warnings, + AS_HELP_STRING(--enable-fatal-warnings, [tell the compiler to treat all warnings as errors.])) AC_ARG_ENABLE(gcc-warnings-advisory, - AS_HELP_STRING(--enable-gcc-warnings-advisory, [enable verbose warnings, excluding -Werror])) + AS_HELP_STRING(--disable-gcc-warnings-advisory, [disable the regular verbose warnings])) dnl Others suggest '/gs /safeseh /nxcompat /dynamicbase' for non-gcc on Windows AC_ARG_ENABLE(gcc-hardening, @@ -426,6 +428,7 @@ AC_CHECK_FUNCS( strtoull \ sysconf \ sysctl \ + truncate \ uname \ usleep \ vasprintf \ @@ -749,6 +752,11 @@ dnl use it with a build of a library. 
all_ldflags_for_check="$TOR_LDFLAGS_zlib $TOR_LDFLAGS_openssl $TOR_LDFLAGS_libevent" all_libs_for_check="$TOR_ZLIB_LIBS $TOR_LIB_MATH $TOR_LIBEVENT_LIBS $TOR_OPENSSL_LIBS $TOR_SYSTEMD_LIBS $TOR_LIB_WS32 $TOR_LIB_GDI $TOR_CAP_LIBS" +CFLAGS_FTRAPV= +CFLAGS_FWRAPV= +CFLAGS_ASAN= +CFLAGS_UBSAN= + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [ #if !defined(__clang__) #error @@ -771,20 +779,88 @@ m4_ifdef([AS_VAR_IF],[ AS_VAR_POPDEF([can_link]) AS_VAR_POPDEF([can_compile]) TOR_CHECK_CFLAGS(-Wstack-protector) - TOR_CHECK_CFLAGS(-fwrapv) TOR_CHECK_CFLAGS(--param ssp-buffer-size=1) if test "$bwin32" = "false"; then TOR_CHECK_CFLAGS(-fPIE) TOR_CHECK_LDFLAGS(-pie, "$all_ldflags_for_check", "$all_libs_for_check") fi + TOR_TRY_COMPILE_WITH_CFLAGS(-ftrapv, also_link, CFLAGS_FTRAPV="-ftrapv", true) + TOR_TRY_COMPILE_WITH_CFLAGS(-fwrapv, also_link, CFLAGS_FWRAPV="-fwrapv", true) + if test "$tor_cv_cflags__ftrapv" = "yes" && test "$tor_can_link__ftrapv" != "yes"; then + AC_MSG_WARN([The compiler supports -ftrapv, but for some reason I was not able to link with -ftrapv. Are you missing run-time support? Run-time hardening will not work as well as it should.]) + fi fi if test "x$enable_expensive_hardening" = "xyes"; then - TOR_CHECK_CFLAGS([-fsanitize=address]) - TOR_CHECK_CFLAGS([-fsanitize=undefined]) - TOR_CHECK_CFLAGS([-fno-omit-frame-pointer]) + if test "$tor_cv_cflags__ftrapv" != "yes"; then + AC_MSG_ERROR([You requested expensive hardening, but the compiler does not seem to support -ftrapv.]) + fi + + TOR_TRY_COMPILE_WITH_CFLAGS([-fsanitize=address], also_link, CFLAGS_ASAN="-fsanitize=address", true) + if test "$tor_cv_cflags__fsanitize_address" = "yes" && test "$tor_can_link__fsanitize_address" != "yes"; then + AC_MSG_ERROR([The compiler supports -fsanitize=address, but for some reason I was not able to link when using it. Are you missing run-time support? With GCC you need libubsan.so, and with Clang you need libclang_rt.ubsan*]) + fi + + TOR_TRY_COMPILE_WITH_CFLAGS([-fsanitize=undefined], also_link, CFLAGS_UBSAN="-fsanitize=undefined", true) + if test "$tor_cv_cflags__fsanitize_address" = "yes" && test "$tor_can_link__fsanitize_address" != "yes"; then + AC_MSG_ERROR([The compiler supports -fsanitize=undefined, but for some reason I was not able to link when using it. Are you missing run-time support? 
With GCC you need libasan.so, and with Clang you need libclang_rt.ubsan*]) + fi + +TOR_CHECK_CFLAGS([-fno-omit-frame-pointer]) +fi + +CFLAGS_BUGTRAP="$CFLAGS_FTRAPV $CFLAGS_ASAN $CFLAGS_UBSAN" +CFLAGS_CONSTTIME="$CFLAGS_FWRAPV" + +mulodi_fixes_ftrapv=no +if test "$have_clang" = "yes"; then + saved_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $CFLAGS_FTRAPV" + AC_MSG_CHECKING([whether clang -ftrapv can link a 64-bit int multiply]) + AC_LINK_IFELSE([ + AC_LANG_SOURCE([[ + #include <stdint.h> + #include <stdlib.h> + int main(int argc, char **argv) + { + int64_t x = ((int64_t)atoi(argv[1])) * (int64_t)atoi(argv[2]) + * (int64_t)atoi(argv[3]); + return x == 9; + } ]])], + [ftrapv_can_link=yes; AC_MSG_RESULT([yes])], + [ftrapv_can_link=no; AC_MSG_RESULT([no])]) + if test "$ftrapv_can_link" = "no"; then + AC_MSG_CHECKING([whether defining __mulodi4 fixes that]) + AC_LINK_IFELSE([ + AC_LANG_SOURCE([[ + #include <stdint.h> + #include <stdlib.h> + int64_t __mulodi4(int64_t a, int64_t b, int *overflow) { + *overflow=0; + return a; + } + int main(int argc, char **argv) + { + int64_t x = ((int64_t)atoi(argv[1])) * (int64_t)atoi(argv[2]) + * (int64_t)atoi(argv[3]); + return x == 9; + } ]])], + [mulodi_fixes_ftrapv=yes; AC_MSG_RESULT([yes])], + [mulodi_fixes_ftrapv=no; AC_MSG_RESULT([no])]) + fi + CFLAGS="$saved_CFLAGS" fi +AM_CONDITIONAL(ADD_MULODI4, test "$mulodi_fixes_ftrapv" = "yes") + +dnl These cflags add bunches of branches, and we haven't been able to +dnl persuade ourselves that they're suitable for code that needs to be +dnl constant time. +AC_SUBST(CFLAGS_BUGTRAP) +dnl These cflags are variant ones sutable for code that needs to be +dnl constant-time. +AC_SUBST(CFLAGS_CONSTTIME) + if test "x$enable_linker_hardening" != "xno"; then TOR_CHECK_LDFLAGS(-z relro -z now, "$all_ldflags_for_check", "$all_libs_for_check") fi @@ -827,6 +903,7 @@ dnl Check for libscrypt if test "x$enable_libscrypt" != "xno"; then AC_CHECK_HEADERS([libscrypt.h]) AC_SEARCH_LIBS(libscrypt_scrypt, [scrypt]) + AC_CHECK_FUNCS([libscrypt_scrypt]) fi dnl ============================================================ @@ -936,64 +1013,57 @@ AC_SUBST(CURVE25519_LIBS) dnl Make sure to enable support for large off_t if available. AC_SYS_LARGEFILE -AC_CHECK_HEADERS( - assert.h \ - errno.h \ - fcntl.h \ - signal.h \ - string.h \ - sys/capability.h \ - sys/fcntl.h \ - sys/stat.h \ - sys/time.h \ - sys/types.h \ - time.h \ - unistd.h - , , AC_MSG_WARN(Some headers were not found, compilation may fail. 
If compilation succeeds, please send your orconfig.h to the developers so we can fix this warning.)) - -dnl These headers are not essential - -AC_CHECK_HEADERS( - arpa/inet.h \ - crt_externs.h \ - execinfo.h \ - grp.h \ - ifaddrs.h \ - inttypes.h \ - limits.h \ - linux/types.h \ - machine/limits.h \ - malloc.h \ - malloc/malloc.h \ - malloc_np.h \ - netdb.h \ - netinet/in.h \ - netinet/in6.h \ - pwd.h \ - readpassphrase.h \ - stdint.h \ - sys/eventfd.h \ - sys/file.h \ - sys/ioctl.h \ - sys/limits.h \ - sys/mman.h \ - sys/param.h \ - sys/prctl.h \ - sys/resource.h \ - sys/select.h \ - sys/socket.h \ - sys/statvfs.h \ - sys/syscall.h \ - sys/sysctl.h \ - sys/syslimits.h \ - sys/time.h \ - sys/types.h \ - sys/un.h \ - sys/utime.h \ - sys/wait.h \ - syslog.h \ - utime.h -) +AC_CHECK_HEADERS([assert.h \ + errno.h \ + fcntl.h \ + signal.h \ + string.h \ + sys/capability.h \ + sys/fcntl.h \ + sys/stat.h \ + sys/time.h \ + sys/types.h \ + time.h \ + unistd.h \ + arpa/inet.h \ + crt_externs.h \ + execinfo.h \ + grp.h \ + ifaddrs.h \ + inttypes.h \ + limits.h \ + linux/types.h \ + machine/limits.h \ + malloc.h \ + malloc/malloc.h \ + malloc_np.h \ + netdb.h \ + netinet/in.h \ + netinet/in6.h \ + pwd.h \ + readpassphrase.h \ + stdint.h \ + sys/eventfd.h \ + sys/file.h \ + sys/ioctl.h \ + sys/limits.h \ + sys/mman.h \ + sys/param.h \ + sys/prctl.h \ + sys/resource.h \ + sys/select.h \ + sys/socket.h \ + sys/statvfs.h \ + sys/syscall.h \ + sys/sysctl.h \ + sys/syslimits.h \ + sys/time.h \ + sys/types.h \ + sys/un.h \ + sys/utime.h \ + sys/wait.h \ + syslog.h \ + utime.h]) AC_CHECK_HEADERS(sys/param.h) @@ -1570,6 +1640,12 @@ else enable_gcc_warnings_advisory=no fi +# Warnings implies advisory-warnings and -Werror. +if test "$enable_gcc_warnings" = "yes"; then + enable_gcc_warnings_advisory=yes + enable_fatal_warnings=yes +fi + # OS X Lion started deprecating the system openssl. Let's just disable # all deprecation warnings on OS X. Also, to potentially make the binary # a little smaller, let's enable dead_strip. @@ -1582,8 +1658,7 @@ esac # Add some more warnings which we use in development but not in the # released versions. (Some relevant gcc versions can't handle these.) -if test "x$enable_gcc_warnings" = "xyes" || - test "x$enable_gcc_warnings_advisory" = "xyes"; then +if test "x$enable_gcc_warnings_advisory" != "xno"; then AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [ #if !defined(__GNUC__) || (__GNUC__ < 4) @@ -1605,7 +1680,6 @@ if test "x$enable_gcc_warnings" = "xyes" || #error #endif])], have_gcc46=yes, have_gcc46=no) - save_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -Wshorten-64-to-32" AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], have_shorten64_flag=yes, @@ -1627,10 +1701,6 @@ if test "x$enable_gcc_warnings" = "xyes" || CFLAGS="$CFLAGS -Wwrite-strings -Wmissing-declarations -Wredundant-decls" CFLAGS="$CFLAGS -Wnested-externs -Wbad-function-cast -Wswitch-enum" - if test "x$enable_gcc_warnings" = "xyes"; then - CFLAGS="$CFLAGS -Werror" - fi - # Disabled, so we can use mallinfo(): -Waggregate-return if test "x$have_gcc4" = "xyes"; then @@ -1667,7 +1737,11 @@ if test "x$enable_gcc_warnings" = "xyes" || CFLAGS="$CFLAGS -Wshorten-64-to-32" fi - + if test "x$enable_fatal_warnings" = "xyes"; then + # I'd like to use TOR_CHECK_CFLAGS here, but I can't, since the + # default autoconf programs are full of errors. 
+ CFLAGS="$CFLAGS -Werror" + fi ##This will break the world on some 64-bit architectures # CFLAGS="$CFLAGS -Winline" diff --git a/doc/HACKING/CodingStandards.md b/doc/HACKING/CodingStandards.md index 4aafa5ddd4..f1c65850a4 100644 --- a/doc/HACKING/CodingStandards.md +++ b/doc/HACKING/CodingStandards.md @@ -3,7 +3,7 @@ Coding conventions for Tor tl;dr: - - Run configure with `--enable-gcc-warnings` + - Run configure with `--enable-fatal-warnings` - Run `make check-spaces` to catch whitespace errors - Document your functions - Write unit tests @@ -21,7 +21,7 @@ preference) Did you remember... - - To build your code while configured with `--enable-gcc-warnings`? + - To build your code while configured with `--enable-fatal-warnings`? - To run `make check-spaces` on your code? - To run `make check-docs` to see whether all new options are on the manpage? @@ -125,10 +125,10 @@ deviations from our C whitespace style. Generally, we use: `puts (x)`. - Function declarations at the start of the line. -We try hard to build without warnings everywhere. In particular, if you're -using gcc, you should invoke the configure script with the option -`--enable-gcc-warnings`. This will give a bunch of extra warning flags to -the compiler, and help us find divergences from our preferred C style. +We try hard to build without warnings everywhere. In particular, if +you're using gcc, you should invoke the configure script with the +option `--enable-fatal-warnings`. This will tell the compiler +to make all warnings into errors. Functions to use; functions not to use -------------------------------------- diff --git a/doc/HACKING/HowToReview.md b/doc/HACKING/HowToReview.md index de7891c923..d53318942f 100644 --- a/doc/HACKING/HowToReview.md +++ b/doc/HACKING/HowToReview.md @@ -15,7 +15,7 @@ Top-level smell-checks (Difficulty: easy) -- Does it compile with `--enable-gcc-warnings`? +- Does it compile with `--enable-fatal-warnings`? - Does `make check-spaces` pass? diff --git a/doc/tor.1.txt b/doc/tor.1.txt index 787223d701..fe73c7fc05 100644 --- a/doc/tor.1.txt +++ b/doc/tor.1.txt @@ -2103,8 +2103,7 @@ on the public Tor network. server. Instead of caching the directory, it generates its own list of good servers, signs it, and sends that to the clients. Unless the clients already have you listed as a trusted directory, you probably do not want - to set this option. Please coordinate with the other admins at - tor-ops@torproject.org if you think you should be a directory. + to set this option. [[V3AuthoritativeDirectory]] **V3AuthoritativeDirectory** **0**|**1**:: When this option is set in addition to **AuthoritativeDirectory**, Tor @@ -2702,6 +2701,61 @@ __DataDirectory__**/lock**:: __DataDirectory__**/keys/***:: Only used by servers. Holds identity keys and onion keys. +__DataDirectory__**/keys/authority_identity_key**:: + A v3 directory authority's master identity key, used to authenticate its + signing key. Tor doesn't use this while it's running. The tor-gencert + program uses this. If you're running an authority, you should keep this + key offline, and not actually put it here. + +__DataDirectory__**/keys/authority_certificate**:: + A v3 directory authority's certificate, which authenticates the authority's + current vote- and consensus-signing key using its master identity key. + Only directory authorities use this file. + +__DataDirectory__**/keys/authority_signing_key**:: + A v3 directory authority's signing key, used to sign votes and consensuses. + Only directory authorities use this file. 
Corresponds to the + **authority_certificate** cert. + +__DataDirectory__**/keys/legacy_certificate**:: + As authority_certificate: used only when V3AuthUseLegacyKey is set. + See documentation for V3AuthUseLegacyKey. + +__DataDirectory__**/keys/legacy_signing_key**:: + As authority_signing_key: used only when V3AuthUseLegacyKey is set. + See documentation for V3AuthUseLegacyKey. + +__DataDirectory__**/keys/secret_id_key**:: + A relay's RSA1024 permanent identity key, including private and public + components. Used to sign router descriptors, and to sign other keys. + +__DataDirectory__**/keys/ed25519_master_id_public_key**:: + The public part of a relay's Ed25519 permanent identity key. + +__DataDirectory__**/keys/ed25519_master_id_secret_key**:: + The private part of a relay's Ed25519 permanent identity key. This key + is used to sign the medium-term ed25519 signing key. This file can be + kept offline, or kept encrypted. If so, Tor will not be able to generate + new signing keys itself; you'll need to use tor --keygen yourself to do + so. + +__DataDirectory__**/keys/ed25519_signing_secret_key**:: + The private and public components of a relay's medium-term Ed25519 signing + key. This key is authenticated by the Ed25519 master key, in turn + authenticates other keys (and router descriptors). + +__DataDirectory__**/keys/ed25519_signing_cert**:: + The certificate which authenticates "ed25519_signing_secret_key" as + having been signed by the Ed25519 master key. + +__DataDirectory__**/keys/secret_onion_key**:: + A relay's RSA1024 short-term onion key. Used to decrypt old-style ("TAP") + circuit extension requests. + +__DataDirectory__**/keys/secret_onion_key_ntor**:: + A relay's Curve25519 short-term onion key. Used to handle modern ("ntor") + circuit extension requests. + __DataDirectory__**/fingerprint**:: Only used by servers. Holds the fingerprint of the server's identity key. @@ -2710,9 +2764,8 @@ __DataDirectory__**/hashed-fingerprint**:: identity key. (That is, the hash of the hash of the identity key.) __DataDirectory__**/v3-status-votes**:: - Only for authoritative directory servers. This file contains status votes - from all the authoritative directory servers and is used to generate the - network consensus document. + Only for v3 authoritative directory servers. This file contains + status votes from all the authoritative directory servers. __DataDirectory__**/unverified-consensus**:: This file contains a network consensus document that has been downloaded, diff --git a/scripts/maint/format_changelog.py b/scripts/maint/format_changelog.py index 5e4c8cac9a..6b588702a7 100755 --- a/scripts/maint/format_changelog.py +++ b/scripts/maint/format_changelog.py @@ -398,16 +398,24 @@ class ChangeLog(object): self.dumpEndOfSections() self.dumpEndOfChangelog() +# Let's turn bugs to html. 
+BUG_PAT = re.compile('(bug|ticket|feature)\s+(\d{4,5})', re.I) +def bug_html(m): + return "%s <a href='https://trac.torproject.org/projects/tor/ticket/%s'>%s</a>" % (m.group(1), m.group(2), m.group(2)) + class HTMLChangeLog(ChangeLog): def __init__(self, *args, **kwargs): ChangeLog.__init__(self, *args, **kwargs) def htmlText(self, graf): + output = [] for line in graf: line = line.rstrip().replace("&","&") line = line.rstrip().replace("<","<").replace(">",">") - sys.stdout.write(line.strip()) - sys.stdout.write(" ") + output.append(line.strip()) + output = " ".join(output) + output = BUG_PAT.sub(bug_html, output) + sys.stdout.write(output) def htmlPar(self, graf): sys.stdout.write("<p>") diff --git a/scripts/maint/updateFallbackDirs.py b/scripts/maint/updateFallbackDirs.py index d27c1449ee..464d8476f0 100755 --- a/scripts/maint/updateFallbackDirs.py +++ b/scripts/maint/updateFallbackDirs.py @@ -1,6 +1,13 @@ #!/usr/bin/python # Usage: scripts/maint/updateFallbackDirs.py > src/or/fallback_dirs.inc +# +# This script should be run from a stable, reliable network connection, +# with no other network activity (and not over tor). +# If this is not possible, please disable: +# PERFORM_IPV4_DIRPORT_CHECKS and PERFORM_IPV6_DIRPORT_CHECKS +# +# Needs dateutil (and potentially other python packages) # Needs stem available in your PYTHONPATH, or just ln -s ../stem/stem . # Optionally uses ipaddress (python 3 builtin) or py2-ipaddress (package) # for netblock analysis, in PYTHONPATH, or just @@ -35,7 +42,7 @@ from stem.descriptor.remote import DescriptorDownloader import logging # INFO tells you why each relay was included or excluded -# WARN tells you about potential misconfigurations +# WARN tells you about potential misconfigurations and relay detail changes logging.basicConfig(level=logging.WARNING) logging.root.name = '' # INFO tells you about each consensus download attempt @@ -51,7 +58,9 @@ try: HAVE_IPADDRESS = True except ImportError: # if this happens, we avoid doing netblock analysis - logging.warning('Unable to import ipaddress, please install py2-ipaddress') + logging.warning('Unable to import ipaddress, please install py2-ipaddress.' 
+ + ' A fallback list will be created, but optional netblock' + + ' analysis will not be performed.') ## Top-Level Configuration @@ -210,8 +219,7 @@ def cleanse_unprintable(raw_string): # Remove all unprintable characters cleansed_string = '' for c in raw_string: - if (c in string.ascii_letters or c in string.digits - or c in string.punctuation or c in string.whitespace): + if c in string.printable: cleansed_string += c return cleansed_string @@ -302,11 +310,11 @@ def write_to_file(str, file_name, max_len): with open(file_name, 'w') as f: f.write(str[0:max_len]) except EnvironmentError, error: - logging.warning('Writing file %s failed: %d: %s'% - (file_name, - error.errno, - error.strerror) - ) + logging.error('Writing file %s failed: %d: %s'% + (file_name, + error.errno, + error.strerror) + ) def read_from_file(file_name, max_len): try: @@ -672,8 +680,8 @@ class Candidate(object): this_ts = parse_ts(h['last']) if (len(h['values']) != h['count']): - logging.warn('Inconsistent value count in %s document for %s' - %(p, which)) + logging.warning('Inconsistent value count in %s document for %s' + %(p, which)) for v in reversed(h['values']): if (this_ts <= newest): agt1 = now - this_ts @@ -691,8 +699,8 @@ class Candidate(object): this_ts -= interval if (this_ts + interval != parse_ts(h['first'])): - logging.warn('Inconsistent time information in %s document for %s' - %(p, which)) + logging.warning('Inconsistent time information in %s document for %s' + %(p, which)) #print json.dumps(generic_history, sort_keys=True, # indent=4, separators=(',', ': ')) @@ -992,10 +1000,10 @@ class Candidate(object): if a.version != b.version: raise Exception('Mismatching IP versions in %s and %s'%(ip_a, ip_b)) if mask_bits > a.max_prefixlen: - logging.warning('Bad IP mask %d for %s and %s'%(mask_bits, ip_a, ip_b)) + logging.error('Bad IP mask %d for %s and %s'%(mask_bits, ip_a, ip_b)) mask_bits = a.max_prefixlen if mask_bits < 0: - logging.warning('Bad IP mask %d for %s and %s'%(mask_bits, ip_a, ip_b)) + logging.error('Bad IP mask %d for %s and %s'%(mask_bits, ip_a, ip_b)) mask_bits = 0 a_net = ipaddress.ip_network('%s/%d'%(ip_a, mask_bits), strict=False) return b in a_net @@ -1061,7 +1069,7 @@ class Candidate(object): downloader = DescriptorDownloader() start = datetime.datetime.utcnow() # some directory mirrors respond to requests in ways that hang python - # sockets, which is why we long this line here + # sockets, which is why we log this line here logging.info('Initiating consensus download from %s (%s:%d).', nickname, dirip, dirport) # there appears to be about 1 second of overhead when comparing stem's @@ -1074,7 +1082,7 @@ class Candidate(object): retries = 0, fall_back_to_authority = False).run() except Exception, stem_error: - logging.debug('Unable to retrieve a consensus from %s: %s', nickname, + logging.info('Unable to retrieve a consensus from %s: %s', nickname, stem_error) status = 'error: "%s"' % (stem_error) level = logging.WARNING @@ -1377,7 +1385,7 @@ class CandidateList(dict): elif in_blacklist: # exclude excluded_count += 1 - logging.debug('Excluding %s: in blacklist.', f._fpr) + logging.info('Excluding %s: in blacklist.', f._fpr) else: if INCLUDE_UNLISTED_ENTRIES: # include @@ -1491,10 +1499,10 @@ class CandidateList(dict): if f.has_ipv6(): ip_list.append(f.ipv6addr) elif not CandidateList.allow(f.dirip, ip_list): - logging.debug('Eliminated %s: already have fallback on IPv4 %s'%( + logging.info('Eliminated %s: already have fallback on IPv4 %s'%( f._fpr, f.dirip)) elif f.has_ipv6() and not 
CandidateList.allow(f.ipv6addr, ip_list): - logging.debug('Eliminated %s: already have fallback on IPv6 %s'%( + logging.info('Eliminated %s: already have fallback on IPv6 %s'%( f._fpr, f.ipv6addr)) original_count = len(self.fallbacks) self.fallbacks = ip_limit_fallbacks @@ -1514,7 +1522,7 @@ class CandidateList(dict): contact_limit_fallbacks.append(f) contact_list.append(f._data['contact']) else: - logging.debug(('Eliminated %s: already have fallback on ' + + logging.info(('Eliminated %s: already have fallback on ' + 'ContactInfo %s')%(f._fpr, f._data['contact'])) original_count = len(self.fallbacks) self.fallbacks = contact_limit_fallbacks @@ -1537,7 +1545,7 @@ class CandidateList(dict): else: # technically, we already have a fallback with this fallback in its # effective family - logging.debug('Eliminated %s: already have fallback in effective ' + + logging.info('Eliminated %s: already have fallback in effective ' + 'family'%(f._fpr)) original_count = len(self.fallbacks) self.fallbacks = family_limit_fallbacks @@ -1877,6 +1885,8 @@ def list_fallbacks(): """ Fetches required onionoo documents and evaluates the fallback directory criteria for each of the relays """ + logging.warning('Downloading and parsing Onionoo data. ' + + 'This may take some time.') # find relays that could be fallbacks candidates = CandidateList() candidates.add_relays() @@ -1931,6 +1941,9 @@ def list_fallbacks(): # can serve a consensus, in favour of one that can't # but given it takes up to 15 seconds to check each consensus download, # the risk is worth it + if PERFORM_IPV4_DIRPORT_CHECKS or PERFORM_IPV6_DIRPORT_CHECKS: + logging.warning('Checking consensus download speeds. ' + + 'This may take some time.') failed_count = candidates.perform_download_consensus_checks(max_count) # analyse and log interesting diversity metrics diff --git a/src/common/compat.c b/src/common/compat.c index af61f024ef..23eaa134cf 100644 --- a/src/common/compat.c +++ b/src/common/compat.c @@ -2923,6 +2923,7 @@ correct_tm(int islocal, const time_t *timep, struct tm *resultbuf, r->tm_mon = 11; r->tm_mday = 31; r->tm_yday = 364; + r->tm_wday = 6; r->tm_hour = 23; r->tm_min = 59; r->tm_sec = 59; @@ -2931,6 +2932,7 @@ correct_tm(int islocal, const time_t *timep, struct tm *resultbuf, r->tm_mon = 0; r->tm_mday = 1; r->tm_yday = 0; + r->tm_wday = 0; r->tm_hour = 0; r->tm_min = 0; r->tm_sec = 0; @@ -2948,6 +2950,7 @@ correct_tm(int islocal, const time_t *timep, struct tm *resultbuf, r->tm_mon = 0; r->tm_mday = 1; r->tm_yday = 0; + r->tm_wday = 0; r->tm_hour = 0; r->tm_min = 0 ; r->tm_sec = 0; @@ -2961,6 +2964,7 @@ correct_tm(int islocal, const time_t *timep, struct tm *resultbuf, r->tm_mon = 11; r->tm_mday = 31; r->tm_yday = 364; + r->tm_wday = 6; r->tm_hour = 23; r->tm_min = 59; r->tm_sec = 59; diff --git a/src/common/crypto.c b/src/common/crypto.c index 65a575ebea..76e262e257 100644 --- a/src/common/crypto.c +++ b/src/common/crypto.c @@ -171,13 +171,9 @@ crypto_log_errors(int severity, const char *doing) if (!msg) msg = "(null)"; if (!lib) lib = "(null)"; if (!func) func = "(null)"; - if (doing) { - tor_log(severity, LD_CRYPTO, "crypto error while %s: %s (in %s:%s)", + if (BUG(!doing)) doing = "(null)"; + tor_log(severity, LD_CRYPTO, "crypto error while %s: %s (in %s:%s)", doing, msg, lib, func); - } else { - tor_log(severity, LD_CRYPTO, "crypto error: %s (in %s:%s)", - msg, lib, func); - } } } @@ -585,8 +581,10 @@ MOCK_IMPL(int, { tor_assert(env); - if (env->key) + if (env->key) { RSA_free(env->key); + env->key = NULL; + } { BIGNUM *e = 
BN_new(); @@ -942,6 +940,10 @@ crypto_pk_copy_full(crypto_pk_t *env) new_key = RSAPublicKey_dup(env->key); } if (!new_key) { + /* LCOV_EXCL_START + * + * We can't cause RSA*Key_dup() to fail, so we can't really test this. + */ log_err(LD_CRYPTO, "Unable to duplicate a %s key: openssl failed.", privatekey?"private":"public"); crypto_log_errors(LOG_ERR, @@ -949,6 +951,7 @@ crypto_pk_copy_full(crypto_pk_t *env) "Duplicating a public key"); tor_fragile_assert(); return NULL; + /* LCOV_EXCL_STOP */ } return crypto_new_pk_from_rsa_(new_key); @@ -1699,8 +1702,10 @@ crypto_digest_algorithm_get_name(digest_algorithm_t alg) case DIGEST_SHA3_512: return "sha3-512"; default: + // LCOV_EXCL_START tor_fragile_assert(); return "??unknown_digest??"; + // LCOV_EXCL_STOP } } @@ -1724,7 +1729,7 @@ crypto_digest_algorithm_parse_name(const char *name) } /** Given an algorithm, return the digest length in bytes. */ -static inline size_t +size_t crypto_digest_algorithm_get_length(digest_algorithm_t alg) { switch (alg) { @@ -1790,16 +1795,46 @@ crypto_digest_alloc_bytes(digest_algorithm_t alg) #undef STRUCT_FIELD_SIZE } +/** + * Internal function: create and return a new digest object for 'algorithm'. + * Does not typecheck the algorithm. + */ +static crypto_digest_t * +crypto_digest_new_internal(digest_algorithm_t algorithm) +{ + crypto_digest_t *r = tor_malloc(crypto_digest_alloc_bytes(algorithm)); + r->algorithm = algorithm; + + switch (algorithm) + { + case DIGEST_SHA1: + SHA1_Init(&r->d.sha1); + break; + case DIGEST_SHA256: + SHA256_Init(&r->d.sha2); + break; + case DIGEST_SHA512: + SHA512_Init(&r->d.sha512); + break; + case DIGEST_SHA3_256: + keccak_digest_init(&r->d.sha3, 256); + break; + case DIGEST_SHA3_512: + keccak_digest_init(&r->d.sha3, 512); + break; + default: + tor_assert_unreached(); + } + + return r; +} + /** Allocate and return a new digest object to compute SHA1 digests. */ crypto_digest_t * crypto_digest_new(void) { - crypto_digest_t *r; - r = tor_malloc(crypto_digest_alloc_bytes(DIGEST_SHA1)); - SHA1_Init(&r->d.sha1); - r->algorithm = DIGEST_SHA1; - return r; + return crypto_digest_new_internal(DIGEST_SHA1); } /** Allocate and return a new digest object to compute 256-bit digests @@ -1807,15 +1842,8 @@ crypto_digest_new(void) crypto_digest_t * crypto_digest256_new(digest_algorithm_t algorithm) { - crypto_digest_t *r; tor_assert(algorithm == DIGEST_SHA256 || algorithm == DIGEST_SHA3_256); - r = tor_malloc(crypto_digest_alloc_bytes(algorithm)); - if (algorithm == DIGEST_SHA256) - SHA256_Init(&r->d.sha2); - else - keccak_digest_init(&r->d.sha3, 256); - r->algorithm = algorithm; - return r; + return crypto_digest_new_internal(algorithm); } /** Allocate and return a new digest object to compute 512-bit digests @@ -1823,15 +1851,8 @@ crypto_digest256_new(digest_algorithm_t algorithm) crypto_digest_t * crypto_digest512_new(digest_algorithm_t algorithm) { - crypto_digest_t *r; tor_assert(algorithm == DIGEST_SHA512 || algorithm == DIGEST_SHA3_512); - r = tor_malloc(crypto_digest_alloc_bytes(algorithm)); - if (algorithm == DIGEST_SHA512) - SHA512_Init(&r->d.sha512); - else - keccak_digest_init(&r->d.sha3, 512); - r->algorithm = algorithm; - return r; + return crypto_digest_new_internal(algorithm); } /** Deallocate a digest object. 
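(For reference, typical use of the digest objects refactored above -- a minimal sketch only, assuming crypto.h's DIGEST256_LEN constant; digest_buf_sha256 is an illustrative helper name, not a function added by this patch:)

    /* Sketch: compute the SHA-256 digest of a caller-supplied buffer using
     * the streaming digest API shown above. */
    static void
    digest_buf_sha256(const char *buf, size_t buf_len,
                      char *digest_out /* DIGEST256_LEN bytes */)
    {
      crypto_digest_t *d = crypto_digest256_new(DIGEST_SHA256);
      crypto_digest_add_bytes(d, buf, buf_len);                /* absorb input */
      crypto_digest_get_digest(d, digest_out, DIGEST256_LEN);  /* finalize */
      crypto_digest_free(d);
    }
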
@@ -1874,8 +1895,10 @@ crypto_digest_add_bytes(crypto_digest_t *digest, const char *data, keccak_digest_update(&digest->d.sha3, (const uint8_t *)data, len); break; default: + /* LCOV_EXCL_START */ tor_fragile_assert(); break; + /* LCOV_EXCL_STOP */ } } @@ -1917,10 +1940,10 @@ crypto_digest_get_digest(crypto_digest_t *digest, //LCOV_EXCL_START case DIGEST_SHA3_256: /* FALLSTHROUGH */ case DIGEST_SHA3_512: - log_warn(LD_BUG, "Handling unexpected algorithm %d", digest->algorithm); - tor_assert(0); /* This is fatal, because it should never happen. */ default: - tor_assert(0); /* Unreachable. */ + log_warn(LD_BUG, "Handling unexpected algorithm %d", digest->algorithm); + /* This is fatal, because it should never happen. */ + tor_assert_unreached(); break; //LCOV_EXCL_STOP } @@ -1981,27 +2004,7 @@ crypto_digest_smartlist_prefix(char *digest_out, size_t len_out, const char *append, digest_algorithm_t alg) { - crypto_digest_t *d = NULL; - switch (alg) { - case DIGEST_SHA1: - d = crypto_digest_new(); - break; - case DIGEST_SHA256: /* FALLSTHROUGH */ - case DIGEST_SHA3_256: - d = crypto_digest256_new(alg); - break; - case DIGEST_SHA512: /* FALLSTHROUGH */ - case DIGEST_SHA3_512: - d = crypto_digest512_new(alg); - break; - default: - log_warn(LD_BUG, "Called with unknown algorithm %d", alg); - /* If fragile_assert is not enabled, wipe output and return - * without running any calculations */ - memwipe(digest_out, 0xff, len_out); - tor_fragile_assert(); - goto free; - } + crypto_digest_t *d = crypto_digest_new_internal(alg); if (prepend) crypto_digest_add_bytes(d, prepend, strlen(prepend)); SMARTLIST_FOREACH(lst, const char *, cp, @@ -2009,8 +2012,6 @@ crypto_digest_smartlist_prefix(char *digest_out, size_t len_out, if (append) crypto_digest_add_bytes(d, append, strlen(append)); crypto_digest_get_digest(d, digest_out, len_out); - - free: crypto_digest_free(d); } @@ -2169,9 +2170,14 @@ crypto_set_tls_dh_prime(void) int r; /* If the space is occupied, free the previous TLS DH prime */ - if (dh_param_p_tls) { + if (BUG(dh_param_p_tls)) { + /* LCOV_EXCL_START + * + * We shouldn't be calling this twice. + */ BN_clear_free(dh_param_p_tls); dh_param_p_tls = NULL; + /* LCOV_EXCL_STOP */ } tls_prime = BN_new(); @@ -2203,8 +2209,8 @@ init_dh_param(void) { BIGNUM *circuit_dh_prime; int r; - if (dh_param_p && dh_param_g) - return; + if (BUG(dh_param_p && dh_param_g)) + return; // LCOV_EXCL_LINE This function isn't supposed to be called twice. circuit_dh_prime = BN_new(); tor_assert(circuit_dh_prime); @@ -2269,10 +2275,13 @@ crypto_dh_new(int dh_type) return res; err: + /* LCOV_EXCL_START + * This error condition is only reached when an allocation fails */ crypto_log_errors(LOG_WARN, "creating DH object"); if (res->dh) DH_free(res->dh); /* frees p and g too */ tor_free(res); return NULL; + /* LCOV_EXCL_STOP */ } /** Return a copy of <b>dh</b>, sharing its internal state. */ @@ -2304,10 +2313,15 @@ crypto_dh_generate_public(crypto_dh_t *dh) { again: if (!DH_generate_key(dh->dh)) { + /* LCOV_EXCL_START + * To test this we would need some way to tell openssl to break DH. */ crypto_log_errors(LOG_WARN, "generating DH key"); return -1; + /* LCOV_EXCL_STOP */ } if (tor_check_dh_key(LOG_WARN, dh->dh->pub_key)<0) { + /* LCOV_EXCL_START + * If this happens, then openssl's DH implementation is busted. */ log_warn(LD_CRYPTO, "Weird! Our own DH key was invalid. I guess once-in-" "the-universe chances really do happen. Trying again."); /* Free and clear the keys, so OpenSSL will actually try again. 
*/ @@ -2315,6 +2329,7 @@ crypto_dh_generate_public(crypto_dh_t *dh) BN_clear_free(dh->dh->priv_key); dh->dh->pub_key = dh->dh->priv_key = NULL; goto again; + /* LCOV_EXCL_STOP */ } return 0; } @@ -2361,8 +2376,8 @@ tor_check_dh_key(int severity, BIGNUM *bn) tor_assert(bn); x = BN_new(); tor_assert(x); - if (!dh_param_p) - init_dh_param(); + if (BUG(!dh_param_p)) + init_dh_param(); //LCOV_EXCL_LINE we already checked whether we did this. BN_set_word(x, 1); if (BN_cmp(bn,x)<=0) { log_fn(severity, LD_CRYPTO, "DH key must be at least 2."); @@ -2571,6 +2586,11 @@ crypto_seed_weak_rng(tor_weak_rng_t *rng) tor_init_weak_random(rng, seed); } +#ifdef TOR_UNIT_TESTS +int break_strongest_rng_syscall = 0; +int break_strongest_rng_fallback = 0; +#endif + /** Try to get <b>out_len</b> bytes of the strongest entropy we can generate, * via system calls, storing it into <b>out</b>. Return 0 on success, -1 on * failure. A maximum request size of 256 bytes is imposed. @@ -2580,6 +2600,11 @@ crypto_strongest_rand_syscall(uint8_t *out, size_t out_len) { tor_assert(out_len <= MAX_STRONGEST_RAND_SIZE); +#ifdef TOR_UNIT_TESTS + if (break_strongest_rng_syscall) + return -1; +#endif + #if defined(_WIN32) static int provider_set = 0; static HCRYPTPROV provider; @@ -2629,6 +2654,7 @@ crypto_strongest_rand_syscall(uint8_t *out, size_t out_len) } while (ret == -1 && ((errno == EINTR) ||(errno == EAGAIN))); if (PREDICT_UNLIKELY(ret == -1)) { + /* LCOV_EXCL_START we can't actually make the syscall fail in testing. */ tor_assert(errno != EAGAIN); tor_assert(errno != EINTR); @@ -2636,6 +2662,7 @@ crypto_strongest_rand_syscall(uint8_t *out, size_t out_len) log_warn(LD_CRYPTO, "Can't get entropy from getrandom()."); getrandom_works = 0; /* Don't bother trying again. */ return -1; + /* LCOV_EXCL_STOP */ } tor_assert(ret == (long)out_len); @@ -2664,6 +2691,11 @@ crypto_strongest_rand_syscall(uint8_t *out, size_t out_len) static int crypto_strongest_rand_fallback(uint8_t *out, size_t out_len) { +#ifdef TOR_UNIT_TESTS + if (break_strongest_rng_fallback) + return -1; +#endif + #ifdef _WIN32 /* Windows exclusively uses crypto_strongest_rand_syscall(). */ (void)out; @@ -2684,10 +2716,13 @@ crypto_strongest_rand_fallback(uint8_t *out, size_t out_len) n = read_all(fd, (char*)out, out_len, 0); close(fd); if (n != out_len) { + /* LCOV_EXCL_START + * We can't make /dev/foorandom actually fail. */ log_warn(LD_CRYPTO, "Error reading from entropy source (read only %lu bytes).", (unsigned long)n); return -1; + /* LCOV_EXCL_STOP */ } return 0; @@ -2701,7 +2736,7 @@ crypto_strongest_rand_fallback(uint8_t *out, size_t out_len) * storing it into <b>out</b>. Return 0 on success, -1 on failure. A maximum * request size of 256 bytes is imposed. */ -static int +STATIC int crypto_strongest_rand_raw(uint8_t *out, size_t out_len) { static const size_t sanity_min_size = 16; @@ -2735,13 +2770,17 @@ crypto_strongest_rand_raw(uint8_t *out, size_t out_len) return 0; } - /* We tried max_attempts times to fill a buffer >= 128 bits long, + /* LCOV_EXCL_START + * + * We tried max_attempts times to fill a buffer >= 128 bits long, * and each time it returned all '0's. Either the system entropy * source is busted, or the user should go out and buy a ticket to * every lottery on the planet. */ log_warn(LD_CRYPTO, "Strong OS entropy returned all zero buffer."); + return -1; + /* LCOV_EXCL_STOP */ } /** Try to get <b>out_len</b> bytes of the strongest entropy we can generate, @@ -2796,7 +2835,7 @@ crypto_seed_rng(void) * functions. 
If one succeeds, we'll accept the RNG as seeded. */ rand_poll_ok = RAND_poll(); if (rand_poll_ok == 0) - log_warn(LD_CRYPTO, "RAND_poll() failed."); + log_warn(LD_CRYPTO, "RAND_poll() failed."); // LCOV_EXCL_LINE load_entropy_ok = !crypto_strongest_rand_raw(buf, sizeof(buf)); if (load_entropy_ok) { diff --git a/src/common/crypto.h b/src/common/crypto.h index 682c4e3253..ff38cca0da 100644 --- a/src/common/crypto.h +++ b/src/common/crypto.h @@ -233,6 +233,7 @@ void crypto_digest_smartlist(char *digest_out, size_t len_out, const struct smartlist_t *lst, const char *append, digest_algorithm_t alg); const char *crypto_digest_algorithm_get_name(digest_algorithm_t alg); +size_t crypto_digest_algorithm_get_length(digest_algorithm_t alg); int crypto_digest_algorithm_parse_name(const char *name); crypto_digest_t *crypto_digest_new(void); crypto_digest_t *crypto_digest256_new(digest_algorithm_t algorithm); @@ -317,6 +318,7 @@ void crypto_add_spaces_to_fp(char *out, size_t outlen, const char *in); #ifdef CRYPTO_PRIVATE STATIC int crypto_force_rand_ssleay(void); +STATIC int crypto_strongest_rand_raw(uint8_t *out, size_t out_len); #endif #endif diff --git a/src/common/crypto_curve25519.c b/src/common/crypto_curve25519.c index 57c878b79a..58ec923638 100644 --- a/src/common/crypto_curve25519.c +++ b/src/common/crypto_curve25519.c @@ -65,8 +65,10 @@ STATIC int curve25519_basepoint_impl(uint8_t *output, const uint8_t *secret) { int r = 0; - if (PREDICT_UNLIKELY(curve25519_use_ed == -1)) { + if (BUG(curve25519_use_ed == -1)) { + /* LCOV_EXCL_START - Only reached if we forgot to call curve25519_init() */ pick_curve25519_basepoint_impl(); + /* LCOV_EXCL_STOP */ } /* TODO: Someone should benchmark curved25519_scalarmult_basepoint versus @@ -290,10 +292,13 @@ pick_curve25519_basepoint_impl(void) if (curve25519_basepoint_spot_check() == 0) return; - log_warn(LD_CRYPTO, "The ed25519-based curve25519 basepoint " + /* LCOV_EXCL_START + * only reachable if our basepoint implementation broken */ + log_warn(LD_BUG|LD_CRYPTO, "The ed25519-based curve25519 basepoint " "multiplication seems broken; using the curve25519 " "implementation."); curve25519_use_ed = 0; + /* LCOV_EXCL_STOP */ } /** Initialize the curve25519 implementations. This is necessary if you're diff --git a/src/common/crypto_ed25519.c b/src/common/crypto_ed25519.c index ea2d8e3892..84c3eece6d 100644 --- a/src/common/crypto_ed25519.c +++ b/src/common/crypto_ed25519.c @@ -94,8 +94,8 @@ static const ed25519_impl_t *ed25519_impl = NULL; static inline const ed25519_impl_t * get_ed_impl(void) { - if (PREDICT_UNLIKELY(ed25519_impl == NULL)) { - pick_ed25519_impl(); + if (BUG(ed25519_impl == NULL)) { + pick_ed25519_impl(); // LCOV_EXCL_LINE - We always call ed25519_init(). } return ed25519_impl; } @@ -259,11 +259,11 @@ ed25519_checksig_batch(int *okay_out, int *oks; int all_ok; - ms = tor_malloc(sizeof(uint8_t*)*n_checkable); - lens = tor_malloc(sizeof(size_t)*n_checkable); - pks = tor_malloc(sizeof(uint8_t*)*n_checkable); - sigs = tor_malloc(sizeof(uint8_t*)*n_checkable); - oks = okay_out ? okay_out : tor_malloc(sizeof(int)*n_checkable); + ms = tor_calloc(n_checkable, sizeof(uint8_t*)); + lens = tor_calloc(n_checkable, sizeof(size_t)); + pks = tor_calloc(n_checkable, sizeof(uint8_t*)); + sigs = tor_calloc(n_checkable, sizeof(uint8_t*)); + oks = okay_out ? 
okay_out : tor_calloc(n_checkable, sizeof(int)); for (i = 0; i < n_checkable; ++i) { ms[i] = checkable[i].msg; @@ -433,6 +433,7 @@ ed25519_seckey_read_from_file(ed25519_secret_key_t *seckey_out, errno = EINVAL; } + tor_free(*tag_out); return -1; } @@ -472,6 +473,7 @@ ed25519_pubkey_read_from_file(ed25519_public_key_t *pubkey_out, errno = EINVAL; } + tor_free(*tag_out); return -1; } @@ -594,9 +596,12 @@ pick_ed25519_impl(void) if (ed25519_impl_spot_check() == 0) return; + /* LCOV_EXCL_START + * unreachable unless ed25519_donna is broken */ log_warn(LD_CRYPTO, "The Ed25519-donna implementation seems broken; using " "the ref10 implementation."); ed25519_impl = &impl_ref10; + /* LCOV_EXCL_STOP */ } /* Initialize the Ed25519 implementation. This is neccessary if you're diff --git a/src/common/crypto_pwbox.c b/src/common/crypto_pwbox.c index 819dc0c39d..31e37c007d 100644 --- a/src/common/crypto_pwbox.c +++ b/src/common/crypto_pwbox.c @@ -61,7 +61,7 @@ crypto_pwbox(uint8_t **out, size_t *outlen_out, pwbox_encoded_getarray_skey_header(enc), S2K_MAXLEN, s2k_flags); - if (spec_len < 0 || spec_len > S2K_MAXLEN) + if (BUG(spec_len < 0 || spec_len > S2K_MAXLEN)) goto err; pwbox_encoded_setlen_skey_header(enc, spec_len); enc->header_len = spec_len; @@ -76,10 +76,11 @@ crypto_pwbox(uint8_t **out, size_t *outlen_out, /* Now that all the data is in position, derive some keys, encrypt, and * digest */ - if (secret_to_key_derivekey(keys, sizeof(keys), + const int s2k_rv = secret_to_key_derivekey(keys, sizeof(keys), pwbox_encoded_getarray_skey_header(enc), spec_len, - secret, secret_len) < 0) + secret, secret_len); + if (BUG(s2k_rv < 0)) goto err; cipher = crypto_cipher_new_with_iv((char*)keys, (char*)enc->iv); @@ -87,11 +88,11 @@ crypto_pwbox(uint8_t **out, size_t *outlen_out, crypto_cipher_free(cipher); result_len = pwbox_encoded_encoded_len(enc); - if (result_len < 0) + if (BUG(result_len < 0)) goto err; result = tor_malloc(result_len); enc_len = pwbox_encoded_encode(result, result_len, enc); - if (enc_len < 0) + if (BUG(enc_len < 0)) goto err; tor_assert(enc_len == result_len); @@ -107,9 +108,24 @@ crypto_pwbox(uint8_t **out, size_t *outlen_out, goto out; err: + /* LCOV_EXCL_START + + This error case is often unreachable if we're correctly coded, unless + somebody adds a new error case somewhere, or unless you're building + without scrypto support. + + - make_specifier can't fail, unless S2K_MAX_LEN is too short. + - secret_to_key_derivekey can't really fail unless we're missing + scrypt, or the underlying function fails, or we pass it a bogus + algorithm or parameters. + - pwbox_encoded_encoded_len can't fail unless we're using trunnel + incorrectly. + - pwbox_encoded_encode can't fail unless we're using trunnel wrong, + or it's buggy. + */ tor_free(result); rv = -1; - + /* LCOV_EXCL_STOP */ out: pwbox_encoded_free(enc); memwipe(keys, 0, sizeof(keys)); diff --git a/src/common/crypto_s2k.c b/src/common/crypto_s2k.c index 149c39344c..5dbd2ad91f 100644 --- a/src/common/crypto_s2k.c +++ b/src/common/crypto_s2k.c @@ -19,7 +19,7 @@ #include <openssl/evp.h> -#ifdef HAVE_LIBSCRYPT_H +#if defined(HAVE_LIBSCRYPT_H) && defined(HAVE_LIBSCRYPT_SCRYPT) #define HAVE_SCRYPT #include <libscrypt.h> #endif @@ -170,7 +170,7 @@ make_specifier(uint8_t *spec_out, uint8_t type, unsigned flags) spec_out[SCRYPT_SPEC_LEN-1] = (3u << 4) | (1u << 0); break; default: - tor_fragile_assert(); + tor_fragile_assert(); // LCOV_EXCL_LINE - we should have returned above. 
return S2K_BAD_ALGORITHM; } diff --git a/src/common/di_ops.c b/src/common/di_ops.c index 5dfe828066..e671af6fac 100644 --- a/src/common/di_ops.c +++ b/src/common/di_ops.c @@ -226,3 +226,48 @@ safe_mem_is_zero(const void *mem, size_t sz) return 1 & ((total - 1) >> 8); } +/** Time-invariant 64-bit greater-than; works on two integers in the range + * (0,INT64_MAX). */ +#if SIZEOF_VOID_P == 8 +#define gt_i64_timei(a,b) ((a) > (b)) +#else +static inline int +gt_i64_timei(uint64_t a, uint64_t b) +{ + int64_t diff = (int64_t) (b - a); + int res = diff >> 63; + return res & 1; +} +#endif + +/** + * Given an array of list of <b>n_entries</b> uint64_t values, whose sum is + * <b>total</b>, find the first i such that the total of all elements 0...i is + * greater than rand_val. + * + * Try to perform this operation in a constant-time way. + */ +int +select_array_member_cumulative_timei(const uint64_t *entries, int n_entries, + uint64_t total, uint64_t rand_val) +{ + int i, i_chosen=-1, n_chosen=0; + uint64_t total_so_far = 0; + + for (i = 0; i < n_entries; ++i) { + total_so_far += entries[i]; + if (gt_i64_timei(total_so_far, rand_val)) { + i_chosen = i; + n_chosen++; + /* Set rand_val to INT64_MAX rather than stopping the loop. This way, + * the time we spend in the loop does not leak which element we chose. */ + rand_val = INT64_MAX; + } + } + tor_assert(total_so_far == total); + tor_assert(n_chosen == 1); + tor_assert(i_chosen >= 0); + tor_assert(i_chosen < n_entries); + + return i_chosen; +} diff --git a/src/common/di_ops.h b/src/common/di_ops.h index 6e77b5cfd7..f1050a00db 100644 --- a/src/common/di_ops.h +++ b/src/common/di_ops.h @@ -42,6 +42,8 @@ void dimap_add_entry(di_digest256_map_t **map, const uint8_t *key, void *val); void *dimap_search(const di_digest256_map_t *map, const uint8_t *key, void *dflt_val); +int select_array_member_cumulative_timei(const uint64_t *entries, int n_entries, + uint64_t total, uint64_t rand_val); #endif diff --git a/src/common/include.am b/src/common/include.am index f7c486d24a..222afe0291 100644 --- a/src/common/include.am +++ b/src/common/include.am @@ -1,12 +1,14 @@ noinst_LIBRARIES += \ src/common/libor.a \ + src/common/libor-ctime.a \ src/common/libor-crypto.a \ src/common/libor-event.a if UNITTESTS_ENABLED noinst_LIBRARIES += \ src/common/libor-testing.a \ + src/common/libor-ctime-testing.a \ src/common/libor-crypto-testing.a \ src/common/libor-event-testing.a endif @@ -27,12 +29,14 @@ src_common_libcurve25519_donna_a_CFLAGS= if BUILD_CURVE25519_DONNA src_common_libcurve25519_donna_a_SOURCES=\ src/ext/curve25519_donna/curve25519-donna.c +# See bug 13538 -- this code is known to have signed overflow issues. 
src_common_libcurve25519_donna_a_CFLAGS+=\ - @F_OMIT_FRAME_POINTER@ + @F_OMIT_FRAME_POINTER@ @CFLAGS_CONSTTIME@ noinst_LIBRARIES+=src/common/libcurve25519_donna.a LIBDONNA=src/common/libcurve25519_donna.a else if BUILD_CURVE25519_DONNA_C64 +src_common_libcurve25519_donna_a_CFLAGS+=@CFLAGS_CONSTTIME@ src_common_libcurve25519_donna_a_SOURCES=\ src/ext/curve25519_donna/curve25519-donna-c64.c noinst_LIBRARIES+=src/common/libcurve25519_donna.a @@ -58,13 +62,28 @@ else readpassphrase_source= endif -LIBOR_A_SOURCES = \ +if ADD_MULODI4 +mulodi4_source=src/ext/mulodi/mulodi4.c +else +mulodi4_source= +endif + +LIBOR_CTIME_A_SRC = \ + $(mulodi4_source) \ + src/ext/csiphash.c \ + src/common/di_ops.c + +src_common_libor_ctime_a_SOURCES = $(LIBOR_CTIME_A_SRC) +src_common_libor_ctime_testing_a_SOURCES = $(LIBOR_CTIME_A_SRC) +src_common_libor_ctime_a_CFLAGS = @CFLAGS_CONSTTIME@ +src_common_libor_ctime_testing_a_CFLAGS = @CFLAGS_CONSTTIME@ $(TEST_CFLAGS) + +LIBOR_A_SRC = \ src/common/address.c \ src/common/backtrace.c \ src/common/compat.c \ src/common/compat_threads.c \ src/common/container.c \ - src/common/di_ops.c \ src/common/log.c \ src/common/memarea.c \ src/common/pubsub.c \ @@ -74,7 +93,6 @@ LIBOR_A_SOURCES = \ src/common/util_process.c \ src/common/sandbox.c \ src/common/workqueue.c \ - src/ext/csiphash.c \ $(libor_extra_source) \ $(threads_impl_source) \ $(readpassphrase_source) @@ -82,7 +100,7 @@ LIBOR_A_SOURCES = \ src/common/src_common_libor_testing_a-log.$(OBJEXT) \ src/common/log.$(OBJEXT): micro-revision.i -LIBOR_CRYPTO_A_SOURCES = \ +LIBOR_CRYPTO_A_SRC = \ src/common/aes.c \ src/common/crypto.c \ src/common/crypto_pwbox.c \ @@ -93,19 +111,19 @@ LIBOR_CRYPTO_A_SOURCES = \ src/common/crypto_curve25519.c \ src/common/crypto_ed25519.c -LIBOR_EVENT_A_SOURCES = \ +LIBOR_EVENT_A_SRC = \ src/common/compat_libevent.c \ src/common/procmon.c \ src/common/timers.c \ src/ext/timeouts/timeout.c -src_common_libor_a_SOURCES = $(LIBOR_A_SOURCES) -src_common_libor_crypto_a_SOURCES = $(LIBOR_CRYPTO_A_SOURCES) -src_common_libor_event_a_SOURCES = $(LIBOR_EVENT_A_SOURCES) +src_common_libor_a_SOURCES = $(LIBOR_A_SRC) +src_common_libor_crypto_a_SOURCES = $(LIBOR_CRYPTO_A_SRC) +src_common_libor_event_a_SOURCES = $(LIBOR_EVENT_A_SRC) -src_common_libor_testing_a_SOURCES = $(LIBOR_A_SOURCES) -src_common_libor_crypto_testing_a_SOURCES = $(LIBOR_CRYPTO_A_SOURCES) -src_common_libor_event_testing_a_SOURCES = $(LIBOR_EVENT_A_SOURCES) +src_common_libor_testing_a_SOURCES = $(LIBOR_A_SRC) +src_common_libor_crypto_testing_a_SOURCES = $(LIBOR_CRYPTO_A_SRC) +src_common_libor_event_testing_a_SOURCES = $(LIBOR_EVENT_A_SRC) src_common_libor_testing_a_CPPFLAGS = $(AM_CPPFLAGS) $(TEST_CPPFLAGS) src_common_libor_crypto_testing_a_CPPFLAGS = $(AM_CPPFLAGS) $(TEST_CPPFLAGS) diff --git a/src/common/memarea.c b/src/common/memarea.c index 61117288c3..7d16b702e3 100644 --- a/src/common/memarea.c +++ b/src/common/memarea.c @@ -83,8 +83,7 @@ typedef struct memarea_chunk_t { struct memarea_chunk_t *next_chunk; size_t mem_size; /**< How much RAM is available in mem, total? */ char *next_mem; /**< Next position in mem to allocate data at. If it's - * greater than or equal to mem+mem_size, this chunk is - * full. */ + * equal to mem+mem_size, this chunk is full. */ #ifdef USE_ALIGNED_ATTRIBUTE /** Actual content of the memory chunk. 
*/ char mem[FLEXIBLE_ARRAY_MEMBER] __attribute__((aligned(MEMAREA_ALIGN))); @@ -205,7 +204,10 @@ memarea_alloc(memarea_t *area, size_t sz) tor_assert(sz < SIZE_T_CEILING); if (sz == 0) sz = 1; - if (chunk->next_mem+sz > chunk->U_MEM+chunk->mem_size) { + tor_assert(chunk->next_mem <= chunk->U_MEM + chunk->mem_size); + const size_t space_remaining = + (chunk->U_MEM + chunk->mem_size) - chunk->next_mem; + if (sz > space_remaining) { if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) { /* This allocation is too big. Stick it in a special chunk, and put * that chunk second in the list. */ diff --git a/src/common/pubsub.c b/src/common/pubsub.c index 98ec3f81cc..b3faf40e00 100644 --- a/src/common/pubsub.c +++ b/src/common/pubsub.c @@ -48,7 +48,7 @@ pubsub_subscribe_(pubsub_topic_t *topic, if (subscribe_flags & SUBSCRIBE_ATSTART) { tor_assert(topic->n_events_fired == 0); } - pubsub_subscriber_t *r = tor_malloc_zero(sizeof(r)); + pubsub_subscriber_t *r = tor_malloc_zero(sizeof(*r)); r->priority = priority; r->subscriber_flags = subscribe_flags; r->fn = fn; diff --git a/src/common/util.c b/src/common/util.c index fa2953cc30..78afe5954f 100644 --- a/src/common/util.c +++ b/src/common/util.c @@ -513,21 +513,6 @@ round_uint64_to_next_multiple_of(uint64_t number, uint64_t divisor) return number; } -/** Return the lowest x in [INT64_MIN, INT64_MAX] such that x is at least - * <b>number</b>, and x modulo <b>divisor</b> == 0. If no such x can be - * expressed as an int64_t, return INT64_MAX */ -int64_t -round_int64_to_next_multiple_of(int64_t number, int64_t divisor) -{ - tor_assert(divisor > 0); - if (INT64_MAX - divisor + 1 < number) - return INT64_MAX; - if (number >= 0) - number += divisor - 1; - number -= number % divisor; - return number; -} - /** Transform a random value <b>p</b> from the uniform distribution in * [0.0, 1.0[ into a Laplace distributed value with location parameter * <b>mu</b> and scale parameter <b>b</b>. Truncate the final result @@ -1706,6 +1691,7 @@ parse_iso_time_(const char *cp, time_t *t, int strict) st_tm.tm_hour = hour; st_tm.tm_min = minute; st_tm.tm_sec = second; + st_tm.tm_wday = 0; /* Should be ignored. */ if (st_tm.tm_year < 70) { char *esc = esc_for_log(cp); @@ -1773,6 +1759,7 @@ parse_http_time(const char *date, struct tm *tm) tm->tm_hour = (int)tm_hour; tm->tm_min = (int)tm_min; tm->tm_sec = (int)tm_sec; + tm->tm_wday = 0; /* Leave this unset. */ month[3] = '\0'; /* Okay, now decode the month. */ @@ -3060,7 +3047,7 @@ digit_to_num(char d) * success, store the result in <b>out</b>, advance bufp to the next * character, and return 0. On failure, return -1. */ static int -scan_unsigned(const char **bufp, unsigned long *out, int width, int base) +scan_unsigned(const char **bufp, unsigned long *out, int width, unsigned base) { unsigned long result = 0; int scanned_so_far = 0; @@ -3073,7 +3060,7 @@ scan_unsigned(const char **bufp, unsigned long *out, int width, int base) while (**bufp && (hex?TOR_ISXDIGIT(**bufp):TOR_ISDIGIT(**bufp)) && scanned_so_far < width) { - int digit = hex?hex_decode_digit(*(*bufp)++):digit_to_num(*(*bufp)++); + unsigned digit = hex?hex_decode_digit(*(*bufp)++):digit_to_num(*(*bufp)++); // Check for overflow beforehand, without actually causing any overflow // This preserves functionality on compilers that don't wrap overflow // (i.e. 
that trap or optimise away overflow) @@ -3119,14 +3106,15 @@ scan_signed(const char **bufp, long *out, int width) if (neg && result > 0) { if (result > ((unsigned long)LONG_MAX) + 1) return -1; /* Underflow */ - // Avoid overflow on the cast to signed long when result is LONG_MIN - // by subtracting 1 from the unsigned long positive value, - // then, after it has been cast to signed and negated, - // subtracting the original 1 (the double-subtraction is intentional). - // Otherwise, the cast to signed could cause a temporary long - // to equal LONG_MAX + 1, which is undefined. - // We avoid underflow on the subtraction by treating -0 as positive. - *out = (-(long)(result - 1)) - 1; + else if (result == ((unsigned long)LONG_MAX) + 1) + *out = LONG_MIN; + else { + /* We once had a far more clever no-overflow conversion here, but + * some versions of GCC apparently ran it into the ground. Now + * we just check for LONG_MIN explicitly. + */ + *out = -(long)result; + } } else { if (result > LONG_MAX) return -1; /* Overflow */ @@ -3272,8 +3260,10 @@ tor_vsscanf(const char *buf, const char *pattern, va_list ap) *out = lng; } else { int *out = va_arg(ap, int *); +#if LONG_MAX > INT_MAX if (lng < INT_MIN || lng > INT_MAX) return n_matched; +#endif *out = (int)lng; } ++pattern; diff --git a/src/common/util.h b/src/common/util.h index 814c8622a2..4c5070e65b 100644 --- a/src/common/util.h +++ b/src/common/util.h @@ -145,7 +145,6 @@ uint64_t round_to_power_of_2(uint64_t u64); unsigned round_to_next_multiple_of(unsigned number, unsigned divisor); uint32_t round_uint32_to_next_multiple_of(uint32_t number, uint32_t divisor); uint64_t round_uint64_to_next_multiple_of(uint64_t number, uint64_t divisor); -int64_t round_int64_to_next_multiple_of(int64_t number, int64_t divisor); int64_t sample_laplace_distribution(double mu, double b, double p); int64_t add_laplace_noise(int64_t signal, double random, double delta_f, double epsilon); diff --git a/src/ext/README b/src/ext/README index c180927b86..dfe620ed16 100644 --- a/src/ext/README +++ b/src/ext/README @@ -77,3 +77,10 @@ readpassphrase.[ch] timeouts/ William Ahern's hierarchical timer-wheel implementation. MIT license. + +mulodi/ + + Contains an overflow-checking 64-bit signed integer multiply + from LLVM's compiler_rt. For some reason, this is missing from + 32-bit libclang in many places. Dual licensed MIT-license and + BSD-like license; see mulodi/LICENSE.TXT. 
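(For illustration: a caller that wants a checked 64-bit signed multiply might wrap __mulodi4 as below. This is only a sketch; checked_mul_i64 is a hypothetical helper and not part of this patch. The prototype matches the one added in src/ext/mulodi/mulodi4.c further down, where di_int is int64_t:)

    #include <stdint.h>

    /* Prototype as declared in src/ext/mulodi/mulodi4.c. */
    int64_t __mulodi4(int64_t a, int64_t b, int *overflow);

    /* Hypothetical wrapper: multiply a*b with signed-overflow detection.
     * Returns 0 and stores the product on success; returns -1 on overflow. */
    static int
    checked_mul_i64(int64_t a, int64_t b, int64_t *product_out)
    {
      int overflow = 0;
      int64_t result = __mulodi4(a, b, &overflow);
      if (overflow)
        return -1;
      *product_out = result;
      return 0;
    }
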
diff --git a/src/ext/include.am b/src/ext/include.am index d1a3b47a63..6cfdbcc447 100644 --- a/src/ext/include.am +++ b/src/ext/include.am @@ -20,7 +20,8 @@ EXTHEADERS = \ noinst_HEADERS+= $(EXTHEADERS) -src_ext_ed25519_ref10_libed25519_ref10_a_CFLAGS= +src_ext_ed25519_ref10_libed25519_ref10_a_CFLAGS=\ + @CFLAGS_CONSTTIME@ src_ext_ed25519_ref10_libed25519_ref10_a_SOURCES= \ src/ext/ed25519/ref10/fe_0.c \ @@ -97,7 +98,8 @@ noinst_HEADERS += $(ED25519_REF10_HDRS) LIBED25519_REF10=src/ext/ed25519/ref10/libed25519_ref10.a noinst_LIBRARIES += $(LIBED25519_REF10) -src_ext_ed25519_donna_libed25519_donna_a_CFLAGS= \ +src_ext_ed25519_donna_libed25519_donna_a_CFLAGS=\ + @CFLAGS_CONSTTIME@ \ -DED25519_CUSTOMRANDOM \ -DED25519_SUFFIX=_donna @@ -139,7 +141,8 @@ noinst_HEADERS += $(ED25519_DONNA_HDRS) LIBED25519_DONNA=src/ext/ed25519/donna/libed25519_donna.a noinst_LIBRARIES += $(LIBED25519_DONNA) -src_ext_keccak_tiny_libkeccak_tiny_a_CFLAGS= +src_ext_keccak_tiny_libkeccak_tiny_a_CFLAGS=\ + @CFLAGS_CONSTTIME@ src_ext_keccak_tiny_libkeccak_tiny_a_SOURCES= \ src/ext/keccak-tiny/keccak-tiny-unrolled.c @@ -153,20 +156,20 @@ LIBKECCAK_TINY=src/ext/keccak-tiny/libkeccak-tiny.a noinst_LIBRARIES += $(LIBKECCAK_TINY) EXTRA_DIST += \ - timeouts/bench/bench-add.lua \ - timeouts/bench/bench-aux.lua \ - timeouts/bench/bench.c \ - timeouts/bench/bench-del.lua \ - timeouts/bench/bench-expire.lua \ - timeouts/bench/bench.h \ - timeouts/bench/bench-heap.c \ - timeouts/bench/bench-llrb.c \ - timeouts/bench/bench.plt \ - timeouts/bench/bench-wheel.c \ - timeouts/bench/Rules.mk \ - timeouts/lua/Rules.mk \ - timeouts/lua/timeout-lua.c \ - timeouts/Makefile \ - timeouts/Rules.shrc \ - timeouts/test-timeout.c + src/ext/timeouts/bench/bench-add.lua \ + src/ext/timeouts/bench/bench-aux.lua \ + src/ext/timeouts/bench/bench.c \ + src/ext/timeouts/bench/bench-del.lua \ + src/ext/timeouts/bench/bench-expire.lua \ + src/ext/timeouts/bench/bench.h \ + src/ext/timeouts/bench/bench-heap.c \ + src/ext/timeouts/bench/bench-llrb.c \ + src/ext/timeouts/bench/bench.plt \ + src/ext/timeouts/bench/bench-wheel.c \ + src/ext/timeouts/bench/Rules.mk \ + src/ext/timeouts/lua/Rules.mk \ + src/ext/timeouts/lua/timeout-lua.c \ + src/ext/timeouts/Makefile \ + src/ext/timeouts/Rules.shrc \ + src/ext/timeouts/test-timeout.c diff --git a/src/ext/mulodi/LICENSE.TXT b/src/ext/mulodi/LICENSE.TXT new file mode 100644 index 0000000000..a17dc12b27 --- /dev/null +++ b/src/ext/mulodi/LICENSE.TXT @@ -0,0 +1,91 @@ +============================================================================== +compiler_rt License +============================================================================== + +The compiler_rt library is dual licensed under both the University of Illinois +"BSD-Like" license and the MIT license. As a user of this code you may choose +to use it under either license. As a contributor, you agree to allow your code +to be used under both. + +Full text of the relevant licenses is included below. + +============================================================================== + +University of Illinois/NCSA +Open Source License + +Copyright (c) 2009-2016 by the contributors listed in CREDITS.TXT + +All rights reserved. 
+ +Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at + Urbana-Champaign, nor the names of its contributors may be used to + endorse or promote products derived from this Software without specific + prior written permission. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE +SOFTWARE. + +============================================================================== + +Copyright (c) 2009-2015 by the contributors listed in CREDITS.TXT + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +============================================================================== +Copyrights and Licenses for Third Party Software Distributed with LLVM: +============================================================================== +The LLVM software contains code written by third parties. Such software will +have its own individual LICENSE.TXT file in the directory in which it appears. +This file will describe the copyrights, license, and restrictions which apply +to that code. 
+ +The disclaimer of warranty in the University of Illinois Open Source License +applies to all code in the LLVM Distribution, and nothing in any of the +other licenses gives permission to use the names of the LLVM Team or the +University of Illinois to endorse or promote products derived from this +Software. + diff --git a/src/ext/mulodi/mulodi4.c b/src/ext/mulodi/mulodi4.c new file mode 100644 index 0000000000..bfa5e01295 --- /dev/null +++ b/src/ext/mulodi/mulodi4.c @@ -0,0 +1,66 @@ +/*===-- mulodi4.c - Implement __mulodi4 -----------------------------------=== + * + * The LLVM Compiler Infrastructure + * + * This file is dual licensed under the MIT and the University of Illinois Open + * Source Licenses. See LICENSE.TXT for details. + * + * ===----------------------------------------------------------------------=== + * + * This file implements __mulodi4 for the compiler_rt library. + * + * ===----------------------------------------------------------------------=== + */ + +#if 0 +#include "int_lib.h" +#else +#define COMPILER_RT_ABI +#define di_int int64_t +#include "torint.h" + +di_int __mulodi4(di_int a, di_int b, int* overflow); +#endif + +/* Returns: a * b */ + +/* Effects: sets *overflow to 1 if a * b overflows */ + +COMPILER_RT_ABI di_int +__mulodi4(di_int a, di_int b, int* overflow) +{ + const int N = (int)(sizeof(di_int) * CHAR_BIT); + const di_int MIN = (di_int)1 << (N-1); + const di_int MAX = ~MIN; + *overflow = 0; + di_int result = a * b; + if (a == MIN) + { + if (b != 0 && b != 1) + *overflow = 1; + return result; + } + if (b == MIN) + { + if (a != 0 && a != 1) + *overflow = 1; + return result; + } + di_int sa = a >> (N - 1); + di_int abs_a = (a ^ sa) - sa; + di_int sb = b >> (N - 1); + di_int abs_b = (b ^ sb) - sb; + if (abs_a < 2 || abs_b < 2) + return result; + if (sa == sb) + { + if (abs_a > MAX / abs_b) + *overflow = 1; + } + else + { + if (abs_a > MIN / -abs_b) + *overflow = 1; + } + return result; +} diff --git a/src/ext/timeouts/timeout.c b/src/ext/timeouts/timeout.c index f528576ffb..bd463a700d 100644 --- a/src/ext/timeouts/timeout.c +++ b/src/ext/timeouts/timeout.c @@ -38,7 +38,7 @@ #include <errno.h> /* errno */ -#include <sys/queue.h> /* TAILQ(3) */ +#include "tor_queue.h" /* TAILQ(3) */ #include "timeout.h" @@ -80,21 +80,21 @@ #define MAX(a, b) (((a) > (b))? 
(a) : (b)) #endif -#if !defined TAILQ_CONCAT -#define TAILQ_CONCAT(head1, head2, field) do { \ - if (!TAILQ_EMPTY(head2)) { \ +#if !defined TOR_TAILQ_CONCAT +#define TOR_TAILQ_CONCAT(head1, head2, field) do { \ + if (!TOR_TAILQ_EMPTY(head2)) { \ *(head1)->tqh_last = (head2)->tqh_first; \ (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ (head1)->tqh_last = (head2)->tqh_last; \ - TAILQ_INIT((head2)); \ + TOR_TAILQ_INIT((head2)); \ } \ } while (0) #endif -#if !defined TAILQ_FOREACH_SAFE -#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ - for ((var) = TAILQ_FIRST(head); \ - (var) && ((tvar) = TAILQ_NEXT(var, field), 1); \ +#if !defined TOR_TAILQ_FOREACH_SAFE +#define TOR_TAILQ_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = TOR_TAILQ_FIRST(head); \ + (var) && ((tvar) = TOR_TAILQ_NEXT(var, field), 1); \ (var) = (tvar)) #endif @@ -210,7 +210,7 @@ static inline wheel_t rotr(const wheel_t v, int c) { * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -TAILQ_HEAD(timeout_list, timeout); +TOR_TAILQ_HEAD(timeout_list, timeout); struct timeouts { struct timeout_list wheel[WHEEL_NUM][WHEEL_LEN], expired; @@ -227,11 +227,11 @@ static struct timeouts *timeouts_init(struct timeouts *T, timeout_t hz) { for (i = 0; i < countof(T->wheel); i++) { for (j = 0; j < countof(T->wheel[i]); j++) { - TAILQ_INIT(&T->wheel[i][j]); + TOR_TAILQ_INIT(&T->wheel[i][j]); } } - TAILQ_INIT(&T->expired); + TOR_TAILQ_INIT(&T->expired); for (i = 0; i < countof(T->pending); i++) { T->pending[i] = 0; @@ -261,17 +261,17 @@ static void timeouts_reset(struct timeouts *T) { struct timeout *to; unsigned i, j; - TAILQ_INIT(&reset); + TOR_TAILQ_INIT(&reset); for (i = 0; i < countof(T->wheel); i++) { for (j = 0; j < countof(T->wheel[i]); j++) { - TAILQ_CONCAT(&reset, &T->wheel[i][j], tqe); + TOR_TAILQ_CONCAT(&reset, &T->wheel[i][j], tqe); } } - TAILQ_CONCAT(&reset, &T->expired, tqe); + TOR_TAILQ_CONCAT(&reset, &T->expired, tqe); - TAILQ_FOREACH(to, &reset, tqe) { + TOR_TAILQ_FOREACH(to, &reset, tqe) { to->pending = NULL; TO_SET_TIMEOUTS(to, NULL); } @@ -296,9 +296,9 @@ TIMEOUT_PUBLIC timeout_t timeouts_hz(struct timeouts *T) { TIMEOUT_PUBLIC void timeouts_del(struct timeouts *T, struct timeout *to) { if (to->pending) { - TAILQ_REMOVE(to->pending, to, tqe); + TOR_TAILQ_REMOVE(to->pending, to, tqe); - if (to->pending != &T->expired && TAILQ_EMPTY(to->pending)) { + if (to->pending != &T->expired && TOR_TAILQ_EMPTY(to->pending)) { ptrdiff_t index = to->pending - &T->wheel[0][0]; int wheel = (int) (index / WHEEL_LEN); int slot = index % WHEEL_LEN; @@ -350,12 +350,12 @@ static void timeouts_sched(struct timeouts *T, struct timeout *to, timeout_t exp slot = timeout_slot(wheel, to->expires); to->pending = &T->wheel[wheel][slot]; - TAILQ_INSERT_TAIL(to->pending, to, tqe); + TOR_TAILQ_INSERT_TAIL(to->pending, to, tqe); T->pending[wheel] |= WHEEL_C(1) << slot; } else { to->pending = &T->expired; - TAILQ_INSERT_TAIL(to->pending, to, tqe); + TOR_TAILQ_INSERT_TAIL(to->pending, to, tqe); } } /* timeouts_sched() */ @@ -397,7 +397,7 @@ TIMEOUT_PUBLIC void timeouts_update(struct timeouts *T, abstime_t curtime) { struct timeout_list todo; int wheel; - TAILQ_INIT(&todo); + TOR_TAILQ_INIT(&todo); /* * There's no avoiding looping over every wheel. It's best to keep @@ -442,7 +442,7 @@ TIMEOUT_PUBLIC void timeouts_update(struct timeouts *T, abstime_t curtime) { while (pending & T->pending[wheel]) { /* ctz input cannot be zero: loop condition. 
*/ int slot = ctz(pending & T->pending[wheel]); - TAILQ_CONCAT(&todo, &T->wheel[wheel][slot], tqe); + TOR_TAILQ_CONCAT(&todo, &T->wheel[wheel][slot], tqe); T->pending[wheel] &= ~(UINT64_C(1) << slot); } @@ -455,10 +455,10 @@ TIMEOUT_PUBLIC void timeouts_update(struct timeouts *T, abstime_t curtime) { T->curtime = curtime; - while (!TAILQ_EMPTY(&todo)) { - struct timeout *to = TAILQ_FIRST(&todo); + while (!TOR_TAILQ_EMPTY(&todo)) { + struct timeout *to = TOR_TAILQ_FIRST(&todo); - TAILQ_REMOVE(&todo, to, tqe); + TOR_TAILQ_REMOVE(&todo, to, tqe); to->pending = NULL; timeouts_sched(T, to, to->expires); @@ -489,7 +489,7 @@ TIMEOUT_PUBLIC bool timeouts_pending(struct timeouts *T) { TIMEOUT_PUBLIC bool timeouts_expired(struct timeouts *T) { - return !TAILQ_EMPTY(&T->expired); + return !TOR_TAILQ_EMPTY(&T->expired); } /* timeouts_expired() */ @@ -544,7 +544,7 @@ static timeout_t timeouts_int(struct timeouts *T) { * events. */ TIMEOUT_PUBLIC timeout_t timeouts_timeout(struct timeouts *T) { - if (!TAILQ_EMPTY(&T->expired)) + if (!TOR_TAILQ_EMPTY(&T->expired)) return 0; return timeouts_int(T); @@ -552,10 +552,10 @@ TIMEOUT_PUBLIC timeout_t timeouts_timeout(struct timeouts *T) { TIMEOUT_PUBLIC struct timeout *timeouts_get(struct timeouts *T) { - if (!TAILQ_EMPTY(&T->expired)) { - struct timeout *to = TAILQ_FIRST(&T->expired); + if (!TOR_TAILQ_EMPTY(&T->expired)) { + struct timeout *to = TOR_TAILQ_FIRST(&T->expired); - TAILQ_REMOVE(&T->expired, to, tqe); + TOR_TAILQ_REMOVE(&T->expired, to, tqe); to->pending = NULL; TO_SET_TIMEOUTS(to, NULL); @@ -581,7 +581,7 @@ static struct timeout *timeouts_min(struct timeouts *T) { for (i = 0; i < countof(T->wheel); i++) { for (j = 0; j < countof(T->wheel[i]); j++) { - TAILQ_FOREACH(to, &T->wheel[i][j], tqe) { + TOR_TAILQ_FOREACH(to, &T->wheel[i][j], tqe) { if (!min || to->expires < min->expires) min = to; } @@ -623,7 +623,7 @@ TIMEOUT_PUBLIC bool timeouts_check(struct timeouts *T, FILE *fp) { } else { timeout = timeouts_timeout(T); - if (!TAILQ_EMPTY(&T->expired)) + if (!TOR_TAILQ_EMPTY(&T->expired)) check(timeout == 0, "wrong soft timeout (soft:%" TIMEOUT_PRIu " != hard:%" TIMEOUT_PRIu ")\n", timeout, TIMEOUT_C(0)); else check(timeout == ~TIMEOUT_C(0), "wrong soft timeout (soft:%" TIMEOUT_PRIu " != hard:%" TIMEOUT_PRIu ")\n", timeout, ~TIMEOUT_C(0)); @@ -665,7 +665,7 @@ TIMEOUT_PUBLIC struct timeout *timeouts_next(struct timeouts *T, struct timeouts YIELD(to); } } else { - TAILQ_FOREACH_SAFE(to, &T->expired, tqe, it->to) { + TOR_TAILQ_FOREACH_SAFE(to, &T->expired, tqe, it->to) { YIELD(to); } } @@ -674,7 +674,7 @@ TIMEOUT_PUBLIC struct timeout *timeouts_next(struct timeouts *T, struct timeouts if (it->flags & TIMEOUTS_PENDING) { for (it->i = 0; it->i < countof(T->wheel); it->i++) { for (it->j = 0; it->j < countof(T->wheel[it->i]); it->j++) { - TAILQ_FOREACH_SAFE(to, &T->wheel[it->i][it->j], tqe, it->to) { + TOR_TAILQ_FOREACH_SAFE(to, &T->wheel[it->i][it->j], tqe, it->to) { YIELD(to); } } diff --git a/src/ext/timeouts/timeout.h b/src/ext/timeouts/timeout.h index 3b08f19255..b35874e153 100644 --- a/src/ext/timeouts/timeout.h +++ b/src/ext/timeouts/timeout.h @@ -31,7 +31,7 @@ #include <inttypes.h> /* PRIu64 PRIx64 PRIX64 uint64_t */ -#include <sys/queue.h> /* TAILQ(3) */ +#include "tor_queue.h" /* TAILQ(3) */ /* @@ -121,7 +121,7 @@ struct timeout { struct timeout_list *pending; /* timeout list if pending on wheel or expiry queue */ - TAILQ_ENTRY(timeout) tqe; + TOR_TAILQ_ENTRY(timeout) tqe; /* entry member for struct timeout_list lists */ #ifndef 
TIMEOUT_DISABLE_CALLBACKS diff --git a/src/or/circuitbuild.c b/src/or/circuitbuild.c index 6941b438ff..5980ceaf9b 100644 --- a/src/or/circuitbuild.c +++ b/src/or/circuitbuild.c @@ -978,7 +978,7 @@ circuit_send_next_onion_skin(origin_circuit_t *circ) } control_event_client_status(LOG_NOTICE, "CIRCUIT_ESTABLISHED"); clear_broken_connection_map(1); - if (server_mode(options) && !check_whether_orport_reachable()) { + if (server_mode(options) && !check_whether_orport_reachable(options)) { inform_testing_reachability(); consider_testing_reachability(1, 1); } diff --git a/src/or/circuituse.c b/src/or/circuituse.c index b5959944f1..0d7e03be59 100644 --- a/src/or/circuituse.c +++ b/src/or/circuituse.c @@ -1426,7 +1426,7 @@ static void circuit_testing_opened(origin_circuit_t *circ) { if (have_performed_bandwidth_test || - !check_whether_orport_reachable()) { + !check_whether_orport_reachable(get_options())) { /* either we've already done everything we want with testing circuits, * or this testing circuit became open due to a fluke, e.g. we picked * a last hop where we already had the connection open due to an @@ -1443,7 +1443,8 @@ circuit_testing_opened(origin_circuit_t *circ) static void circuit_testing_failed(origin_circuit_t *circ, int at_last_hop) { - if (server_mode(get_options()) && check_whether_orport_reachable()) + const or_options_t *options = get_options(); + if (server_mode(options) && check_whether_orport_reachable(options)) return; log_info(LD_GENERAL, @@ -1674,7 +1675,11 @@ circuit_launch(uint8_t purpose, int flags) return circuit_launch_by_extend_info(purpose, NULL, flags); } -/* DOCDOC */ +/* Do we have enough descriptors to build paths? + * If need_exit is true, return 1 if we can build exit paths. + * (We need at least one Exit in the consensus to build exit paths.) + * If need_exit is false, return 1 if we can build internal paths. + */ static int have_enough_path_info(int need_exit) { @@ -2141,10 +2146,11 @@ optimistic_data_enabled(void) { const or_options_t *options = get_options(); if (options->OptimisticData < 0) { - /* XXX023 consider having auto default to 1 rather than 0 before - * the 0.2.3 branch goes stable. See bug 3617. -RD */ + /* Note: this default was 0 before #18815 was merged. We can't take the + * parameter out of the consensus until versions before that are all + * obsolete. */ const int32_t enabled = - networkstatus_get_param(NULL, "UseOptimisticData", 0, 0, 1); + networkstatus_get_param(NULL, "UseOptimisticData", /*default*/ 1, 0, 1); return (int)enabled; } return options->OptimisticData; @@ -2355,6 +2361,25 @@ connection_ap_handshake_attach_circuit(entry_connection_t *conn) /* we're a general conn */ origin_circuit_t *circ=NULL; + /* Are we linked to a dir conn that aims to fetch a consensus? + * We check here because this conn might no longer be needed. */ + if (base_conn->linked_conn && + base_conn->linked_conn->type == CONN_TYPE_DIR && + base_conn->linked_conn->purpose == DIR_PURPOSE_FETCH_CONSENSUS) { + + /* Yes we are. Is there a consensus fetch farther along than us? */ + if (networkstatus_consensus_is_already_downloading( + TO_DIR_CONN(base_conn->linked_conn)->requested_resource)) { + /* We're doing the "multiple consensus fetch attempts" game from + * proposal 210, and we're late to the party. Just close this conn. + * The circuit and TLS conn that we made will time out after a while + * if nothing else wants to use them. 
*/ + log_info(LD_DIR, "Closing extra consensus fetch (to %s) since one " + "is already downloading.", base_conn->linked_conn->address); + return -1; + } + } + if (conn->chosen_exit_name) { const node_t *node = node_get_by_nickname(conn->chosen_exit_name, 1); int opt = conn->chosen_exit_optional; diff --git a/src/or/config.c b/src/or/config.c index 2e14ba69dc..03883601a6 100644 --- a/src/or/config.c +++ b/src/or/config.c @@ -4344,8 +4344,10 @@ options_transition_affects_descriptor(const or_options_t *old_options, !opt_streq(old_options->MyFamily, new_options->MyFamily) || !opt_streq(old_options->AccountingStart, new_options->AccountingStart) || old_options->AccountingMax != new_options->AccountingMax || + old_options->AccountingRule != new_options->AccountingRule || public_server_mode(old_options) != public_server_mode(new_options) || - old_options->DirCache != new_options->DirCache) + old_options->DirCache != new_options->DirCache || + old_options->AssumeReachable != new_options->AssumeReachable) return 1; return 0; @@ -7006,9 +7008,8 @@ get_first_listener_addrport_string(int listener_type) int get_first_advertised_port_by_type_af(int listener_type, int address_family) { - if (!configured_ports) - return 0; - SMARTLIST_FOREACH_BEGIN(configured_ports, const port_cfg_t *, cfg) { + const smartlist_t *conf_ports = get_configured_ports(); + SMARTLIST_FOREACH_BEGIN(conf_ports, const port_cfg_t *, cfg) { if (cfg->type == listener_type && !cfg->server_cfg.no_advertise && (tor_addr_family(&cfg->addr) == address_family || diff --git a/src/or/connection.c b/src/or/connection.c index 1bd1a92e39..e70b89767e 100644 --- a/src/or/connection.c +++ b/src/or/connection.c @@ -4436,32 +4436,6 @@ connection_get_by_type_state_rendquery(int type, int state, )); } -#define CONN_FIRST_AND_FREE_TEMPLATE(sl) \ - STMT_BEGIN \ - if (smartlist_len(sl) > 0) { \ - void *first_item = smartlist_get(sl, 0); \ - smartlist_free(sl); \ - return first_item; \ - } else { \ - smartlist_free(sl); \ - return NULL; \ - } \ - STMT_END - -/** Return a directory connection (if any one exists) that is fetching - * the item described by <b>purpose</b>/<b>resource</b>, otherwise return NULL. - */ -dir_connection_t * -connection_dir_get_by_purpose_and_resource( - int purpose, - const char *resource) -{ - smartlist_t *conns = connection_dir_list_by_purpose_and_resource( - purpose, - resource); - CONN_FIRST_AND_FREE_TEMPLATE(conns); -} - /** Return a new smartlist of dir_connection_t * from get_connection_array() * that satisfy conn_test on connection_t *conn_var, and dirconn_test on * dir_connection_t *dirconn_var. conn_var must be of CONN_TYPE_DIR and not @@ -4502,25 +4476,6 @@ connection_dir_list_by_purpose_and_resource( dirconn->requested_resource)); } -/** Return a directory connection (if any one exists) that is fetching - * the item described by <b>purpose</b>/<b>resource</b>/<b>state</b>, - * otherwise return NULL. */ -dir_connection_t * -connection_dir_get_by_purpose_resource_and_state( - int purpose, - const char *resource, - int state) -{ - smartlist_t *conns = - connection_dir_list_by_purpose_resource_and_state( - purpose, - resource, - state); - CONN_FIRST_AND_FREE_TEMPLATE(conns); -} - -#undef CONN_FIRST_AND_FREE_TEMPLATE - /** Return a list of directory connections that are fetching the item * described by <b>purpose</b>/<b>resource</b>/<b>state</b>. If there are * none, return an empty list. 
This list must be freed using smartlist_free, diff --git a/src/or/connection.h b/src/or/connection.h index 45175cd5a2..4835235fba 100644 --- a/src/or/connection.h +++ b/src/or/connection.h @@ -192,13 +192,6 @@ MOCK_DECL(connection_t *,connection_get_by_type_addr_port_purpose,(int type, connection_t *connection_get_by_type_state(int type, int state); connection_t *connection_get_by_type_state_rendquery(int type, int state, const char *rendquery); -dir_connection_t *connection_dir_get_by_purpose_and_resource( - int purpose, - const char *resource); -dir_connection_t *connection_dir_get_by_purpose_resource_and_state( - int purpose, - const char *resource, - int state); smartlist_t *connection_dir_list_by_purpose_and_resource( int purpose, const char *resource); diff --git a/src/or/control.c b/src/or/control.c index b503591859..2b69aafb77 100644 --- a/src/or/control.c +++ b/src/or/control.c @@ -2146,6 +2146,7 @@ getinfo_helper_events(control_connection_t *control_conn, const char *question, char **answer, const char **errmsg) { + const or_options_t *options = get_options(); (void) control_conn; if (!strcmp(question, "circuit-status")) { smartlist_t *status = smartlist_new(); @@ -2282,17 +2283,19 @@ getinfo_helper_events(control_connection_t *control_conn, *answer = tor_strdup(directories_have_accepted_server_descriptor() ? "1" : "0"); } else if (!strcmp(question, "status/reachability-succeeded/or")) { - *answer = tor_strdup(check_whether_orport_reachable() ? "1" : "0"); + *answer = tor_strdup(check_whether_orport_reachable(options) ? + "1" : "0"); } else if (!strcmp(question, "status/reachability-succeeded/dir")) { - *answer = tor_strdup(check_whether_dirport_reachable() ? "1" : "0"); + *answer = tor_strdup(check_whether_dirport_reachable(options) ? + "1" : "0"); } else if (!strcmp(question, "status/reachability-succeeded")) { tor_asprintf(answer, "OR=%d DIR=%d", - check_whether_orport_reachable() ? 1 : 0, - check_whether_dirport_reachable() ? 1 : 0); + check_whether_orport_reachable(options) ? 1 : 0, + check_whether_dirport_reachable(options) ? 
1 : 0); } else if (!strcmp(question, "status/bootstrap-phase")) { *answer = tor_strdup(last_sent_bootstrap_message); } else if (!strcmpstart(question, "status/version/")) { - int is_server = server_mode(get_options()); + int is_server = server_mode(options); networkstatus_t *c = networkstatus_get_latest_consensus(); version_status_t status; const char *recommended; @@ -2334,7 +2337,7 @@ getinfo_helper_events(control_connection_t *control_conn, } *answer = bridge_stats; } else if (!strcmp(question, "status/fresh-relay-descs")) { - if (!server_mode(get_options())) { + if (!server_mode(options)) { *errmsg = "Only relays have descriptors"; return -1; } diff --git a/src/or/directory.c b/src/or/directory.c index b086fc78ed..67b54c003d 100644 --- a/src/or/directory.c +++ b/src/or/directory.c @@ -95,6 +95,9 @@ static void directory_initiate_command_rend( time_t if_modified_since, const rend_data_t *rend_query); +static void connection_dir_close_consensus_fetches( + dir_connection_t *except_this_one, const char *resource); + /********* START VARIABLES **********/ /** How far in the future do we allow a directory server to tell us it is @@ -1169,12 +1172,6 @@ directory_initiate_command_rend(const tor_addr_port_t *or_addr_port, return; } - /* ensure we don't make excess connections when we're already downloading - * a consensus during bootstrap */ - if (connection_dir_avoid_extra_connection_for_purpose(dir_purpose)) { - return; - } - conn = dir_connection_new(tor_addr_family(&addr)); /* set up conn so it's got all the data we need to remember */ @@ -1215,11 +1212,6 @@ directory_initiate_command_rend(const tor_addr_port_t *or_addr_port, conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING; /* fall through */ case 0: - /* Close this connection if there's another consensus connection - * downloading (during bootstrap), or connecting (after bootstrap). */ - if (connection_dir_close_consensus_conn_if_extra(conn)) { - return; - } /* queue the command on the outbuf */ directory_send_command(conn, dir_purpose, 1, resource, payload, payload_len, @@ -1267,11 +1259,6 @@ directory_initiate_command_rend(const tor_addr_port_t *or_addr_port, connection_mark_for_close(TO_CONN(conn)); return; } - /* Close this connection if there's another consensus connection - * downloading (during bootstrap), or connecting (after bootstrap). */ - if (connection_dir_close_consensus_conn_if_extra(conn)) { - return; - } conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING; /* queue the command on the outbuf */ directory_send_command(conn, dir_purpose, 0, resource, @@ -2015,7 +2002,8 @@ connection_dir_client_reached_eof(dir_connection_t *conn) } log_info(LD_DIR,"Received consensus directory (size %d) from server " "'%s:%d'", (int)body_len, conn->base_.address, conn->base_.port); - if ((r=networkstatus_set_current_consensus(body, flavname, 0))<0) { + if ((r=networkstatus_set_current_consensus(body, flavname, 0, + conn->identity_digest))<0) { log_fn(r<-1?LOG_WARN:LOG_INFO, LD_DIR, "Unable to load %s consensus directory downloaded from " "server '%s:%d'. I'll try again soon.", @@ -2024,6 +2012,10 @@ connection_dir_client_reached_eof(dir_connection_t *conn) networkstatus_consensus_download_failed(0, flavname); return -1; } + + /* If we launched other fetches for this consensus, cancel them. 
*/ + connection_dir_close_consensus_fetches(conn, flavname); + /* launches router downloads as needed */ routers_update_all_from_networkstatus(now, 3); update_microdescs_from_networkstatus(now); @@ -2058,7 +2050,8 @@ connection_dir_client_reached_eof(dir_connection_t *conn) } if (src_code != -1) { - if (trusted_dirs_load_certs_from_string(body, src_code, 1)<0) { + if (trusted_dirs_load_certs_from_string(body, src_code, 1, + conn->identity_digest)<0) { log_warn(LD_DIR, "Unable to parse fetched certificates"); /* if we fetched more than one and only some failed, the successful * ones got flushed to disk so it's safe to call this on them */ @@ -3661,226 +3654,37 @@ connection_dir_finished_flushing(dir_connection_t *conn) return 0; } -/* A helper function for connection_dir_close_consensus_conn_if_extra() - * and connection_dir_close_extra_consensus_conns() that returns 0 if - * we can't have, or don't want to close, excess consensus connections. */ -STATIC int -connection_dir_would_close_consensus_conn_helper(void) -{ - const or_options_t *options = get_options(); - - /* we're only interested in closing excess connections if we could - * have created any in the first place */ - if (!networkstatus_consensus_can_use_multiple_directories(options)) { - return 0; - } - - /* We want to close excess connections downloading a consensus. - * If there aren't any excess, we don't have anything to close. */ - if (!networkstatus_consensus_has_excess_connections()) { - return 0; - } - - /* If we have excess connections, but none of them are downloading a - * consensus, and we are still bootstrapping (that is, we have no usable - * consensus), we don't want to close any until one starts downloading. */ - if (!networkstatus_consensus_is_downloading_usable_flavor() - && networkstatus_consensus_is_bootstrapping(time(NULL))) { - return 0; - } - - /* If we have just stopped bootstrapping (that is, just parsed a consensus), - * we might still have some excess connections hanging around. So we still - * have to check if we want to close any, even if we've stopped - * bootstrapping. */ - return 1; -} - -/* Check if we would close excess consensus connections. If we would, any - * new consensus connection would become excess immediately, so return 1. - * Otherwise, return 0. */ -int -connection_dir_avoid_extra_connection_for_purpose(unsigned int purpose) -{ - const or_options_t *options = get_options(); - - /* We're not interested in connections that aren't fetching a consensus. */ - if (purpose != DIR_PURPOSE_FETCH_CONSENSUS) { - return 0; - } - - /* we're only interested in avoiding excess connections if we could - * have created any in the first place */ - if (!networkstatus_consensus_can_use_multiple_directories(options)) { - return 0; - } - - /* If there are connections downloading a consensus, and we are still - * bootstrapping (that is, we have no usable consensus), we can be sure that - * any further connections would be excess. */ - if (networkstatus_consensus_is_downloading_usable_flavor() - && networkstatus_consensus_is_bootstrapping(time(NULL))) { - return 1; - } - - return 0; -} - -/* Check if we have more than one consensus download connection attempt, and - * close conn: - * - if we don't have a consensus, and we're downloading a consensus, and conn - * is not downloading a consensus yet; - * - if we do have a consensus, and there's more than one consensus connection. +/* We just got a new consensus! 
If there are other in-progress requests + * for this consensus flavor (for example because we launched several in + * parallel), cancel them. * - * Post-bootstrap consensus connection attempts are initiated one at a time. - * So this function won't close any consensus connection attempts that - * are initiated after bootstrap. - */ -int -connection_dir_close_consensus_conn_if_extra(dir_connection_t *conn) -{ - tor_assert(conn); - tor_assert(conn->base_.type == CONN_TYPE_DIR); - - /* We're not interested in connections that aren't fetching a consensus. */ - if (conn->base_.purpose != DIR_PURPOSE_FETCH_CONSENSUS) { - return 0; - } - - /* The connection has already been closed */ - if (conn->base_.marked_for_close) { - return 0; - } - - /* Only close this connection if there's another consensus connection - * downloading (during bootstrap), or connecting (after bootstrap). - * Post-bootstrap consensus connection attempts won't be closed, because - * they only occur one at a time. */ - if (!connection_dir_would_close_consensus_conn_helper()) { - return 0; - } - - const int we_are_bootstrapping = networkstatus_consensus_is_bootstrapping( - time(NULL)); - - /* We don't want to check other connections to see if they are downloading, - * as this is prone to race-conditions. So leave it for - * connection_dir_close_extra_consensus_conns(() to clean up. - * - * But if conn has just started connecting, or we have a consensus already, - * we can be sure it's not needed any more. */ - if (!we_are_bootstrapping - || conn->base_.state == DIR_CONN_STATE_CONNECTING) { - connection_close_immediate(&conn->base_); - connection_mark_for_close(&conn->base_); - return -1; - } - - return 0; -} - -/* Clean up excess consensus download connection attempts. - * During bootstrap, or when the bootstrap consensus has just been downloaded, - * if we have more than one active consensus connection: - * - if we don't have a consensus, and we're downloading a consensus, keep an - * earlier connection, or a connection to a fallback directory, and close - * all other connections; - * - if we have just downloaded the bootstrap consensus, and have other - * consensus connections left over, close all of them. + * We do this check here (not just in + * connection_ap_handshake_attach_circuit()) to handle the edge case where + * a consensus fetch begins and ends before some other one tries to attach to + * a circuit, in which case the other one won't know that we're all happy now. * - * Post-bootstrap consensus connection attempts are initiated one at a time. - * So this function won't close any consensus connection attempts that - * are initiated after bootstrap. + * Don't mark the conn that just gave us the consensus -- otherwise we + * would end up double-marking it when it cleans itself up. */ -void -connection_dir_close_extra_consensus_conns(void) +static void +connection_dir_close_consensus_fetches(dir_connection_t *except_this_one, + const char *resource) { - /* Only cleanup connections if there is more than one consensus connection, - * and at least one of those connections is already downloading - * (during bootstrap), or connecting (just after the bootstrap consensus is - * downloaded). - * Post-bootstrap consensus connection attempts won't be cleaned up, because - * they only occur one at a time. 
*/ - if (!connection_dir_would_close_consensus_conn_helper()) { - return; - } - - int we_are_bootstrapping = networkstatus_consensus_is_bootstrapping( - time(NULL)); - - const char *usable_resource = networkstatus_get_flavor_name( - usable_consensus_flavor()); - smartlist_t *consens_usable_conns = - connection_dir_list_by_purpose_and_resource( - DIR_PURPOSE_FETCH_CONSENSUS, - usable_resource); - - /* If we want to keep a connection that's downloading, find a connection to - * keep, favouring: - * - connections opened earlier (they are likely to have progressed further) - * - connections to fallbacks (to reduce the load on authorities) */ - dir_connection_t *kept_download_conn = NULL; - int kept_is_authority = 0; - if (we_are_bootstrapping) { - SMARTLIST_FOREACH_BEGIN(consens_usable_conns, - dir_connection_t *, d) { - tor_assert(d); - int d_is_authority = router_digest_is_trusted_dir(d->identity_digest); - /* keep the first connection that is past the connecting state, but - * prefer fallbacks. */ - if (d->base_.state != DIR_CONN_STATE_CONNECTING) { - if (!kept_download_conn || (kept_is_authority && !d_is_authority)) { - kept_download_conn = d; - kept_is_authority = d_is_authority; - /* we've found the earliest fallback, and want to keep it regardless - * of any other connections */ - if (!kept_is_authority) - break; - } - } - } SMARTLIST_FOREACH_END(d); - } - - SMARTLIST_FOREACH_BEGIN(consens_usable_conns, - dir_connection_t *, d) { - tor_assert(d); - /* don't close this connection if it's the one we want to keep */ - if (kept_download_conn && d == kept_download_conn) + smartlist_t *conns_to_close = + connection_dir_list_by_purpose_and_resource(DIR_PURPOSE_FETCH_CONSENSUS, + resource); + SMARTLIST_FOREACH_BEGIN(conns_to_close, dir_connection_t *, d) { + if (d == except_this_one) continue; - /* mark all other connections for close */ - if (!d->base_.marked_for_close) { - connection_close_immediate(&d->base_); - connection_mark_for_close(&d->base_); - } + log_info(LD_DIR, "Closing consensus fetch (to %s) since one " + "has just arrived.", TO_CONN(d)->address); + connection_mark_for_close(TO_CONN(d)); } SMARTLIST_FOREACH_END(d); - - smartlist_free(consens_usable_conns); - consens_usable_conns = NULL; - - /* make sure we've closed all excess connections */ - const int final_connecting_conn_count = - connection_dir_count_by_purpose_resource_and_state( - DIR_PURPOSE_FETCH_CONSENSUS, - usable_resource, - DIR_CONN_STATE_CONNECTING); - if (final_connecting_conn_count > 0) { - log_warn(LD_BUG, "Expected 0 consensus connections connecting after " - "cleanup, got %d.", final_connecting_conn_count); - } - const int expected_final_conn_count = (we_are_bootstrapping ? 1 : 0); - const int final_conn_count = - connection_dir_count_by_purpose_and_resource( - DIR_PURPOSE_FETCH_CONSENSUS, - usable_resource); - if (final_conn_count > expected_final_conn_count) { - log_warn(LD_BUG, "Expected %d consensus connections after cleanup, got " - "%d.", expected_final_conn_count, final_connecting_conn_count); - } + smartlist_free(conns_to_close); } /** Connected handler for directory connections: begin sending data to the - * server, and return 0, or, if the connection is an excess bootstrap - * connection, close all excess bootstrap connections. + * server, and return 0. * Only used when connections don't immediately connect. 
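A side note on the helper the new function relies on: connection_dir_list_by_purpose_and_resource() returns a freshly allocated smartlist that the caller owns, while the dir_connection_t pointers inside it remain owned by the connection subsystem (this matches the "must be freed using smartlist_free" contract quoted at the top of this section). A minimal sketch of that list-then-mark idiom follows; the wrapper name is invented, and everything it calls is either part of this patch or already in the tree.

#include "or.h"
#include "connection.h"
#include "directory.h"

/* Sketch only: the list-then-mark idiom used by
 * connection_dir_close_consensus_fetches() above. */
static void
mark_other_consensus_fetches_sketch(dir_connection_t *keep,
                                    const char *flavor)
{
  smartlist_t *conns =
    connection_dir_list_by_purpose_and_resource(DIR_PURPOSE_FETCH_CONSENSUS,
                                                flavor);
  SMARTLIST_FOREACH_BEGIN(conns, dir_connection_t *, d) {
    if (d == keep)
      continue;                            /* keep the fetch that just succeeded */
    connection_mark_for_close(TO_CONN(d)); /* each conn cleans itself up later */
  } SMARTLIST_FOREACH_END(d);
  smartlist_free(conns);                   /* free the list, never its members */
}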
*/ int connection_dir_finished_connecting(dir_connection_t *conn) @@ -3892,12 +3696,6 @@ connection_dir_finished_connecting(dir_connection_t *conn) log_debug(LD_HTTP,"Dir connection to router %s:%u established.", conn->base_.address,conn->base_.port); - /* Close this connection if there's another consensus connection - * downloading (during bootstrap), or connecting (after bootstrap). */ - if (connection_dir_close_consensus_conn_if_extra(conn)) { - return -1; - } - /* start flushing conn */ conn->base_.state = DIR_CONN_STATE_CLIENT_SENDING; return 0; diff --git a/src/or/directory.h b/src/or/directory.h index c4edbb5c0f..7646cac03f 100644 --- a/src/or/directory.h +++ b/src/or/directory.h @@ -78,9 +78,6 @@ void directory_initiate_command(const tor_addr_t *or_addr, uint16_t or_port, const char *resource, const char *payload, size_t payload_len, time_t if_modified_since); -int connection_dir_avoid_extra_connection_for_purpose(unsigned int purpose); -int connection_dir_close_consensus_conn_if_extra(dir_connection_t *conn); -void connection_dir_close_extra_consensus_conns(void); #define DSR_HEX (1<<0) #define DSR_BASE64 (1<<1) @@ -147,7 +144,6 @@ STATIC int directory_handle_command_get(dir_connection_t *conn, const char *headers, const char *req_body, size_t req_body_len); -STATIC int connection_dir_would_close_consensus_conn_helper(void); STATIC int download_status_schedule_get_delay(download_status_t *dls, const smartlist_t *schedule, time_t now); diff --git a/src/or/dirserv.c b/src/or/dirserv.c index 441e4b5377..d1ea5dffd8 100644 --- a/src/or/dirserv.c +++ b/src/or/dirserv.c @@ -257,11 +257,11 @@ dirserv_router_get_status(const routerinfo_t *router, const char **msg, return FP_REJECT; } - if (router->signing_key_cert) { + if (router->cache_info.signing_key_cert) { /* This has an ed25519 identity key. */ if (KEYPIN_MISMATCH == keypin_check((const uint8_t*)router->cache_info.identity_digest, - router->signing_key_cert->signing_key.pubkey)) { + router->cache_info.signing_key_cert->signing_key.pubkey)) { log_fn(severity, LD_DIR, "Descriptor from router %s has an Ed25519 key, " "but the <rsa,ed25519> keys don't match what they were before.", @@ -629,10 +629,10 @@ dirserv_add_descriptor(routerinfo_t *ri, const char **msg, const char *source) /* Do keypinning again ... this time, to add the pin if appropriate */ int keypin_status; - if (ri->signing_key_cert) { + if (ri->cache_info.signing_key_cert) { keypin_status = keypin_check_and_add( (const uint8_t*)ri->cache_info.identity_digest, - ri->signing_key_cert->signing_key.pubkey, + ri->cache_info.signing_key_cert->signing_key.pubkey, ! key_pinning); } else { keypin_status = keypin_check_lone_rsa( @@ -691,12 +691,14 @@ dirserv_add_descriptor(routerinfo_t *ri, const char **msg, const char *source) static was_router_added_t dirserv_add_extrainfo(extrainfo_t *ei, const char **msg) { - const routerinfo_t *ri; + routerinfo_t *ri; int r; tor_assert(msg); *msg = NULL; - ri = router_get_by_id_digest(ei->cache_info.identity_digest); + /* Needs to be mutable so routerinfo_incompatible_with_extrainfo + * can mess with some of the flags in ri->cache_info. 
*/ + ri = router_get_mutable_by_digest(ei->cache_info.identity_digest); if (!ri) { *msg = "No corresponding router descriptor for extra-info descriptor"; extrainfo_free(ei); @@ -716,7 +718,8 @@ dirserv_add_extrainfo(extrainfo_t *ei, const char **msg) return ROUTER_BAD_EI; } - if ((r = routerinfo_incompatible_with_extrainfo(ri, ei, NULL, msg))) { + if ((r = routerinfo_incompatible_with_extrainfo(ri->identity_pkey, ei, + &ri->cache_info, msg))) { extrainfo_free(ei); return r < 0 ? ROUTER_IS_ALREADY_KNOWN : ROUTER_BAD_EI; } @@ -1131,8 +1134,11 @@ directory_caches_unknown_auth_certs(const or_options_t *options) return dir_server_mode(options) || options->BridgeRelay; } -/** Return 1 if we want to keep descriptors, networkstatuses, etc around - * and we're willing to serve them to others. Else return 0. +/** Return 1 if we want to keep descriptors, networkstatuses, etc around. + * Else return 0. + * Check options->DirPort_set and directory_permits_begindir_requests() + * to see if we are willing to serve these directory documents to others via + * the DirPort and begindir-over-ORPort, respectively. */ int directory_caches_dir_info(const or_options_t *options) @@ -2136,9 +2142,9 @@ routers_make_ed_keys_unique(smartlist_t *routers) SMARTLIST_FOREACH_BEGIN(routers, routerinfo_t *, ri) { ri->omit_from_vote = 0; - if (ri->signing_key_cert == NULL) + if (ri->cache_info.signing_key_cert == NULL) continue; /* No ed key */ - const uint8_t *pk = ri->signing_key_cert->signing_key.pubkey; + const uint8_t *pk = ri->cache_info.signing_key_cert->signing_key.pubkey; if ((ri2 = digest256map_get(by_ed_key, pk))) { /* Duplicate; must omit one. Set the omit_from_vote flag in whichever * one has the earlier published_on. */ @@ -2891,8 +2897,9 @@ dirserv_generate_networkstatus_vote_obj(crypto_pk_t *private_key, set_routerstatus_from_routerinfo(rs, node, ri, now, listbadexits); - if (ri->signing_key_cert) { - memcpy(vrs->ed25519_id, ri->signing_key_cert->signing_key.pubkey, + if (ri->cache_info.signing_key_cert) { + memcpy(vrs->ed25519_id, + ri->cache_info.signing_key_cert->signing_key.pubkey, ED25519_PUBKEY_LEN); } diff --git a/src/or/dirvote.c b/src/or/dirvote.c index 9854af7d7f..1fc6237477 100644 --- a/src/or/dirvote.c +++ b/src/or/dirvote.c @@ -106,6 +106,7 @@ format_networkstatus_vote(crypto_pk_t *private_signing_key, SMARTLIST_FOREACH(v3_ns->package_lines, const char *, p, if (validate_recommended_package_line(p)) smartlist_add_asprintf(tmp, "package %s\n", p)); + smartlist_sort_strings(tmp); packages = smartlist_join_strings(tmp, "", 0, NULL); SMARTLIST_FOREACH(tmp, char *, cp, tor_free(cp)); smartlist_free(tmp); @@ -2916,7 +2917,8 @@ dirvote_add_vote(const char *vote_body, const char **msg_out, int *status_out) /* Hey, it's a new cert! */ trusted_dirs_load_certs_from_string( vote->cert->cache_info.signed_descriptor_body, - TRUSTED_DIRS_CERTS_SRC_FROM_VOTE, 1 /*flush*/); + TRUSTED_DIRS_CERTS_SRC_FROM_VOTE, 1 /*flush*/, + NULL); if (!authority_cert_get_by_digests(vote->cert->cache_info.identity_digest, vote->cert->signing_key_digest)) { log_warn(LD_BUG, "We added a cert, but still couldn't find it."); @@ -3019,6 +3021,30 @@ dirvote_add_vote(const char *vote_body, const char **msg_out, int *status_out) return any_failed ? NULL : pending_vote; } +/* Write the votes in <b>pending_vote_list</b> to disk. 
*/ +static void +write_v3_votes_to_disk(const smartlist_t *pending_vote_list) +{ + smartlist_t *votestrings = smartlist_new(); + char *votefile = NULL; + + SMARTLIST_FOREACH(pending_vote_list, pending_vote_t *, v, + { + sized_chunk_t *c = tor_malloc(sizeof(sized_chunk_t)); + c->bytes = v->vote_body->dir; + c->len = v->vote_body->dir_len; + smartlist_add(votestrings, c); /* collect strings to write to disk */ + }); + + votefile = get_datadir_fname("v3-status-votes"); + write_chunks_to_file(votefile, votestrings, 0, 0); + log_debug(LD_DIR, "Wrote votes to disk (%s)!", votefile); + + tor_free(votefile); + SMARTLIST_FOREACH(votestrings, sized_chunk_t *, c, tor_free(c)); + smartlist_free(votestrings); +} + /** Try to compute a v3 networkstatus consensus from the currently pending * votes. Return 0 on success, -1 on failure. Store the consensus in * pending_consensus: it won't be ready to be published until we have @@ -3028,8 +3054,8 @@ dirvote_compute_consensuses(void) { /* Have we got enough votes to try? */ int n_votes, n_voters, n_vote_running = 0; - smartlist_t *votes = NULL, *votestrings = NULL; - char *consensus_body = NULL, *signatures = NULL, *votefile; + smartlist_t *votes = NULL; + char *consensus_body = NULL, *signatures = NULL; networkstatus_t *consensus = NULL; authority_cert_t *my_cert; pending_consensus_t pending[N_CONSENSUS_FLAVORS]; @@ -3040,6 +3066,17 @@ dirvote_compute_consensuses(void) if (!pending_vote_list) pending_vote_list = smartlist_new(); + /* Write votes to disk */ + write_v3_votes_to_disk(pending_vote_list); + + /* Setup votes smartlist */ + votes = smartlist_new(); + SMARTLIST_FOREACH(pending_vote_list, pending_vote_t *, v, + { + smartlist_add(votes, v->vote); /* collect votes to compute consensus */ + }); + + /* See if consensus managed to achieve majority */ n_voters = get_n_authorities(V3_DIRINFO); n_votes = smartlist_len(pending_vote_list); if (n_votes <= n_voters/2) { @@ -3066,24 +3103,6 @@ dirvote_compute_consensuses(void) goto err; } - votes = smartlist_new(); - votestrings = smartlist_new(); - SMARTLIST_FOREACH(pending_vote_list, pending_vote_t *, v, - { - sized_chunk_t *c = tor_malloc(sizeof(sized_chunk_t)); - c->bytes = v->vote_body->dir; - c->len = v->vote_body->dir_len; - smartlist_add(votestrings, c); /* collect strings to write to disk */ - - smartlist_add(votes, v->vote); /* collect votes to compute consensus */ - }); - - votefile = get_datadir_fname("v3-status-votes"); - write_chunks_to_file(votefile, votestrings, 0, 0); - tor_free(votefile); - SMARTLIST_FOREACH(votestrings, sized_chunk_t *, c, tor_free(c)); - smartlist_free(votestrings); - { char legacy_dbuf[DIGEST_LEN]; crypto_pk_t *legacy_sign=NULL; @@ -3373,7 +3392,7 @@ dirvote_publish_consensus(void) continue; } - if (networkstatus_set_current_consensus(pending->body, name, 0)) + if (networkstatus_set_current_consensus(pending->body, name, 0, NULL)) log_warn(LD_DIR, "Error publishing %s consensus", name); else log_notice(LD_DIR, "Published %s consensus", name); @@ -3528,10 +3547,11 @@ dirvote_create_microdescriptor(const routerinfo_t *ri, int consensus_method) char idbuf[ED25519_BASE64_LEN+1]; const char *keytype; if (consensus_method >= MIN_METHOD_FOR_ED25519_ID_IN_MD && - ri->signing_key_cert && - ri->signing_key_cert->signing_key_included) { + ri->cache_info.signing_key_cert && + ri->cache_info.signing_key_cert->signing_key_included) { keytype = "ed25519"; - ed25519_public_to_base64(idbuf, &ri->signing_key_cert->signing_key); + ed25519_public_to_base64(idbuf, + 
&ri->cache_info.signing_key_cert->signing_key); } else { keytype = "rsa1024"; digest_to_base64(idbuf, ri->cache_info.identity_digest); diff --git a/src/or/include.am b/src/or/include.am index 712ae18406..19f1a7fe0a 100644 --- a/src/or/include.am +++ b/src/or/include.am @@ -109,7 +109,7 @@ src_or_libtor_testing_a_CFLAGS = $(AM_CFLAGS) $(TEST_CFLAGS) src_or_tor_LDFLAGS = @TOR_LDFLAGS_zlib@ @TOR_LDFLAGS_openssl@ @TOR_LDFLAGS_libevent@ -src_or_tor_LDADD = src/or/libtor.a src/common/libor.a \ +src_or_tor_LDADD = src/or/libtor.a src/common/libor.a src/common/libor-ctime.a \ src/common/libor-crypto.a $(LIBKECCAK_TINY) $(LIBDONNA) \ src/common/libor-event.a src/trunnel/libor-trunnel.a \ @TOR_ZLIB_LIBS@ @TOR_LIB_MATH@ @TOR_LIBEVENT_LIBS@ @TOR_OPENSSL_LIBS@ \ @@ -121,6 +121,7 @@ src_or_tor_cov_CPPFLAGS = $(AM_CPPFLAGS) $(TEST_CPPFLAGS) src_or_tor_cov_CFLAGS = $(AM_CFLAGS) $(TEST_CFLAGS) src_or_tor_cov_LDFLAGS = @TOR_LDFLAGS_zlib@ @TOR_LDFLAGS_openssl@ @TOR_LDFLAGS_libevent@ src_or_tor_cov_LDADD = src/or/libtor-testing.a src/common/libor-testing.a \ + src/common/libor-ctime-testing.a \ src/common/libor-crypto-testing.a $(LIBKECCAK_TINY) $(LIBDONNA) \ src/common/libor-event-testing.a src/trunnel/libor-trunnel-testing.a \ @TOR_ZLIB_LIBS@ @TOR_LIB_MATH@ @TOR_LIBEVENT_LIBS@ @TOR_OPENSSL_LIBS@ \ diff --git a/src/or/main.c b/src/or/main.c index fba9799a60..1b161336c6 100644 --- a/src/or/main.c +++ b/src/or/main.c @@ -1484,17 +1484,6 @@ run_scheduled_events(time_t now) dirvote_act(options, now); } - /* 2d. Cleanup excess consensus bootstrap connections every second. - * connection_dir_close_consensus_conn_if_extra() closes some connections - * that are clearly excess, but this check is more thorough. - * This only closes connections if there is more than one consensus - * connection, and at least one of those connections is already downloading - * (during bootstrap), or connecting (just after the bootstrap consensus is - * downloaded). - * It won't close any consensus connections initiated after bootstrap, - * because those attempts are made one at a time. */ - connection_dir_close_extra_consensus_conns(); - /* 3a. Every second, we examine pending circuits and prune the * ones which have been pending for more than a few seconds. * We do this before step 4, so it can try building more if @@ -2094,7 +2083,7 @@ second_elapsed_callback(periodic_timer_t *timer, void *arg) TIMEOUT_UNTIL_UNREACHABILITY_COMPLAINT) { /* every 20 minutes, check and complain if necessary */ const routerinfo_t *me = router_get_my_routerinfo(); - if (me && !check_whether_orport_reachable()) { + if (me && !check_whether_orport_reachable(options)) { char *address = tor_dup_ip(me->addr); log_warn(LD_CONFIG,"Your server (%s:%d) has not managed to confirm that " "its ORPort is reachable. Relays do not publish descriptors " @@ -2107,7 +2096,7 @@ second_elapsed_callback(periodic_timer_t *timer, void *arg) tor_free(address); } - if (me && !check_whether_dirport_reachable()) { + if (me && !check_whether_dirport_reachable(options)) { char *address = tor_dup_ip(me->addr); log_warn(LD_CONFIG, "Your server (%s:%d) has not managed to confirm that its " diff --git a/src/or/microdesc.c b/src/or/microdesc.c index 299042995b..5b5c29a6d2 100644 --- a/src/or/microdesc.c +++ b/src/or/microdesc.c @@ -955,8 +955,8 @@ we_fetch_router_descriptors(const or_options_t *options) } /** Return the consensus flavor we actually want to use to build circuits. 
*/ -int -usable_consensus_flavor(void) +MOCK_IMPL(int, +usable_consensus_flavor,(void)) { if (we_use_microdescriptors_for_circuits(get_options())) { return FLAV_MICRODESC; diff --git a/src/or/microdesc.h b/src/or/microdesc.h index 0675e233d6..40c83139e9 100644 --- a/src/or/microdesc.h +++ b/src/or/microdesc.h @@ -47,7 +47,7 @@ void microdesc_free_all(void); void update_microdesc_downloads(time_t now); void update_microdescs_from_networkstatus(time_t now); -int usable_consensus_flavor(void); +MOCK_DECL(int, usable_consensus_flavor,(void)); int we_fetch_microdescriptors(const or_options_t *options); int we_fetch_router_descriptors(const or_options_t *options); int we_use_microdescriptors_for_circuits(const or_options_t *options); diff --git a/src/or/networkstatus.c b/src/or/networkstatus.c index 2975e7ebb4..70e8b8d19b 100644 --- a/src/or/networkstatus.c +++ b/src/or/networkstatus.c @@ -121,8 +121,7 @@ static int have_warned_about_new_version = 0; static void routerstatus_list_update_named_server_map(void); static void update_consensus_bootstrap_multiple_downloads( time_t now, - const or_options_t *options, - int we_are_bootstrapping); + const or_options_t *options); /** Forget that we've warned about anything networkstatus-related, so we will * give fresh warnings if the same behavior happens again. */ @@ -174,7 +173,7 @@ router_reload_consensus_networkstatus(void) } s = read_file_to_str(filename, RFTS_IGNORE_MISSING, NULL); if (s) { - if (networkstatus_set_current_consensus(s, flavor, flags) < -1) { + if (networkstatus_set_current_consensus(s, flavor, flags, NULL) < -1) { log_warn(LD_FS, "Couldn't load consensus %s networkstatus from \"%s\"", flavor, filename); } @@ -192,7 +191,8 @@ router_reload_consensus_networkstatus(void) s = read_file_to_str(filename, RFTS_IGNORE_MISSING, NULL); if (s) { if (networkstatus_set_current_consensus(s, flavor, - flags|NSSET_WAS_WAITING_FOR_CERTS)) { + flags|NSSET_WAS_WAITING_FOR_CERTS, + NULL)) { log_info(LD_FS, "Couldn't load consensus %s networkstatus from \"%s\"", flavor, filename); } @@ -792,26 +792,6 @@ check_consensus_waiting_for_certs(int flavor, time_t now, return 0; } -/* Return the maximum download tries for a consensus, based on options and - * whether we_are_bootstrapping. */ -static int -consensus_max_download_tries(const or_options_t *options, - int we_are_bootstrapping) -{ - int use_fallbacks = networkstatus_consensus_can_use_extra_fallbacks(options); - - if (we_are_bootstrapping) { - if (use_fallbacks) { - return options->ClientBootstrapConsensusMaxDownloadTries; - } else { - return - options->ClientBootstrapConsensusAuthorityOnlyMaxDownloadTries; - } - } - - return options->TestingConsensusMaxDownloadTries; -} - /** If we want to download a fresh consensus, launch a new download as * appropriate. */ static void @@ -865,29 +845,14 @@ update_consensus_networkstatus_downloads(time_t now) && i == usable_consensus_flavor()) { /* Check if we're already downloading a usable consensus */ - int consens_conn_count = - connection_dir_count_by_purpose_and_resource( - DIR_PURPOSE_FETCH_CONSENSUS, - resource); - int connect_consens_conn_count = - connection_dir_count_by_purpose_resource_and_state( - DIR_PURPOSE_FETCH_CONSENSUS, - resource, - DIR_CONN_STATE_CONNECTING); - - /* If not all connections are "connecting", then some are - * downloading. We want to have at most one downloading at a time. 
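Converting usable_consensus_flavor() (and, later in this diff, networkstatus_consensus_is_bootstrapping() and networkstatus_consensus_can_use_extra_fallbacks()) to MOCK_DECL/MOCK_IMPL is what lets the bootstrap-download logic be unit-tested without building a real consensus. A rough sketch of how a test might swap the function out, assuming the MOCK()/UNMOCK() helpers from testsupport.h behave as usual; the mock and test names are invented.

#include "or.h"
#include "testsupport.h"
#include "microdesc.h"

/* Hypothetical mock: pretend the client always wants the microdescriptor
 * flavor, regardless of configuration. */
static int
mock_usable_consensus_flavor(void)
{
  return FLAV_MICRODESC;
}

static void
test_flavor_dependent_behavior(void *arg)
{
  (void) arg;
  MOCK(usable_consensus_flavor, mock_usable_consensus_flavor);
  /* ... exercise code paths that call usable_consensus_flavor() ... */
  UNMOCK(usable_consensus_flavor);
}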
*/ - if (connect_consens_conn_count < consens_conn_count) { + if (networkstatus_consensus_is_already_downloading(resource)) continue; - } /* Make multiple connections for a bootstrap consensus download. */ - update_consensus_bootstrap_multiple_downloads(now, options, - we_are_bootstrapping); + update_consensus_bootstrap_multiple_downloads(now, options); } else { /* Check if we failed downloading a consensus too recently */ - int max_dl_tries = consensus_max_download_tries(options, - we_are_bootstrapping); + int max_dl_tries = options->TestingConsensusMaxDownloadTries; /* Let's make sure we remembered to update consensus_dl_status */ tor_assert(consensus_dl_status[i].schedule == DL_SCHED_CONSENSUS); @@ -922,12 +887,16 @@ static void update_consensus_bootstrap_attempt_downloads( time_t now, const or_options_t *options, - int we_are_bootstrapping, download_status_t *dls, download_want_authority_t want_authority) { - int max_dl_tries = consensus_max_download_tries(options, - we_are_bootstrapping); + int use_fallbacks = networkstatus_consensus_can_use_extra_fallbacks(options); + int max_dl_tries = options->ClientBootstrapConsensusMaxDownloadTries; + if (!use_fallbacks) { + max_dl_tries = + options->ClientBootstrapConsensusAuthorityOnlyMaxDownloadTries; + } + const char *resource = networkstatus_get_flavor_name( usable_consensus_flavor()); @@ -960,8 +929,7 @@ update_consensus_bootstrap_attempt_downloads( */ static void update_consensus_bootstrap_multiple_downloads(time_t now, - const or_options_t *options, - int we_are_bootstrapping) + const or_options_t *options) { const int usable_flavor = usable_consensus_flavor(); @@ -970,12 +938,6 @@ update_consensus_bootstrap_multiple_downloads(time_t now, return; } - /* If we've managed to validate a usable consensus, don't make additional - * connections. */ - if (!we_are_bootstrapping) { - return; - } - /* Launch concurrent consensus download attempt(s) based on the mirror and * authority schedules. Try the mirror first - this makes it slightly more * likely that we'll connect to the fallback first, and then end the @@ -994,8 +956,7 @@ update_consensus_bootstrap_multiple_downloads(time_t now, if (!check_consensus_waiting_for_certs(usable_flavor, now, dls_f)) { /* During bootstrap, DL_WANT_ANY_DIRSERVER means "use fallbacks". 
*/ - update_consensus_bootstrap_attempt_downloads(now, options, - we_are_bootstrapping, dls_f, + update_consensus_bootstrap_attempt_downloads(now, options, dls_f, DL_WANT_ANY_DIRSERVER); } } @@ -1005,8 +966,7 @@ update_consensus_bootstrap_multiple_downloads(time_t now, &consensus_bootstrap_dl_status[CONSENSUS_BOOTSTRAP_SOURCE_AUTHORITY]; if (!check_consensus_waiting_for_certs(usable_flavor, now, dls_a)) { - update_consensus_bootstrap_attempt_downloads(now, options, - we_are_bootstrapping, dls_a, + update_consensus_bootstrap_attempt_downloads(now, options, dls_a, DL_WANT_AUTHORITY); } } @@ -1201,13 +1161,13 @@ update_certificate_downloads(time_t now) for (i = 0; i < N_CONSENSUS_FLAVORS; ++i) { if (consensus_waiting_for_certs[i].consensus) authority_certs_fetch_missing(consensus_waiting_for_certs[i].consensus, - now); + now, NULL); } if (current_ns_consensus) - authority_certs_fetch_missing(current_ns_consensus, now); + authority_certs_fetch_missing(current_ns_consensus, now, NULL); if (current_md_consensus) - authority_certs_fetch_missing(current_md_consensus, now); + authority_certs_fetch_missing(current_md_consensus, now, NULL); } /** Return 1 if we have a consensus but we don't have enough certificates @@ -1274,16 +1234,34 @@ networkstatus_get_reasonably_live_consensus(time_t now, int flavor) return NULL; } -/** Check if we're bootstrapping a consensus download. This means that we are - * only using the authorities and fallback directory mirrors to download the - * consensus flavour we'll use. */ -int -networkstatus_consensus_is_bootstrapping(time_t now) -{ - /* If we don't have a consensus, we must still be bootstrapping */ - return !networkstatus_get_reasonably_live_consensus( - now, - usable_consensus_flavor()); +/** Check if we need to download a consensus during tor's bootstrap phase. + * If we have no consensus, or our consensus is unusably old, return 1. + * As soon as we have received a consensus, return 0, even if we don't have + * enough certificates to validate it. + * If a fallback directory gives us a consensus we can never get certs for, + * check_consensus_waiting_for_certs() will wait 20 minutes before failing + * the cert downloads. After that, a new consensus will be fetched from a + * randomly chosen fallback. */ +MOCK_IMPL(int, +networkstatus_consensus_is_bootstrapping,(time_t now)) +{ + /* If we have a validated, reasonably live consensus, we're not + * bootstrapping a consensus at all. */ + if (networkstatus_get_reasonably_live_consensus( + now, + usable_consensus_flavor())) { + return 0; + } + + /* If we have a consensus, but we're waiting for certificates, + * we're not waiting for a consensus download while bootstrapping. */ + if (consensus_is_waiting_for_certs()) { + return 0; + } + + /* If we have no consensus, or our consensus is very old, we are + * bootstrapping, and we need to download a consensus. */ + return 1; } /** Check if we can use multiple directories for a consensus download. @@ -1300,8 +1278,8 @@ networkstatus_consensus_can_use_multiple_directories( /** Check if we can use fallback directory mirrors for a consensus download. * If we have fallbacks and don't want to fetch from the authorities, * we can use them. */ -int -networkstatus_consensus_can_use_extra_fallbacks(const or_options_t *options) +MOCK_IMPL(int, +networkstatus_consensus_can_use_extra_fallbacks,(const or_options_t *options)) { /* The list length comparisons are a quick way to check if we have any * non-authority fallback directories. 
If we ever have any authorities that @@ -1315,61 +1293,39 @@ networkstatus_consensus_can_use_extra_fallbacks(const or_options_t *options) > smartlist_len(router_get_trusted_dir_servers()))); } -/* Check if there is more than 1 consensus connection retrieving the usable - * consensus flavor. If so, return 1, if not, return 0. - * - * During normal operation, Tor only makes one consensus download - * connection. But clients can make multiple simultaneous consensus - * connections to improve bootstrap speed and reliability. - * - * If there is more than one connection, we must have connections left - * over from bootstrapping. However, some of the connections may have - * completed and been cleaned up, so it is not sufficient to check the - * return value of this function to see if a client could make multiple - * bootstrap connections. Use - * networkstatus_consensus_can_use_multiple_directories() - * and networkstatus_consensus_is_bootstrapping(). */ -int -networkstatus_consensus_has_excess_connections(void) -{ - const char *usable_resource = networkstatus_get_flavor_name( - usable_consensus_flavor()); - const int consens_conn_usable_count = - connection_dir_count_by_purpose_and_resource( - DIR_PURPOSE_FETCH_CONSENSUS, - usable_resource); - /* The maximum number of connections we want downloading a usable consensus - * Always 1, whether bootstrapping or not. */ - const int max_expected_consens_conn_usable_count = 1; - - if (consens_conn_usable_count > max_expected_consens_conn_usable_count) { - return 1; - } - - return 0; -} - -/* Is tor currently downloading a consensus of the usable flavor? */ +/* Is there a consensus fetch for flavor <b>resource</b> that's far + * enough along to be attached to a circuit? */ int -networkstatus_consensus_is_downloading_usable_flavor(void) -{ - const char *usable_resource = networkstatus_get_flavor_name( - usable_consensus_flavor()); - const int consens_conn_usable_count = - connection_dir_count_by_purpose_and_resource( - DIR_PURPOSE_FETCH_CONSENSUS, - usable_resource); - - const int connect_consens_conn_usable_count = - connection_dir_count_by_purpose_resource_and_state( - DIR_PURPOSE_FETCH_CONSENSUS, - usable_resource, - DIR_CONN_STATE_CONNECTING); - if (connect_consens_conn_usable_count < consens_conn_usable_count) { - return 1; - } +networkstatus_consensus_is_already_downloading(const char *resource) +{ + int answer = 0; + + /* First, get a list of all the dir conns that are fetching a consensus, + * fetching *this* consensus, and are in state "reading" (meaning they + * have already flushed their request onto the socks connection). */ + smartlist_t *fetching_conns = + connection_dir_list_by_purpose_resource_and_state( + DIR_PURPOSE_FETCH_CONSENSUS, resource, DIR_CONN_STATE_CLIENT_READING); + + /* Then, walk through each conn, to see if its linked socks connection + * is in an attached state. We have to check this separately, since with + * the optimistic data feature, fetches can send their request to the + * socks connection and go into state 'reading', even before they're + * attached to any circuit. */ + SMARTLIST_FOREACH_BEGIN(fetching_conns, dir_connection_t *, dirconn) { + /* Do any of these other dir conns have a linked socks conn that is + * attached to a circuit already? 
*/ + connection_t *base = TO_CONN(dirconn); + if (base->linked_conn && + base->linked_conn->type == CONN_TYPE_AP && + !AP_CONN_STATE_IS_UNATTACHED(base->linked_conn->state)) { + answer = 1; + break; /* stop looping, because we know the answer will be yes */ + } + } SMARTLIST_FOREACH_END(dirconn); + smartlist_free(fetching_conns); - return 0; + return answer; } /** Given two router status entries for the same router identity, return 1 if @@ -1505,6 +1461,10 @@ networkstatus_set_current_consensus_from_ns(networkstatus_t *c, * If flags & NSSET_ACCEPT_OBSOLETE, then we should be willing to take this * consensus, even if it comes from many days in the past. * + * If source_dir is non-NULL, it's the identity digest for a directory that + * we've just successfully retrieved a consensus or certificates from, so try + * it first to fetch any missing certificates. + * * Return 0 on success, <0 on failure. On failure, caller should increment * the failure count as appropriate. * @@ -1514,7 +1474,8 @@ networkstatus_set_current_consensus_from_ns(networkstatus_t *c, int networkstatus_set_current_consensus(const char *consensus, const char *flavor, - unsigned flags) + unsigned flags, + const char *source_dir) { networkstatus_t *c=NULL; int r, result = -1; @@ -1636,7 +1597,7 @@ networkstatus_set_current_consensus(const char *consensus, write_str_to_file(unverified_fname, consensus, 0); } if (dl_certs) - authority_certs_fetch_missing(c, now); + authority_certs_fetch_missing(c, now, source_dir); /* This case is not a success or a failure until we get the certs * or fail to get the certs. */ result = 0; @@ -1674,7 +1635,7 @@ networkstatus_set_current_consensus(const char *consensus, /* Are we missing any certificates at all? */ if (r != 1 && dl_certs) - authority_certs_fetch_missing(c, now); + authority_certs_fetch_missing(c, now, source_dir); if (flav == usable_consensus_flavor()) { notify_control_networkstatus_changed(current_consensus, c); @@ -1797,9 +1758,14 @@ networkstatus_set_current_consensus(const char *consensus, } /** Called when we have gotten more certificates: see whether we can - * now verify a pending consensus. */ + * now verify a pending consensus. + * + * If source_dir is non-NULL, it's the identity digest for a directory that + * we've just successfully retrieved certificates from, so try it first to + * fetch any missing certificates. 
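The recurring new parameter in this area is a "source hint": when a consensus or a batch of certificates has just been fetched from a particular directory, that directory's identity digest is threaded down so follow-up certificate requests try the same directory first, and NULL means "no preference". A small illustration of the two calling styles, using only the signatures this patch introduces; the wrapper names are invented.

#include "or.h"
#include "networkstatus.h"

/* Loading a consensus from the on-disk cache: there is no directory to
 * prefer for missing certificates, so the hint is NULL. */
static int
set_consensus_from_cache_sketch(const char *body, const char *flavor)
{
  return networkstatus_set_current_consensus(body, flavor,
                                             NSSET_FROM_CACHE, NULL);
}

/* A consensus that just arrived over a directory connection: pass the
 * server's identity digest, so missing certs are requested from it first. */
static int
set_consensus_from_dirconn_sketch(dir_connection_t *conn,
                                  const char *body, const char *flavor)
{
  return networkstatus_set_current_consensus(body, flavor, 0,
                                             conn->identity_digest);
}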
+ */ void -networkstatus_note_certs_arrived(void) +networkstatus_note_certs_arrived(const char *source_dir) { int i; for (i=0; i<N_CONSENSUS_FLAVORS; ++i) { @@ -1811,7 +1777,8 @@ networkstatus_note_certs_arrived(void) if (!networkstatus_set_current_consensus( waiting_body, networkstatus_get_flavor_name(i), - NSSET_WAS_WAITING_FOR_CERTS)) { + NSSET_WAS_WAITING_FOR_CERTS, + source_dir)) { tor_free(waiting_body); } } diff --git a/src/or/networkstatus.h b/src/or/networkstatus.h index f2f8af5c6b..aee6641c6e 100644 --- a/src/or/networkstatus.h +++ b/src/or/networkstatus.h @@ -70,13 +70,12 @@ MOCK_DECL(networkstatus_t *,networkstatus_get_latest_consensus_by_flavor, networkstatus_t *networkstatus_get_live_consensus(time_t now); networkstatus_t *networkstatus_get_reasonably_live_consensus(time_t now, int flavor); -int networkstatus_consensus_is_bootstrapping(time_t now); +MOCK_DECL(int, networkstatus_consensus_is_bootstrapping,(time_t now)); int networkstatus_consensus_can_use_multiple_directories( const or_options_t *options); -int networkstatus_consensus_can_use_extra_fallbacks( - const or_options_t *options); -int networkstatus_consensus_has_excess_connections(void); -int networkstatus_consensus_is_downloading_usable_flavor(void); +MOCK_DECL(int, networkstatus_consensus_can_use_extra_fallbacks,( + const or_options_t *options)); +int networkstatus_consensus_is_already_downloading(const char *resource); #define NSSET_FROM_CACHE 1 #define NSSET_WAS_WAITING_FOR_CERTS 2 @@ -85,8 +84,9 @@ int networkstatus_consensus_is_downloading_usable_flavor(void); #define NSSET_REQUIRE_FLAVOR 16 int networkstatus_set_current_consensus(const char *consensus, const char *flavor, - unsigned flags); -void networkstatus_note_certs_arrived(void); + unsigned flags, + const char *source_dir); +void networkstatus_note_certs_arrived(const char *source_dir); void routers_update_all_from_networkstatus(time_t now, int dir_version); void routers_update_status_from_consensus_networkstatus(smartlist_t *routers, int reset_failures); diff --git a/src/or/or.h b/src/or/or.h index a6e4172443..b05c092ec2 100644 --- a/src/or/or.h +++ b/src/or/or.h @@ -2062,6 +2062,10 @@ typedef struct signed_descriptor_t { time_t published_on; /** For routerdescs only: digest of the corresponding extrainfo. */ char extra_info_digest[DIGEST_LEN]; + /** For routerdescs only: A SHA256-digest of the extrainfo (if any) */ + char extra_info_digest256[DIGEST256_LEN]; + /** Certificate for ed25519 signing key. */ + struct tor_cert_st *signing_key_cert; /** For routerdescs only: Status of downloading the corresponding * extrainfo. */ download_status_t ei_dl_status; @@ -2093,8 +2097,6 @@ typedef int16_t country_t; /** Information about another onion router in the network. */ typedef struct { signed_descriptor_t cache_info; - /** A SHA256-digest of the extrainfo (if any) */ - char extra_info_digest256[DIGEST256_LEN]; char *nickname; /**< Human-readable OR name. */ uint32_t addr; /**< IPv4 address of OR, in host order. */ @@ -2112,8 +2114,6 @@ typedef struct { crypto_pk_t *identity_pkey; /**< Public RSA key for signing. */ /** Public curve25519 key for onions */ curve25519_public_key_t *onion_curve25519_pkey; - /** Certificate for ed25519 signing key */ - struct tor_cert_st *signing_key_cert; /** What's the earliest expiration time on all the certs in this * routerinfo? */ time_t cert_expiration_time; @@ -2189,8 +2189,6 @@ typedef struct extrainfo_t { uint8_t digest256[DIGEST256_LEN]; /** The router's nickname. 
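The or.h hunk above is the reason for the many mechanical ri->signing_key_cert to ri->cache_info.signing_key_cert rewrites elsewhere in this diff: the ed25519 signing-key certificate (and the extra-info SHA256 digest) now live on the shared signed_descriptor_t, so routerinfo_t and extrainfo_t carry them the same way. A tiny sketch of what that sharing allows, with an invented helper name:

#include "or.h"

/* Sketch: one helper can now answer "does this descriptor have an ed25519
 * signing certificate?" for router descriptors and extra-info documents
 * alike, since both expose the field through their cache_info member. */
static int
signed_desc_has_ed_cert_sketch(const signed_descriptor_t *sd)
{
  return sd->signing_key_cert != NULL;
}

/* usage: signed_desc_has_ed_cert_sketch(&ri->cache_info)
 *    or: signed_desc_has_ed_cert_sketch(&ei->cache_info) */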
*/ char nickname[MAX_NICKNAME_LEN+1]; - /** Certificate for ed25519 signing key */ - struct tor_cert_st *signing_key_cert; /** True iff we found the right key for this extra-info, verified the * signature, and found it to be bad. */ unsigned int bad_sig : 1; diff --git a/src/or/rephist.c b/src/or/rephist.c index b94ad29650..50e8bf5db7 100644 --- a/src/or/rephist.c +++ b/src/or/rephist.c @@ -1867,14 +1867,17 @@ any_predicted_circuits(time_t now) int rep_hist_circbuilding_dormant(time_t now) { + const or_options_t *options = get_options(); + if (any_predicted_circuits(now)) return 0; /* see if we'll still need to build testing circuits */ - if (server_mode(get_options()) && - (!check_whether_orport_reachable() || !circuit_enough_testing_circs())) + if (server_mode(options) && + (!check_whether_orport_reachable(options) || + !circuit_enough_testing_circs())) return 0; - if (!check_whether_dirport_reachable()) + if (!check_whether_dirport_reachable(options)) return 0; return 1; @@ -2930,7 +2933,7 @@ static time_t start_of_hs_stats_interval; * information needed. */ typedef struct hs_stats_t { /** How many relay cells have we seen as rendezvous points? */ - int64_t rp_relay_cells_seen; + uint64_t rp_relay_cells_seen; /** Set of unique public key digests we've seen this stat period * (could also be implemented as sorted smartlist). */ @@ -3071,16 +3074,20 @@ rep_hist_format_hs_stats(time_t now) int64_t obfuscated_cells_seen; int64_t obfuscated_onions_seen; - obfuscated_cells_seen = round_int64_to_next_multiple_of( - hs_stats->rp_relay_cells_seen, - REND_CELLS_BIN_SIZE); - obfuscated_cells_seen = add_laplace_noise(obfuscated_cells_seen, + uint64_t rounded_cells_seen + = round_uint64_to_next_multiple_of(hs_stats->rp_relay_cells_seen, + REND_CELLS_BIN_SIZE); + rounded_cells_seen = MIN(rounded_cells_seen, INT64_MAX); + obfuscated_cells_seen = add_laplace_noise((int64_t)rounded_cells_seen, crypto_rand_double(), REND_CELLS_DELTA_F, REND_CELLS_EPSILON); - obfuscated_onions_seen = round_int64_to_next_multiple_of(digestmap_size( - hs_stats->onions_seen_this_period), - ONIONS_SEEN_BIN_SIZE); - obfuscated_onions_seen = add_laplace_noise(obfuscated_onions_seen, + + uint64_t rounded_onions_seen = + round_uint64_to_next_multiple_of((size_t)digestmap_size( + hs_stats->onions_seen_this_period), + ONIONS_SEEN_BIN_SIZE); + rounded_onions_seen = MIN(rounded_onions_seen, INT64_MAX); + obfuscated_onions_seen = add_laplace_noise((int64_t)rounded_onions_seen, crypto_rand_double(), ONIONS_SEEN_DELTA_F, ONIONS_SEEN_EPSILON); diff --git a/src/or/router.c b/src/or/router.c index 68bcf1326e..43157a9070 100644 --- a/src/or/router.c +++ b/src/or/router.c @@ -1054,7 +1054,8 @@ init_keys(void) log_info(LD_DIR, "adding my own v3 cert"); if (trusted_dirs_load_certs_from_string( cert->cache_info.signed_descriptor_body, - TRUSTED_DIRS_CERTS_SRC_SELF, 0)<0) { + TRUSTED_DIRS_CERTS_SRC_SELF, 0, + NULL)<0) { log_warn(LD_DIR, "Unable to parse my own v3 cert! Failing."); return -1; } @@ -1079,23 +1080,49 @@ router_reset_reachability(void) can_reach_or_port = can_reach_dir_port = 0; } -/** Return 1 if ORPort is known reachable; else return 0. */ -int -check_whether_orport_reachable(void) +/** Return 1 if we won't do reachability checks, because: + * - AssumeReachable is set, or + * - the network is disabled. + * Otherwise, return 0. 
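The rephist.c hunk above makes the hidden-service statistics obfuscation pipeline explicit: keep the raw counter unsigned, round it up to the next bin, clamp to INT64_MAX before casting, then add Laplace noise. A sketch of that sequence as one helper, using only the functions and constants the hunk itself calls; the helper name is invented, and bin_size, delta_f and epsilon stand in for the REND_CELLS_* or ONIONS_SEEN_* constants.

#include "or.h"
#include "util.h"
#include "crypto.h"

/* Sketch of the bin-then-noise sequence from rep_hist_format_hs_stats(). */
static int64_t
obfuscate_counter_sketch(uint64_t raw, uint64_t bin_size,
                         double delta_f, double epsilon)
{
  uint64_t rounded = round_uint64_to_next_multiple_of(raw, bin_size);
  rounded = MIN(rounded, INT64_MAX);   /* keep the cast to int64_t defined */
  return add_laplace_noise((int64_t)rounded, crypto_rand_double(),
                           delta_f, epsilon);
}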
+ */ +static int +router_reachability_checks_disabled(const or_options_t *options) { - const or_options_t *options = get_options(); return options->AssumeReachable || + net_is_disabled(); +} + +/** Return 0 if we need to do an ORPort reachability check, because: + * - no reachability check has been done yet, or + * - we've initiated reachability checks, but none have succeeded. + * Return 1 if we don't need to do an ORPort reachability check, because: + * - we've seen a successful reachability check, or + * - AssumeReachable is set, or + * - the network is disabled. + */ +int +check_whether_orport_reachable(const or_options_t *options) +{ + int reach_checks_disabled = router_reachability_checks_disabled(options); + return reach_checks_disabled || can_reach_or_port; } -/** Return 1 if we don't have a dirport configured, or if it's reachable. */ +/** Return 0 if we need to do a DirPort reachability check, because: + * - no reachability check has been done yet, or + * - we've initiated reachability checks, but none have succeeded. + * Return 1 if we don't need to do a DirPort reachability check, because: + * - we've seen a successful reachability check, or + * - there is no DirPort set, or + * - AssumeReachable is set, or + * - the network is disabled. + */ int -check_whether_dirport_reachable(void) +check_whether_dirport_reachable(const or_options_t *options) { - const or_options_t *options = get_options(); - return !options->DirPort_set || - options->AssumeReachable || - net_is_disabled() || + int reach_checks_disabled = router_reachability_checks_disabled(options) || + !options->DirPort_set; + return reach_checks_disabled || can_reach_dir_port; } @@ -1148,10 +1175,11 @@ router_should_be_directory_server(const or_options_t *options, int dir_port) "seconds long. Raising to 1."); interval_length = 1; } - log_info(LD_GENERAL, "Calculating whether to disable dirport: effective " + log_info(LD_GENERAL, "Calculating whether to advertise %s: effective " "bwrate: %u, AccountingMax: "U64_FORMAT", " - "accounting interval length %d", effective_bw, - U64_PRINTF_ARG(options->AccountingMax), + "accounting interval length %d", + dir_port ? "dirport" : "begindir", + effective_bw, U64_PRINTF_ARG(options->AccountingMax), interval_length); acc_bytes = options->AccountingMax; @@ -1199,34 +1227,62 @@ dir_server_mode(const or_options_t *options) } /** Look at a variety of factors, and return 0 if we don't want to - * advertise the fact that we have a DirPort open, else return the - * DirPort we want to advertise. + * advertise the fact that we have a DirPort open or begindir support, else + * return 1. + * + * Where dir_port or supports_tunnelled_dir_requests are not relevant, they + * must be 0. * - * Log a helpful message if we change our mind about whether to publish - * a DirPort. + * Log a helpful message if we change our mind about whether to publish. */ static int -decide_to_advertise_dirport(const or_options_t *options, uint16_t dir_port) +decide_to_advertise_dir_impl(const or_options_t *options, + uint16_t dir_port, + int supports_tunnelled_dir_requests) { /* Part one: reasons to publish or not publish that aren't * worth mentioning to the user, either because they're obvious * or because they're normal behavior. 
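With this refactoring, check_whether_orport_reachable() and check_whether_dirport_reachable() take the options object explicitly instead of calling get_options() themselves, and the shared AssumeReachable / disabled-network short-circuit lives in router_reachability_checks_disabled(). Callers fetch options once and pass it along, as in this small sketch; the function name is invented, and the pattern mirrors the second_elapsed_callback() change earlier in the diff.

#include "or.h"
#include "config.h"
#include "router.h"

/* Sketch: a periodic check that complains while reachability is still
 * unconfirmed, fetching the options object once and passing it through. */
static void
warn_if_still_unreachable_sketch(void)
{
  const or_options_t *options = get_options();

  if (!check_whether_orport_reachable(options))
    log_warn(LD_CONFIG, "ORPort not yet confirmed reachable.");

  if (!check_whether_dirport_reachable(options))
    log_warn(LD_CONFIG, "DirPort not yet confirmed reachable.");
}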
*/ - if (!dir_port) /* short circuit the rest of the function */ + /* short circuit the rest of the function */ + if (!dir_port && !supports_tunnelled_dir_requests) return 0; if (authdir_mode(options)) /* always publish */ - return dir_port; + return 1; if (net_is_disabled()) return 0; - if (!check_whether_dirport_reachable()) + if (dir_port && !router_get_advertised_dir_port(options, dir_port)) return 0; - if (!router_get_advertised_dir_port(options, dir_port)) + if (supports_tunnelled_dir_requests && + !router_get_advertised_or_port(options)) return 0; - /* Part two: reasons to publish or not publish that the user - * might find surprising. router_should_be_directory_server() - * considers config options that make us choose not to publish. */ - return router_should_be_directory_server(options, dir_port) ? dir_port : 0; + /* Part two: consider config options that could make us choose to + * publish or not publish that the user might find surprising. */ + return router_should_be_directory_server(options, dir_port); +} + +/** Front-end to decide_to_advertise_dir_impl(): return 0 if we don't want to + * advertise the fact that we have a DirPort open, else return the + * DirPort we want to advertise. + */ +static int +decide_to_advertise_dirport(const or_options_t *options, uint16_t dir_port) +{ + /* supports_tunnelled_dir_requests is not relevant, pass 0 */ + return decide_to_advertise_dir_impl(options, dir_port, 0) ? dir_port : 0; +} + +/** Front-end to decide_to_advertise_dir_impl(): return 0 if we don't want to + * advertise the fact that we support begindir requests, else return 1. + */ +static int +decide_to_advertise_begindir(const or_options_t *options, + int supports_tunnelled_dir_requests) +{ + /* dir_port is not relevant, pass 0 */ + return decide_to_advertise_dir_impl(options, 0, + supports_tunnelled_dir_requests); } /** Allocate and return a new extend_info_t that can be used to build @@ -1260,9 +1316,9 @@ void consider_testing_reachability(int test_or, int test_dir) { const routerinfo_t *me = router_get_my_routerinfo(); - int orport_reachable = check_whether_orport_reachable(); - tor_addr_t addr; const or_options_t *options = get_options(); + int orport_reachable = check_whether_orport_reachable(options); + tor_addr_t addr; if (!me) return; @@ -1295,7 +1351,7 @@ consider_testing_reachability(int test_or, int test_dir) /* XXX IPv6 self testing */ tor_addr_from_ipv4h(&addr, me->addr); - if (test_dir && !check_whether_dirport_reachable() && + if (test_dir && !check_whether_dirport_reachable(options) && !connection_get_by_type_addr_port_purpose( CONN_TYPE_DIR, &addr, me->dir_port, DIR_PURPOSE_FETCH_SERVERDESC)) { @@ -1314,18 +1370,19 @@ void router_orport_found_reachable(void) { const routerinfo_t *me = router_get_my_routerinfo(); + const or_options_t *options = get_options(); if (!can_reach_or_port && me) { char *address = tor_dup_ip(me->addr); log_notice(LD_OR,"Self-testing indicates your ORPort is reachable from " "the outside. Excellent.%s", - get_options()->PublishServerDescriptor_ != NO_DIRINFO - && check_whether_dirport_reachable() ? + options->PublishServerDescriptor_ != NO_DIRINFO + && check_whether_dirport_reachable(options) ? " Publishing server descriptor." 
: ""); can_reach_or_port = 1; mark_my_descriptor_dirty("ORPort found reachable"); /* This is a significant enough change to upload immediately, * at least in a test network */ - if (get_options()->TestingTorNetwork == 1) { + if (options->TestingTorNetwork == 1) { reschedule_descriptor_update_check(); } control_event_server_status(LOG_NOTICE, @@ -1340,19 +1397,20 @@ void router_dirport_found_reachable(void) { const routerinfo_t *me = router_get_my_routerinfo(); + const or_options_t *options = get_options(); if (!can_reach_dir_port && me) { char *address = tor_dup_ip(me->addr); log_notice(LD_DIRSERV,"Self-testing indicates your DirPort is reachable " "from the outside. Excellent.%s", - get_options()->PublishServerDescriptor_ != NO_DIRINFO - && check_whether_orport_reachable() ? + options->PublishServerDescriptor_ != NO_DIRINFO + && check_whether_orport_reachable(options) ? " Publishing server descriptor." : ""); can_reach_dir_port = 1; - if (decide_to_advertise_dirport(get_options(), me->dir_port)) { + if (decide_to_advertise_dirport(options, me->dir_port)) { mark_my_descriptor_dirty("DirPort found reachable"); /* This is a significant enough change to upload immediately, * at least in a test network */ - if (get_options()->TestingTorNetwork == 1) { + if (options->TestingTorNetwork == 1) { reschedule_descriptor_update_check(); } } @@ -1549,8 +1607,10 @@ proxy_mode(const or_options_t *options) * and * - We have ORPort set * and - * - We believe both our ORPort and DirPort (if present) are reachable from + * - We believe our ORPort and DirPort (if present) are reachable from * the outside; or + * - We believe our ORPort is reachable from the outside, and we can't + * check our DirPort because the consensus has no exits; or * - We are an authoritative directory server. */ static int @@ -1568,8 +1628,15 @@ decide_if_publishable_server(void) return 1; if (!router_get_advertised_or_port(options)) return 0; - - return check_whether_orport_reachable() && check_whether_dirport_reachable(); + if (!check_whether_orport_reachable(options)) + return 0; + if (router_have_consensus_path() == CONSENSUS_PATH_INTERNAL) { + /* All set: there are no exits in the consensus (maybe this is a tiny + * test network), so we can't check our DirPort reachability. 
*/ + return 1; + } else { + return check_whether_dirport_reachable(options); + } } /** Initiate server descriptor upload as reasonable (if server is publishable, @@ -1924,8 +1991,8 @@ router_build_fresh_descriptor(routerinfo_t **r, extrainfo_t **e) ri->addr = addr; ri->or_port = router_get_advertised_or_port(options); ri->dir_port = router_get_advertised_dir_port(options, 0); - ri->supports_tunnelled_dir_requests = dir_server_mode(options) && - router_should_be_directory_server(options, ri->dir_port); + ri->supports_tunnelled_dir_requests = + directory_permits_begindir_requests(options); ri->cache_info.published_on = time(NULL); ri->onion_pkey = crypto_pk_dup_key(get_onion_key()); /* must invoke from * main thread */ @@ -1970,7 +2037,8 @@ router_build_fresh_descriptor(routerinfo_t **r, extrainfo_t **e) routerinfo_free(ri); return -1; } - ri->signing_key_cert = tor_cert_dup(get_master_signing_key_cert()); + ri->cache_info.signing_key_cert = + tor_cert_dup(get_master_signing_key_cert()); get_platform_str(platform, sizeof(platform)); ri->platform = tor_strdup(platform); @@ -2062,7 +2130,9 @@ router_build_fresh_descriptor(routerinfo_t **r, extrainfo_t **e) ei->cache_info.is_extrainfo = 1; strlcpy(ei->nickname, get_options()->Nickname, sizeof(ei->nickname)); ei->cache_info.published_on = ri->cache_info.published_on; - ei->signing_key_cert = tor_cert_dup(get_master_signing_key_cert()); + ei->cache_info.signing_key_cert = + tor_cert_dup(get_master_signing_key_cert()); + memcpy(ei->cache_info.identity_digest, ri->cache_info.identity_digest, DIGEST_LEN); if (extrainfo_dump_to_string(&ei->cache_info.signed_descriptor_body, @@ -2088,7 +2158,7 @@ router_build_fresh_descriptor(routerinfo_t **r, extrainfo_t **e) memcpy(ri->cache_info.extra_info_digest, ei->cache_info.signed_descriptor_digest, DIGEST_LEN); - memcpy(ri->extra_info_digest256, + memcpy(ri->cache_info.extra_info_digest256, ei->digest256, DIGEST256_LEN); } else { @@ -2129,7 +2199,9 @@ router_build_fresh_descriptor(routerinfo_t **r, extrainfo_t **e) ri->cache_info.signed_descriptor_digest); if (ei) { - tor_assert(! routerinfo_incompatible_with_extrainfo(ri, ei, NULL, NULL)); + tor_assert(! 
+ routerinfo_incompatible_with_extrainfo(ri->identity_pkey, ei, + &ri->cache_info, NULL)); } *r = ri; @@ -2458,7 +2530,8 @@ router_dump_router_to_string(routerinfo_t *router, const or_options_t *options = get_options(); smartlist_t *chunks = NULL; char *output = NULL; - const int emit_ed_sigs = signing_keypair && router->signing_key_cert; + const int emit_ed_sigs = signing_keypair && + router->cache_info.signing_key_cert; char *ed_cert_line = NULL; char *rsa_tap_cc_line = NULL; char *ntor_cc_line = NULL; @@ -2470,12 +2543,12 @@ router_dump_router_to_string(routerinfo_t *router, goto err; } if (emit_ed_sigs) { - if (!router->signing_key_cert->signing_key_included || - !ed25519_pubkey_eq(&router->signing_key_cert->signed_key, + if (!router->cache_info.signing_key_cert->signing_key_included || + !ed25519_pubkey_eq(&router->cache_info.signing_key_cert->signed_key, &signing_keypair->pubkey)) { log_warn(LD_BUG, "Tried to sign a router descriptor with a mismatched " "ed25519 key chain %d", - router->signing_key_cert->signing_key_included); + router->cache_info.signing_key_cert->signing_key_included); goto err; } } @@ -2491,14 +2564,14 @@ router_dump_router_to_string(routerinfo_t *router, char ed_cert_base64[256]; char ed_fp_base64[ED25519_BASE64_LEN+1]; if (base64_encode(ed_cert_base64, sizeof(ed_cert_base64), - (const char*)router->signing_key_cert->encoded, - router->signing_key_cert->encoded_len, - BASE64_ENCODE_MULTILINE) < 0) { + (const char*)router->cache_info.signing_key_cert->encoded, + router->cache_info.signing_key_cert->encoded_len, + BASE64_ENCODE_MULTILINE) < 0) { log_err(LD_BUG,"Couldn't base64-encode signing key certificate!"); goto err; } if (ed25519_public_to_base64(ed_fp_base64, - &router->signing_key_cert->signing_key)<0) { + &router->cache_info.signing_key_cert->signing_key)<0) { log_err(LD_BUG,"Couldn't base64-encode identity key\n"); goto err; } @@ -2525,15 +2598,15 @@ router_dump_router_to_string(routerinfo_t *router, } /* Cross-certify with RSA key */ - if (tap_key && router->signing_key_cert && - router->signing_key_cert->signing_key_included) { + if (tap_key && router->cache_info.signing_key_cert && + router->cache_info.signing_key_cert->signing_key_included) { char buf[256]; int tap_cc_len = 0; uint8_t *tap_cc = make_tap_onion_key_crosscert(tap_key, - &router->signing_key_cert->signing_key, - router->identity_pkey, - &tap_cc_len); + &router->cache_info.signing_key_cert->signing_key, + router->identity_pkey, + &tap_cc_len); if (!tap_cc) { log_warn(LD_BUG,"make_tap_onion_key_crosscert failed!"); goto err; @@ -2555,16 +2628,16 @@ router_dump_router_to_string(routerinfo_t *router, } /* Cross-certify with onion keys */ - if (ntor_keypair && router->signing_key_cert && - router->signing_key_cert->signing_key_included) { + if (ntor_keypair && router->cache_info.signing_key_cert && + router->cache_info.signing_key_cert->signing_key_included) { int sign = 0; char buf[256]; /* XXXX Base the expiration date on the actual onion key expiration time?*/ tor_cert_t *cert = make_ntor_onion_key_crosscert(ntor_keypair, - &router->signing_key_cert->signing_key, - router->cache_info.published_on, - MIN_ONION_KEY_LIFETIME, &sign); + &router->cache_info.signing_key_cert->signing_key, + router->cache_info.published_on, + MIN_ONION_KEY_LIFETIME, &sign); if (!cert) { log_warn(LD_BUG,"make_ntor_onion_key_crosscert failed!"); goto err; @@ -2603,9 +2676,9 @@ router_dump_router_to_string(routerinfo_t *router, char extra_info_digest[HEX_DIGEST_LEN+1]; base16_encode(extra_info_digest, 
sizeof(extra_info_digest), router->cache_info.extra_info_digest, DIGEST_LEN); - if (!tor_digest256_is_zero(router->extra_info_digest256)) { + if (!tor_digest256_is_zero(router->cache_info.extra_info_digest256)) { char d256_64[BASE64_DIGEST256_LEN+1]; - digest256_to_base64(d256_64, router->extra_info_digest256); + digest256_to_base64(d256_64, router->cache_info.extra_info_digest256); tor_asprintf(&extra_info_line, "extra-info-digest %s %s\n", extra_info_digest, d256_64); } else { @@ -2706,7 +2779,8 @@ router_dump_router_to_string(routerinfo_t *router, tor_free(p6); } - if (router->supports_tunnelled_dir_requests) { + if (decide_to_advertise_begindir(options, + router->supports_tunnelled_dir_requests)) { smartlist_add(chunks, tor_strdup("tunnelled-dir-server\n")); } @@ -2910,7 +2984,8 @@ extrainfo_dump_to_string(char **s_out, extrainfo_t *extrainfo, time_t now = time(NULL); smartlist_t *chunks = smartlist_new(); extrainfo_t *ei_tmp = NULL; - const int emit_ed_sigs = signing_keypair && extrainfo->signing_key_cert; + const int emit_ed_sigs = signing_keypair && + extrainfo->cache_info.signing_key_cert; char *ed_cert_line = NULL; base16_encode(identity, sizeof(identity), @@ -2918,19 +2993,19 @@ extrainfo_dump_to_string(char **s_out, extrainfo_t *extrainfo, format_iso_time(published, extrainfo->cache_info.published_on); bandwidth_usage = rep_hist_get_bandwidth_lines(); if (emit_ed_sigs) { - if (!extrainfo->signing_key_cert->signing_key_included || - !ed25519_pubkey_eq(&extrainfo->signing_key_cert->signed_key, + if (!extrainfo->cache_info.signing_key_cert->signing_key_included || + !ed25519_pubkey_eq(&extrainfo->cache_info.signing_key_cert->signed_key, &signing_keypair->pubkey)) { log_warn(LD_BUG, "Tried to sign a extrainfo descriptor with a " "mismatched ed25519 key chain %d", - extrainfo->signing_key_cert->signing_key_included); + extrainfo->cache_info.signing_key_cert->signing_key_included); goto err; } char ed_cert_base64[256]; if (base64_encode(ed_cert_base64, sizeof(ed_cert_base64), - (const char*)extrainfo->signing_key_cert->encoded, - extrainfo->signing_key_cert->encoded_len, - BASE64_ENCODE_MULTILINE) < 0) { + (const char*)extrainfo->cache_info.signing_key_cert->encoded, + extrainfo->cache_info.signing_key_cert->encoded_len, + BASE64_ENCODE_MULTILINE) < 0) { log_err(LD_BUG,"Couldn't base64-encode signing key certificate!"); goto err; } diff --git a/src/or/router.h b/src/or/router.h index 5165462a13..73bfea1faa 100644 --- a/src/or/router.h +++ b/src/or/router.h @@ -39,8 +39,8 @@ int router_initialize_tls_context(void); int init_keys(void); int init_keys_client(void); -int check_whether_orport_reachable(void); -int check_whether_dirport_reachable(void); +int check_whether_orport_reachable(const or_options_t *options); +int check_whether_dirport_reachable(const or_options_t *options); int dir_server_mode(const or_options_t *options); void consider_testing_reachability(int test_or, int test_dir); void router_orport_found_reachable(void); diff --git a/src/or/routerlist.c b/src/or/routerlist.c index 2149192509..4d9d249d10 100644 --- a/src/or/routerlist.c +++ b/src/or/routerlist.c @@ -67,7 +67,7 @@ typedef struct cert_list_t cert_list_t; /* static function prototypes */ static int compute_weighted_bandwidths(const smartlist_t *sl, bandwidth_weight_rule_t rule, - u64_dbl_t **bandwidths_out); + double **bandwidths_out); static const routerstatus_t *router_pick_trusteddirserver_impl( const smartlist_t *sourcelist, dirinfo_type_t auth, int flags, int *n_busy_out); @@ -287,7 +287,7 @@ 
trusted_dirs_reload_certs(void) return 0; r = trusted_dirs_load_certs_from_string( contents, - TRUSTED_DIRS_CERTS_SRC_FROM_STORE, 1); + TRUSTED_DIRS_CERTS_SRC_FROM_STORE, 1, NULL); tor_free(contents); return r; } @@ -317,16 +317,21 @@ already_have_cert(authority_cert_t *cert) * or TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_SK_DIGEST. If <b>flush</b> is true, we * need to flush any changed certificates to disk now. Return 0 on success, * -1 if any certs fail to parse. + * + * If source_dir is non-NULL, it's the identity digest for a directory that + * we've just successfully retrieved certificates from, so try it first to + * fetch any missing certificates. */ int trusted_dirs_load_certs_from_string(const char *contents, int source, - int flush) + int flush, const char *source_dir) { dir_server_t *ds; const char *s, *eos; int failure_code = 0; int from_store = (source == TRUSTED_DIRS_CERTS_SRC_FROM_STORE); + int added_trusted_cert = 0; for (s = contents; *s; s = eos) { authority_cert_t *cert = authority_cert_parse_from_string(s, &eos); @@ -386,6 +391,7 @@ trusted_dirs_load_certs_from_string(const char *contents, int source, } if (ds) { + added_trusted_cert = 1; log_info(LD_DIR, "Adding %s certificate for directory authority %s with " "signing key %s", from_store ? "cached" : "downloaded", ds->nickname, hex_str(cert->signing_key_digest,DIGEST_LEN)); @@ -430,8 +436,15 @@ trusted_dirs_load_certs_from_string(const char *contents, int source, trusted_dirs_flush_certs_to_disk(); /* call this even if failure_code is <0, since some certs might have - * succeeded. */ - networkstatus_note_certs_arrived(); + * succeeded, but only pass source_dir if there were no failures, + * and at least one more authority certificate was added to the store. + * This avoids retrying a directory that's serving bad or entirely duplicate + * certificates. */ + if (failure_code == 0 && added_trusted_cert) { + networkstatus_note_certs_arrived(source_dir); + } else { + networkstatus_note_certs_arrived(NULL); + } return failure_code; } @@ -718,9 +731,14 @@ authority_cert_dl_looks_uncertain(const char *id_digest) * <b>status</b>. Additionally, try to have a non-expired certificate for * every V3 authority in trusted_dir_servers. Don't fetch certificates we * already have. + * + * If dir_hint is non-NULL, it's the identity digest for a directory that + * we've just successfully retrieved a consensus or certificates from, so try + * it first to fetch any missing certificates. **/ void -authority_certs_fetch_missing(networkstatus_t *status, time_t now) +authority_certs_fetch_missing(networkstatus_t *status, time_t now, + const char *dir_hint) { /* * The pending_id digestmap tracks pending certificate downloads by @@ -884,6 +902,37 @@ authority_certs_fetch_missing(networkstatus_t *status, time_t now) } SMARTLIST_FOREACH_END(voter); } + /* Look up the routerstatus for the dir_hint */ + const routerstatus_t *rs = NULL; + + /* If we still need certificates, try the directory that just successfully + * served us a consensus or certificates. + * As soon as the directory fails to provide additional certificates, we try + * another, randomly selected directory. This avoids continual retries. + * (We only ever have one outstanding request per certificate.) + * + * Bridge clients won't find their bridges using this hint, so they will + * fall back to using directory_get_from_dirserver, which selects a bridge. 
+ */ + if (dir_hint) { + /* First try the consensus routerstatus, then the fallback + * routerstatus */ + rs = router_get_consensus_status_by_id(dir_hint); + if (!rs) { + /* This will also find authorities */ + const dir_server_t *ds = router_get_fallback_dirserver_by_digest( + dir_hint); + if (ds) { + rs = &ds->fake_status; + } + } + + if (!rs) { + log_warn(LD_BUG, "Directory %s delivered a consensus, but a " + "routerstatus could not be found for it.", dir_hint); + } + } + /* Do downloads by identity digest */ if (smartlist_len(missing_id_digests) > 0) { int need_plus = 0; @@ -913,11 +962,25 @@ authority_certs_fetch_missing(networkstatus_t *status, time_t now) if (smartlist_len(fps) > 1) { resource = smartlist_join_strings(fps, "", 0, NULL); - /* We want certs from mirrors, because they will almost always succeed. - */ - directory_get_from_dirserver(DIR_PURPOSE_FETCH_CERTIFICATE, 0, - resource, PDS_RETRY_IF_NO_SERVERS, - DL_WANT_ANY_DIRSERVER); + + /* If we've just downloaded a consensus from a directory, re-use that + * directory */ + if (rs) { + /* Certificate fetches are one-hop, unless AllDirActionsPrivate is 1 */ + int get_via_tor = get_options()->AllDirActionsPrivate; + const dir_indirection_t indirection = get_via_tor ? DIRIND_ANONYMOUS + : DIRIND_ONEHOP; + directory_initiate_command_routerstatus(rs, + DIR_PURPOSE_FETCH_CERTIFICATE, + 0, indirection, resource, NULL, + 0, 0); + } else { + /* Otherwise, we want certs from a random fallback or directory + * mirror, because they will almost always succeed. */ + directory_get_from_dirserver(DIR_PURPOSE_FETCH_CERTIFICATE, 0, + resource, PDS_RETRY_IF_NO_SERVERS, + DL_WANT_ANY_DIRSERVER); + } tor_free(resource); } /* else we didn't add any: they were all pending */ @@ -960,11 +1023,25 @@ authority_certs_fetch_missing(networkstatus_t *status, time_t now) if (smartlist_len(fp_pairs) > 1) { resource = smartlist_join_strings(fp_pairs, "", 0, NULL); - /* We want certs from mirrors, because they will almost always succeed. - */ - directory_get_from_dirserver(DIR_PURPOSE_FETCH_CERTIFICATE, 0, - resource, PDS_RETRY_IF_NO_SERVERS, - DL_WANT_ANY_DIRSERVER); + + /* If we've just downloaded a consensus from a directory, re-use that + * directory */ + if (rs) { + /* Certificate fetches are one-hop, unless AllDirActionsPrivate is 1 */ + int get_via_tor = get_options()->AllDirActionsPrivate; + const dir_indirection_t indirection = get_via_tor ? DIRIND_ANONYMOUS + : DIRIND_ONEHOP; + directory_initiate_command_routerstatus(rs, + DIR_PURPOSE_FETCH_CERTIFICATE, + 0, indirection, resource, NULL, + 0, 0); + } else { + /* Otherwise, we want certs from a random fallback or directory + * mirror, because they will almost always succeed. 
*/ + directory_get_from_dirserver(DIR_PURPOSE_FETCH_CERTIFICATE, 0, + resource, PDS_RETRY_IF_NO_SERVERS, + DL_WANT_ANY_DIRSERVER); + } tor_free(resource); } /* else they were all pending */ @@ -1815,20 +1892,23 @@ dirserver_choose_by_weight(const smartlist_t *servers, double authority_weight) { int n = smartlist_len(servers); int i; - u64_dbl_t *weights; + double *weights_dbl; + uint64_t *weights_u64; const dir_server_t *ds; - weights = tor_calloc(n, sizeof(u64_dbl_t)); + weights_dbl = tor_calloc(n, sizeof(double)); + weights_u64 = tor_calloc(n, sizeof(uint64_t)); for (i = 0; i < n; ++i) { ds = smartlist_get(servers, i); - weights[i].dbl = ds->weight; + weights_dbl[i] = ds->weight; if (ds->is_authority) - weights[i].dbl *= authority_weight; + weights_dbl[i] *= authority_weight; } - scale_array_elements_to_u64(weights, n, NULL); - i = choose_array_element_by_weight(weights, n); - tor_free(weights); + scale_array_elements_to_u64(weights_u64, weights_dbl, n, NULL); + i = choose_array_element_by_weight(weights_u64, n); + tor_free(weights_dbl); + tor_free(weights_u64); return (i < 0) ? NULL : smartlist_get(servers, i); } @@ -2090,7 +2170,8 @@ router_get_advertised_bandwidth_capped(const routerinfo_t *router) * much of the range of uint64_t. If <b>total_out</b> is provided, set it to * the sum of all elements in the array _before_ scaling. */ STATIC void -scale_array_elements_to_u64(u64_dbl_t *entries, int n_entries, +scale_array_elements_to_u64(uint64_t *entries_out, const double *entries_in, + int n_entries, uint64_t *total_out) { double total = 0.0; @@ -2100,13 +2181,13 @@ scale_array_elements_to_u64(u64_dbl_t *entries, int n_entries, #define SCALE_TO_U64_MAX ((int64_t) (INT64_MAX / 4)) for (i = 0; i < n_entries; ++i) - total += entries[i].dbl; + total += entries_in[i]; if (total > 0.0) scale_factor = SCALE_TO_U64_MAX / total; for (i = 0; i < n_entries; ++i) - entries[i].u64 = tor_llround(entries[i].dbl * scale_factor); + entries_out[i] = tor_llround(entries_in[i] * scale_factor); if (total_out) *total_out = (uint64_t) total; @@ -2114,35 +2195,20 @@ scale_array_elements_to_u64(u64_dbl_t *entries, int n_entries, #undef SCALE_TO_U64_MAX } -/** Time-invariant 64-bit greater-than; works on two integers in the range - * (0,INT64_MAX). */ -#if SIZEOF_VOID_P == 8 -#define gt_i64_timei(a,b) ((a) > (b)) -#else -static inline int -gt_i64_timei(uint64_t a, uint64_t b) -{ - int64_t diff = (int64_t) (b - a); - int res = diff >> 63; - return res & 1; -} -#endif - /** Pick a random element of <b>n_entries</b>-element array <b>entries</b>, * choosing each element with a probability proportional to its (uint64_t) * value, and return the index of that element. If all elements are 0, choose * an index at random. Return -1 on error. */ STATIC int -choose_array_element_by_weight(const u64_dbl_t *entries, int n_entries) +choose_array_element_by_weight(const uint64_t *entries, int n_entries) { - int i, i_chosen=-1, n_chosen=0; - uint64_t total_so_far = 0; + int i; uint64_t rand_val; uint64_t total = 0; for (i = 0; i < n_entries; ++i) - total += entries[i].u64; + total += entries[i]; if (n_entries < 1) return -1; @@ -2154,22 +2220,8 @@ choose_array_element_by_weight(const u64_dbl_t *entries, int n_entries) rand_val = crypto_rand_uint64(total); - for (i = 0; i < n_entries; ++i) { - total_so_far += entries[i].u64; - if (gt_i64_timei(total_so_far, rand_val)) { - i_chosen = i; - n_chosen++; - /* Set rand_val to INT64_MAX rather than stopping the loop. 
This way, - * the time we spend in the loop does not leak which element we chose. */ - rand_val = INT64_MAX; - } - } - tor_assert(total_so_far == total); - tor_assert(n_chosen == 1); - tor_assert(i_chosen >= 0); - tor_assert(i_chosen < n_entries); - - return i_chosen; + return select_array_member_cumulative_timei( + entries, n_entries, total, rand_val); } /** When weighting bridges, enforce these values as lower and upper @@ -2221,17 +2273,21 @@ static const node_t * smartlist_choose_node_by_bandwidth_weights(const smartlist_t *sl, bandwidth_weight_rule_t rule) { - u64_dbl_t *bandwidths=NULL; + double *bandwidths_dbl=NULL; + uint64_t *bandwidths_u64=NULL; - if (compute_weighted_bandwidths(sl, rule, &bandwidths) < 0) + if (compute_weighted_bandwidths(sl, rule, &bandwidths_dbl) < 0) return NULL; - scale_array_elements_to_u64(bandwidths, smartlist_len(sl), NULL); + bandwidths_u64 = tor_calloc(smartlist_len(sl), sizeof(uint64_t)); + scale_array_elements_to_u64(bandwidths_u64, bandwidths_dbl, + smartlist_len(sl), NULL); { - int idx = choose_array_element_by_weight(bandwidths, + int idx = choose_array_element_by_weight(bandwidths_u64, smartlist_len(sl)); - tor_free(bandwidths); + tor_free(bandwidths_dbl); + tor_free(bandwidths_u64); return idx < 0 ? NULL : smartlist_get(sl, idx); } } @@ -2244,14 +2300,14 @@ smartlist_choose_node_by_bandwidth_weights(const smartlist_t *sl, static int compute_weighted_bandwidths(const smartlist_t *sl, bandwidth_weight_rule_t rule, - u64_dbl_t **bandwidths_out) + double **bandwidths_out) { int64_t weight_scale; double Wg = -1, Wm = -1, We = -1, Wd = -1; double Wgb = -1, Wmb = -1, Web = -1, Wdb = -1; uint64_t weighted_bw = 0; guardfraction_bandwidth_t guardfraction_bw; - u64_dbl_t *bandwidths; + double *bandwidths; /* Can't choose exit and guard at same time */ tor_assert(rule == NO_WEIGHTING || @@ -2333,7 +2389,7 @@ compute_weighted_bandwidths(const smartlist_t *sl, Web /= weight_scale; Wdb /= weight_scale; - bandwidths = tor_calloc(smartlist_len(sl), sizeof(u64_dbl_t)); + bandwidths = tor_calloc(smartlist_len(sl), sizeof(double)); // Cycle through smartlist and total the bandwidth. 
static int warned_missing_bw = 0; @@ -2420,7 +2476,7 @@ compute_weighted_bandwidths(const smartlist_t *sl, final_weight = weight*this_bw; } - bandwidths[node_sl_idx].dbl = final_weight + 0.5; + bandwidths[node_sl_idx] = final_weight + 0.5; } SMARTLIST_FOREACH_END(node); log_debug(LD_CIRC, "Generated weighted bandwidths for rule %s based " @@ -2441,7 +2497,7 @@ double frac_nodes_with_descriptors(const smartlist_t *sl, bandwidth_weight_rule_t rule) { - u64_dbl_t *bandwidths = NULL; + double *bandwidths = NULL; double total, present; if (smartlist_len(sl) == 0) @@ -2458,7 +2514,7 @@ frac_nodes_with_descriptors(const smartlist_t *sl, total = present = 0.0; SMARTLIST_FOREACH_BEGIN(sl, const node_t *, node) { - const double bw = bandwidths[node_sl_idx].dbl; + const double bw = bandwidths[node_sl_idx]; total += bw; if (node_has_descriptor(node)) present += bw; @@ -2897,7 +2953,7 @@ routerinfo_free(routerinfo_t *router) tor_free(router->onion_curve25519_pkey); if (router->identity_pkey) crypto_pk_free(router->identity_pkey); - tor_cert_free(router->signing_key_cert); + tor_cert_free(router->cache_info.signing_key_cert); if (router->declared_family) { SMARTLIST_FOREACH(router->declared_family, char *, s, tor_free(s)); smartlist_free(router->declared_family); @@ -2916,7 +2972,7 @@ extrainfo_free(extrainfo_t *extrainfo) { if (!extrainfo) return; - tor_cert_free(extrainfo->signing_key_cert); + tor_cert_free(extrainfo->cache_info.signing_key_cert); tor_free(extrainfo->cache_info.signed_descriptor_body); tor_free(extrainfo->pending_sig); @@ -2932,11 +2988,25 @@ signed_descriptor_free(signed_descriptor_t *sd) return; tor_free(sd->signed_descriptor_body); + tor_cert_free(sd->signing_key_cert); memset(sd, 99, sizeof(signed_descriptor_t)); /* Debug bad mem usage */ tor_free(sd); } +/** Copy src into dest, and steal all references inside src so that when + * we free src, we don't mess up dest. */ +static void +signed_descriptor_move(signed_descriptor_t *dest, + signed_descriptor_t *src) +{ + tor_assert(dest != src); + memcpy(dest, src, sizeof(signed_descriptor_t)); + src->signed_descriptor_body = NULL; + src->signing_key_cert = NULL; + dest->routerlist_index = -1; +} + /** Extract a signed_descriptor_t from a general routerinfo, and free the * routerinfo. */ @@ -2946,9 +3016,7 @@ signed_descriptor_from_routerinfo(routerinfo_t *ri) signed_descriptor_t *sd; tor_assert(ri->purpose == ROUTER_PURPOSE_GENERAL); sd = tor_malloc_zero(sizeof(signed_descriptor_t)); - memcpy(sd, &(ri->cache_info), sizeof(signed_descriptor_t)); - sd->routerlist_index = -1; - ri->cache_info.signed_descriptor_body = NULL; + signed_descriptor_move(sd, &ri->cache_info); routerinfo_free(ri); return sd; } @@ -3126,7 +3194,7 @@ extrainfo_insert,(routerlist_t *rl, extrainfo_t *ei, int warn_if_incompatible)) "Mismatch in digest in extrainfo map."); goto done; } - if (routerinfo_incompatible_with_extrainfo(ri, ei, sd, + if (routerinfo_incompatible_with_extrainfo(ri->identity_pkey, ei, sd, &compatibility_error_msg)) { char d1[HEX_DIGEST_LEN+1], d2[HEX_DIGEST_LEN+1]; r = (ri->cache_info.extrainfo_is_bogus) ? @@ -3434,9 +3502,7 @@ routerlist_reparse_old(routerlist_t *rl, signed_descriptor_t *sd) 0, 1, NULL, NULL); if (!ri) return NULL; - memcpy(&ri->cache_info, sd, sizeof(signed_descriptor_t)); - sd->signed_descriptor_body = NULL; /* Steal reference. 
*/ - ri->cache_info.routerlist_index = -1; + signed_descriptor_move(&ri->cache_info, sd); routerlist_remove_old(rl, sd, -1); @@ -5165,25 +5231,32 @@ router_differences_are_cosmetic(const routerinfo_t *r1, const routerinfo_t *r2) return 1; } -/** Check whether <b>ri</b> (a.k.a. sd) is a router compatible with the - * extrainfo document - * <b>ei</b>. If no router is compatible with <b>ei</b>, <b>ei</b> should be +/** Check whether <b>sd</b> describes a router descriptor compatible with the + * extrainfo document <b>ei</b>. + * + * <b>identity_pkey</b> (which must also be provided) is RSA1024 identity key + * for the router. We use it to check the signature of the extrainfo document, + * if it has not already been checked. + * + * If no router is compatible with <b>ei</b>, <b>ei</b> should be * dropped. Return 0 for "compatible", return 1 for "reject, and inform * whoever uploaded <b>ei</b>, and return -1 for "reject silently.". If * <b>msg</b> is present, set *<b>msg</b> to a description of the * incompatibility (if any). + * + * Set the extrainfo_is_bogus field in <b>sd</b> if the digests matched + * but the extrainfo was nonetheless incompatible. **/ int -routerinfo_incompatible_with_extrainfo(const routerinfo_t *ri, +routerinfo_incompatible_with_extrainfo(const crypto_pk_t *identity_pkey, extrainfo_t *ei, signed_descriptor_t *sd, const char **msg) { int digest_matches, digest256_matches, r=1; - tor_assert(ri); + tor_assert(identity_pkey); + tor_assert(sd); tor_assert(ei); - if (!sd) - sd = (signed_descriptor_t*)&ri->cache_info; if (ei->bad_sig) { if (msg) *msg = "Extrainfo signature was bad, or signed with wrong key."; @@ -5195,27 +5268,28 @@ routerinfo_incompatible_with_extrainfo(const routerinfo_t *ri, /* Set digest256_matches to 1 if the digest is correct, or if no * digest256 was in the ri. */ digest256_matches = tor_memeq(ei->digest256, - ri->extra_info_digest256, DIGEST256_LEN); + sd->extra_info_digest256, DIGEST256_LEN); digest256_matches |= - tor_mem_is_zero(ri->extra_info_digest256, DIGEST256_LEN); + tor_mem_is_zero(sd->extra_info_digest256, DIGEST256_LEN); /* The identity must match exactly to have been generated at the same time * by the same router. */ - if (tor_memneq(ri->cache_info.identity_digest, + if (tor_memneq(sd->identity_digest, ei->cache_info.identity_digest, DIGEST_LEN)) { if (msg) *msg = "Extrainfo nickname or identity did not match routerinfo"; goto err; /* different servers */ } - if (! tor_cert_opt_eq(ri->signing_key_cert, ei->signing_key_cert)) { + if (! tor_cert_opt_eq(sd->signing_key_cert, + ei->cache_info.signing_key_cert)) { if (msg) *msg = "Extrainfo signing key cert didn't match routerinfo"; goto err; /* different servers */ } if (ei->pending_sig) { char signed_digest[128]; - if (crypto_pk_public_checksig(ri->identity_pkey, + if (crypto_pk_public_checksig(identity_pkey, signed_digest, sizeof(signed_digest), ei->pending_sig, ei->pending_sig_len) != DIGEST_LEN || tor_memneq(signed_digest, ei->cache_info.signed_descriptor_digest, @@ -5226,7 +5300,7 @@ routerinfo_incompatible_with_extrainfo(const routerinfo_t *ri, goto err; /* Bad signature, or no match. 
*/ } - ei->cache_info.send_unencrypted = ri->cache_info.send_unencrypted; + ei->cache_info.send_unencrypted = sd->send_unencrypted; tor_free(ei->pending_sig); } diff --git a/src/or/routerlist.h b/src/or/routerlist.h index bc48c2087c..d5a9b77a82 100644 --- a/src/or/routerlist.h +++ b/src/or/routerlist.h @@ -29,7 +29,7 @@ int trusted_dirs_reload_certs(void); #define TRUSTED_DIRS_CERTS_SRC_FROM_VOTE 4 int trusted_dirs_load_certs_from_string(const char *contents, int source, - int flush); + int flush, const char *source_dir); void trusted_dirs_flush_certs_to_disk(void); authority_cert_t *authority_cert_get_newest_by_id(const char *id_digest); authority_cert_t *authority_cert_get_by_sk_digest(const char *sk_digest); @@ -38,7 +38,8 @@ authority_cert_t *authority_cert_get_by_digests(const char *id_digest, void authority_cert_get_all(smartlist_t *certs_out); void authority_cert_dl_failed(const char *id_digest, const char *signing_key_digest, int status); -void authority_certs_fetch_missing(networkstatus_t *status, time_t now); +void authority_certs_fetch_missing(networkstatus_t *status, time_t now, + const char *dir_hint); int router_reload_router_list(void); int authority_cert_dl_looks_uncertain(const char *id_digest); const smartlist_t *router_get_trusted_dir_servers(void); @@ -191,7 +192,7 @@ void update_extrainfo_downloads(time_t now); void router_reset_descriptor_download_failures(void); int router_differences_are_cosmetic(const routerinfo_t *r1, const routerinfo_t *r2); -int routerinfo_incompatible_with_extrainfo(const routerinfo_t *ri, +int routerinfo_incompatible_with_extrainfo(const crypto_pk_t *ri, extrainfo_t *ei, signed_descriptor_t *sd, const char **msg); @@ -217,17 +218,10 @@ int hex_digest_nickname_matches(const char *hexdigest, const char *nickname, int is_named); #ifdef ROUTERLIST_PRIVATE -/** Helper type for choosing routers by bandwidth: contains a union of - * double and uint64_t. Before we call scale_array_elements_to_u64, it holds - * a double; after, it holds a uint64_t. */ -typedef union u64_dbl_t { - uint64_t u64; - double dbl; -} u64_dbl_t; - -STATIC int choose_array_element_by_weight(const u64_dbl_t *entries, +STATIC int choose_array_element_by_weight(const uint64_t *entries, int n_entries); -STATIC void scale_array_elements_to_u64(u64_dbl_t *entries, int n_entries, +STATIC void scale_array_elements_to_u64(uint64_t *entries_out, + const double *entries_in, int n_entries, uint64_t *total_out); STATIC const routerstatus_t *router_pick_directory_server_impl( dirinfo_type_t auth, int flags, diff --git a/src/or/routerparse.c b/src/or/routerparse.c index 600d55294f..6ab042e35f 100644 --- a/src/or/routerparse.c +++ b/src/or/routerparse.c @@ -1,4 +1,4 @@ - /* Copyright (c) 2001 Matej Pfajfar. +/* Copyright (c) 2001 Matej Pfajfar. * Copyright (c) 2001-2004, Roger Dingledine. * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson. * Copyright (c) 2007-2016, The Tor Project, Inc. */ @@ -1405,7 +1405,8 @@ router_parse_entry_from_string(const char *s, const char *end, log_warn(LD_DIR, "Couldn't parse ed25519 cert"); goto err; } - router->signing_key_cert = cert; /* makes sure it gets freed. */ + /* makes sure it gets freed. */ + router->cache_info.signing_key_cert = cert; if (cert->cert_type != CERT_TYPE_ID_SIGNING || ! 
cert->signing_key_included) { @@ -1600,8 +1601,8 @@ router_parse_entry_from_string(const char *s, const char *end, } if (tok->n_args >= 2) { - if (digest256_from_base64(router->extra_info_digest256, tok->args[1]) - < 0) { + if (digest256_from_base64(router->cache_info.extra_info_digest256, + tok->args[1]) < 0) { log_warn(LD_DIR, "Invalid extra info digest256 %s", escaped(tok->args[1])); } @@ -1786,7 +1787,9 @@ extrainfo_parse_entry_from_string(const char *s, const char *end, log_warn(LD_DIR, "Couldn't parse ed25519 cert"); goto err; } - extrainfo->signing_key_cert = cert; /* makes sure it gets freed. */ + /* makes sure it gets freed. */ + extrainfo->cache_info.signing_key_cert = cert; + if (cert->cert_type != CERT_TYPE_ID_SIGNING || ! cert->signing_key_included) { log_warn(LD_DIR, "Invalid form for ed25519 cert"); @@ -3505,7 +3508,7 @@ networkstatus_parse_detached_signatures(const char *s, const char *eos) digest_algorithm_t alg; const char *flavor; const char *hexdigest; - size_t expected_length; + size_t expected_length, digest_length; tok = _tok; @@ -3528,8 +3531,8 @@ networkstatus_parse_detached_signatures(const char *s, const char *eos) continue; } - expected_length = - (alg == DIGEST_SHA1) ? HEX_DIGEST_LEN : HEX_DIGEST256_LEN; + digest_length = crypto_digest_algorithm_get_length(alg); + expected_length = digest_length * 2; /* hex encoding */ if (strlen(hexdigest) != expected_length) { log_warn(LD_DIR, "Wrong length on consensus-digest in detached " @@ -3538,12 +3541,12 @@ networkstatus_parse_detached_signatures(const char *s, const char *eos) } digests = detached_get_digests(sigs, flavor); tor_assert(digests); - if (!tor_mem_is_zero(digests->d[alg], DIGEST256_LEN)) { + if (!tor_mem_is_zero(digests->d[alg], digest_length)) { log_warn(LD_DIR, "Multiple digests for %s with %s on detached " "signatures document", flavor, algname); continue; } - if (base16_decode(digests->d[alg], DIGEST256_LEN, + if (base16_decode(digests->d[alg], digest_length, hexdigest, strlen(hexdigest)) < 0) { log_warn(LD_DIR, "Bad encoding on consensus-digest in detached " "networkstatus signatures"); diff --git a/src/test/include.am b/src/test/include.am index d1e1cbd7f6..fa660f5302 100644 --- a/src/test/include.am +++ b/src/test/include.am @@ -153,6 +153,7 @@ src_test_test_switch_id_CFLAGS = $(AM_CFLAGS) $(TEST_CFLAGS) src_test_test_switch_id_LDFLAGS = @TOR_LDFLAGS_zlib@ src_test_test_switch_id_LDADD = \ src/common/libor-testing.a \ + src/common/libor-ctime-testing.a \ @TOR_ZLIB_LIBS@ @TOR_LIB_MATH@ src_test_test_LDFLAGS = @TOR_LDFLAGS_zlib@ @TOR_LDFLAGS_openssl@ \ @@ -162,6 +163,7 @@ src_test_test_LDADD = src/or/libtor-testing.a \ $(LIBKECCAK_TINY) \ $(LIBDONNA) \ src/common/libor-testing.a \ + src/common/libor-ctime-testing.a \ src/common/libor-event-testing.a \ src/trunnel/libor-trunnel-testing.a \ @TOR_ZLIB_LIBS@ @TOR_LIB_MATH@ @TOR_LIBEVENT_LIBS@ \ @@ -174,13 +176,17 @@ src_test_test_slow_LDADD = $(src_test_test_LDADD) src_test_test_slow_LDFLAGS = $(src_test_test_LDFLAGS) src_test_test_memwipe_CPPFLAGS = $(src_test_test_CPPFLAGS) -src_test_test_memwipe_CFLAGS = $(src_test_test_CFLAGS) +# Don't use bugtrap cflags here: memwipe tests require memory violations. +src_test_test_memwipe_CFLAGS = $(TEST_CFLAGS) src_test_test_memwipe_LDADD = $(src_test_test_LDADD) -src_test_test_memwipe_LDFLAGS = $(src_test_test_LDFLAGS) +# The LDFLAGS need to include the bugtrap cflags, or else we won't link +# successfully with the libraries built with them. 
+src_test_test_memwipe_LDFLAGS = $(src_test_test_LDFLAGS) @CFLAGS_BUGTRAP@ src_test_bench_LDFLAGS = @TOR_LDFLAGS_zlib@ @TOR_LDFLAGS_openssl@ \ @TOR_LDFLAGS_libevent@ src_test_bench_LDADD = src/or/libtor.a src/common/libor.a \ + src/common/libor-ctime.a \ src/common/libor-crypto.a $(LIBKECCAK_TINY) $(LIBDONNA) \ src/common/libor-event.a src/trunnel/libor-trunnel.a \ @TOR_ZLIB_LIBS@ @TOR_LIB_MATH@ @TOR_LIBEVENT_LIBS@ \ @@ -191,6 +197,7 @@ src_test_test_workqueue_LDFLAGS = @TOR_LDFLAGS_zlib@ @TOR_LDFLAGS_openssl@ \ @TOR_LDFLAGS_libevent@ src_test_test_workqueue_LDADD = src/or/libtor-testing.a \ src/common/libor-testing.a \ + src/common/libor-ctime-testing.a \ src/common/libor-crypto-testing.a $(LIBKECCAK_TINY) $(LIBDONNA) \ src/common/libor-event-testing.a \ @TOR_ZLIB_LIBS@ @TOR_LIB_MATH@ @TOR_LIBEVENT_LIBS@ \ @@ -199,9 +206,10 @@ src_test_test_workqueue_LDADD = src/or/libtor-testing.a \ src_test_test_timers_CPPFLAGS = $(src_test_test_CPPFLAGS) src_test_test_timers_CFLAGS = $(src_test_test_CFLAGS) src_test_test_timers_LDADD = \ + src/common/libor-testing.a \ + src/common/libor-ctime-testing.a \ src/common/libor-event-testing.a \ src/common/libor-crypto-testing.a $(LIBKECCAK_TINY) $(LIBDONNA) \ - src/common/libor-testing.a \ @TOR_ZLIB_LIBS@ @TOR_LIB_MATH@ @TOR_LIBEVENT_LIBS@ \ @TOR_OPENSSL_LIBS@ @TOR_LIB_WS32@ @TOR_LIB_GDI@ @CURVE25519_LIBS@ src_test_test_timers_LDFLAGS = $(src_test_test_LDFLAGS) @@ -224,6 +232,7 @@ noinst_PROGRAMS+= src/test/test-ntor-cl src_test_test_ntor_cl_SOURCES = src/test/test_ntor_cl.c src_test_test_ntor_cl_LDFLAGS = @TOR_LDFLAGS_zlib@ @TOR_LDFLAGS_openssl@ src_test_test_ntor_cl_LDADD = src/or/libtor.a src/common/libor.a \ + src/common/libor-ctime.a \ src/common/libor-crypto.a $(LIBKECCAK_TINY) $(LIBDONNA) \ @TOR_ZLIB_LIBS@ @TOR_LIB_MATH@ \ @TOR_OPENSSL_LIBS@ @TOR_LIB_WS32@ @TOR_LIB_GDI@ @CURVE25519_LIBS@ @@ -233,6 +242,7 @@ src_test_test_ntor_cl_AM_CPPFLAGS = \ noinst_PROGRAMS += src/test/test-bt-cl src_test_test_bt_cl_SOURCES = src/test/test_bt_cl.c src_test_test_bt_cl_LDADD = src/common/libor-testing.a \ + src/common/libor-ctime-testing.a \ @TOR_LIB_MATH@ \ @TOR_LIB_WS32@ @TOR_LIB_GDI@ src_test_test_bt_cl_CFLAGS = $(AM_CFLAGS) $(TEST_CFLAGS) @@ -244,7 +254,7 @@ EXTRA_DIST += \ src/test/slownacl_curve25519.py \ src/test/zero_length_keys.sh \ src/test/test_keygen.sh \ - src/test/test_zero_length_keys.sh \ - src/test/test_ntor.sh src/test/test_bt.sh \ + src/test/test_zero_length_keys.sh \ + src/test/test_ntor.sh src/test/test_bt.sh \ src/test/test-network.sh \ src/test/test_switch_id.sh diff --git a/src/test/test_bt.sh b/src/test/test_bt.sh index 83fa3ff24b..fe1781659d 100755 --- a/src/test/test_bt.sh +++ b/src/test/test_bt.sh @@ -3,6 +3,7 @@ exitcode=0 +export ASAN_OPTIONS="handle_segv=0:allow_user_segv_handler=1" "${builddir:-.}/src/test/test-bt-cl" backtraces || exit $? "${builddir:-.}/src/test/test-bt-cl" assert 2>&1 | "${PYTHON:-python}" "${abs_top_srcdir:-.}/src/test/bt_test.py" || exitcode="$?" "${builddir:-.}/src/test/test-bt-cl" crash 2>&1 | "${PYTHON:-python}" "${abs_top_srcdir:-.}/src/test/bt_test.py" || exitcode="$?" 
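(The include.am hunks above link the new libor-ctime / libor-ctime-testing library into each test binary, and the routerlist.c hunk earlier in this diff drops the file-local gt_i64_timei() helper in favor of a shared select_array_member_cumulative_timei(). As a rough, self-contained sketch of the underlying idea only -- it mirrors the gt_i64_timei() code removed above, not necessarily libor-ctime's actual API, and it assumes an arithmetic right shift on signed values, as the original did -- a branchless "greater-than" over values in (0, INT64_MAX) avoids data-dependent timing:)

    #include <stdint.h>
    #include <stdio.h>

    /* Time-invariant 64-bit greater-than for values in (0, INT64_MAX):
     * neither branch nor memory access depends on the operands, so the
     * comparison takes the same time whichever value is larger. */
    static inline int
    gt_i64_timei(uint64_t a, uint64_t b)
    {
      int64_t diff = (int64_t) (b - a);  /* negative exactly when a > b   */
      int res = (int)(diff >> 63);       /* arithmetic shift: -1 or 0     */
      return res & 1;                    /* 1 if a > b, else 0            */
    }

    int
    main(void)
    {
      printf("%d %d %d\n",
             gt_i64_timei(5, 3),   /* prints 1 */
             gt_i64_timei(3, 5),   /* prints 0 */
             gt_i64_timei(4, 4));  /* prints 0 */
      return 0;
    }

(As the removed #if SIZEOF_VOID_P == 8 case shows, 64-bit builds could use a plain ">" here; the branchless form only matters where a native 64-bit compare might be compiled into a data-dependent branch.)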
diff --git a/src/test/test_connection.c b/src/test/test_connection.c index 6f7aef879c..bf95b0b59f 100644 --- a/src/test/test_connection.c +++ b/src/test/test_connection.c @@ -11,6 +11,7 @@ #include "connection.h" #include "main.h" +#include "microdesc.h" #include "networkstatus.h" #include "rendcache.h" #include "directory.h" @@ -54,7 +55,11 @@ static int test_conn_get_rsrc_teardown(const struct testcase_t *tc, #define TEST_CONN_RSRC_STATE_SUCCESSFUL (DIR_CONN_STATE_CLIENT_FINISHED) #define TEST_CONN_RSRC_2 (networkstatus_get_flavor_name(FLAV_NS)) -#define TEST_CONN_DL_STATE (DIR_CONN_STATE_CLIENT_SENDING) +#define TEST_CONN_DL_STATE (DIR_CONN_STATE_CLIENT_READING) + +/* see AP_CONN_STATE_IS_UNATTACHED() */ +#define TEST_CONN_UNATTACHED_STATE (AP_CONN_STATE_CIRCUIT_WAIT) +#define TEST_CONN_ATTACHED_STATE (AP_CONN_STATE_CONNECT_WAIT) #define TEST_CONN_FD_INIT 50 static int mock_connection_connect_sockaddr_called = 0; @@ -109,27 +114,25 @@ test_conn_lookup_addr_helper(const char *address, int family, tor_addr_t *addr) tor_addr_make_null(addr, TEST_CONN_FAMILY); } -static void * -test_conn_get_basic_setup(const struct testcase_t *tc) +static connection_t * +test_conn_get_connection(uint8_t state, uint8_t type, uint8_t purpose) { connection_t *conn = NULL; tor_addr_t addr; int socket_err = 0; int in_progress = 0; - (void)tc; MOCK(connection_connect_sockaddr, mock_connection_connect_sockaddr); init_connection_lists(); - conn = connection_new(TEST_CONN_TYPE, TEST_CONN_FAMILY); + conn = connection_new(type, TEST_CONN_FAMILY); tt_assert(conn); test_conn_lookup_addr_helper(TEST_CONN_ADDRESS, TEST_CONN_FAMILY, &addr); tt_assert(!tor_addr_is_null(&addr)); - /* XXXX - connection_connect doesn't set these, should it? */ tor_addr_copy_tight(&conn->addr, &addr); conn->port = TEST_CONN_PORT; mock_connection_connect_sockaddr_called = 0; @@ -140,8 +143,8 @@ test_conn_get_basic_setup(const struct testcase_t *tc) tt_assert(in_progress == 0 || in_progress == 1); /* fake some of the attributes so the connection looks OK */ - conn->state = TEST_CONN_STATE; - conn->purpose = TEST_CONN_BASIC_PURPOSE; + conn->state = state; + conn->purpose = purpose; assert_connection_ok(conn, time(NULL)); UNMOCK(connection_connect_sockaddr); @@ -151,12 +154,17 @@ test_conn_get_basic_setup(const struct testcase_t *tc) /* On failure */ done: UNMOCK(connection_connect_sockaddr); - test_conn_get_basic_teardown(tc, conn); - - /* Returning NULL causes the unit test to fail */ return NULL; } +static void * +test_conn_get_basic_setup(const struct testcase_t *tc) +{ + (void)tc; + return test_conn_get_connection(TEST_CONN_STATE, TEST_CONN_TYPE, + TEST_CONN_BASIC_PURPOSE); +} + static int test_conn_get_basic_teardown(const struct testcase_t *tc, void *arg) { @@ -186,9 +194,8 @@ test_conn_get_basic_teardown(const struct testcase_t *tc, void *arg) connection_close_immediate(conn->linked_conn); connection_mark_for_close(conn->linked_conn); } - conn->linked_conn->linked_conn = NULL; - connection_free(conn->linked_conn); - conn->linked_conn = NULL; + + close_closeable_connections(); } /* We didn't set the events up properly, so we can't use event_del() in @@ -222,7 +229,10 @@ static void * test_conn_get_rend_setup(const struct testcase_t *tc) { dir_connection_t *conn = DOWNCAST(dir_connection_t, - test_conn_get_basic_setup(tc)); + test_conn_get_connection( + TEST_CONN_STATE, + TEST_CONN_TYPE, + TEST_CONN_REND_PURPOSE)); tt_assert(conn); assert_connection_ok(&conn->base_, time(NULL)); @@ -235,7 +245,6 @@ test_conn_get_rend_setup(const struct 
testcase_t *tc) TEST_CONN_REND_ADDR, REND_SERVICE_ID_LEN_BASE32+1); conn->rend_data->hsdirs_fp = smartlist_new(); - conn->base_.purpose = TEST_CONN_REND_PURPOSE; assert_connection_ok(&conn->base_, time(NULL)); return conn; @@ -266,42 +275,64 @@ test_conn_get_rend_teardown(const struct testcase_t *tc, void *arg) return rv; } -static void * -test_conn_get_rsrc_setup(const struct testcase_t *tc) +static dir_connection_t * +test_conn_download_status_add_a_connection(const char *resource) { dir_connection_t *conn = DOWNCAST(dir_connection_t, - test_conn_get_basic_setup(tc)); + test_conn_get_connection( + TEST_CONN_STATE, + TEST_CONN_TYPE, + TEST_CONN_RSRC_PURPOSE)); + tt_assert(conn); assert_connection_ok(&conn->base_, time(NULL)); - /* TODO: use the canonical function to do this - maybe? */ - conn->requested_resource = tor_strdup(TEST_CONN_RSRC); - conn->base_.purpose = TEST_CONN_RSRC_PURPOSE; + /* Replace the existing resource with the one we want */ + if (resource) { + if (conn->requested_resource) { + tor_free(conn->requested_resource); + } + conn->requested_resource = tor_strdup(resource); + assert_connection_ok(&conn->base_, time(NULL)); + } - assert_connection_ok(&conn->base_, time(NULL)); return conn; - /* On failure */ done: - test_conn_get_rend_teardown(tc, conn); - /* Returning NULL causes the unit test to fail */ + test_conn_get_rsrc_teardown(NULL, conn); return NULL; } +static void * +test_conn_get_rsrc_setup(const struct testcase_t *tc) +{ + (void)tc; + return test_conn_download_status_add_a_connection(TEST_CONN_RSRC); +} + static int test_conn_get_rsrc_teardown(const struct testcase_t *tc, void *arg) { - dir_connection_t *conn = DOWNCAST(dir_connection_t, arg); int rv = 0; + connection_t *conn = (connection_t *)arg; tt_assert(conn); - assert_connection_ok(&conn->base_, time(NULL)); + assert_connection_ok(conn, time(NULL)); + + if (conn->type == CONN_TYPE_DIR) { + dir_connection_t *dir_conn = DOWNCAST(dir_connection_t, arg); + + tt_assert(dir_conn); + assert_connection_ok(&dir_conn->base_, time(NULL)); - /* avoid a last-ditch attempt to refetch the consensus */ - conn->base_.state = TEST_CONN_RSRC_STATE_SUCCESSFUL; + /* avoid a last-ditch attempt to refetch the consensus */ + dir_conn->base_.state = TEST_CONN_RSRC_STATE_SUCCESSFUL; + assert_connection_ok(&dir_conn->base_, time(NULL)); + } /* connection_free_() cleans up requested_resource */ - rv = test_conn_get_basic_teardown(tc, arg); + rv = test_conn_get_basic_teardown(tc, conn); + done: return rv; } @@ -336,14 +367,30 @@ test_conn_download_status_teardown(const struct testcase_t *tc, void *arg) return rv; } -static dir_connection_t * -test_conn_download_status_add_a_connection(void) +/* Like connection_ap_make_link(), but does much less */ +static connection_t * +test_conn_get_linked_connection(connection_t *l_conn, uint8_t state) { - dir_connection_t *conn = DOWNCAST(dir_connection_t, - test_conn_get_rsrc_setup(NULL)); + tt_assert(l_conn); + assert_connection_ok(l_conn, time(NULL)); + + /* AP connections don't seem to have purposes */ + connection_t *conn = test_conn_get_connection(state, CONN_TYPE_AP, + 0); tt_assert(conn); - assert_connection_ok(&conn->base_, time(NULL)); + assert_connection_ok(conn, time(NULL)); + + conn->linked = 1; + l_conn->linked = 1; + conn->linked_conn = l_conn; + l_conn->linked_conn = conn; + /* we never opened a real socket, so we can just overwrite it */ + conn->s = TOR_INVALID_SOCKET; + l_conn->s = TOR_INVALID_SOCKET; + + assert_connection_ok(conn, time(NULL)); + assert_connection_ok(l_conn, 
time(NULL)); return conn; @@ -524,44 +571,6 @@ test_conn_get_rsrc(void *arg) tt_assert(conn); assert_connection_ok(&conn->base_, time(NULL)); - tt_assert(connection_dir_get_by_purpose_and_resource( - conn->base_.purpose, - conn->requested_resource) - == conn); - tt_assert(connection_dir_get_by_purpose_and_resource( - TEST_CONN_RSRC_PURPOSE, - TEST_CONN_RSRC) - == conn); - tt_assert(connection_dir_get_by_purpose_and_resource( - !conn->base_.purpose, - "") - == NULL); - tt_assert(connection_dir_get_by_purpose_and_resource( - !TEST_CONN_RSRC_PURPOSE, - TEST_CONN_RSRC_2) - == NULL); - - tt_assert(connection_dir_get_by_purpose_resource_and_state( - conn->base_.purpose, - conn->requested_resource, - conn->base_.state) - == conn); - tt_assert(connection_dir_get_by_purpose_resource_and_state( - TEST_CONN_RSRC_PURPOSE, - TEST_CONN_RSRC, - TEST_CONN_STATE) - == conn); - tt_assert(connection_dir_get_by_purpose_resource_and_state( - !conn->base_.purpose, - "", - !conn->base_.state) - == NULL); - tt_assert(connection_dir_get_by_purpose_resource_and_state( - !TEST_CONN_RSRC_PURPOSE, - TEST_CONN_RSRC_2, - !TEST_CONN_STATE) - == NULL); - sl_is_conn_assert(connection_dir_list_by_purpose_and_resource( conn->base_.purpose, conn->requested_resource), @@ -641,120 +650,208 @@ test_conn_get_rsrc(void *arg) static void test_conn_download_status(void *arg) { - (void)arg; dir_connection_t *conn = NULL; dir_connection_t *conn2 = NULL; - dir_connection_t *conn3 = NULL; - - /* no connections, no excess, not downloading */ - tt_assert(networkstatus_consensus_has_excess_connections() == 0); - tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 0); - tt_assert(connection_dir_avoid_extra_connection_for_purpose( - TEST_CONN_RSRC_PURPOSE) == 0); - - /* one connection, no excess, not downloading */ - conn = test_conn_download_status_add_a_connection(); - tt_assert(networkstatus_consensus_has_excess_connections() == 0); - tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 0); - tt_assert(connection_dir_avoid_extra_connection_for_purpose( - TEST_CONN_RSRC_PURPOSE) == 0); - - /* one connection, no excess, but downloading */ + dir_connection_t *conn4 = NULL; + connection_t *ap_conn = NULL; + + consensus_flavor_t usable_flavor = (consensus_flavor_t)arg; + + /* The "other flavor" trick only works if there are two flavors */ + tor_assert(N_CONSENSUS_FLAVORS == 2); + consensus_flavor_t other_flavor = ((usable_flavor == FLAV_NS) + ? 
FLAV_MICRODESC + : FLAV_NS); + const char *res = networkstatus_get_flavor_name(usable_flavor); + const char *other_res = networkstatus_get_flavor_name(other_flavor); + + /* no connections */ + tt_assert(networkstatus_consensus_is_already_downloading(res) == 0); + tt_assert(networkstatus_consensus_is_already_downloading(other_res) == 0); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + res) == 0); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + other_res) == 0); + + /* one connection, not downloading */ + conn = test_conn_download_status_add_a_connection(res); + tt_assert(networkstatus_consensus_is_already_downloading(res) == 0); + tt_assert(networkstatus_consensus_is_already_downloading(other_res) == 0); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + res) == 1); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + other_res) == 0); + + /* one connection, downloading but not linked (not possible on a client, + * but possible on a relay) */ conn->base_.state = TEST_CONN_DL_STATE; - tt_assert(networkstatus_consensus_has_excess_connections() == 0); - tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 1); - tt_assert(connection_dir_avoid_extra_connection_for_purpose( - TEST_CONN_RSRC_PURPOSE) == 1); - conn->base_.state = TEST_CONN_STATE; + tt_assert(networkstatus_consensus_is_already_downloading(res) == 0); + tt_assert(networkstatus_consensus_is_already_downloading(other_res) == 0); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + res) == 1); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + other_res) == 0); - /* two connections, excess, but not downloading */ - conn2 = test_conn_download_status_add_a_connection(); - tt_assert(networkstatus_consensus_has_excess_connections() == 1); - tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 0); - tt_assert(connection_dir_avoid_extra_connection_for_purpose( - TEST_CONN_RSRC_PURPOSE) == 0); + /* one connection, downloading and linked, but not yet attached */ + ap_conn = test_conn_get_linked_connection(TO_CONN(conn), + TEST_CONN_UNATTACHED_STATE); + tt_assert(networkstatus_consensus_is_already_downloading(res) == 0); + tt_assert(networkstatus_consensus_is_already_downloading(other_res) == 0); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + res) == 1); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + other_res) == 0); - /* two connections, excess, downloading */ - conn2->base_.state = TEST_CONN_DL_STATE; - tt_assert(networkstatus_consensus_has_excess_connections() == 1); - tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 1); - tt_assert(connection_dir_avoid_extra_connection_for_purpose( - TEST_CONN_RSRC_PURPOSE) == 1); - conn2->base_.state = TEST_CONN_STATE; - - /* more connections, excess, but not downloading */ - conn3 = test_conn_download_status_add_a_connection(); - tt_assert(networkstatus_consensus_has_excess_connections() == 1); - tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 0); - tt_assert(connection_dir_avoid_extra_connection_for_purpose( - TEST_CONN_RSRC_PURPOSE) == 0); - - /* more connections, excess, downloading */ - conn3->base_.state = TEST_CONN_DL_STATE; - tt_assert(networkstatus_consensus_has_excess_connections() == 1); - 
tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 1); - tt_assert(connection_dir_avoid_extra_connection_for_purpose( - TEST_CONN_RSRC_PURPOSE) == 1); - - /* more connections, more downloading */ - conn2->base_.state = TEST_CONN_DL_STATE; - tt_assert(networkstatus_consensus_has_excess_connections() == 1); - tt_assert(networkstatus_consensus_is_downloading_usable_flavor() == 1); - tt_assert(connection_dir_avoid_extra_connection_for_purpose( - TEST_CONN_RSRC_PURPOSE) == 1); + /* one connection, downloading and linked and attached */ + ap_conn->state = TEST_CONN_ATTACHED_STATE; + tt_assert(networkstatus_consensus_is_already_downloading(res) == 1); + tt_assert(networkstatus_consensus_is_already_downloading(other_res) == 0); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + res) == 1); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + other_res) == 0); + + /* one connection, linked and attached but not downloading */ + conn->base_.state = TEST_CONN_STATE; + tt_assert(networkstatus_consensus_is_already_downloading(res) == 0); + tt_assert(networkstatus_consensus_is_already_downloading(other_res) == 0); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + res) == 1); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + other_res) == 0); - /* now try closing the one that isn't downloading: - * these tests won't work unless tor thinks it is bootstrapping */ - tt_assert(networkstatus_consensus_is_bootstrapping(time(NULL))); + /* two connections, both not downloading */ + conn2 = test_conn_download_status_add_a_connection(res); + tt_assert(networkstatus_consensus_is_already_downloading(res) == 0); + tt_assert(networkstatus_consensus_is_already_downloading(other_res) == 0); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + res) == 2); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + other_res) == 0); + /* two connections, one downloading */ + conn->base_.state = TEST_CONN_DL_STATE; + tt_assert(networkstatus_consensus_is_already_downloading(res) == 1); + tt_assert(networkstatus_consensus_is_already_downloading(other_res) == 0); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + res) == 2); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + other_res) == 0); + conn->base_.state = TEST_CONN_STATE; + + /* more connections, all not downloading */ + /* ignore the return value, it's free'd using the connection list */ + (void)test_conn_download_status_add_a_connection(res); + tt_assert(networkstatus_consensus_is_already_downloading(res) == 0); + tt_assert(networkstatus_consensus_is_already_downloading(other_res) == 0); tt_assert(connection_dir_count_by_purpose_and_resource( TEST_CONN_RSRC_PURPOSE, - TEST_CONN_RSRC) == 3); - tt_assert(connection_dir_avoid_extra_connection_for_purpose( - TEST_CONN_RSRC_PURPOSE) == 1); - tt_assert(connection_dir_close_consensus_conn_if_extra(conn) == -1); + res) == 3); tt_assert(connection_dir_count_by_purpose_and_resource( TEST_CONN_RSRC_PURPOSE, - TEST_CONN_RSRC) == 2); - tt_assert(connection_dir_avoid_extra_connection_for_purpose( - TEST_CONN_RSRC_PURPOSE) == 1); + other_res) == 0); - /* now try closing one that is already closed - nothing happens */ - tt_assert(connection_dir_close_consensus_conn_if_extra(conn) == 0); + /* more connections, one downloading */ + conn->base_.state = 
TEST_CONN_DL_STATE; + tt_assert(networkstatus_consensus_is_already_downloading(res) == 1); + tt_assert(networkstatus_consensus_is_already_downloading(other_res) == 0); tt_assert(connection_dir_count_by_purpose_and_resource( TEST_CONN_RSRC_PURPOSE, - TEST_CONN_RSRC) == 2); - tt_assert(connection_dir_avoid_extra_connection_for_purpose( - TEST_CONN_RSRC_PURPOSE) == 1); + res) == 3); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + other_res) == 0); - /* now try closing one that is downloading - it stays open */ - tt_assert(connection_dir_close_consensus_conn_if_extra(conn2) == 0); + /* more connections, two downloading (should never happen, but needs + * to be tested for completeness) */ + conn2->base_.state = TEST_CONN_DL_STATE; + /* ignore the return value, it's free'd using the connection list */ + (void)test_conn_get_linked_connection(TO_CONN(conn2), + TEST_CONN_ATTACHED_STATE); + tt_assert(networkstatus_consensus_is_already_downloading(res) == 1); + tt_assert(networkstatus_consensus_is_already_downloading(other_res) == 0); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + res) == 3); tt_assert(connection_dir_count_by_purpose_and_resource( TEST_CONN_RSRC_PURPOSE, - TEST_CONN_RSRC) == 2); - tt_assert(connection_dir_avoid_extra_connection_for_purpose( - TEST_CONN_RSRC_PURPOSE) == 1); + other_res) == 0); + conn->base_.state = TEST_CONN_STATE; - /* now try closing all excess connections */ - connection_dir_close_extra_consensus_conns(); + /* more connections, a different one downloading */ + tt_assert(networkstatus_consensus_is_already_downloading(res) == 1); + tt_assert(networkstatus_consensus_is_already_downloading(other_res) == 0); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + res) == 3); tt_assert(connection_dir_count_by_purpose_and_resource( TEST_CONN_RSRC_PURPOSE, - TEST_CONN_RSRC) == 1); - tt_assert(connection_dir_avoid_extra_connection_for_purpose( - TEST_CONN_RSRC_PURPOSE) == 1); + other_res) == 0); + + /* a connection for the other flavor (could happen if a client is set to + * cache directory documents), one preferred flavor downloading + */ + conn4 = test_conn_download_status_add_a_connection(other_res); + tt_assert(networkstatus_consensus_is_already_downloading(res) == 1); + tt_assert(networkstatus_consensus_is_already_downloading(other_res) == 0); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + res) == 3); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + other_res) == 1); + + /* a connection for the other flavor (could happen if a client is set to + * cache directory documents), both flavors downloading + */ + conn4->base_.state = TEST_CONN_DL_STATE; + /* ignore the return value, it's free'd using the connection list */ + (void)test_conn_get_linked_connection(TO_CONN(conn4), + TEST_CONN_ATTACHED_STATE); + tt_assert(networkstatus_consensus_is_already_downloading(res) == 1); + tt_assert(networkstatus_consensus_is_already_downloading(other_res) == 1); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + res) == 3); + tt_assert(connection_dir_count_by_purpose_and_resource( + TEST_CONN_RSRC_PURPOSE, + other_res) == 1); done: - /* the teardown function removes all the connections */; + /* the teardown function removes all the connections in the global list*/; } #define CONNECTION_TESTCASE(name, fork, setup) \ { #name, test_conn_##name, fork, &setup, NULL } +/* where 
arg is an expression (constant, varaible, compound expression) */ +#define CONNECTION_TESTCASE_ARG(name, fork, setup, arg) \ + { #name "_" #arg, test_conn_##name, fork, &setup, (void *)arg } + struct testcase_t connection_tests[] = { CONNECTION_TESTCASE(get_basic, TT_FORK, test_conn_get_basic_st), CONNECTION_TESTCASE(get_rend, TT_FORK, test_conn_get_rend_st), CONNECTION_TESTCASE(get_rsrc, TT_FORK, test_conn_get_rsrc_st), - CONNECTION_TESTCASE(download_status, TT_FORK, test_conn_download_status_st), + CONNECTION_TESTCASE_ARG(download_status, TT_FORK, + test_conn_download_status_st, FLAV_MICRODESC), + CONNECTION_TESTCASE_ARG(download_status, TT_FORK, + test_conn_download_status_st, FLAV_NS), //CONNECTION_TESTCASE(func_suffix, TT_FORK, setup_func_pair), END_OF_TESTCASES }; diff --git a/src/test/test_crypto.c b/src/test/test_crypto.c index 6a95e92733..e6b250a677 100644 --- a/src/test/test_crypto.c +++ b/src/test/test_crypto.c @@ -27,6 +27,7 @@ static void test_crypto_dh(void *arg) { crypto_dh_t *dh1 = crypto_dh_new(DH_TYPE_CIRCUIT); + crypto_dh_t *dh1_dup = NULL; crypto_dh_t *dh2 = crypto_dh_new(DH_TYPE_CIRCUIT); char p1[DH_BYTES]; char p2[DH_BYTES]; @@ -41,6 +42,9 @@ test_crypto_dh(void *arg) memset(p1, 0, DH_BYTES); memset(p2, 0, DH_BYTES); tt_mem_op(p1,OP_EQ, p2, DH_BYTES); + + tt_int_op(-1, OP_EQ, crypto_dh_get_public(dh1, p1, 6)); /* too short */ + tt_assert(! crypto_dh_get_public(dh1, p1, DH_BYTES)); tt_mem_op(p1,OP_NE, p2, DH_BYTES); tt_assert(! crypto_dh_get_public(dh2, p2, DH_BYTES)); @@ -54,15 +58,117 @@ test_crypto_dh(void *arg) tt_int_op(s1len,OP_EQ, s2len); tt_mem_op(s1,OP_EQ, s2, s1len); + /* test dh_dup; make sure it works the same. */ + dh1_dup = crypto_dh_dup(dh1); + s1len = crypto_dh_compute_secret(LOG_WARN, dh1_dup, p2, DH_BYTES, s1, 50); + tt_mem_op(s1,OP_EQ, s2, s1len); + { - /* XXXX Now fabricate some bad values and make sure they get caught, - * Check 0, 1, N-1, >= N, etc. - */ + /* Now fabricate some bad values and make sure they get caught. */ + + /* 1 and 0 should both fail. */ + s1len = crypto_dh_compute_secret(LOG_WARN, dh1, "\x01", 1, s1, 50); + tt_int_op(-1, OP_EQ, s1len); + + s1len = crypto_dh_compute_secret(LOG_WARN, dh1, "\x00", 1, s1, 50); + tt_int_op(-1, OP_EQ, s1len); + + memset(p1, 0, DH_BYTES); /* 0 with padding. */ + s1len = crypto_dh_compute_secret(LOG_WARN, dh1, p1, DH_BYTES, s1, 50); + tt_int_op(-1, OP_EQ, s1len); + + p1[DH_BYTES-1] = 1; /* 1 with padding*/ + s1len = crypto_dh_compute_secret(LOG_WARN, dh1, p1, DH_BYTES, s1, 50); + tt_int_op(-1, OP_EQ, s1len); + + /* 2 is okay, though weird. */ + s1len = crypto_dh_compute_secret(LOG_WARN, dh1, "\x02", 1, s1, 50); + tt_int_op(50, OP_EQ, s1len); + + const char P[] = + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + "49286651ECE65381FFFFFFFFFFFFFFFF"; + + /* p-1, p, and so on are not okay. 
*/ + base16_decode(p1, sizeof(p1), P, strlen(P)); + + s1len = crypto_dh_compute_secret(LOG_WARN, dh1, p1, DH_BYTES, s1, 50); + tt_int_op(-1, OP_EQ, s1len); + + p1[DH_BYTES-1] = 0xFE; /* p-1 */ + s1len = crypto_dh_compute_secret(LOG_WARN, dh1, p1, DH_BYTES, s1, 50); + tt_int_op(-1, OP_EQ, s1len); + + p1[DH_BYTES-1] = 0xFD; /* p-2 works fine */ + s1len = crypto_dh_compute_secret(LOG_WARN, dh1, p1, DH_BYTES, s1, 50); + tt_int_op(50, OP_EQ, s1len); + + const char P_plus_one[] = + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + "49286651ECE653820000000000000000"; + + base16_decode(p1, sizeof(p1), P_plus_one, strlen(P_plus_one)); + + s1len = crypto_dh_compute_secret(LOG_WARN, dh1, p1, DH_BYTES, s1, 50); + tt_int_op(-1, OP_EQ, s1len); + + p1[DH_BYTES-1] = 0x01; /* p+2 */ + s1len = crypto_dh_compute_secret(LOG_WARN, dh1, p1, DH_BYTES, s1, 50); + tt_int_op(-1, OP_EQ, s1len); + + p1[DH_BYTES-1] = 0xff; /* p+256 */ + s1len = crypto_dh_compute_secret(LOG_WARN, dh1, p1, DH_BYTES, s1, 50); + tt_int_op(-1, OP_EQ, s1len); + + memset(p1, 0xff, DH_BYTES), /* 2^1024-1 */ + s1len = crypto_dh_compute_secret(LOG_WARN, dh1, p1, DH_BYTES, s1, 50); + tt_int_op(-1, OP_EQ, s1len); + } + + { + /* provoke an error in the openssl DH_compute_key function; make sure we + * survive. */ + tt_assert(! crypto_dh_get_public(dh1, p1, DH_BYTES)); + + crypto_dh_free(dh2); + dh2= crypto_dh_new(DH_TYPE_CIRCUIT); /* no private key set */ + s1len = crypto_dh_compute_secret(LOG_WARN, dh2, + p1, DH_BYTES, + s1, 50); + tt_int_op(s1len, OP_EQ, -1); } done: crypto_dh_free(dh1); crypto_dh_free(dh2); + crypto_dh_free(dh1_dup); +} + +static void +test_crypto_openssl_version(void *arg) +{ + (void)arg; + const char *version = crypto_openssl_get_version_str(); + const char *h_version = crypto_openssl_get_header_version_str(); + + tt_assert(version); + tt_assert(h_version); + tt_assert(!strcmpstart(version, h_version)); /* "-fips" suffix, etc */ + tt_assert(!strstr(version, "OpenSSL")); + int a=-1,b=-1,c=-1; + sscanf(version, "%d.%d.%d", &a,&b,&c); + tt_int_op(a, OP_GE, 0); + tt_int_op(b, OP_GE, 0); + tt_int_op(c, OP_GE, 0); + + done: + ; } /** Run unit tests for our random number generation function and its wrappers. @@ -73,6 +179,7 @@ test_crypto_rng(void *arg) int i, j, allok; char data1[100], data2[100]; double d; + char *h=NULL; /* Try out RNG. */ (void)arg; @@ -104,9 +211,16 @@ test_crypto_rng(void *arg) allok = 0; tor_free(host); } + + /* Make sure crypto_random_hostname clips its inputs properly. */ + h = crypto_random_hostname(20000, 9000, "www.", ".onion"); + tt_assert(! strcmpstart(h,"www.")); + tt_assert(! strcmpend(h,".onion")); + tt_int_op(63+4+6, OP_EQ, strlen(h)); + tt_assert(allok); done: - ; + tor_free(h); } static void @@ -125,14 +239,103 @@ test_crypto_rng_range(void *arg) if (x == 8) got_largest = 1; } - /* These fail with probability 1/10^603. 
*/ tt_assert(got_smallest); tt_assert(got_largest); + + got_smallest = got_largest = 0; + const uint64_t ten_billion = 10 * ((uint64_t)1000000000000); + for (i = 0; i < 1000; ++i) { + uint64_t x = crypto_rand_uint64_range(ten_billion, ten_billion+10); + tt_u64_op(x, OP_GE, ten_billion); + tt_u64_op(x, OP_LT, ten_billion+10); + if (x == ten_billion) + got_smallest = 1; + if (x == ten_billion+9) + got_largest = 1; + } + + tt_assert(got_smallest); + tt_assert(got_largest); + + const time_t now = time(NULL); + for (i = 0; i < 2000; ++i) { + time_t x = crypto_rand_time_range(now, now+60); + tt_i64_op(x, OP_GE, now); + tt_i64_op(x, OP_LT, now+60); + if (x == now) + got_smallest = 1; + if (x == now+59) + got_largest = 1; + } + + tt_assert(got_smallest); + tt_assert(got_largest); done: ; } +extern int break_strongest_rng_fallback; +extern int break_strongest_rng_syscall; + +static void +test_crypto_rng_strongest(void *arg) +{ + const char *how = arg; + int broken = 0; + + if (how == NULL) { + ; + } else if (!strcmp(how, "nosyscall")) { + break_strongest_rng_syscall = 1; + } else if (!strcmp(how, "nofallback")) { + break_strongest_rng_fallback = 1; + } else if (!strcmp(how, "broken")) { + broken = break_strongest_rng_syscall = break_strongest_rng_fallback = 1; + } + +#define N 128 + uint8_t combine_and[N]; + uint8_t combine_or[N]; + int i, j; + + memset(combine_and, 0xff, N); + memset(combine_or, 0, N); + + for (i = 0; i < 100; ++i) { /* 2^-100 chances just don't happen. */ + uint8_t output[N]; + memset(output, 0, N); + if (how == NULL) { + /* this one can't fail. */ + crypto_strongest_rand(output, sizeof(output)); + } else { + int r = crypto_strongest_rand_raw(output, sizeof(output)); + if (r == -1) { + if (broken) { + goto done; /* we're fine. */ + } + /* This function is allowed to break, but only if it always breaks. */ + tt_int_op(i, OP_EQ, 0); + tt_skip(); + } else { + tt_assert(! broken); + } + } + for (j = 0; j < N; ++j) { + combine_and[j] &= output[j]; + combine_or[j] |= output[j]; + } + } + + for (j = 0; j < N; ++j) { + tt_int_op(combine_and[j], OP_EQ, 0); + tt_int_op(combine_or[j], OP_EQ, 0xff); + } + done: + ; +#undef N +} + /* Test for rectifying openssl RAND engine. 
*/ static void test_crypto_rng_engine(void *arg) @@ -312,6 +515,42 @@ test_crypto_aes(void *arg) tor_free(data3); } +static void +test_crypto_aes_ctr_testvec(void *arg) +{ + (void)arg; + char *mem_op_hex_tmp=NULL; + + /* from NIST SP800-38a, section F.5 */ + const char key16[] = "2b7e151628aed2a6abf7158809cf4f3c"; + const char ctr16[] = "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff"; + const char plaintext16[] = + "6bc1bee22e409f96e93d7e117393172a" + "ae2d8a571e03ac9c9eb76fac45af8e51" + "30c81c46a35ce411e5fbc1191a0a52ef" + "f69f2445df4f9b17ad2b417be66c3710"; + const char ciphertext16[] = + "874d6191b620e3261bef6864990db6ce" + "9806f66b7970fdff8617187bb9fffdff" + "5ae4df3edbd5d35e5b4f09020db03eab" + "1e031dda2fbe03d1792170a0f3009cee"; + + char key[16]; + char iv[16]; + char plaintext[16*4]; + base16_decode(key, sizeof(key), key16, strlen(key16)); + base16_decode(iv, sizeof(iv), ctr16, strlen(ctr16)); + base16_decode(plaintext, sizeof(plaintext), plaintext16, strlen(plaintext16)); + + crypto_cipher_t *c = crypto_cipher_new_with_iv(key, iv); + crypto_cipher_crypt_inplace(c, plaintext, sizeof(plaintext)); + test_memeq_hex(plaintext, ciphertext16); + + done: + tor_free(mem_op_hex_tmp); + crypto_cipher_free(c); +} + /** Run unit tests for our SHA-1 functionality */ static void test_crypto_sha(void *arg) @@ -1084,6 +1323,29 @@ test_crypto_pk_base64(void *arg) tor_free(encoded); } +#ifdef HAVE_TRUNCATE +#define do_truncate truncate +#else +static int +do_truncate(const char *fname, size_t len) +{ + struct stat st; + char *bytes; + + bytes = read_file_to_str(fname, RFTS_BIN, &st); + if (!bytes) + return -1; + /* This cast isn't so great, but it should be safe given the actual files + * and lengths we're using. */ + if (st.st_size < (off_t)len) + len = MIN(len, (size_t)st.st_size); + + int r = write_bytes_to_file(fname, bytes, len, 1); + tor_free(bytes); + return r; +} +#endif + /** Sanity check for crypto pk digests */ static void test_crypto_digests(void *arg) @@ -1114,6 +1376,31 @@ test_crypto_digests(void *arg) crypto_pk_free(k); } +static void +test_crypto_digest_names(void *arg) +{ + static const struct { + int a; const char *n; + } names[] = { + { DIGEST_SHA1, "sha1" }, + { DIGEST_SHA256, "sha256" }, + { DIGEST_SHA512, "sha512" }, + { DIGEST_SHA3_256, "sha3-256" }, + { DIGEST_SHA3_512, "sha3-512" }, + { -1, NULL } + }; + (void)arg; + + int i; + for (i = 0; names[i].n; ++i) { + tt_str_op(names[i].n, OP_EQ,crypto_digest_algorithm_get_name(names[i].a)); + tt_int_op(names[i].a, OP_EQ,crypto_digest_algorithm_parse_name(names[i].n)); + } + tt_int_op(-1, OP_EQ, crypto_digest_algorithm_parse_name("TimeCubeHash-4444")); + done: + ; +} + #ifndef OPENSSL_1_1_API #define EVP_ENCODE_CTX_new() tor_malloc_zero(sizeof(EVP_ENCODE_CTX)) #define EVP_ENCODE_CTX_free(ctx) tor_free(ctx) @@ -1507,13 +1794,99 @@ test_crypto_hkdf_sha256(void *arg) "b206fa34e5bc78d063fc291501beec53b36e5a0e434561200c" "5f8bd13e0f88b3459600b4dc21d69363e2895321c06184879d" "94b18f078411be70b767c7fc40679a9440a0c95ea83a23efbf"); - done: tor_free(mem_op_hex_tmp); #undef EXPAND } static void +test_crypto_hkdf_sha256_testvecs(void *arg) +{ + (void) arg; + /* Test vectors from RFC5869, sections A.1 through A.3 */ + const struct { + const char *ikm16, *salt16, *info16; + int L; + const char *okm16; + } vecs[] = { + { /* from A.1 */ + "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", + "000102030405060708090a0b0c", + "f0f1f2f3f4f5f6f7f8f9", + 42, + "3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf" + "34007208d5b887185865" + }, + { /* from A.2 
*/ + "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f" + "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f" + "404142434445464748494a4b4c4d4e4f", + "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f" + "808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9f" + "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf", + "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecf" + "d0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeef" + "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", + 82, + "b11e398dc80327a1c8e7f78c596a49344f012eda2d4efad8a050cc4c19afa97c" + "59045a99cac7827271cb41c65e590e09da3275600c2f09b8367793a9aca3db71" + "cc30c58179ec3e87c14c01d5c1f3434f1d87" + }, + { /* from A.3 */ + "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", + "", + "", + 42, + "8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d" + "9d201395faa4b61a96c8", + }, + { NULL, NULL, NULL, -1, NULL } + }; + + int i; + char *ikm = NULL; + char *salt = NULL; + char *info = NULL; + char *okm = NULL; + char *mem_op_hex_tmp = NULL; + + for (i = 0; vecs[i].ikm16; ++i) { + size_t ikm_len = strlen(vecs[i].ikm16)/2; + size_t salt_len = strlen(vecs[i].salt16)/2; + size_t info_len = strlen(vecs[i].info16)/2; + size_t okm_len = vecs[i].L; + + ikm = tor_malloc(ikm_len); + salt = tor_malloc(salt_len); + info = tor_malloc(info_len); + okm = tor_malloc(okm_len); + + base16_decode(ikm, ikm_len, vecs[i].ikm16, strlen(vecs[i].ikm16)); + base16_decode(salt, salt_len, vecs[i].salt16, strlen(vecs[i].salt16)); + base16_decode(info, info_len, vecs[i].info16, strlen(vecs[i].info16)); + + int r = crypto_expand_key_material_rfc5869_sha256( + (const uint8_t*)ikm, ikm_len, + (const uint8_t*)salt, salt_len, + (const uint8_t*)info, info_len, + (uint8_t*)okm, okm_len); + tt_int_op(r, OP_EQ, 0); + test_memeq_hex(okm, vecs[i].okm16); + tor_free(ikm); + tor_free(salt); + tor_free(info); + tor_free(okm); + } + done: + tor_free(ikm); + tor_free(salt); + tor_free(info); + tor_free(okm); + tor_free(mem_op_hex_tmp); +} + + +static void test_crypto_curve25519_impl(void *arg) { /* adapted from curve25519_donna, which adapted it from test-curve25519 @@ -1605,6 +1978,47 @@ test_crypto_curve25519_basepoint(void *arg) } static void +test_crypto_curve25519_testvec(void *arg) +{ + (void)arg; + char *mem_op_hex_tmp = NULL; + + /* From RFC 7748, section 6.1 */ + /* Alice's private key, a: */ + const char a16[] = + "77076d0a7318a57d3c16c17251b26645df4c2f87ebc0992ab177fba51db92c2a"; + /* Alice's public key, X25519(a, 9): */ + const char a_pub16[] = + "8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98eaa9b4e6a"; + /* Bob's private key, b: */ + const char b16[] = + "5dab087e624a8a4b79e17f8b83800ee66f3bb1292618b6fd1c2f8b27ff88e0eb"; + /* Bob's public key, X25519(b, 9): */ + const char b_pub16[] = + "de9edb7d7b7dc1b4d35b61c2ece435373f8343c85b78674dadfc7e146f882b4f"; + /* Their shared secret, K: */ + const char k16[] = + "4a5d9d5ba4ce2de1728e3bf480350f25e07e21c947d19e3376f09b3c1e161742"; + + uint8_t a[32], b[32], a_pub[32], b_pub[32], k1[32], k2[32]; + base16_decode((char*)a, sizeof(a), a16, strlen(a16)); + base16_decode((char*)b, sizeof(b), b16, strlen(b16)); + curve25519_basepoint_impl(a_pub, a); + curve25519_basepoint_impl(b_pub, b); + curve25519_impl(k1, a, b_pub); + curve25519_impl(k2, b, a_pub); + + test_memeq_hex(a, a16); + test_memeq_hex(b, b16); + test_memeq_hex(a_pub, a_pub16); + test_memeq_hex(b_pub, b_pub16); + test_memeq_hex(k1, k16); + test_memeq_hex(k2, k16); + done: + tor_free(mem_op_hex_tmp); +} 
+ +static void test_crypto_curve25519_wrappers(void *arg) { curve25519_public_key_t pubkey1, pubkey2; @@ -1896,7 +2310,67 @@ test_crypto_ed25519_test_vectors(void *arg) "1fbc1e08682f2cc0c92efe8f4985dec61dcbd54d4b94a22547d24451271c8b00", "0a688e79be24f866286d4646b5d81c" }, - + /* These come from draft-irtf-cfrg-eddsa-05 section 7.1 */ + { + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a", + "e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e06522490155" + "5fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b", + "" + }, + { + "4ccd089b28ff96da9db6c346ec114e0f5b8a319f35aba624da8cf6ed4fb8a6fb", + "3d4017c3e843895a92b70aa74d1b7ebc9c982ccf2ec4968cc0cd55f12af4660c", + "92a009a9f0d4cab8720e820b5f642540a2b27b5416503f8fb3762223ebdb69da" + "085ac1e43e15996e458f3613d0f11d8c387b2eaeb4302aeeb00d291612bb0c00", + "72" + }, + { + "f5e5767cf153319517630f226876b86c8160cc583bc013744c6bf255f5cc0ee5", + "278117fc144c72340f67d0f2316e8386ceffbf2b2428c9c51fef7c597f1d426e", + "0aab4c900501b3e24d7cdf4663326a3a87df5e4843b2cbdb67cbf6e460fec350" + "aa5371b1508f9f4528ecea23c436d94b5e8fcd4f681e30a6ac00a9704a188a03", + "08b8b2b733424243760fe426a4b54908632110a66c2f6591eabd3345e3e4eb98" + "fa6e264bf09efe12ee50f8f54e9f77b1e355f6c50544e23fb1433ddf73be84d8" + "79de7c0046dc4996d9e773f4bc9efe5738829adb26c81b37c93a1b270b20329d" + "658675fc6ea534e0810a4432826bf58c941efb65d57a338bbd2e26640f89ffbc" + "1a858efcb8550ee3a5e1998bd177e93a7363c344fe6b199ee5d02e82d522c4fe" + "ba15452f80288a821a579116ec6dad2b3b310da903401aa62100ab5d1a36553e" + "06203b33890cc9b832f79ef80560ccb9a39ce767967ed628c6ad573cb116dbef" + "efd75499da96bd68a8a97b928a8bbc103b6621fcde2beca1231d206be6cd9ec7" + "aff6f6c94fcd7204ed3455c68c83f4a41da4af2b74ef5c53f1d8ac70bdcb7ed1" + "85ce81bd84359d44254d95629e9855a94a7c1958d1f8ada5d0532ed8a5aa3fb2" + "d17ba70eb6248e594e1a2297acbbb39d502f1a8c6eb6f1ce22b3de1a1f40cc24" + "554119a831a9aad6079cad88425de6bde1a9187ebb6092cf67bf2b13fd65f270" + "88d78b7e883c8759d2c4f5c65adb7553878ad575f9fad878e80a0c9ba63bcbcc" + "2732e69485bbc9c90bfbd62481d9089beccf80cfe2df16a2cf65bd92dd597b07" + "07e0917af48bbb75fed413d238f5555a7a569d80c3414a8d0859dc65a46128ba" + "b27af87a71314f318c782b23ebfe808b82b0ce26401d2e22f04d83d1255dc51a" + "ddd3b75a2b1ae0784504df543af8969be3ea7082ff7fc9888c144da2af58429e" + "c96031dbcad3dad9af0dcbaaaf268cb8fcffead94f3c7ca495e056a9b47acdb7" + "51fb73e666c6c655ade8297297d07ad1ba5e43f1bca32301651339e22904cc8c" + "42f58c30c04aafdb038dda0847dd988dcda6f3bfd15c4b4c4525004aa06eeff8" + "ca61783aacec57fb3d1f92b0fe2fd1a85f6724517b65e614ad6808d6f6ee34df" + "f7310fdc82aebfd904b01e1dc54b2927094b2db68d6f903b68401adebf5a7e08" + "d78ff4ef5d63653a65040cf9bfd4aca7984a74d37145986780fc0b16ac451649" + "de6188a7dbdf191f64b5fc5e2ab47b57f7f7276cd419c17a3ca8e1b939ae49e4" + "88acba6b965610b5480109c8b17b80e1b7b750dfc7598d5d5011fd2dcc5600a3" + "2ef5b52a1ecc820e308aa342721aac0943bf6686b64b2579376504ccc493d97e" + "6aed3fb0f9cd71a43dd497f01f17c0e2cb3797aa2a2f256656168e6c496afc5f" + "b93246f6b1116398a346f1a641f3b041e989f7914f90cc2c7fff357876e506b5" + "0d334ba77c225bc307ba537152f3f1610e4eafe595f6d9d90d11faa933a15ef1" + "369546868a7f3a45a96768d40fd9d03412c091c6315cf4fde7cb68606937380d" + "b2eaaa707b4c4185c32eddcdd306705e4dc1ffc872eeee475a64dfac86aba41c" + "0618983f8741c5ef68d3a101e8a3b8cac60c905c15fc910840b94c00a0b9d0" + }, + { + "833fe62409237b9d62ec77587520911e9a759cec1d19755b7da901b96dca3d42", + 
"ec172b93ad5e563bf4932c70e1245034c35467ef2efd4d64ebf819683467e2bf", + "dc2a4459e7369633a52b1bf277839a00201009a3efbf3ecb69bea2186c26b589" + "09351fc9ac90b3ecfdfbc7c66431e0303dca179c138ac17ad9bef1177331a704", + "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a" + "2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f" + }, { NULL, NULL, NULL, NULL} }; @@ -2183,6 +2657,54 @@ test_crypto_ed25519_fuzz_donna(void *arg) } static void +test_crypto_ed25519_storage(void *arg) +{ + (void)arg; + ed25519_keypair_t *keypair = NULL; + ed25519_public_key_t pub; + ed25519_secret_key_t sec; + char *fname_1 = tor_strdup(get_fname("ed_seckey_1")); + char *fname_2 = tor_strdup(get_fname("ed_pubkey_2")); + char *contents = NULL; + char *tag = NULL; + + keypair = tor_malloc_zero(sizeof(ed25519_keypair_t)); + tt_int_op(0,OP_EQ,ed25519_keypair_generate(keypair, 0)); + tt_int_op(0,OP_EQ, + ed25519_seckey_write_to_file(&keypair->seckey, fname_1, "foo")); + tt_int_op(0,OP_EQ, + ed25519_pubkey_write_to_file(&keypair->pubkey, fname_2, "bar")); + + tt_int_op(-1, OP_EQ, ed25519_pubkey_read_from_file(&pub, &tag, fname_1)); + tt_ptr_op(tag, OP_EQ, NULL); + tt_int_op(-1, OP_EQ, ed25519_seckey_read_from_file(&sec, &tag, fname_2)); + tt_ptr_op(tag, OP_EQ, NULL); + + tt_int_op(0, OP_EQ, ed25519_pubkey_read_from_file(&pub, &tag, fname_2)); + tt_str_op(tag, OP_EQ, "bar"); + tor_free(tag); + tt_int_op(0, OP_EQ, ed25519_seckey_read_from_file(&sec, &tag, fname_1)); + tt_str_op(tag, OP_EQ, "foo"); + tor_free(tag); + + /* whitebox test: truncated keys. */ + tt_int_op(0, ==, do_truncate(fname_1, 40)); + tt_int_op(0, ==, do_truncate(fname_2, 40)); + tt_int_op(-1, OP_EQ, ed25519_pubkey_read_from_file(&pub, &tag, fname_2)); + tt_ptr_op(tag, OP_EQ, NULL); + tor_free(tag); + tt_int_op(-1, OP_EQ, ed25519_seckey_read_from_file(&sec, &tag, fname_1)); + tt_ptr_op(tag, OP_EQ, NULL); + + done: + tor_free(fname_1); + tor_free(fname_2); + tor_free(contents); + tor_free(tag); + ed25519_keypair_free(keypair); +} + +static void test_crypto_siphash(void *arg) { /* From the reference implementation, taking @@ -2398,13 +2920,23 @@ struct testcase_t crypto_tests[] = { CRYPTO_LEGACY(rng), { "rng_range", test_crypto_rng_range, 0, NULL, NULL }, { "rng_engine", test_crypto_rng_engine, TT_FORK, NULL, NULL }, + { "rng_strongest", test_crypto_rng_strongest, TT_FORK, NULL, NULL }, + { "rng_strongest_nosyscall", test_crypto_rng_strongest, TT_FORK, + &passthrough_setup, (void*)"nosyscall" }, + { "rng_strongest_nofallback", test_crypto_rng_strongest, TT_FORK, + &passthrough_setup, (void*)"nofallback" }, + { "rng_strongest_broken", test_crypto_rng_strongest, TT_FORK, + &passthrough_setup, (void*)"broken" }, + { "openssl_version", test_crypto_openssl_version, TT_FORK, NULL, NULL }, { "aes_AES", test_crypto_aes, TT_FORK, &passthrough_setup, (void*)"aes" }, { "aes_EVP", test_crypto_aes, TT_FORK, &passthrough_setup, (void*)"evp" }, + { "aes_ctr_testvec", test_crypto_aes_ctr_testvec, 0, NULL, NULL }, CRYPTO_LEGACY(sha), CRYPTO_LEGACY(pk), { "pk_fingerprints", test_crypto_pk_fingerprints, TT_FORK, NULL, NULL }, { "pk_base64", test_crypto_pk_base64, TT_FORK, NULL, NULL }, CRYPTO_LEGACY(digests), + { "digest_names", test_crypto_digest_names, 0, NULL, NULL }, { "sha3", test_crypto_sha3, TT_FORK, NULL, NULL}, { "sha3_xof", test_crypto_sha3_xof, TT_FORK, NULL, NULL}, CRYPTO_LEGACY(dh), @@ -2415,8 +2947,10 @@ struct testcase_t crypto_tests[] = { CRYPTO_LEGACY(base32_decode), { "kdf_TAP", test_crypto_kdf_TAP, 0, NULL, NULL }, { "hkdf_sha256", 
test_crypto_hkdf_sha256, 0, NULL, NULL }, + { "hkdf_sha256_testvecs", test_crypto_hkdf_sha256_testvecs, 0, NULL, NULL }, { "curve25519_impl", test_crypto_curve25519_impl, 0, NULL, NULL }, { "curve25519_impl_hibit", test_crypto_curve25519_impl, 0, NULL, (void*)"y"}, + { "curve25519_testvec", test_crypto_curve25519_testvec, 0, NULL, NULL }, { "curve25519_basepoint", test_crypto_curve25519_basepoint, TT_FORK, NULL, NULL }, { "curve25519_wrappers", test_crypto_curve25519_wrappers, 0, NULL, NULL }, @@ -2429,6 +2963,7 @@ struct testcase_t crypto_tests[] = { ED25519_TEST(blinding, 0), ED25519_TEST(testvectors, 0), ED25519_TEST(fuzz_donna, TT_FORK), + { "ed25519_storage", test_crypto_ed25519_storage, 0, NULL, NULL }, { "siphash", test_crypto_siphash, 0, NULL, NULL }, { "failure_modes", test_crypto_failure_modes, TT_FORK, NULL, NULL }, END_OF_TESTCASES diff --git a/src/test/test_crypto_slow.c b/src/test/test_crypto_slow.c index 9b39199cd0..6f3e40e0ab 100644 --- a/src/test/test_crypto_slow.c +++ b/src/test/test_crypto_slow.c @@ -10,7 +10,8 @@ #include "crypto_s2k.h" #include "crypto_pwbox.h" -#if defined(HAVE_LIBSCRYPT_H) +#if defined(HAVE_LIBSCRYPT_H) && defined(HAVE_LIBSCRYPT_SCRYPT) +#define HAVE_LIBSCRYPT #include <libscrypt.h> #endif @@ -129,7 +130,7 @@ test_crypto_s2k_general(void *arg) } } -#if defined(HAVE_LIBSCRYPT_H) && defined(HAVE_EVP_PBE_SCRYPT) +#if defined(HAVE_LIBSCRYPT) && defined(HAVE_EVP_PBE_SCRYPT) static void test_libscrypt_eq_openssl(void *arg) { @@ -276,7 +277,7 @@ test_crypto_s2k_errors(void *arg) buf, sizeof(buf), "ABC", 3)); /* Truncated output */ -#ifdef HAVE_LIBSCRYPT_H +#ifdef HAVE_LIBSCRYPT tt_int_op(S2K_TRUNCATED, OP_EQ, secret_to_key_new(buf, 50, &sz, "ABC", 3, 0)); tt_int_op(S2K_TRUNCATED, OP_EQ, secret_to_key_new(buf, 50, &sz, @@ -287,7 +288,7 @@ test_crypto_s2k_errors(void *arg) tt_int_op(S2K_TRUNCATED, OP_EQ, secret_to_key_new(buf, 29, &sz, "ABC", 3, S2K_FLAG_NO_SCRYPT)); -#ifdef HAVE_LIBSCRYPT_H +#ifdef HAVE_LIBSCRYPT tt_int_op(S2K_TRUNCATED, OP_EQ, secret_to_key_make_specifier(buf, 18, 0)); tt_int_op(S2K_TRUNCATED, OP_EQ, secret_to_key_make_specifier(buf, 18, S2K_FLAG_LOW_MEM)); @@ -308,7 +309,7 @@ test_crypto_s2k_errors(void *arg) secret_to_key_derivekey(buf2, sizeof(buf2), buf, 18, "ABC", 3)); -#ifdef HAVE_LIBSCRYPT_H +#ifdef HAVE_LIBSCRYPT /* It's a bad scrypt buffer if N would overflow uint64 */ memset(buf, 0, sizeof(buf)); buf[0] = 2; /* scrypt */ @@ -329,7 +330,7 @@ test_crypto_scrypt_vectors(void *arg) uint8_t spec[64], out[64]; (void)arg; -#ifndef HAVE_LIBSCRYPT_H +#ifndef HAVE_LIBSCRYPT if (1) tt_skip(); #endif @@ -507,7 +508,7 @@ test_crypto_pwbox(void *arg) struct testcase_t slow_crypto_tests[] = { CRYPTO_LEGACY(s2k_rfc2440), -#ifdef HAVE_LIBSCRYPT_H +#ifdef HAVE_LIBSCRYPT { "s2k_scrypt", test_crypto_s2k_general, 0, &passthrough_setup, (void*)"scrypt" }, { "s2k_scrypt_low", test_crypto_s2k_general, 0, &passthrough_setup, diff --git a/src/test/test_dir.c b/src/test/test_dir.c index ea179fb02c..bddbe9f18e 100644 --- a/src/test/test_dir.c +++ b/src/test/test_dir.c @@ -85,6 +85,15 @@ test_dir_nicknames(void *arg) ; } +static smartlist_t *mocked_configured_ports = NULL; + +/** Returns mocked_configured_ports */ +static const smartlist_t * +mock_get_configured_ports(void) +{ + return mocked_configured_ports; +} + /** Run unit tests for router descriptor generation logic.
*/ static void test_dir_formats(void *arg) @@ -104,6 +113,7 @@ test_dir_formats(void *arg) or_options_t *options = get_options_mutable(); const addr_policy_t *p; time_t now = time(NULL); + port_cfg_t orport, dirport; (void)arg; pk1 = pk_generate(0); @@ -150,15 +160,15 @@ test_dir_formats(void *arg) ed25519_secret_key_from_seed(&kp2.seckey, (const uint8_t*)"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); ed25519_public_key_generate(&kp2.pubkey, &kp2.seckey); - r2->signing_key_cert = tor_cert_create(&kp1, + r2->cache_info.signing_key_cert = tor_cert_create(&kp1, CERT_TYPE_ID_SIGNING, &kp2.pubkey, now, 86400, CERT_FLAG_INCLUDE_SIGNING_KEY); char cert_buf[256]; base64_encode(cert_buf, sizeof(cert_buf), - (const char*)r2->signing_key_cert->encoded, - r2->signing_key_cert->encoded_len, + (const char*)r2->cache_info.signing_key_cert->encoded, + r2->cache_info.signing_key_cert->encoded_len, BASE64_ENCODE_MULTILINE); r2->platform = tor_strdup(platform); r2->cache_info.published_on = 5; @@ -185,9 +195,31 @@ test_dir_formats(void *arg) /* XXXX025 router_dump_to_string should really take this from ri.*/ options->ContactInfo = tor_strdup("Magri White " "<magri@elsewhere.example.com>"); + /* Skip reachability checks for DirPort and tunnelled-dir-server */ + options->AssumeReachable = 1; + + /* Fake just enough of an ORPort and DirPort to get by */ + MOCK(get_configured_ports, mock_get_configured_ports); + mocked_configured_ports = smartlist_new(); + + memset(&orport, 0, sizeof(orport)); + orport.type = CONN_TYPE_OR_LISTENER; + orport.addr.family = AF_INET; + orport.port = 9000; + smartlist_add(mocked_configured_ports, &orport); + + memset(&dirport, 0, sizeof(dirport)); + dirport.type = CONN_TYPE_DIR_LISTENER; + dirport.addr.family = AF_INET; + dirport.port = 9003; + smartlist_add(mocked_configured_ports, &dirport); buf = router_dump_router_to_string(r1, pk2, NULL, NULL, NULL); + UNMOCK(get_configured_ports); + smartlist_free(mocked_configured_ports); + mocked_configured_ports = NULL; + tor_free(options->ContactInfo); tt_assert(buf); @@ -247,7 +279,8 @@ test_dir_formats(void *arg) strlcat(buf2, "master-key-ed25519 ", sizeof(buf2)); { char k[ED25519_BASE64_LEN+1]; - tt_assert(ed25519_public_to_base64(k, &r2->signing_key_cert->signing_key) + tt_assert(ed25519_public_to_base64(k, + &r2->cache_info.signing_key_cert->signing_key) >= 0); strlcat(buf2, k, sizeof(buf2)); strlcat(buf2, "\n", sizeof(buf2)); @@ -308,6 +341,16 @@ test_dir_formats(void *arg) strlcat(buf2, "tunnelled-dir-server\n", sizeof(buf2)); strlcat(buf2, "router-sig-ed25519 ", sizeof(buf2)); + /* Fake just enough of an ORPort to get by */ + MOCK(get_configured_ports, mock_get_configured_ports); + mocked_configured_ports = smartlist_new(); + + memset(&orport, 0, sizeof(orport)); + orport.type = CONN_TYPE_OR_LISTENER; + orport.addr.family = AF_INET; + orport.port = 9005; + smartlist_add(mocked_configured_ports, &orport); + buf = router_dump_router_to_string(r2, pk1, pk2, &r2_onion_keypair, &kp2); tt_assert(buf); buf[strlen(buf2)] = '\0'; /* Don't compare the sig; it's never the same @@ -318,6 +361,10 @@ test_dir_formats(void *arg) buf = router_dump_router_to_string(r2, pk1, NULL, NULL, NULL); + UNMOCK(get_configured_ports); + smartlist_free(mocked_configured_ports); + mocked_configured_ports = NULL; + /* Reset for later */ cp = buf; rp2 = router_parse_entry_from_string((const char*)cp,NULL,1,0,NULL,NULL); @@ -2149,56 +2196,57 @@ test_dir_scale_bw(void *testdata) 1.0/7, 12.0, 24.0 }; - u64_dbl_t vals[8]; + double vals_dbl[8]; + uint64_t vals_u64[8]; uint64_t 
total; int i; (void) testdata; for (i=0; i<8; ++i) - vals[i].dbl = v[i]; + vals_dbl[i] = v[i]; - scale_array_elements_to_u64(vals, 8, &total); + scale_array_elements_to_u64(vals_u64, vals_dbl, 8, &total); tt_int_op((int)total, OP_EQ, 48); total = 0; for (i=0; i<8; ++i) { - total += vals[i].u64; + total += vals_u64[i]; } tt_assert(total >= (U64_LITERAL(1)<<60)); tt_assert(total <= (U64_LITERAL(1)<<62)); for (i=0; i<8; ++i) { /* vals[2].u64 is the scaled value of 1.0 */ - double ratio = ((double)vals[i].u64) / vals[2].u64; + double ratio = ((double)vals_u64[i]) / vals_u64[2]; tt_double_op(fabs(ratio - v[i]), OP_LT, .00001); } /* test handling of no entries */ total = 1; - scale_array_elements_to_u64(vals, 0, &total); + scale_array_elements_to_u64(vals_u64, vals_dbl, 0, &total); tt_assert(total == 0); /* make sure we don't read the array when we have no entries * may require compiler flags to catch NULL dereferences */ total = 1; - scale_array_elements_to_u64(NULL, 0, &total); + scale_array_elements_to_u64(NULL, NULL, 0, &total); tt_assert(total == 0); - scale_array_elements_to_u64(NULL, 0, NULL); + scale_array_elements_to_u64(NULL, NULL, 0, NULL); /* test handling of zero totals */ total = 1; - vals[0].dbl = 0.0; - scale_array_elements_to_u64(vals, 1, &total); + vals_dbl[0] = 0.0; + scale_array_elements_to_u64(vals_u64, vals_dbl, 1, &total); tt_assert(total == 0); - tt_assert(vals[0].u64 == 0); + tt_assert(vals_u64[0] == 0); - vals[0].dbl = 0.0; - vals[1].dbl = 0.0; - scale_array_elements_to_u64(vals, 2, NULL); - tt_assert(vals[0].u64 == 0); - tt_assert(vals[1].u64 == 0); + vals_dbl[0] = 0.0; + vals_dbl[1] = 0.0; + scale_array_elements_to_u64(vals_u64, vals_dbl, 2, NULL); + tt_assert(vals_u64[0] == 0); + tt_assert(vals_u64[1] == 0); done: ; @@ -2209,7 +2257,7 @@ test_dir_random_weighted(void *testdata) { int histogram[10]; uint64_t vals[10] = {3,1,2,4,6,0,7,5,8,9}, total=0; - u64_dbl_t inp[10]; + uint64_t inp_u64[10]; int i, choice; const int n = 50000; double max_sq_error; @@ -2219,12 +2267,12 @@ test_dir_random_weighted(void *testdata) * in a scrambled order to make sure we don't depend on order. */ memset(histogram,0,sizeof(histogram)); for (i=0; i<10; ++i) { - inp[i].u64 = vals[i]; + inp_u64[i] = vals[i]; total += vals[i]; } tt_u64_op(total, OP_EQ, 45); for (i=0; i<n; ++i) { - choice = choose_array_element_by_weight(inp, 10); + choice = choose_array_element_by_weight(inp_u64, 10); tt_int_op(choice, OP_GE, 0); tt_int_op(choice, OP_LT, 10); histogram[choice]++; @@ -2251,16 +2299,16 @@ test_dir_random_weighted(void *testdata) /* Now try a singleton; do we choose it? */ for (i = 0; i < 100; ++i) { - choice = choose_array_element_by_weight(inp, 1); + choice = choose_array_element_by_weight(inp_u64, 1); tt_int_op(choice, OP_EQ, 0); } /* Now try an array of zeros. We should choose randomly. 
*/ memset(histogram,0,sizeof(histogram)); for (i = 0; i < 5; ++i) - inp[i].u64 = 0; + inp_u64[i] = 0; for (i = 0; i < n; ++i) { - choice = choose_array_element_by_weight(inp, 5); + choice = choose_array_element_by_weight(inp_u64, 5); tt_int_op(choice, OP_GE, 0); tt_int_op(choice, OP_LT, 5); histogram[choice]++; @@ -3995,12 +4043,56 @@ test_dir_choose_compression_level(void* data) done: ; } +static int mock_networkstatus_consensus_is_bootstrapping_value = 0; +static int +mock_networkstatus_consensus_is_bootstrapping(time_t now) +{ + (void)now; + return mock_networkstatus_consensus_is_bootstrapping_value; +} + +static int mock_networkstatus_consensus_can_use_extra_fallbacks_value = 0; +static int +mock_networkstatus_consensus_can_use_extra_fallbacks( + const or_options_t *options) +{ + (void)options; + return mock_networkstatus_consensus_can_use_extra_fallbacks_value; +} + +/* data is a 2 character nul-terminated string. + * If data[0] is 'b', set bootstrapping, anything else means not bootstrapping + * If data[1] is 'f', set extra fallbacks, anything else means no extra + * fallbacks. + */ static void test_dir_find_dl_schedule(void* data) { + const char *str = (const char *)data; + + tt_assert(strlen(data) == 2); + + if (str[0] == 'b') { + mock_networkstatus_consensus_is_bootstrapping_value = 1; + } else { + mock_networkstatus_consensus_is_bootstrapping_value = 0; + } + + if (str[1] == 'f') { + mock_networkstatus_consensus_can_use_extra_fallbacks_value = 1; + } else { + mock_networkstatus_consensus_can_use_extra_fallbacks_value = 0; + } + + MOCK(networkstatus_consensus_is_bootstrapping, + mock_networkstatus_consensus_is_bootstrapping); + MOCK(networkstatus_consensus_can_use_extra_fallbacks, + mock_networkstatus_consensus_can_use_extra_fallbacks); + download_status_t dls; - smartlist_t server, client, server_cons, client_cons, bridge; - (void)data; + smartlist_t server, client, server_cons, client_cons; + smartlist_t client_boot_auth_only_cons, client_boot_auth_cons; + smartlist_t client_boot_fallback_cons, bridge; mock_options = malloc(sizeof(or_options_t)); reset_options(mock_options, &mock_get_options_calls); @@ -4010,43 +4102,121 @@ test_dir_find_dl_schedule(void* data) mock_options->TestingClientDownloadSchedule = &client; mock_options->TestingServerConsensusDownloadSchedule = &server_cons; mock_options->TestingClientConsensusDownloadSchedule = &client_cons; + mock_options->ClientBootstrapConsensusAuthorityOnlyDownloadSchedule = + &client_boot_auth_only_cons; + mock_options->ClientBootstrapConsensusAuthorityDownloadSchedule = + &client_boot_auth_cons; + mock_options->ClientBootstrapConsensusFallbackDownloadSchedule = + &client_boot_fallback_cons; mock_options->TestingBridgeDownloadSchedule = &bridge; dls.schedule = DL_SCHED_GENERIC; + /* client */ mock_options->ClientOnly = 1; tt_ptr_op(find_dl_schedule(&dls, mock_options), OP_EQ, &client); mock_options->ClientOnly = 0; + + /* dir mode */ mock_options->DirPort_set = 1; - mock_options->ORPort_set = 1; mock_options->DirCache = 1; tt_ptr_op(find_dl_schedule(&dls, mock_options), OP_EQ, &server); + mock_options->DirPort_set = 0; + mock_options->DirCache = 0; -#if 0 dls.schedule = DL_SCHED_CONSENSUS; - mock_options->ClientOnly = 1; - mock_options->DirCache = 0; - tt_ptr_op(find_dl_schedule(&dls, mock_options), OP_EQ, &client_cons); - mock_options->ClientOnly = 0; - mock_options->DirCache = 1; + /* public server mode */ + mock_options->ORPort_set = 1; tt_ptr_op(find_dl_schedule(&dls, mock_options), OP_EQ, &server_cons); -#endif + 
mock_options->ORPort_set = 0; + + /* client and bridge modes */ + if (networkstatus_consensus_is_bootstrapping(time(NULL))) { + if (networkstatus_consensus_can_use_extra_fallbacks(mock_options)) { + dls.want_authority = 1; + /* client */ + mock_options->ClientOnly = 1; + tt_ptr_op(find_dl_schedule(&dls, mock_options), OP_EQ, + &client_boot_auth_cons); + mock_options->ClientOnly = 0; + + /* bridge relay */ + mock_options->ORPort_set = 1; + mock_options->BridgeRelay = 1; + tt_ptr_op(find_dl_schedule(&dls, mock_options), OP_EQ, + &client_boot_auth_cons); + mock_options->ORPort_set = 0; + mock_options->BridgeRelay = 0; + + dls.want_authority = 0; + /* client */ + mock_options->ClientOnly = 1; + tt_ptr_op(find_dl_schedule(&dls, mock_options), OP_EQ, + &client_boot_fallback_cons); + mock_options->ClientOnly = 0; + + /* bridge relay */ + mock_options->ORPort_set = 1; + mock_options->BridgeRelay = 1; + tt_ptr_op(find_dl_schedule(&dls, mock_options), OP_EQ, + &client_boot_fallback_cons); + mock_options->ORPort_set = 0; + mock_options->BridgeRelay = 0; + + } else { + /* dls.want_authority is ignored */ + /* client */ + mock_options->ClientOnly = 1; + tt_ptr_op(find_dl_schedule(&dls, mock_options), OP_EQ, + &client_boot_auth_only_cons); + mock_options->ClientOnly = 0; + + /* bridge relay */ + mock_options->ORPort_set = 1; + mock_options->BridgeRelay = 1; + tt_ptr_op(find_dl_schedule(&dls, mock_options), OP_EQ, + &client_boot_auth_only_cons); + mock_options->ORPort_set = 0; + mock_options->BridgeRelay = 0; + } + } else { + /* client */ + mock_options->ClientOnly = 1; + tt_ptr_op(find_dl_schedule(&dls, mock_options), OP_EQ, + &client_cons); + mock_options->ClientOnly = 0; + + /* bridge relay */ + mock_options->ORPort_set = 1; + mock_options->BridgeRelay = 1; + tt_ptr_op(find_dl_schedule(&dls, mock_options), OP_EQ, + &client_cons); + mock_options->ORPort_set = 0; + mock_options->BridgeRelay = 0; + } dls.schedule = DL_SCHED_BRIDGE; + /* client */ mock_options->ClientOnly = 1; tt_ptr_op(find_dl_schedule(&dls, mock_options), OP_EQ, &bridge); - mock_options->ClientOnly = 0; - tt_ptr_op(find_dl_schedule(&dls, mock_options), OP_EQ, &bridge); done: + UNMOCK(networkstatus_consensus_is_bootstrapping); + UNMOCK(networkstatus_consensus_can_use_extra_fallbacks); UNMOCK(get_options); + free(mock_options); + mock_options = NULL; } -#define DIR_LEGACY(name) \ +#define DIR_LEGACY(name) \ { #name, test_dir_ ## name , TT_FORK, NULL, NULL } #define DIR(name,flags) \ { #name, test_dir_##name, (flags), NULL, NULL } +/* where arg is a string constant */ +#define DIR_ARG(name,flags,arg) \ + { #name "_" arg, test_dir_##name, (flags), &passthrough_setup, (void*) arg } + struct testcase_t dir_tests[] = { DIR_LEGACY(nicknames), DIR_LEGACY(formats), @@ -4081,7 +4251,10 @@ struct testcase_t dir_tests[] = { DIR(should_not_init_request_to_dir_auths_without_v3_info, 0), DIR(should_init_request_to_dir_auths, 0), DIR(choose_compression_level, 0), - DIR(find_dl_schedule, 0), + DIR_ARG(find_dl_schedule, TT_FORK, "bf"), + DIR_ARG(find_dl_schedule, TT_FORK, "ba"), + DIR_ARG(find_dl_schedule, TT_FORK, "cf"), + DIR_ARG(find_dl_schedule, TT_FORK, "ca"), END_OF_TESTCASES }; diff --git a/src/test/test_dir_handle_get.c b/src/test/test_dir_handle_get.c index 0c6530d4ff..3029e98e2f 100644 --- a/src/test/test_dir_handle_get.c +++ b/src/test/test_dir_handle_get.c @@ -1192,7 +1192,7 @@ test_dir_handle_get_server_keys_all(void* data) base16_decode(ds->v3_identity_digest, DIGEST_LEN, TEST_CERT_IDENT_KEY, HEX_DIGEST_LEN); tt_int_op(0, OP_EQ, 
trusted_dirs_load_certs_from_string(TEST_CERTIFICATE, - TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST, 1)); + TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST, 1, NULL)); conn = dir_connection_new(tor_addr_family(&MOCK_TOR_ADDR)); @@ -1351,7 +1351,7 @@ test_dir_handle_get_server_keys_fp(void* data) TEST_CERT_IDENT_KEY, HEX_DIGEST_LEN); tt_int_op(0, OP_EQ, trusted_dirs_load_certs_from_string(TEST_CERTIFICATE, - TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST, 1)); + TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST, 1, NULL)); conn = dir_connection_new(tor_addr_family(&MOCK_TOR_ADDR)); char req[71]; @@ -1423,7 +1423,7 @@ test_dir_handle_get_server_keys_sk(void* data) routerlist_free_all(); tt_int_op(0, OP_EQ, trusted_dirs_load_certs_from_string(TEST_CERTIFICATE, - TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST, 1)); + TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST, 1, NULL)); conn = dir_connection_new(tor_addr_family(&MOCK_TOR_ADDR)); char req[71]; @@ -1505,7 +1505,7 @@ test_dir_handle_get_server_keys_fpsk(void* data) dir_server_add(ds); tt_int_op(0, OP_EQ, trusted_dirs_load_certs_from_string(TEST_CERTIFICATE, - TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST, 1)); + TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST, 1, NULL)); conn = dir_connection_new(tor_addr_family(&MOCK_TOR_ADDR)); @@ -1561,7 +1561,7 @@ test_dir_handle_get_server_keys_busy(void* data) dir_server_add(ds); tt_int_op(0, OP_EQ, trusted_dirs_load_certs_from_string(TEST_CERTIFICATE, - TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST, 1)); + TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST, 1, NULL)); MOCK(get_options, mock_get_options); MOCK(connection_write_to_buf_impl_, connection_write_to_buf_mock); @@ -2299,7 +2299,7 @@ test_dir_handle_get_status_vote_next_authority(void* data) base16_decode(ds->v3_identity_digest, DIGEST_LEN, TEST_CERT_IDENT_KEY, HEX_DIGEST_LEN); tt_int_op(0, OP_EQ, trusted_dirs_load_certs_from_string(TEST_CERTIFICATE, - TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST, 1)); + TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST, 1, NULL)); init_mock_options(); mock_options->AuthoritativeDir = 1; @@ -2378,7 +2378,7 @@ test_dir_handle_get_status_vote_current_authority(void* data) TEST_CERT_IDENT_KEY, HEX_DIGEST_LEN); tt_int_op(0, OP_EQ, trusted_dirs_load_certs_from_string(TEST_CERTIFICATE, - TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST, 1)); + TRUSTED_DIRS_CERTS_SRC_DL_BY_ID_DIGEST, 1, NULL)); init_mock_options(); mock_options->AuthoritativeDir = 1; diff --git a/src/test/test_microdesc.c b/src/test/test_microdesc.c index 7db819a622..581f58b45f 100644 --- a/src/test/test_microdesc.c +++ b/src/test/test_microdesc.c @@ -483,7 +483,7 @@ test_md_generate(void *arg) md = dirvote_create_microdescriptor(ri, 21); tt_str_op(md->body, ==, test_md2_21); tt_assert(ed25519_pubkey_eq(md->ed25519_identity_pkey, - &ri->signing_key_cert->signing_key)); + &ri->cache_info.signing_key_cert->signing_key)); done: microdesc_free(md); diff --git a/src/test/test_ntor_cl.c b/src/test/test_ntor_cl.c index 6df123162e..49c9ad76d4 100644 --- a/src/test/test_ntor_cl.c +++ b/src/test/test_ntor_cl.c @@ -153,7 +153,10 @@ main(int argc, char **argv) if (argc < 2) { fprintf(stderr, "I need arguments. 
Read source for more info.\n"); return 1; - } else if (!strcmp(argv[1], "client1")) { + } + + curve25519_init(); + if (!strcmp(argv[1], "client1")) { return client1(argc, argv); } else if (!strcmp(argv[1], "server1")) { return server1(argc, argv); diff --git a/src/test/test_policy.c b/src/test/test_policy.c index 48e82551e3..a939ebf54f 100644 --- a/src/test/test_policy.c +++ b/src/test/test_policy.c @@ -716,10 +716,9 @@ test_policies_reject_exit_address(void *arg) } static smartlist_t *test_configured_ports = NULL; -const smartlist_t *mock_get_configured_ports(void); /** Returns test_configured_ports */ -const smartlist_t * +static const smartlist_t * mock_get_configured_ports(void) { return test_configured_ports; diff --git a/src/test/test_routerlist.c b/src/test/test_routerlist.c index 497606920d..2cffa6e801 100644 --- a/src/test/test_routerlist.c +++ b/src/test/test_routerlist.c @@ -15,6 +15,7 @@ #include "container.h" #include "directory.h" #include "dirvote.h" +#include "microdesc.h" #include "networkstatus.h" #include "nodelist.h" #include "policies.h" @@ -190,6 +191,14 @@ construct_consensus(char **consensus_text_md) crypto_pk_free(sign_skey_leg); } +static int mock_usable_consensus_flavor_value = FLAV_NS; + +static int +mock_usable_consensus_flavor(void) +{ + return mock_usable_consensus_flavor_value; +} + static void test_router_pick_directory_server_impl(void *arg) { @@ -209,6 +218,22 @@ test_router_pick_directory_server_impl(void *arg) (void)arg; + MOCK(usable_consensus_flavor, mock_usable_consensus_flavor); + + /* With no consensus, we must be bootstrapping, regardless of time or flavor + */ + mock_usable_consensus_flavor_value = FLAV_NS; + tt_assert(networkstatus_consensus_is_bootstrapping(now)); + tt_assert(networkstatus_consensus_is_bootstrapping(now + 2000)); + tt_assert(networkstatus_consensus_is_bootstrapping(now + 2*24*60*60)); + tt_assert(networkstatus_consensus_is_bootstrapping(now - 2*24*60*60)); + + mock_usable_consensus_flavor_value = FLAV_MICRODESC; + tt_assert(networkstatus_consensus_is_bootstrapping(now)); + tt_assert(networkstatus_consensus_is_bootstrapping(now + 2000)); + tt_assert(networkstatus_consensus_is_bootstrapping(now + 2*24*60*60)); + tt_assert(networkstatus_consensus_is_bootstrapping(now - 2*24*60*60)); + /* No consensus available, fail early */ rs = router_pick_directory_server_impl(V3_DIRINFO, (const int) 0, NULL); tt_assert(rs == NULL); @@ -223,6 +248,28 @@ test_router_pick_directory_server_impl(void *arg) tt_int_op(smartlist_len(con_md->routerstatus_list), ==, 3); tt_assert(!networkstatus_set_current_consensus_from_ns(con_md, "microdesc")); + + /* If the consensus time or flavor doesn't match, we are still + * bootstrapping */ + mock_usable_consensus_flavor_value = FLAV_NS; + tt_assert(networkstatus_consensus_is_bootstrapping(now)); + tt_assert(networkstatus_consensus_is_bootstrapping(now + 2000)); + tt_assert(networkstatus_consensus_is_bootstrapping(now + 2*24*60*60)); + tt_assert(networkstatus_consensus_is_bootstrapping(now - 2*24*60*60)); + + /* With a valid consensus for the current time and flavor, we stop + * bootstrapping, even if we have no certificates */ + mock_usable_consensus_flavor_value = FLAV_MICRODESC; + tt_assert(!networkstatus_consensus_is_bootstrapping(now + 2000)); + tt_assert(!networkstatus_consensus_is_bootstrapping(con_md->valid_after)); + tt_assert(!networkstatus_consensus_is_bootstrapping(con_md->valid_until)); + tt_assert(!networkstatus_consensus_is_bootstrapping(con_md->valid_until + + 24*60*60)); + /* These times are 
outside the test validity period */ + tt_assert(networkstatus_consensus_is_bootstrapping(now)); + tt_assert(networkstatus_consensus_is_bootstrapping(now + 2*24*60*60)); + tt_assert(networkstatus_consensus_is_bootstrapping(now - 2*24*60*60)); + nodelist_set_consensus(con_md); nodelist_assert_ok(); @@ -362,6 +409,7 @@ test_router_pick_directory_server_impl(void *arg) node_router1->rs->last_dir_503_at = 0; done: + UNMOCK(usable_consensus_flavor); if (router1_id) tor_free(router1_id); if (router2_id) diff --git a/src/test/test_util.c b/src/test/test_util.c index 21ff75741a..2726c31fe8 100644 --- a/src/test/test_util.c +++ b/src/test/test_util.c @@ -4223,21 +4223,6 @@ test_util_round_to_next_multiple_of(void *arg) tt_u64_op(round_uint64_to_next_multiple_of(UINT64_MAX,2), ==, UINT64_MAX); - tt_i64_op(round_int64_to_next_multiple_of(0,1), ==, 0); - tt_i64_op(round_int64_to_next_multiple_of(0,7), ==, 0); - - tt_i64_op(round_int64_to_next_multiple_of(99,1), ==, 99); - tt_i64_op(round_int64_to_next_multiple_of(99,7), ==, 105); - tt_i64_op(round_int64_to_next_multiple_of(99,9), ==, 99); - - tt_i64_op(round_int64_to_next_multiple_of(-99,1), ==, -99); - tt_i64_op(round_int64_to_next_multiple_of(-99,7), ==, -98); - tt_i64_op(round_int64_to_next_multiple_of(-99,9), ==, -99); - - tt_i64_op(round_int64_to_next_multiple_of(INT64_MIN,2), ==, INT64_MIN); - tt_i64_op(round_int64_to_next_multiple_of(INT64_MAX,2), ==, - INT64_MAX); - tt_int_op(round_uint32_to_next_multiple_of(0,1), ==, 0); tt_int_op(round_uint32_to_next_multiple_of(0,7), ==, 0); @@ -4819,7 +4804,7 @@ struct testcase_t util_tests[] = { UTIL_LEGACY(memarea), UTIL_LEGACY(control_formats), UTIL_LEGACY(mmap), - UTIL_LEGACY(sscanf), + UTIL_TEST(sscanf, TT_FORK), UTIL_LEGACY(format_time_interval), UTIL_LEGACY(path_is_relative), UTIL_LEGACY(strtok), diff --git a/src/test/testing_common.c b/src/test/testing_common.c index aeb1fa794d..39c3d02ab1 100644 --- a/src/test/testing_common.c +++ b/src/test/testing_common.c @@ -242,6 +242,8 @@ main(int c, const char **v) options = options_new(); tor_threads_init(); + network_init(); + struct tor_libevent_cfg cfg; memset(&cfg, 0, sizeof(cfg)); tor_libevent_initialize(&cfg); @@ -284,7 +286,6 @@ main(int c, const char **v) return 1; } rep_hist_init(); - network_init(); setup_directory(); options_init(options); options->DataDirectory = tor_strdup(temp_dir); diff --git a/src/tools/include.am b/src/tools/include.am index 38ed57546f..d0185b5887 100644 --- a/src/tools/include.am +++ b/src/tools/include.am @@ -7,23 +7,27 @@ endif src_tools_tor_resolve_SOURCES = src/tools/tor-resolve.c src_tools_tor_resolve_LDFLAGS = -src_tools_tor_resolve_LDADD = src/common/libor.a @TOR_LIB_MATH@ @TOR_LIB_WS32@ +src_tools_tor_resolve_LDADD = src/common/libor.a \ + src/common/libor-ctime.a \ + @TOR_LIB_MATH@ @TOR_LIB_WS32@ if COVERAGE_ENABLED src_tools_tor_cov_resolve_SOURCES = src/tools/tor-resolve.c src_tools_tor_cov_resolve_CPPFLAGS = $(AM_CPPFLAGS) $(TEST_CPPFLAGS) src_tools_tor_cov_resolve_CFLAGS = $(AM_CFLAGS) $(TEST_CFLAGS) src_tools_tor_cov_resolve_LDADD = src/common/libor-testing.a \ - @TOR_LIB_MATH@ @TOR_LIB_WS32@ + src/common/libor-ctime-testing.a \ + @TOR_LIB_MATH@ @TOR_LIB_WS32@ endif src_tools_tor_gencert_SOURCES = src/tools/tor-gencert.c src_tools_tor_gencert_LDFLAGS = @TOR_LDFLAGS_zlib@ @TOR_LDFLAGS_openssl@ src_tools_tor_gencert_LDADD = src/common/libor.a src/common/libor-crypto.a \ + src/common/libor-ctime.a \ $(LIBKECCAK_TINY) \ $(LIBDONNA) \ - @TOR_LIB_MATH@ @TOR_ZLIB_LIBS@ @TOR_OPENSSL_LIBS@ \ - @TOR_LIB_WS32@ 
@TOR_LIB_GDI@ @CURVE25519_LIBS@ if COVERAGE_ENABLED src_tools_tor_cov_gencert_SOURCES = src/tools/tor-gencert.c @@ -32,18 +36,21 @@ src_tools_tor_cov_gencert_CFLAGS = $(AM_CFLAGS) $(TEST_CFLAGS) src_tools_tor_cov_gencert_LDFLAGS = @TOR_LDFLAGS_zlib@ @TOR_LDFLAGS_openssl@ src_tools_tor_cov_gencert_LDADD = src/common/libor-testing.a \ src/common/libor-crypto-testing.a \ + src/common/libor-ctime-testing.a \ $(LIBKECCAK_TINY) \ $(LIBDONNA) \ - @TOR_LIB_MATH@ @TOR_ZLIB_LIBS@ @TOR_OPENSSL_LIBS@ \ - @TOR_LIB_WS32@ @TOR_LIB_GDI@ @CURVE25519_LIBS@ + @TOR_LIB_MATH@ @TOR_ZLIB_LIBS@ @TOR_OPENSSL_LIBS@ \ + @TOR_LIB_WS32@ @TOR_LIB_GDI@ @CURVE25519_LIBS@ endif src_tools_tor_checkkey_SOURCES = src/tools/tor-checkkey.c src_tools_tor_checkkey_LDFLAGS = @TOR_LDFLAGS_zlib@ @TOR_LDFLAGS_openssl@ -src_tools_tor_checkkey_LDADD = src/common/libor.a src/common/libor-crypto.a \ +src_tools_tor_checkkey_LDADD = src/common/libor.a \ + src/common/libor-ctime.a \ + src/common/libor-crypto.a \ $(LIBKECCAK_TINY) \ $(LIBDONNA) \ - @TOR_LIB_MATH@ @TOR_ZLIB_LIBS@ @TOR_OPENSSL_LIBS@ \ - @TOR_LIB_WS32@ @TOR_LIB_GDI@ @CURVE25519_LIBS@ + @TOR_LIB_MATH@ @TOR_ZLIB_LIBS@ @TOR_OPENSSL_LIBS@ \ + @TOR_LIB_WS32@ @TOR_LIB_GDI@ @CURVE25519_LIBS@ EXTRA_DIST += src/tools/tor-fw-helper/README