author     Nick Mathewson <nickm@torproject.org>  2018-07-05 16:31:38 -0400
committer  Nick Mathewson <nickm@torproject.org>  2018-07-05 17:15:50 -0400
commit     63b4ea22af8e8314dd718f02046de5f4b91edf9d (patch)
tree       af52b6fba37f22c86447fd5267dd5eb557807c8b /src/core
parent     ce84200542f48a92e8b56a8d032401ecd153e90c (diff)
download   tor-63b4ea22af8e8314dd718f02046de5f4b91edf9d.tar.gz
           tor-63b4ea22af8e8314dd718f02046de5f4b91edf9d.zip
Move literally everything out of src/or
This commit won't build yet -- it just puts everything in a slightly more logical place.

The reasoning here is that "src/core" will hold the stuff that every (or nearly every) tor instance will need in order to do onion routing. Other features (including some necessary ones) will live in "src/feature". The "src/app" directory will hold the stuff needed to have Tor be an application you can actually run.

This commit DOES NOT refactor the former contents of src/or into a logical set of acyclic libraries, or change any code at all. That will have to come in the future.

We will continue to move things around and split them in the future, but I hope this lays a reasonable groundwork for doing so.
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/crypto/hs_ntor.c  620
-rw-r--r--  src/core/crypto/hs_ntor.h  69
-rw-r--r--  src/core/crypto/onion.c  1346
-rw-r--r--  src/core/crypto/onion.h  128
-rw-r--r--  src/core/crypto/onion_fast.c  144
-rw-r--r--  src/core/crypto/onion_fast.h  41
-rw-r--r--  src/core/crypto/onion_ntor.c  341
-rw-r--r--  src/core/crypto/onion_ntor.h  65
-rw-r--r--  src/core/crypto/onion_tap.c  246
-rw-r--r--  src/core/crypto/onion_tap.h  40
-rw-r--r--  src/core/crypto/relay_crypto.c  332
-rw-r--r--  src/core/crypto/relay_crypto.h  31
-rw-r--r--  src/core/mainloop/connection.c  5331
-rw-r--r--  src/core/mainloop/connection.h  343
-rw-r--r--  src/core/mainloop/cpuworker.c  599
-rw-r--r--  src/core/mainloop/cpuworker.h  37
-rw-r--r--  src/core/mainloop/main.c  4311
-rw-r--r--  src/core/mainloop/main.h  122
-rw-r--r--  src/core/mainloop/periodic.c  172
-rw-r--r--  src/core/mainloop/periodic.h  88
-rw-r--r--  src/core/or/addr_policy_st.h  46
-rw-r--r--  src/core/or/address_set.c  71
-rw-r--r--  src/core/or/address_set.h  31
-rw-r--r--  src/core/or/cell_queue_st.h  29
-rw-r--r--  src/core/or/cell_st.h  20
-rw-r--r--  src/core/or/channel.c  3480
-rw-r--r--  src/core/or/channel.h  780
-rw-r--r--  src/core/or/channelpadding.c  800
-rw-r--r--  src/core/or/channelpadding.h  45
-rw-r--r--  src/core/or/channeltls.c  2458
-rw-r--r--  src/core/or/channeltls.h  77
-rw-r--r--  src/core/or/circuit_st.h  182
-rw-r--r--  src/core/or/circuitbuild.c  3098
-rw-r--r--  src/core/or/circuitbuild.h  107
-rw-r--r--  src/core/or/circuitlist.c  2742
-rw-r--r--  src/core/or/circuitlist.h  247
-rw-r--r--  src/core/or/circuitmux.c  1364
-rw-r--r--  src/core/or/circuitmux.h  162
-rw-r--r--  src/core/or/circuitmux_ewma.c  829
-rw-r--r--  src/core/or/circuitmux_ewma.h  30
-rw-r--r--  src/core/or/circuitstats.c  1952
-rw-r--r--  src/core/or/circuitstats.h  213
-rw-r--r--  src/core/or/circuituse.c  3155
-rw-r--r--  src/core/or/circuituse.h  93
-rw-r--r--  src/core/or/command.c  665
-rw-r--r--  src/core/or/command.h  31
-rw-r--r--  src/core/or/connection_edge.c  4224
-rw-r--r--  src/core/or/connection_edge.h  248
-rw-r--r--  src/core/or/connection_or.c  2996
-rw-r--r--  src/core/or/connection_or.h  165
-rw-r--r--  src/core/or/connection_st.h  149
-rw-r--r--  src/core/or/cpath_build_state_st.h  38
-rw-r--r--  src/core/or/crypt_path_reference_st.h  23
-rw-r--r--  src/core/or/crypt_path_st.h  70
-rw-r--r--  src/core/or/destroy_cell_queue_st.h  27
-rw-r--r--  src/core/or/dos.c  801
-rw-r--r--  src/core/or/dos.h  140
-rw-r--r--  src/core/or/edge_connection_st.h  77
-rw-r--r--  src/core/or/entry_connection_st.h  100
-rw-r--r--  src/core/or/entry_port_cfg_st.h  54
-rw-r--r--  src/core/or/extend_info_st.h  30
-rw-r--r--  src/core/or/git_revision.c  17
-rw-r--r--  src/core/or/git_revision.h  12
-rw-r--r--  src/core/or/listener_connection_st.h  25
-rw-r--r--  src/core/or/or.h  1088
-rw-r--r--  src/core/or/or_circuit_st.h  80
-rw-r--r--  src/core/or/or_connection_st.h  92
-rw-r--r--  src/core/or/or_handshake_certs_st.h  40
-rw-r--r--  src/core/or/or_handshake_state_st.h  78
-rw-r--r--  src/core/or/origin_circuit_st.h  290
-rw-r--r--  src/core/or/policies.c  3147
-rw-r--r--  src/core/or/policies.h  187
-rw-r--r--  src/core/or/port_cfg_st.h  35
-rw-r--r--  src/core/or/reasons.c  497
-rw-r--r--  src/core/or/reasons.h  34
-rw-r--r--  src/core/or/relay.c  3088
-rw-r--r--  src/core/or/relay.h  124
-rw-r--r--  src/core/or/relay_crypto_st.h  31
-rw-r--r--  src/core/or/scheduler.c  768
-rw-r--r--  src/core/or/scheduler.h  218
-rw-r--r--  src/core/or/scheduler_kist.c  842
-rw-r--r--  src/core/or/scheduler_vanilla.c  175
-rw-r--r--  src/core/or/server_port_cfg_st.h  20
-rw-r--r--  src/core/or/socks_request_st.h  75
-rw-r--r--  src/core/or/status.c  251
-rw-r--r--  src/core/or/status.h  18
-rw-r--r--  src/core/or/tor_version_st.h  32
-rw-r--r--  src/core/or/var_cell_st.h  23
-rw-r--r--  src/core/proto/proto_cell.c  86
-rw-r--r--  src/core/proto/proto_cell.h  17
-rw-r--r--  src/core/proto/proto_control0.c  26
-rw-r--r--  src/core/proto/proto_control0.h  14
-rw-r--r--  src/core/proto/proto_ext_or.c  40
-rw-r--r--  src/core/proto/proto_ext_or.h  22
-rw-r--r--  src/core/proto/proto_http.c  171
-rw-r--r--  src/core/proto/proto_http.h  24
-rw-r--r--  src/core/proto/proto_socks.c  713
-rw-r--r--  src/core/proto/proto_socks.h  21
-rw-r--r--  src/core/proto/protover.c  923
-rw-r--r--  src/core/proto/protover.h  97
-rw-r--r--  src/core/proto/protover_rust.c  34
101 files changed, 59730 insertions, 0 deletions
diff --git a/src/core/crypto/hs_ntor.c b/src/core/crypto/hs_ntor.c
new file mode 100644
index 0000000000..b5007545db
--- /dev/null
+++ b/src/core/crypto/hs_ntor.c
@@ -0,0 +1,620 @@
+/* Copyright (c) 2017-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/** \file hs_ntor.c
+ * \brief Implements the ntor variant used in Tor hidden services.
+ *
+ * \details
+ * This module handles the variant of the ntor handshake that is documented in
+ * section [NTOR-WITH-EXTRA-DATA] of rend-spec-ng.txt.
+ *
+ * The functions in this file provide an API that should be used when sending
+ * or receiving INTRODUCE1/RENDEZVOUS1 cells to generate the various key
+ * material required to create and handle those cells.
+ *
+ * In the case of INTRODUCE1 it provides encryption and MAC keys to
+ * encode/decode the encrypted blob (see hs_ntor_intro_cell_keys_t). The
+ * relevant public functions are hs_ntor_{client,service}_get_introduce1_keys().
+ *
+ * In the case of RENDEZVOUS1 it calculates the MAC required to authenticate
+ * the cell, and also provides the key seed that is used to derive the crypto
+ * material for rendezvous encryption (see hs_ntor_rend_cell_keys_t). The
+ * relevant public functions are hs_ntor_{client,service}_get_rendezvous1_keys().
+ * It also provides a function (hs_ntor_circuit_key_expansion()) that does the
+ * rendezvous key expansion to set up end-to-end rend circuit keys.
+ */
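/* [Editorial aside -- not part of this commit. A minimal sketch of how a
 * client-side caller might drive this API to derive INTRODUCE1 keys,
 * assuming the auth key, enc key, and subcredential come from a parsed
 * HS descriptor; the function name is illustrative.] */
static int
example_client_intro1_keys(const ed25519_public_key_t *auth_key,
                           const curve25519_public_key_t *enc_key,
                           const uint8_t *subcredential,
                           hs_ntor_intro_cell_keys_t *keys_out)
{
  curve25519_keypair_t eph; /* fresh (x,X) for this introduction */
  if (curve25519_keypair_generate(&eph, 0) < 0)
    return -1;
  /* Returns -1 (and wipes keys_out) if any derived secret was all-zero. */
  int r = hs_ntor_client_get_introduce1_keys(auth_key, enc_key, &eph,
                                             subcredential, keys_out);
  memwipe(&eph, 0, sizeof(eph)); /* discard the ephemeral secret key */
  return r;
}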
+
+#include "or/or.h"
+#include "lib/crypt_ops/crypto_util.h"
+#include "lib/crypt_ops/crypto_curve25519.h"
+#include "lib/crypt_ops/crypto_ed25519.h"
+#include "or/hs_ntor.h"
+
+/* String constants used by the ntor HS protocol */
+#define PROTOID "tor-hs-ntor-curve25519-sha3-256-1"
+#define PROTOID_LEN (sizeof(PROTOID) - 1)
+#define SERVER_STR "Server"
+#define SERVER_STR_LEN (sizeof(SERVER_STR) - 1)
+
+/* Protocol-specific tweaks to our crypto inputs */
+#define T_HSENC PROTOID ":hs_key_extract"
+#define T_HSENC_LEN (sizeof(T_HSENC) - 1)
+#define T_HSVERIFY PROTOID ":hs_verify"
+#define T_HSMAC PROTOID ":hs_mac"
+#define M_HSEXPAND PROTOID ":hs_key_expand"
+#define M_HSEXPAND_LEN (sizeof(M_HSEXPAND) - 1)
+
+/************************* Helper functions: *******************************/
+
+/** Helper macro: copy <b>len</b> bytes from <b>inp</b> to <b>ptr</b> and
+ *advance <b>ptr</b> by the number of bytes copied. Stolen from onion_ntor.c */
+#define APPEND(ptr, inp, len) \
+ STMT_BEGIN { \
+ memcpy(ptr, (inp), (len)); \
+ ptr += len; \
+ } STMT_END
+
+/* Length of EXP(X,y) | EXP(X,b) | AUTH_KEY | B | X | Y | PROTOID */
+#define REND_SECRET_HS_INPUT_LEN (CURVE25519_OUTPUT_LEN * 2 + \
+ ED25519_PUBKEY_LEN + CURVE25519_PUBKEY_LEN * 3 + PROTOID_LEN)
+/* Length of auth_input = verify | AUTH_KEY | B | Y | X | PROTOID | "Server" */
+#define REND_AUTH_INPUT_LEN (DIGEST256_LEN + ED25519_PUBKEY_LEN + \
+ CURVE25519_PUBKEY_LEN * 3 + PROTOID_LEN + SERVER_STR_LEN)
+
+/** Helper function: Compute the last part of the HS ntor handshake which
+ * derives key material necessary to create and handle RENDEZVOUS1
+ * cells. It is used by both client and service. The actual calculation is
+ * as follows:
+ *
+ * NTOR_KEY_SEED = MAC(rend_secret_hs_input, t_hsenc)
+ * verify = MAC(rend_secret_hs_input, t_hsverify)
+ * auth_input = verify | AUTH_KEY | B | Y | X | PROTOID | "Server"
+ * auth_input_mac = MAC(auth_input, t_hsmac)
+ *
+ * where in the above, AUTH_KEY is <b>intro_auth_pubkey</b>, B is
+ * <b>intro_enc_pubkey</b>, Y is <b>service_ephemeral_rend_pubkey</b>, and X
+ * is <b>client_ephemeral_enc_pubkey</b>. The provided
+ * <b>rend_secret_hs_input</b> is of size REND_SECRET_HS_INPUT_LEN.
+ *
+ * The final results of NTOR_KEY_SEED and auth_input_mac are placed in
+ * <b>hs_ntor_rend_cell_keys_out</b>. Return 0 if everything went fine. */
+static int
+get_rendezvous1_key_material(const uint8_t *rend_secret_hs_input,
+ const ed25519_public_key_t *intro_auth_pubkey,
+ const curve25519_public_key_t *intro_enc_pubkey,
+ const curve25519_public_key_t *service_ephemeral_rend_pubkey,
+ const curve25519_public_key_t *client_ephemeral_enc_pubkey,
+ hs_ntor_rend_cell_keys_t *hs_ntor_rend_cell_keys_out)
+{
+ int bad = 0;
+ uint8_t ntor_key_seed[DIGEST256_LEN];
+ uint8_t ntor_verify[DIGEST256_LEN];
+ uint8_t rend_auth_input[REND_AUTH_INPUT_LEN];
+ uint8_t rend_cell_auth[DIGEST256_LEN];
+ uint8_t *ptr;
+
+ /* Let's build NTOR_KEY_SEED */
+ crypto_mac_sha3_256(ntor_key_seed, sizeof(ntor_key_seed),
+ rend_secret_hs_input, REND_SECRET_HS_INPUT_LEN,
+ (const uint8_t *)T_HSENC, strlen(T_HSENC));
+ bad |= safe_mem_is_zero(ntor_key_seed, DIGEST256_LEN);
+
+ /* Let's build ntor_verify */
+ crypto_mac_sha3_256(ntor_verify, sizeof(ntor_verify),
+ rend_secret_hs_input, REND_SECRET_HS_INPUT_LEN,
+ (const uint8_t *)T_HSVERIFY, strlen(T_HSVERIFY));
+ bad |= safe_mem_is_zero(ntor_verify, DIGEST256_LEN);
+
+ /* Let's build auth_input: */
+ ptr = rend_auth_input;
+ /* Append ntor_verify */
+ APPEND(ptr, ntor_verify, sizeof(ntor_verify));
+ /* Append AUTH_KEY */
+ APPEND(ptr, intro_auth_pubkey->pubkey, ED25519_PUBKEY_LEN);
+ /* Append B */
+ APPEND(ptr, intro_enc_pubkey->public_key, CURVE25519_PUBKEY_LEN);
+ /* Append Y */
+ APPEND(ptr,
+ service_ephemeral_rend_pubkey->public_key, CURVE25519_PUBKEY_LEN);
+ /* Append X */
+ APPEND(ptr,
+ client_ephemeral_enc_pubkey->public_key, CURVE25519_PUBKEY_LEN);
+ /* Append PROTOID */
+ APPEND(ptr, PROTOID, strlen(PROTOID));
+ /* Append "Server" */
+ APPEND(ptr, SERVER_STR, strlen(SERVER_STR));
+ tor_assert(ptr == rend_auth_input + sizeof(rend_auth_input));
+
+ /* Let's build auth_input_mac that goes in RENDEZVOUS1 cell */
+ crypto_mac_sha3_256(rend_cell_auth, sizeof(rend_cell_auth),
+ rend_auth_input, sizeof(rend_auth_input),
+ (const uint8_t *)T_HSMAC, strlen(T_HSMAC));
+  bad |= safe_mem_is_zero(rend_cell_auth, DIGEST256_LEN);
+
+ { /* Get the computed RENDEZVOUS1 material! */
+ memcpy(&hs_ntor_rend_cell_keys_out->rend_cell_auth_mac,
+ rend_cell_auth, DIGEST256_LEN);
+ memcpy(&hs_ntor_rend_cell_keys_out->ntor_key_seed,
+ ntor_key_seed, DIGEST256_LEN);
+ }
+
+  memwipe(rend_cell_auth, 0, sizeof(rend_cell_auth));
+  memwipe(rend_auth_input, 0, sizeof(rend_auth_input));
+  memwipe(ntor_key_seed, 0, sizeof(ntor_key_seed));
+  memwipe(ntor_verify, 0, sizeof(ntor_verify));
+
+ return bad;
+}
+
+/** Length of secret_input = EXP(B,x) | AUTH_KEY | X | B | PROTOID */
+#define INTRO_SECRET_HS_INPUT_LEN (CURVE25519_OUTPUT_LEN + \
+  ED25519_PUBKEY_LEN + CURVE25519_PUBKEY_LEN * 2 + PROTOID_LEN)
+/* Length of info = m_hsexpand | subcredential */
+#define INFO_BLOB_LEN (M_HSEXPAND_LEN + DIGEST256_LEN)
+/* Length of KDF input = intro_secret_hs_input | t_hsenc | info */
+#define KDF_INPUT_LEN (INTRO_SECRET_HS_INPUT_LEN + T_HSENC_LEN + INFO_BLOB_LEN)
+
+/** Helper function: Compute the part of the HS ntor handshake that generates
+ * key material for creating and handling INTRODUCE1 cells. Function used
+ * by both client and service. Specifically, calculate the following:
+ *
+ * info = m_hsexpand | subcredential
+ * hs_keys = KDF(intro_secret_hs_input | t_hsenc | info, S_KEY_LEN+MAC_LEN)
+ * ENC_KEY = hs_keys[0:S_KEY_LEN]
+ * MAC_KEY = hs_keys[S_KEY_LEN:S_KEY_LEN+MAC_KEY_LEN]
+ *
+ * where intro_secret_hs_input is <b>secret_input</b> (of size
+ * INTRO_SECRET_HS_INPUT_LEN), and <b>subcredential</b> is of size
+ * DIGEST256_LEN.
+ *
+ * Fill <b>hs_ntor_intro_cell_keys_out</b> with the necessary key
+ * material. */
+static void
+get_introduce1_key_material(const uint8_t *secret_input,
+ const uint8_t *subcredential,
+ hs_ntor_intro_cell_keys_t *hs_ntor_intro_cell_keys_out)
+{
+ uint8_t keystream[CIPHER256_KEY_LEN + DIGEST256_LEN];
+ uint8_t info_blob[INFO_BLOB_LEN];
+ uint8_t kdf_input[KDF_INPUT_LEN];
+ crypto_xof_t *xof;
+ uint8_t *ptr;
+
+ /* Let's build info */
+ ptr = info_blob;
+ APPEND(ptr, M_HSEXPAND, strlen(M_HSEXPAND));
+ APPEND(ptr, subcredential, DIGEST256_LEN);
+ tor_assert(ptr == info_blob + sizeof(info_blob));
+
+ /* Let's build the input to the KDF */
+ ptr = kdf_input;
+ APPEND(ptr, secret_input, INTRO_SECRET_HS_INPUT_LEN);
+ APPEND(ptr, T_HSENC, strlen(T_HSENC));
+ APPEND(ptr, info_blob, sizeof(info_blob));
+ tor_assert(ptr == kdf_input + sizeof(kdf_input));
+
+ /* Now we need to run kdf_input over SHAKE-256 */
+ xof = crypto_xof_new();
+ crypto_xof_add_bytes(xof, kdf_input, sizeof(kdf_input));
+  crypto_xof_squeeze_bytes(xof, keystream, sizeof(keystream));
+ crypto_xof_free(xof);
+
+ { /* Get the keys */
+    memcpy(&hs_ntor_intro_cell_keys_out->enc_key, keystream,
+           CIPHER256_KEY_LEN);
+    memcpy(&hs_ntor_intro_cell_keys_out->mac_key,
+           keystream + CIPHER256_KEY_LEN, DIGEST256_LEN);
+ }
+
+ memwipe(keystream, 0, sizeof(keystream));
+ memwipe(kdf_input, 0, sizeof(kdf_input));
+}
+
+/** Helper function: Calculate the 'intro_secret_hs_input' element used by the
+ * HS ntor handshake and place it in <b>secret_input_out</b>. This function is
+ * used by both client and service code.
+ *
+ * For the client-side it looks like this:
+ *
+ * intro_secret_hs_input = EXP(B,x) | AUTH_KEY | X | B | PROTOID
+ *
+ * whereas for the service-side it looks like this:
+ *
+ * intro_secret_hs_input = EXP(X,b) | AUTH_KEY | X | B | PROTOID
+ *
+ * In this function, <b>dh_result</b> carries the EXP() result (and has size
+ * CURVE25519_OUTPUT_LEN), <b>intro_auth_pubkey</b> is AUTH_KEY,
+ * <b>client_ephemeral_enc_pubkey</b> is X, and <b>intro_enc_pubkey</b> is B.
+ */
+static void
+get_intro_secret_hs_input(const uint8_t *dh_result,
+ const ed25519_public_key_t *intro_auth_pubkey,
+ const curve25519_public_key_t *client_ephemeral_enc_pubkey,
+ const curve25519_public_key_t *intro_enc_pubkey,
+ uint8_t *secret_input_out)
+{
+ uint8_t *ptr;
+
+ /* Append EXP() */
+ ptr = secret_input_out;
+ APPEND(ptr, dh_result, CURVE25519_OUTPUT_LEN);
+ /* Append AUTH_KEY */
+ APPEND(ptr, intro_auth_pubkey->pubkey, ED25519_PUBKEY_LEN);
+ /* Append X */
+ APPEND(ptr, client_ephemeral_enc_pubkey->public_key, CURVE25519_PUBKEY_LEN);
+ /* Append B */
+ APPEND(ptr, intro_enc_pubkey->public_key, CURVE25519_PUBKEY_LEN);
+ /* Append PROTOID */
+ APPEND(ptr, PROTOID, strlen(PROTOID));
+ tor_assert(ptr == secret_input_out + INTRO_SECRET_HS_INPUT_LEN);
+}
+
+/** Calculate the 'rend_secret_hs_input' element used by the HS ntor handshake
+ * and place it in <b>rend_secret_hs_input_out</b>. This function is used by
+ * both client and service code.
+ *
+ * The computation on the client side is:
+ * rend_secret_hs_input = EXP(X,y) | EXP(X,b) | AUTH_KEY | B | X | Y | PROTOID
+ * whereas on the service side it is:
+ * rend_secret_hs_input = EXP(Y,x) | EXP(B,x) | AUTH_KEY | B | X | Y | PROTOID
+ *
+ * where:
+ * <b>dh_result1</b> and <b>dh_result2</b> carry the two EXP() results (of size
+ * CURVE25519_OUTPUT_LEN),
+ * <b>intro_auth_pubkey</b> is AUTH_KEY,
+ * <b>intro_enc_pubkey</b> is B,
+ * <b>client_ephemeral_enc_pubkey</b> is X, and
+ * <b>service_ephemeral_rend_pubkey</b> is Y.
+ */
+static void
+get_rend_secret_hs_input(const uint8_t *dh_result1, const uint8_t *dh_result2,
+ const ed25519_public_key_t *intro_auth_pubkey,
+ const curve25519_public_key_t *intro_enc_pubkey,
+ const curve25519_public_key_t *client_ephemeral_enc_pubkey,
+ const curve25519_public_key_t *service_ephemeral_rend_pubkey,
+ uint8_t *rend_secret_hs_input_out)
+{
+ uint8_t *ptr;
+
+ ptr = rend_secret_hs_input_out;
+ /* Append the first EXP() */
+ APPEND(ptr, dh_result1, CURVE25519_OUTPUT_LEN);
+ /* Append the other EXP() */
+ APPEND(ptr, dh_result2, CURVE25519_OUTPUT_LEN);
+ /* Append AUTH_KEY */
+ APPEND(ptr, intro_auth_pubkey->pubkey, ED25519_PUBKEY_LEN);
+ /* Append B */
+ APPEND(ptr, intro_enc_pubkey->public_key, CURVE25519_PUBKEY_LEN);
+ /* Append X */
+ APPEND(ptr,
+ client_ephemeral_enc_pubkey->public_key, CURVE25519_PUBKEY_LEN);
+ /* Append Y */
+ APPEND(ptr,
+ service_ephemeral_rend_pubkey->public_key, CURVE25519_PUBKEY_LEN);
+ /* Append PROTOID */
+ APPEND(ptr, PROTOID, strlen(PROTOID));
+ tor_assert(ptr == rend_secret_hs_input_out + REND_SECRET_HS_INPUT_LEN);
+}
+
+/************************* Public functions: *******************************/
+
+/** Public function: Do the appropriate ntor calculations and derive the keys
+ * needed to encrypt and authenticate INTRODUCE1 cells. Return 0 and place the
+ * final key material in <b>hs_ntor_intro_cell_keys_out</b> if everything went
+ * well; otherwise return -1.
+ *
+ * The relevant calculations are as follows:
+ *
+ * intro_secret_hs_input = EXP(B,x) | AUTH_KEY | X | B | PROTOID
+ * info = m_hsexpand | subcredential
+ * hs_keys = KDF(intro_secret_hs_input | t_hsenc | info, S_KEY_LEN+MAC_LEN)
+ * ENC_KEY = hs_keys[0:S_KEY_LEN]
+ * MAC_KEY = hs_keys[S_KEY_LEN:S_KEY_LEN+MAC_KEY_LEN]
+ *
+ * where:
+ * <b>intro_auth_pubkey</b> is AUTH_KEY (found in HS descriptor),
+ * <b>intro_enc_pubkey</b> is B (also found in HS descriptor),
+ * <b>client_ephemeral_enc_keypair</b> is a freshly generated keypair (x,X),
+ * <b>subcredential</b> is the hidden service subcredential (of size
+ * DIGEST256_LEN). */
+int
+hs_ntor_client_get_introduce1_keys(
+ const ed25519_public_key_t *intro_auth_pubkey,
+ const curve25519_public_key_t *intro_enc_pubkey,
+ const curve25519_keypair_t *client_ephemeral_enc_keypair,
+ const uint8_t *subcredential,
+ hs_ntor_intro_cell_keys_t *hs_ntor_intro_cell_keys_out)
+{
+ int bad = 0;
+ uint8_t secret_input[INTRO_SECRET_HS_INPUT_LEN];
+ uint8_t dh_result[CURVE25519_OUTPUT_LEN];
+
+ tor_assert(intro_auth_pubkey);
+ tor_assert(intro_enc_pubkey);
+ tor_assert(client_ephemeral_enc_keypair);
+ tor_assert(subcredential);
+ tor_assert(hs_ntor_intro_cell_keys_out);
+
+ /* Calculate EXP(B,x) */
+ curve25519_handshake(dh_result,
+ &client_ephemeral_enc_keypair->seckey,
+ intro_enc_pubkey);
+ bad |= safe_mem_is_zero(dh_result, CURVE25519_OUTPUT_LEN);
+
+ /* Get intro_secret_hs_input */
+ get_intro_secret_hs_input(dh_result, intro_auth_pubkey,
+ &client_ephemeral_enc_keypair->pubkey,
+ intro_enc_pubkey, secret_input);
+ bad |= safe_mem_is_zero(secret_input, CURVE25519_OUTPUT_LEN);
+
+ /* Get ENC_KEY and MAC_KEY! */
+ get_introduce1_key_material(secret_input, subcredential,
+ hs_ntor_intro_cell_keys_out);
+
+ /* Cleanup */
+ memwipe(secret_input, 0, sizeof(secret_input));
+ if (bad) {
+ memwipe(hs_ntor_intro_cell_keys_out, 0, sizeof(hs_ntor_intro_cell_keys_t));
+ }
+
+ return bad ? -1 : 0;
+}
+
+/** Public function: Do the appropriate ntor calculations and derive the keys
+ * needed to verify RENDEZVOUS1 cells and encrypt further rendezvous
+ * traffic. Return 0 and place the final key material in
+ * <b>hs_ntor_rend_cell_keys_out</b> if everything went well, else return -1.
+ *
+ * The relevant calculations are as follows:
+ *
+ * rend_secret_hs_input = EXP(Y,x) | EXP(B,x) | AUTH_KEY | B | X | Y | PROTOID
+ * NTOR_KEY_SEED = MAC(rend_secret_hs_input, t_hsenc)
+ * verify = MAC(rend_secret_hs_input, t_hsverify)
+ * auth_input = verify | AUTH_KEY | B | Y | X | PROTOID | "Server"
+ * auth_input_mac = MAC(auth_input, t_hsmac)
+ *
+ * where:
+ * <b>intro_auth_pubkey</b> is AUTH_KEY (found in HS descriptor),
+ * <b>client_ephemeral_enc_keypair</b> is a freshly generated keypair (x,X),
+ * <b>intro_enc_pubkey</b> is B (also found in HS descriptor),
+ * <b>service_ephemeral_rend_pubkey</b> is Y (SERVER_PK in RENDEZVOUS1 cell) */
+int
+hs_ntor_client_get_rendezvous1_keys(
+ const ed25519_public_key_t *intro_auth_pubkey,
+ const curve25519_keypair_t *client_ephemeral_enc_keypair,
+ const curve25519_public_key_t *intro_enc_pubkey,
+ const curve25519_public_key_t *service_ephemeral_rend_pubkey,
+ hs_ntor_rend_cell_keys_t *hs_ntor_rend_cell_keys_out)
+{
+ int bad = 0;
+ uint8_t rend_secret_hs_input[REND_SECRET_HS_INPUT_LEN];
+ uint8_t dh_result1[CURVE25519_OUTPUT_LEN];
+ uint8_t dh_result2[CURVE25519_OUTPUT_LEN];
+
+ tor_assert(intro_auth_pubkey);
+ tor_assert(client_ephemeral_enc_keypair);
+ tor_assert(intro_enc_pubkey);
+ tor_assert(service_ephemeral_rend_pubkey);
+ tor_assert(hs_ntor_rend_cell_keys_out);
+
+ /* Compute EXP(Y, x) */
+ curve25519_handshake(dh_result1,
+ &client_ephemeral_enc_keypair->seckey,
+ service_ephemeral_rend_pubkey);
+ bad |= safe_mem_is_zero(dh_result1, CURVE25519_OUTPUT_LEN);
+
+ /* Compute EXP(B, x) */
+ curve25519_handshake(dh_result2,
+ &client_ephemeral_enc_keypair->seckey,
+ intro_enc_pubkey);
+ bad |= safe_mem_is_zero(dh_result2, CURVE25519_OUTPUT_LEN);
+
+ /* Get rend_secret_hs_input */
+ get_rend_secret_hs_input(dh_result1, dh_result2,
+ intro_auth_pubkey, intro_enc_pubkey,
+ &client_ephemeral_enc_keypair->pubkey,
+ service_ephemeral_rend_pubkey,
+ rend_secret_hs_input);
+
+ /* Get NTOR_KEY_SEED and the auth_input MAC */
+ bad |= get_rendezvous1_key_material(rend_secret_hs_input,
+ intro_auth_pubkey,
+ intro_enc_pubkey,
+ service_ephemeral_rend_pubkey,
+ &client_ephemeral_enc_keypair->pubkey,
+ hs_ntor_rend_cell_keys_out);
+
+ memwipe(rend_secret_hs_input, 0, sizeof(rend_secret_hs_input));
+ if (bad) {
+ memwipe(hs_ntor_rend_cell_keys_out, 0, sizeof(hs_ntor_rend_cell_keys_t));
+ }
+
+ return bad ? -1 : 0;
+}
+
+/** Public function: Do the appropriate ntor calculations and derive the keys
+ * needed to decrypt and verify INTRODUCE1 cells. Return 0 and place the final
+ * key material in <b>hs_ntor_intro_cell_keys_out</b> if everything went well;
+ * otherwise return -1.
+ *
+ * The relevant calculations are as follows:
+ *
+ * intro_secret_hs_input = EXP(X,b) | AUTH_KEY | X | B | PROTOID
+ * info = m_hsexpand | subcredential
+ * hs_keys = KDF(intro_secret_hs_input | t_hsenc | info, S_KEY_LEN+MAC_LEN)
+ * HS_DEC_KEY = hs_keys[0:S_KEY_LEN]
+ * HS_MAC_KEY = hs_keys[S_KEY_LEN:S_KEY_LEN+MAC_KEY_LEN]
+ *
+ * where:
+ * <b>intro_auth_pubkey</b> is AUTH_KEY (introduction point auth key),
+ * <b>intro_enc_keypair</b> is (b,B) (introduction point encryption keypair),
+ * <b>client_ephemeral_enc_pubkey</b> is X (CLIENT_PK in INTRODUCE2 cell),
+ * <b>subcredential</b> is the HS subcredential (of size DIGEST256_LEN) */
+int
+hs_ntor_service_get_introduce1_keys(
+ const ed25519_public_key_t *intro_auth_pubkey,
+ const curve25519_keypair_t *intro_enc_keypair,
+ const curve25519_public_key_t *client_ephemeral_enc_pubkey,
+ const uint8_t *subcredential,
+ hs_ntor_intro_cell_keys_t *hs_ntor_intro_cell_keys_out)
+{
+ int bad = 0;
+ uint8_t secret_input[INTRO_SECRET_HS_INPUT_LEN];
+ uint8_t dh_result[CURVE25519_OUTPUT_LEN];
+
+ tor_assert(intro_auth_pubkey);
+ tor_assert(intro_enc_keypair);
+ tor_assert(client_ephemeral_enc_pubkey);
+ tor_assert(subcredential);
+ tor_assert(hs_ntor_intro_cell_keys_out);
+
+ /* Compute EXP(X, b) */
+ curve25519_handshake(dh_result,
+ &intro_enc_keypair->seckey,
+ client_ephemeral_enc_pubkey);
+ bad |= safe_mem_is_zero(dh_result, CURVE25519_OUTPUT_LEN);
+
+ /* Get intro_secret_hs_input */
+ get_intro_secret_hs_input(dh_result, intro_auth_pubkey,
+ client_ephemeral_enc_pubkey,
+ &intro_enc_keypair->pubkey,
+ secret_input);
+ bad |= safe_mem_is_zero(secret_input, CURVE25519_OUTPUT_LEN);
+
+ /* Get ENC_KEY and MAC_KEY! */
+ get_introduce1_key_material(secret_input, subcredential,
+ hs_ntor_intro_cell_keys_out);
+
+ memwipe(secret_input, 0, sizeof(secret_input));
+ if (bad) {
+ memwipe(hs_ntor_intro_cell_keys_out, 0, sizeof(hs_ntor_intro_cell_keys_t));
+ }
+
+ return bad ? -1 : 0;
+}
+
+/** Public function: Do the appropriate ntor calculations and derive the keys
+ * needed to create and authenticate RENDEZVOUS1 cells. Return 0 and place the
+ * final key material in <b>hs_ntor_rend_cell_keys_out</b> if all went fine,
+ * or return -1 if an error occurred.
+ *
+ * The relevant calculations are as follows:
+ *
+ * rend_secret_hs_input = EXP(X,y) | EXP(X,b) | AUTH_KEY | B | X | Y | PROTOID
+ * NTOR_KEY_SEED = MAC(rend_secret_hs_input, t_hsenc)
+ * verify = MAC(rend_secret_hs_input, t_hsverify)
+ * auth_input = verify | AUTH_KEY | B | Y | X | PROTOID | "Server"
+ * auth_input_mac = MAC(auth_input, t_hsmac)
+ *
+ * where:
+ * <b>intro_auth_pubkey</b> is AUTH_KEY (intro point auth key),
+ * <b>intro_enc_keypair</b> is (b,B) (intro point enc keypair)
+ * <b>service_ephemeral_rend_keypair</b> is a fresh (y,Y) keypair
+ * <b>client_ephemeral_enc_pubkey</b> is X (CLIENT_PK in INTRODUCE2 cell) */
+int
+hs_ntor_service_get_rendezvous1_keys(
+ const ed25519_public_key_t *intro_auth_pubkey,
+ const curve25519_keypair_t *intro_enc_keypair,
+ const curve25519_keypair_t *service_ephemeral_rend_keypair,
+ const curve25519_public_key_t *client_ephemeral_enc_pubkey,
+ hs_ntor_rend_cell_keys_t *hs_ntor_rend_cell_keys_out)
+{
+ int bad = 0;
+ uint8_t rend_secret_hs_input[REND_SECRET_HS_INPUT_LEN];
+ uint8_t dh_result1[CURVE25519_OUTPUT_LEN];
+ uint8_t dh_result2[CURVE25519_OUTPUT_LEN];
+
+ tor_assert(intro_auth_pubkey);
+ tor_assert(intro_enc_keypair);
+ tor_assert(service_ephemeral_rend_keypair);
+ tor_assert(client_ephemeral_enc_pubkey);
+ tor_assert(hs_ntor_rend_cell_keys_out);
+
+ /* Compute EXP(X, y) */
+ curve25519_handshake(dh_result1,
+ &service_ephemeral_rend_keypair->seckey,
+ client_ephemeral_enc_pubkey);
+ bad |= safe_mem_is_zero(dh_result1, CURVE25519_OUTPUT_LEN);
+
+ /* Compute EXP(X, b) */
+ curve25519_handshake(dh_result2,
+ &intro_enc_keypair->seckey,
+ client_ephemeral_enc_pubkey);
+ bad |= safe_mem_is_zero(dh_result2, CURVE25519_OUTPUT_LEN);
+
+ /* Get rend_secret_hs_input */
+ get_rend_secret_hs_input(dh_result1, dh_result2,
+ intro_auth_pubkey,
+ &intro_enc_keypair->pubkey,
+ client_ephemeral_enc_pubkey,
+ &service_ephemeral_rend_keypair->pubkey,
+ rend_secret_hs_input);
+
+ /* Get NTOR_KEY_SEED and AUTH_INPUT_MAC! */
+ bad |= get_rendezvous1_key_material(rend_secret_hs_input,
+ intro_auth_pubkey,
+ &intro_enc_keypair->pubkey,
+ &service_ephemeral_rend_keypair->pubkey,
+ client_ephemeral_enc_pubkey,
+ hs_ntor_rend_cell_keys_out);
+
+ memwipe(rend_secret_hs_input, 0, sizeof(rend_secret_hs_input));
+ if (bad) {
+ memwipe(hs_ntor_rend_cell_keys_out, 0, sizeof(hs_ntor_rend_cell_keys_t));
+ }
+
+ return bad ? -1 : 0;
+}
+
+/** Given a received RENDEZVOUS2 MAC in <b>rcvd_mac</b> (of length
+ * DIGEST256_LEN),
+ * and the RENDEZVOUS1 key material in <b>hs_ntor_rend_cell_keys</b>, return 1
+ * if the MAC is good, otherwise return 0. */
+int
+hs_ntor_client_rendezvous2_mac_is_good(
+ const hs_ntor_rend_cell_keys_t *hs_ntor_rend_cell_keys,
+ const uint8_t *rcvd_mac)
+{
+ tor_assert(rcvd_mac);
+ tor_assert(hs_ntor_rend_cell_keys);
+
+ return tor_memeq(hs_ntor_rend_cell_keys->rend_cell_auth_mac,
+ rcvd_mac, DIGEST256_LEN);
+}
+
+/* Input length to KDF for key expansion */
+#define NTOR_KEY_EXPANSION_KDF_INPUT_LEN (DIGEST256_LEN + M_HSEXPAND_LEN)
+
+/** Given the rendezvous key seed in <b>ntor_key_seed</b> (of size
+ * DIGEST256_LEN), do the circuit key expansion as specified by section
+ * '4.2.1. Key expansion' of rend-spec-ng.txt and place the keys in
+ * <b>keys_out</b> (which must be
+ * of size HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN).
+ *
+ * Return 0 if things went well, else return -1. */
+int
+hs_ntor_circuit_key_expansion(const uint8_t *ntor_key_seed, size_t seed_len,
+ uint8_t *keys_out, size_t keys_out_len)
+{
+ uint8_t *ptr;
+ uint8_t kdf_input[NTOR_KEY_EXPANSION_KDF_INPUT_LEN];
+ crypto_xof_t *xof;
+
+ /* Sanity checks on lengths to make sure we are good */
+ if (BUG(seed_len != DIGEST256_LEN)) {
+ return -1;
+ }
+ if (BUG(keys_out_len != HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN)) {
+ return -1;
+ }
+
+ /* Let's build the input to the KDF */
+ ptr = kdf_input;
+ APPEND(ptr, ntor_key_seed, DIGEST256_LEN);
+ APPEND(ptr, M_HSEXPAND, strlen(M_HSEXPAND));
+ tor_assert(ptr == kdf_input + sizeof(kdf_input));
+
+ /* Generate the keys */
+ xof = crypto_xof_new();
+ crypto_xof_add_bytes(xof, kdf_input, sizeof(kdf_input));
+ crypto_xof_squeeze_bytes(xof, keys_out, HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN);
+ crypto_xof_free(xof);
+
+ return 0;
+}
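/* [Editorial aside -- not part of this commit. A sketch of how a caller
 * might split the expanded keystream, assuming the Df | Db | Kf | Kb
 * ordering from section 4.2.1 of rend-spec-ng.txt (forward/backward
 * digest seeds, then forward/backward cipher keys); the names here are
 * illustrative.] */
static int
example_split_rend_keys(const uint8_t *ntor_key_seed)
{
  uint8_t keys[HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN];
  if (hs_ntor_circuit_key_expansion(ntor_key_seed, DIGEST256_LEN,
                                    keys, sizeof(keys)) < 0)
    return -1;
  const uint8_t *df = keys;                     /* forward digest seed */
  const uint8_t *db = keys + DIGEST256_LEN;     /* backward digest seed */
  const uint8_t *kf = keys + DIGEST256_LEN * 2; /* forward cipher key */
  const uint8_t *kb = kf + CIPHER256_KEY_LEN;   /* backward cipher key */
  /* ... install df/db/kf/kb into the circuit's relay crypto state ... */
  (void)df; (void)db; (void)kf; (void)kb;
  memwipe(keys, 0, sizeof(keys));
  return 0;
}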
diff --git a/src/core/crypto/hs_ntor.h b/src/core/crypto/hs_ntor.h
new file mode 100644
index 0000000000..67a9573436
--- /dev/null
+++ b/src/core/crypto/hs_ntor.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2017-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef TOR_HS_NTOR_H
+#define TOR_HS_NTOR_H
+
+#include "or/or.h"
+struct ed25519_public_key_t;
+struct curve25519_public_key_t;
+struct curve25519_keypair_t;
+
+/* Output length of KDF for key expansion */
+#define HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN \
+ (DIGEST256_LEN*2 + CIPHER256_KEY_LEN*2)
+
+/* Key material needed to encode/decode INTRODUCE1 cells */
+typedef struct {
+ /* Key used for encryption of encrypted INTRODUCE1 blob */
+ uint8_t enc_key[CIPHER256_KEY_LEN];
+ /* MAC key used to protect encrypted INTRODUCE1 blob */
+ uint8_t mac_key[DIGEST256_LEN];
+} hs_ntor_intro_cell_keys_t;
+
+/* Key material needed to encode/decode RENDEZVOUS1 cells */
+typedef struct {
+ /* This is the MAC of the HANDSHAKE_INFO field */
+ uint8_t rend_cell_auth_mac[DIGEST256_LEN];
+ /* This is the key seed used to derive further rendezvous crypto keys as
+ * detailed in section 4.2.1 of rend-spec-ng.txt. */
+ uint8_t ntor_key_seed[DIGEST256_LEN];
+} hs_ntor_rend_cell_keys_t;
+
+int hs_ntor_client_get_introduce1_keys(
+ const struct ed25519_public_key_t *intro_auth_pubkey,
+ const struct curve25519_public_key_t *intro_enc_pubkey,
+ const struct curve25519_keypair_t *client_ephemeral_enc_keypair,
+ const uint8_t *subcredential,
+ hs_ntor_intro_cell_keys_t *hs_ntor_intro_cell_keys_out);
+
+int hs_ntor_client_get_rendezvous1_keys(
+ const struct ed25519_public_key_t *intro_auth_pubkey,
+ const struct curve25519_keypair_t *client_ephemeral_enc_keypair,
+ const struct curve25519_public_key_t *intro_enc_pubkey,
+ const struct curve25519_public_key_t *service_ephemeral_rend_pubkey,
+ hs_ntor_rend_cell_keys_t *hs_ntor_rend_cell_keys_out);
+
+int hs_ntor_service_get_introduce1_keys(
+ const struct ed25519_public_key_t *intro_auth_pubkey,
+ const struct curve25519_keypair_t *intro_enc_keypair,
+ const struct curve25519_public_key_t *client_ephemeral_enc_pubkey,
+ const uint8_t *subcredential,
+ hs_ntor_intro_cell_keys_t *hs_ntor_intro_cell_keys_out);
+
+int hs_ntor_service_get_rendezvous1_keys(
+ const struct ed25519_public_key_t *intro_auth_pubkey,
+ const struct curve25519_keypair_t *intro_enc_keypair,
+ const struct curve25519_keypair_t *service_ephemeral_rend_keypair,
+ const struct curve25519_public_key_t *client_ephemeral_enc_pubkey,
+ hs_ntor_rend_cell_keys_t *hs_ntor_rend_cell_keys_out);
+
+int hs_ntor_circuit_key_expansion(const uint8_t *ntor_key_seed,
+ size_t seed_len,
+ uint8_t *keys_out, size_t keys_out_len);
+
+int hs_ntor_client_rendezvous2_mac_is_good(
+ const hs_ntor_rend_cell_keys_t *hs_ntor_rend_cell_keys,
+ const uint8_t *rcvd_mac);
+
+#endif /* !defined(TOR_HS_NTOR_H) */
diff --git a/src/core/crypto/onion.c b/src/core/crypto/onion.c
new file mode 100644
index 0000000000..80d8e1a8b1
--- /dev/null
+++ b/src/core/crypto/onion.c
@@ -0,0 +1,1346 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file onion.c
+ * \brief Functions to queue create cells, wrap the various onionskin types,
+ * and parse and create the CREATE cell and its allies.
+ *
+ * This module has a few functions, all related to the CREATE/CREATED
+ * handshake that we use on links in order to create a circuit, and the
+ * related EXTEND/EXTENDED handshake that we use over circuits in order to
+ * extend them an additional hop.
+ *
+ * In this module, we provide a set of abstractions to create a uniform
+ * interface over the three circuit extension handshakes that Tor has used
+ * over the years (TAP, CREATE_FAST, and ntor). These handshakes are
+ * implemented in onion_tap.c, onion_fast.c, and onion_ntor.c respectively.
+ *
+ * All[*] of these handshakes follow a similar pattern: a client, knowing
+ * some key from the relay it wants to extend through, generates the
+ * first part of a handshake. A relay receives that handshake, and sends
+ * a reply. Once the client handles the reply, it knows that it is
+ * talking to the right relay, and it shares some freshly negotiated key
+ * material with that relay.
+ *
+ * We sometimes call the client's part of the handshake an "onionskin".
+ * We do this because historically, Onion Routing used a multi-layer
+ * structure called an "onion" to construct circuits. Each layer of the
+ * onion contained key material chosen by the client, the identity of
+ * the next relay in the circuit, and a smaller onion, encrypted with
+ * the key of the next relay. When we changed Tor to use a telescoping
+ * circuit extension design, it corresponded to sending each layer of the
+ * onion separately -- as a series of onionskins.
+ *
+ * Clients invoke these functions when creating or extending a circuit,
+ * from circuitbuild.c.
+ *
+ * Relays invoke these functions when they receive a CREATE or EXTEND
+ * cell in command.c or relay.c, in order to queue the pending request.
+ * They also invoke them from cpuworker.c, which handles dispatching
+ * onionskin requests to different worker threads.
+ *
+ * <br>
+ *
+ * This module also handles:
+ * <ul>
+ * <li> Queueing incoming onionskins on the relay side before passing
+ * them to worker threads.
+ * <li>Expiring onionskins on the relay side if they have waited for
+ * too long.
+ * <li>Packaging private keys on the server side in order to pass
+ * them to worker threads.
+ * <li>Encoding and decoding CREATE, CREATED, CREATE2, and CREATED2 cells.
+ * <li>Encoding and decoding EXTEND, EXTENDED, EXTEND2, and EXTENDED2
+ * relay cells.
+ * </ul>
+ *
+ * [*] The CREATE_FAST handshake is weaker than described here; see
+ * onion_fast.c for more information.
+ **/
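/* [Editorial aside -- not part of this commit. The client-side call
 * pattern, roughly as circuitbuild.c uses it: start an ntor handshake
 * for a hop and get back the onionskin to send in a CREATE2 cell. The
 * cell-packaging step is abbreviated.] */
static int
example_client_begin_handshake(const extend_info_t *node,
                               onion_handshake_state_t *state_out,
                               uint8_t *onionskin_out)
{
  /* Returns the onionskin length, or -1 (e.g. if the node is missing
   * an ntor onion key). */
  int len = onion_skin_create(ONION_HANDSHAKE_TYPE_NTOR, node,
                              state_out, onionskin_out);
  if (len < 0)
    return -1;
  /* ... send the onionskin in a CREATE2/EXTEND2 cell; later,
   * onion_skin_client_handshake() verifies the CREATED2 reply and
   * derives the shared key material. */
  return len;
}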
+
+#include "or/or.h"
+#include "or/circuitbuild.h"
+#include "or/circuitlist.h"
+#include "or/config.h"
+#include "or/cpuworker.h"
+#include "lib/crypt_ops/crypto_util.h"
+#include "lib/crypt_ops/crypto_dh.h"
+#include "or/networkstatus.h"
+#include "or/onion.h"
+#include "or/onion_fast.h"
+#include "or/onion_ntor.h"
+#include "or/onion_tap.h"
+#include "or/relay.h"
+#include "or/rephist.h"
+#include "or/router.h"
+
+#include "or/cell_st.h"
+#include "or/extend_info_st.h"
+#include "or/or_circuit_st.h"
+
+// trunnel
+#include "trunnel/ed25519_cert.h"
+
+/** Type for a linked list of circuits that are waiting for a free CPU worker
+ * to process a waiting onion handshake. */
+typedef struct onion_queue_t {
+ TOR_TAILQ_ENTRY(onion_queue_t) next;
+ or_circuit_t *circ;
+ uint16_t handshake_type;
+ create_cell_t *onionskin;
+ time_t when_added;
+} onion_queue_t;
+
+/** 5 seconds on the onion queue until we just send back a destroy */
+#define ONIONQUEUE_WAIT_CUTOFF 5
+
+/** Array of queues of circuits waiting for CPU workers, indexed by
+ * handshake type. A queue may be empty.*/
+static TOR_TAILQ_HEAD(onion_queue_head_t, onion_queue_t)
+ ol_list[MAX_ONION_HANDSHAKE_TYPE+1] =
+{ TOR_TAILQ_HEAD_INITIALIZER(ol_list[0]), /* tap */
+ TOR_TAILQ_HEAD_INITIALIZER(ol_list[1]), /* fast */
+ TOR_TAILQ_HEAD_INITIALIZER(ol_list[2]), /* ntor */
+};
+
+/** Number of entries of each type currently in each element of ol_list[]. */
+static int ol_entries[MAX_ONION_HANDSHAKE_TYPE+1];
+
+static int num_ntors_per_tap(void);
+static void onion_queue_entry_remove(onion_queue_t *victim);
+
+/* XXXX Check lengths vs MAX_ONIONSKIN_{CHALLENGE,REPLY}_LEN.
+ *
+ * (By which I think I meant, "make sure that no
+ * X_ONIONSKIN_CHALLENGE/REPLY_LEN is greater than
+ * MAX_ONIONSKIN_CHALLENGE/REPLY_LEN." Also, make sure that we can pass
+ * over-large values via EXTEND2/EXTENDED2, for future-compatibility.) */
+
+/** Return true iff we have room to queue another onionskin of type
+ * <b>type</b>. */
+static int
+have_room_for_onionskin(uint16_t type)
+{
+ const or_options_t *options = get_options();
+ int num_cpus;
+ uint64_t tap_usec, ntor_usec;
+ uint64_t ntor_during_tap_usec, tap_during_ntor_usec;
+
+ /* If we've got fewer than 50 entries, we always have room for one more. */
+ if (ol_entries[type] < 50)
+ return 1;
+ num_cpus = get_num_cpus(options);
+ /* Compute how many microseconds we'd expect to need to clear all
+ * onionskins in various combinations of the queues. */
+
+ /* How long would it take to process all the TAP cells in the queue? */
+ tap_usec = estimated_usec_for_onionskins(
+ ol_entries[ONION_HANDSHAKE_TYPE_TAP],
+ ONION_HANDSHAKE_TYPE_TAP) / num_cpus;
+
+ /* How long would it take to process all the NTor cells in the queue? */
+ ntor_usec = estimated_usec_for_onionskins(
+ ol_entries[ONION_HANDSHAKE_TYPE_NTOR],
+ ONION_HANDSHAKE_TYPE_NTOR) / num_cpus;
+
+ /* How long would it take to process the tap cells that we expect to
+ * process while draining the ntor queue? */
+ tap_during_ntor_usec = estimated_usec_for_onionskins(
+ MIN(ol_entries[ONION_HANDSHAKE_TYPE_TAP],
+ ol_entries[ONION_HANDSHAKE_TYPE_NTOR] / num_ntors_per_tap()),
+ ONION_HANDSHAKE_TYPE_TAP) / num_cpus;
+
+ /* How long would it take to process the ntor cells that we expect to
+ * process while draining the tap queue? */
+ ntor_during_tap_usec = estimated_usec_for_onionskins(
+ MIN(ol_entries[ONION_HANDSHAKE_TYPE_NTOR],
+ ol_entries[ONION_HANDSHAKE_TYPE_TAP] * num_ntors_per_tap()),
+ ONION_HANDSHAKE_TYPE_NTOR) / num_cpus;
+
+ /* See whether that exceeds MaxOnionQueueDelay. If so, we can't queue
+ * this. */
+ if (type == ONION_HANDSHAKE_TYPE_NTOR &&
+ (ntor_usec + tap_during_ntor_usec) / 1000 >
+ (uint64_t)options->MaxOnionQueueDelay)
+ return 0;
+
+ if (type == ONION_HANDSHAKE_TYPE_TAP &&
+ (tap_usec + ntor_during_tap_usec) / 1000 >
+ (uint64_t)options->MaxOnionQueueDelay)
+ return 0;
+
+ /* If we support the ntor handshake, then don't let TAP handshakes use
+ * more than 2/3 of the space on the queue. */
+ if (type == ONION_HANDSHAKE_TYPE_TAP &&
+ tap_usec / 1000 > (uint64_t)options->MaxOnionQueueDelay * 2 / 3)
+ return 0;
+
+ return 1;
+}
+
+/** Add <b>circ</b> to the end of ol_list and return 0, except
+ * if ol_list is too long, in which case do nothing and return -1.
+ */
+int
+onion_pending_add(or_circuit_t *circ, create_cell_t *onionskin)
+{
+ onion_queue_t *tmp;
+ time_t now = time(NULL);
+
+ if (onionskin->handshake_type > MAX_ONION_HANDSHAKE_TYPE) {
+ /* LCOV_EXCL_START
+ * We should have rejected this far before this point */
+ log_warn(LD_BUG, "Handshake %d out of range! Dropping.",
+ onionskin->handshake_type);
+ return -1;
+ /* LCOV_EXCL_STOP */
+ }
+
+ tmp = tor_malloc_zero(sizeof(onion_queue_t));
+ tmp->circ = circ;
+ tmp->handshake_type = onionskin->handshake_type;
+ tmp->onionskin = onionskin;
+ tmp->when_added = now;
+
+ if (!have_room_for_onionskin(onionskin->handshake_type)) {
+#define WARN_TOO_MANY_CIRC_CREATIONS_INTERVAL (60)
+ static ratelim_t last_warned =
+ RATELIM_INIT(WARN_TOO_MANY_CIRC_CREATIONS_INTERVAL);
+ char *m;
+ if (onionskin->handshake_type == ONION_HANDSHAKE_TYPE_NTOR &&
+ (m = rate_limit_log(&last_warned, approx_time()))) {
+ log_warn(LD_GENERAL,
+ "Your computer is too slow to handle this many circuit "
+ "creation requests! Please consider using the "
+ "MaxAdvertisedBandwidth config option or choosing a more "
+ "restricted exit policy.%s",m);
+ tor_free(m);
+ }
+ tor_free(tmp);
+ return -1;
+ }
+
+ ++ol_entries[onionskin->handshake_type];
+ log_info(LD_OR, "New create (%s). Queues now ntor=%d and tap=%d.",
+ onionskin->handshake_type == ONION_HANDSHAKE_TYPE_NTOR ? "ntor" : "tap",
+ ol_entries[ONION_HANDSHAKE_TYPE_NTOR],
+ ol_entries[ONION_HANDSHAKE_TYPE_TAP]);
+
+ circ->onionqueue_entry = tmp;
+ TOR_TAILQ_INSERT_TAIL(&ol_list[onionskin->handshake_type], tmp, next);
+
+ /* cull elderly requests. */
+ while (1) {
+ onion_queue_t *head = TOR_TAILQ_FIRST(&ol_list[onionskin->handshake_type]);
+ if (now - head->when_added < (time_t)ONIONQUEUE_WAIT_CUTOFF)
+ break;
+
+ circ = head->circ;
+ circ->onionqueue_entry = NULL;
+ onion_queue_entry_remove(head);
+ log_info(LD_CIRC,
+ "Circuit create request is too old; canceling due to overload.");
+ if (! TO_CIRCUIT(circ)->marked_for_close) {
+ circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_RESOURCELIMIT);
+ }
+ }
+ return 0;
+}
+
+/** Return a fairness parameter, to prefer processing NTOR style
+ * handshakes but still slowly drain the TAP queue so we don't starve
+ * it entirely. */
+static int
+num_ntors_per_tap(void)
+{
+#define DEFAULT_NUM_NTORS_PER_TAP 10
+#define MIN_NUM_NTORS_PER_TAP 1
+#define MAX_NUM_NTORS_PER_TAP 100000
+
+ return networkstatus_get_param(NULL, "NumNTorsPerTAP",
+ DEFAULT_NUM_NTORS_PER_TAP,
+ MIN_NUM_NTORS_PER_TAP,
+ MAX_NUM_NTORS_PER_TAP);
+}
+
+/** Choose which onion queue we'll pull from next. If one is empty, choose
+ * the other; if they both have elements, load balance across them but
+ * favoring NTOR. */
+static uint16_t
+decide_next_handshake_type(void)
+{
+ /* The number of times we've chosen ntor lately when both were available. */
+ static int recently_chosen_ntors = 0;
+
+ if (!ol_entries[ONION_HANDSHAKE_TYPE_NTOR])
+ return ONION_HANDSHAKE_TYPE_TAP; /* no ntors? try tap */
+
+ if (!ol_entries[ONION_HANDSHAKE_TYPE_TAP]) {
+
+ /* Nick wants us to prioritize new tap requests when there aren't
+ * any in the queue and we've processed k ntor cells since the last
+ * tap cell. This strategy is maybe a good idea, since it starves tap
+ * less in the case where tap is rare, or maybe a poor idea, since it
+ * makes the new tap cell unfairly jump in front of ntor cells that
+ * got here first. In any case this edge case will only become relevant
+ * once tap is rare. We should reevaluate whether we like this decision
+ * once tap gets more rare. */
+ if (ol_entries[ONION_HANDSHAKE_TYPE_NTOR] &&
+ recently_chosen_ntors <= num_ntors_per_tap())
+ ++recently_chosen_ntors;
+
+ return ONION_HANDSHAKE_TYPE_NTOR; /* no taps? try ntor */
+ }
+
+ /* They both have something queued. Pick ntor if we haven't done that
+ * too much lately. */
+ if (++recently_chosen_ntors <= num_ntors_per_tap()) {
+ return ONION_HANDSHAKE_TYPE_NTOR;
+ }
+
+ /* Else, it's time to let tap have its turn. */
+ recently_chosen_ntors = 0;
+ return ONION_HANDSHAKE_TYPE_TAP;
+}
+
+/** Remove the highest priority item from ol_list[] and return it, or
+ * return NULL if the lists are empty.
+ */
+or_circuit_t *
+onion_next_task(create_cell_t **onionskin_out)
+{
+ or_circuit_t *circ;
+ uint16_t handshake_to_choose = decide_next_handshake_type();
+ onion_queue_t *head = TOR_TAILQ_FIRST(&ol_list[handshake_to_choose]);
+
+ if (!head)
+ return NULL; /* no onions pending, we're done */
+
+ tor_assert(head->circ);
+ tor_assert(head->handshake_type <= MAX_ONION_HANDSHAKE_TYPE);
+// tor_assert(head->circ->p_chan); /* make sure it's still valid */
+/* XXX I only commented out the above line to make the unit tests
+ * more manageable. That's probably not good long-term. -RD */
+ circ = head->circ;
+ if (head->onionskin)
+ --ol_entries[head->handshake_type];
+ log_info(LD_OR, "Processing create (%s). Queues now ntor=%d and tap=%d.",
+ head->handshake_type == ONION_HANDSHAKE_TYPE_NTOR ? "ntor" : "tap",
+ ol_entries[ONION_HANDSHAKE_TYPE_NTOR],
+ ol_entries[ONION_HANDSHAKE_TYPE_TAP]);
+
+ *onionskin_out = head->onionskin;
+ head->onionskin = NULL; /* prevent free. */
+ circ->onionqueue_entry = NULL;
+ onion_queue_entry_remove(head);
+ return circ;
+}
+
+/** Return the number of <b>handshake_type</b>-style create requests pending.
+ */
+int
+onion_num_pending(uint16_t handshake_type)
+{
+ return ol_entries[handshake_type];
+}
+
+/** Go through ol_list, find the onion_queue_t element which points to
+ * circ, remove and free that element. Leave circ itself alone.
+ */
+void
+onion_pending_remove(or_circuit_t *circ)
+{
+ onion_queue_t *victim;
+
+ if (!circ)
+ return;
+
+ victim = circ->onionqueue_entry;
+ if (victim)
+ onion_queue_entry_remove(victim);
+
+ cpuworker_cancel_circ_handshake(circ);
+}
+
+/** Remove a queue entry <b>victim</b> from the queue, unlinking it from
+ * its circuit and freeing it and any structures it owns.*/
+static void
+onion_queue_entry_remove(onion_queue_t *victim)
+{
+ if (victim->handshake_type > MAX_ONION_HANDSHAKE_TYPE) {
+ /* LCOV_EXCL_START
+ * We should have rejected this far before this point */
+ log_warn(LD_BUG, "Handshake %d out of range! Dropping.",
+ victim->handshake_type);
+ /* XXX leaks */
+ return;
+ /* LCOV_EXCL_STOP */
+ }
+
+ TOR_TAILQ_REMOVE(&ol_list[victim->handshake_type], victim, next);
+
+ if (victim->circ)
+ victim->circ->onionqueue_entry = NULL;
+
+ if (victim->onionskin)
+ --ol_entries[victim->handshake_type];
+
+ tor_free(victim->onionskin);
+ tor_free(victim);
+}
+
+/** Remove all circuits from the pending list. Called from tor_free_all. */
+void
+clear_pending_onions(void)
+{
+ onion_queue_t *victim, *next;
+ int i;
+ for (i=0; i<=MAX_ONION_HANDSHAKE_TYPE; i++) {
+ for (victim = TOR_TAILQ_FIRST(&ol_list[i]); victim; victim = next) {
+ next = TOR_TAILQ_NEXT(victim,next);
+ onion_queue_entry_remove(victim);
+ }
+ tor_assert(TOR_TAILQ_EMPTY(&ol_list[i]));
+ }
+ memset(ol_entries, 0, sizeof(ol_entries));
+}
+
+/* ============================================================ */
+
+/** Return a new server_onion_keys_t object with all of the keys
+ * and other info we might need to do onion handshakes. (We make a copy of
+ * our keys for each cpuworker to avoid race conditions with the main thread,
+ * and to avoid locking) */
+server_onion_keys_t *
+server_onion_keys_new(void)
+{
+ server_onion_keys_t *keys = tor_malloc_zero(sizeof(server_onion_keys_t));
+ memcpy(keys->my_identity, router_get_my_id_digest(), DIGEST_LEN);
+ dup_onion_keys(&keys->onion_key, &keys->last_onion_key);
+ keys->curve25519_key_map = construct_ntor_key_map();
+ keys->junk_keypair = tor_malloc_zero(sizeof(curve25519_keypair_t));
+ curve25519_keypair_generate(keys->junk_keypair, 0);
+ return keys;
+}
+
+/** Release all storage held in <b>keys</b>. */
+void
+server_onion_keys_free_(server_onion_keys_t *keys)
+{
+ if (! keys)
+ return;
+
+ crypto_pk_free(keys->onion_key);
+ crypto_pk_free(keys->last_onion_key);
+ ntor_key_map_free(keys->curve25519_key_map);
+ tor_free(keys->junk_keypair);
+ memwipe(keys, 0, sizeof(server_onion_keys_t));
+ tor_free(keys);
+}
+
+/** Release whatever storage is held in <b>state</b>, depending on its
+ * type, and clear its pointer. */
+void
+onion_handshake_state_release(onion_handshake_state_t *state)
+{
+ switch (state->tag) {
+ case ONION_HANDSHAKE_TYPE_TAP:
+ crypto_dh_free(state->u.tap);
+ state->u.tap = NULL;
+ break;
+ case ONION_HANDSHAKE_TYPE_FAST:
+ fast_handshake_state_free(state->u.fast);
+ state->u.fast = NULL;
+ break;
+ case ONION_HANDSHAKE_TYPE_NTOR:
+ ntor_handshake_state_free(state->u.ntor);
+ state->u.ntor = NULL;
+ break;
+ default:
+ /* LCOV_EXCL_START
+ * This state should not even exist. */
+ log_warn(LD_BUG, "called with unknown handshake state type %d",
+ (int)state->tag);
+ tor_fragile_assert();
+ /* LCOV_EXCL_STOP */
+ }
+}
+
+/** Perform the first step of a circuit-creation handshake of type <b>type</b>
+ * (one of ONION_HANDSHAKE_TYPE_*): generate the initial "onion skin" in
+ * <b>onion_skin_out</b>, and store any state information in <b>state_out</b>.
+ * Return -1 on failure, and the length of the onionskin on acceptance.
+ */
+int
+onion_skin_create(int type,
+ const extend_info_t *node,
+ onion_handshake_state_t *state_out,
+ uint8_t *onion_skin_out)
+{
+ int r = -1;
+
+ switch (type) {
+ case ONION_HANDSHAKE_TYPE_TAP:
+ if (!node->onion_key)
+ return -1;
+
+ if (onion_skin_TAP_create(node->onion_key,
+ &state_out->u.tap,
+ (char*)onion_skin_out) < 0)
+ return -1;
+
+ r = TAP_ONIONSKIN_CHALLENGE_LEN;
+ break;
+ case ONION_HANDSHAKE_TYPE_FAST:
+ if (fast_onionskin_create(&state_out->u.fast, onion_skin_out) < 0)
+ return -1;
+
+ r = CREATE_FAST_LEN;
+ break;
+ case ONION_HANDSHAKE_TYPE_NTOR:
+ if (!extend_info_supports_ntor(node))
+ return -1;
+ if (onion_skin_ntor_create((const uint8_t*)node->identity_digest,
+ &node->curve25519_onion_key,
+ &state_out->u.ntor,
+ onion_skin_out) < 0)
+ return -1;
+
+ r = NTOR_ONIONSKIN_LEN;
+ break;
+ default:
+ /* LCOV_EXCL_START
+ * We should never try to create an impossible handshake type. */
+ log_warn(LD_BUG, "called with unknown handshake state type %d", type);
+ tor_fragile_assert();
+ r = -1;
+ /* LCOV_EXCL_STOP */
+ }
+
+ if (r > 0)
+ state_out->tag = (uint16_t) type;
+
+ return r;
+}
+
+/* This is the maximum value for keys_out_len passed to
+ * onion_skin_server_handshake, plus 16. We can make it bigger if needed:
+ * It just defines how many bytes to stack-allocate. */
+#define MAX_KEYS_TMP_LEN 128
+
+/** Perform the second (server-side) step of a circuit-creation handshake of
+ * type <b>type</b>, responding to the client request in <b>onion_skin</b>
+ * using the keys in <b>keys</b>. On success, write our response into
+ * <b>reply_out</b>, generate <b>keys_out_len</b> bytes worth of key material
+ * in <b>keys_out</b>, write a hidden service nonce to <b>rend_nonce_out</b>,
+ * and return the length of the reply. On failure, return -1.
+ */
+int
+onion_skin_server_handshake(int type,
+ const uint8_t *onion_skin, size_t onionskin_len,
+ const server_onion_keys_t *keys,
+ uint8_t *reply_out,
+ uint8_t *keys_out, size_t keys_out_len,
+ uint8_t *rend_nonce_out)
+{
+ int r = -1;
+
+ switch (type) {
+ case ONION_HANDSHAKE_TYPE_TAP:
+ if (onionskin_len != TAP_ONIONSKIN_CHALLENGE_LEN)
+ return -1;
+ if (onion_skin_TAP_server_handshake((const char*)onion_skin,
+ keys->onion_key, keys->last_onion_key,
+ (char*)reply_out,
+ (char*)keys_out, keys_out_len)<0)
+ return -1;
+ r = TAP_ONIONSKIN_REPLY_LEN;
+ memcpy(rend_nonce_out, reply_out+DH1024_KEY_LEN, DIGEST_LEN);
+ break;
+ case ONION_HANDSHAKE_TYPE_FAST:
+ if (onionskin_len != CREATE_FAST_LEN)
+ return -1;
+ if (fast_server_handshake(onion_skin, reply_out, keys_out, keys_out_len)<0)
+ return -1;
+ r = CREATED_FAST_LEN;
+ memcpy(rend_nonce_out, reply_out+DIGEST_LEN, DIGEST_LEN);
+ break;
+ case ONION_HANDSHAKE_TYPE_NTOR:
+ if (onionskin_len < NTOR_ONIONSKIN_LEN)
+ return -1;
+ {
+ size_t keys_tmp_len = keys_out_len + DIGEST_LEN;
+ tor_assert(keys_tmp_len <= MAX_KEYS_TMP_LEN);
+ uint8_t keys_tmp[MAX_KEYS_TMP_LEN];
+
+ if (onion_skin_ntor_server_handshake(
+ onion_skin, keys->curve25519_key_map,
+ keys->junk_keypair,
+ keys->my_identity,
+ reply_out, keys_tmp, keys_tmp_len)<0) {
+ /* no need to memwipe here, since the output will never be used */
+ return -1;
+ }
+
+ memcpy(keys_out, keys_tmp, keys_out_len);
+ memcpy(rend_nonce_out, keys_tmp+keys_out_len, DIGEST_LEN);
+ memwipe(keys_tmp, 0, sizeof(keys_tmp));
+ r = NTOR_REPLY_LEN;
+ }
+ break;
+ default:
+ /* LCOV_EXCL_START
+ * We should have rejected this far before this point */
+ log_warn(LD_BUG, "called with unknown handshake state type %d", type);
+ tor_fragile_assert();
+ return -1;
+ /* LCOV_EXCL_STOP */
+ }
+
+ return r;
+}
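/* [Editorial aside -- not part of this commit. A sketch of the relay-side
 * call pattern, roughly as cpuworker.c drives it for a queued request;
 * keys_out_len would typically be the cpath key material length, and
 * error handling is abbreviated.] */
static int
example_relay_answer_create(const create_cell_t *req,
                            const server_onion_keys_t *keys,
                            created_cell_t *reply_out,
                            uint8_t *keys_out, size_t keys_out_len,
                            uint8_t *rend_nonce_out)
{
  int rlen = onion_skin_server_handshake(req->handshake_type,
                                         req->onionskin, req->handshake_len,
                                         keys, reply_out->reply,
                                         keys_out, keys_out_len,
                                         rend_nonce_out);
  if (rlen < 0)
    return -1; /* the caller should destroy the circuit */
  reply_out->handshake_len = (uint16_t) rlen;
  /* ... the caller also sets reply_out->cell_type (CREATED, CREATED_FAST,
   * or CREATED2) to match the request ... */
  return 0;
}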
+
+/** Perform the final (client-side) step of a circuit-creation handshake of
+ * type <b>type</b>, using our state in <b>handshake_state</b> and the
+ * server's response in <b>reply</b>. On success, generate <b>keys_out_len</b>
+ * bytes worth of key material in <b>keys_out</b>, set
+ * <b>rend_authenticator_out</b> to the "KH" field that can be used to
+ * establish introduction points at this hop, and return 0. On failure,
+ * return -1, and set *msg_out to an error message if this is worth
+ * complaining to the user about. */
+int
+onion_skin_client_handshake(int type,
+ const onion_handshake_state_t *handshake_state,
+ const uint8_t *reply, size_t reply_len,
+ uint8_t *keys_out, size_t keys_out_len,
+ uint8_t *rend_authenticator_out,
+ const char **msg_out)
+{
+ if (handshake_state->tag != type)
+ return -1;
+
+ switch (type) {
+ case ONION_HANDSHAKE_TYPE_TAP:
+ if (reply_len != TAP_ONIONSKIN_REPLY_LEN) {
+ if (msg_out)
+ *msg_out = "TAP reply was not of the correct length.";
+ return -1;
+ }
+ if (onion_skin_TAP_client_handshake(handshake_state->u.tap,
+ (const char*)reply,
+ (char *)keys_out, keys_out_len,
+ msg_out) < 0)
+ return -1;
+
+ memcpy(rend_authenticator_out, reply+DH1024_KEY_LEN, DIGEST_LEN);
+
+ return 0;
+ case ONION_HANDSHAKE_TYPE_FAST:
+ if (reply_len != CREATED_FAST_LEN) {
+ if (msg_out)
+ *msg_out = "TAP reply was not of the correct length.";
+ return -1;
+ }
+ if (fast_client_handshake(handshake_state->u.fast, reply,
+ keys_out, keys_out_len, msg_out) < 0)
+ return -1;
+
+ memcpy(rend_authenticator_out, reply+DIGEST_LEN, DIGEST_LEN);
+ return 0;
+ case ONION_HANDSHAKE_TYPE_NTOR:
+ if (reply_len < NTOR_REPLY_LEN) {
+ if (msg_out)
+ *msg_out = "ntor reply was not of the correct length.";
+ return -1;
+ }
+ {
+ size_t keys_tmp_len = keys_out_len + DIGEST_LEN;
+ uint8_t *keys_tmp = tor_malloc(keys_tmp_len);
+ if (onion_skin_ntor_client_handshake(handshake_state->u.ntor,
+ reply,
+ keys_tmp, keys_tmp_len, msg_out) < 0) {
+ tor_free(keys_tmp);
+ return -1;
+ }
+ memcpy(keys_out, keys_tmp, keys_out_len);
+ memcpy(rend_authenticator_out, keys_tmp + keys_out_len, DIGEST_LEN);
+ memwipe(keys_tmp, 0, keys_tmp_len);
+ tor_free(keys_tmp);
+ }
+ return 0;
+ default:
+ log_warn(LD_BUG, "called with unknown handshake state type %d", type);
+ tor_fragile_assert();
+ return -1;
+ }
+}
+
+/** Helper: return 0 if <b>cell</b> appears valid, -1 otherwise. If
+ * <b>unknown_ok</b> is true, allow cells with handshake types we don't
+ * recognize. */
+static int
+check_create_cell(const create_cell_t *cell, int unknown_ok)
+{
+ switch (cell->cell_type) {
+ case CELL_CREATE:
+ if (cell->handshake_type != ONION_HANDSHAKE_TYPE_TAP &&
+ cell->handshake_type != ONION_HANDSHAKE_TYPE_NTOR)
+ return -1;
+ break;
+ case CELL_CREATE_FAST:
+ if (cell->handshake_type != ONION_HANDSHAKE_TYPE_FAST)
+ return -1;
+ break;
+ case CELL_CREATE2:
+ break;
+ default:
+ return -1;
+ }
+
+ switch (cell->handshake_type) {
+ case ONION_HANDSHAKE_TYPE_TAP:
+ if (cell->handshake_len != TAP_ONIONSKIN_CHALLENGE_LEN)
+ return -1;
+ break;
+ case ONION_HANDSHAKE_TYPE_FAST:
+ if (cell->handshake_len != CREATE_FAST_LEN)
+ return -1;
+ break;
+ case ONION_HANDSHAKE_TYPE_NTOR:
+ if (cell->handshake_len != NTOR_ONIONSKIN_LEN)
+ return -1;
+ break;
+ default:
+ if (! unknown_ok)
+ return -1;
+ }
+
+ return 0;
+}
+
+/** Write the various parameters into the create cell. Separate from
+ * create_cell_parse() to make unit testing easier.
+ */
+void
+create_cell_init(create_cell_t *cell_out, uint8_t cell_type,
+ uint16_t handshake_type, uint16_t handshake_len,
+ const uint8_t *onionskin)
+{
+ memset(cell_out, 0, sizeof(*cell_out));
+
+ cell_out->cell_type = cell_type;
+ cell_out->handshake_type = handshake_type;
+ cell_out->handshake_len = handshake_len;
+ memcpy(cell_out->onionskin, onionskin, handshake_len);
+}
+
+/** Helper: parse the CREATE2 payload at <b>p</b>, which could be up to
+ * <b>p_len</b> bytes long, and use it to fill the fields of
+ * <b>cell_out</b>. Return 0 on success and -1 on failure.
+ *
+ * Note that part of the body of an EXTEND2 cell is a CREATE2 payload, so
+ * this function is also used for parsing those.
+ */
+static int
+parse_create2_payload(create_cell_t *cell_out, const uint8_t *p, size_t p_len)
+{
+ uint16_t handshake_type, handshake_len;
+
+ if (p_len < 4)
+ return -1;
+
+ handshake_type = ntohs(get_uint16(p));
+ handshake_len = ntohs(get_uint16(p+2));
+
+ if (handshake_len > CELL_PAYLOAD_SIZE - 4 || handshake_len > p_len - 4)
+ return -1;
+ if (handshake_type == ONION_HANDSHAKE_TYPE_FAST)
+ return -1;
+
+ create_cell_init(cell_out, CELL_CREATE2, handshake_type, handshake_len,
+ p+4);
+ return 0;
+}
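/* [Editorial aside -- not part of this commit. The 4-byte header layout
 * that parse_create2_payload() expects, shown from the encoding side as
 * a hypothetical helper: HTYPE and HLEN as 2-byte network-order fields,
 * followed by HDATA.] */
static void
example_format_create2_payload(uint8_t *p, uint16_t handshake_type,
                               uint16_t handshake_len,
                               const uint8_t *onionskin)
{
  set_uint16(p, htons(handshake_type));  /* HTYPE */
  set_uint16(p+2, htons(handshake_len)); /* HLEN */
  memcpy(p+4, onionskin, handshake_len); /* HDATA */
}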
+
+/** Magic string which, in a CREATE or EXTEND cell, indicates that a seeming
+ * TAP payload is really an ntor payload. We'd do away with this if every
+ * relay supported EXTEND2, but we want to be able to extend from A to B with
+ * ntor even when A doesn't understand EXTEND2 and so can't generate a
+ * CREATE2 cell.
+ **/
+#define NTOR_CREATE_MAGIC "ntorNTORntorNTOR"
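+
+/* A CREATE cell carrying an ntor handshake therefore looks like:
+ *
+ *   NTOR_CREATE_MAGIC (16 bytes)
+ *   ntor onionskin    (NTOR_ONIONSKIN_LEN bytes)
+ *
+ * with the rest of the payload zero-filled. */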
+
+/** Parse a CREATE, CREATE_FAST, or CREATE2 cell from <b>cell_in</b> into
+ * <b>cell_out</b>. Return 0 on success, -1 on failure. (We reject some
+ * syntactically valid CREATE2 cells that we can't generate or react to.) */
+int
+create_cell_parse(create_cell_t *cell_out, const cell_t *cell_in)
+{
+ switch (cell_in->command) {
+ case CELL_CREATE:
+ if (tor_memeq(cell_in->payload, NTOR_CREATE_MAGIC, 16)) {
+ create_cell_init(cell_out, CELL_CREATE, ONION_HANDSHAKE_TYPE_NTOR,
+ NTOR_ONIONSKIN_LEN, cell_in->payload+16);
+ } else {
+ create_cell_init(cell_out, CELL_CREATE, ONION_HANDSHAKE_TYPE_TAP,
+ TAP_ONIONSKIN_CHALLENGE_LEN, cell_in->payload);
+ }
+ break;
+ case CELL_CREATE_FAST:
+ create_cell_init(cell_out, CELL_CREATE_FAST, ONION_HANDSHAKE_TYPE_FAST,
+ CREATE_FAST_LEN, cell_in->payload);
+ break;
+ case CELL_CREATE2:
+ if (parse_create2_payload(cell_out, cell_in->payload,
+ CELL_PAYLOAD_SIZE) < 0)
+ return -1;
+ break;
+ default:
+ return -1;
+ }
+
+ return check_create_cell(cell_out, 0);
+}
+
+/** Helper: return 0 if <b>cell</b> appears valid, -1 otherwise. */
+static int
+check_created_cell(const created_cell_t *cell)
+{
+ switch (cell->cell_type) {
+ case CELL_CREATED:
+ if (cell->handshake_len != TAP_ONIONSKIN_REPLY_LEN &&
+ cell->handshake_len != NTOR_REPLY_LEN)
+ return -1;
+ break;
+ case CELL_CREATED_FAST:
+ if (cell->handshake_len != CREATED_FAST_LEN)
+ return -1;
+ break;
+ case CELL_CREATED2:
+ if (cell->handshake_len > RELAY_PAYLOAD_SIZE-2)
+ return -1;
+ break;
+ }
+
+ return 0;
+}
+
+/** Parse a CREATED, CREATED_FAST, or CREATED2 cell from <b>cell_in</b> into
+ * <b>cell_out</b>. Return 0 on success, -1 on failure. */
+int
+created_cell_parse(created_cell_t *cell_out, const cell_t *cell_in)
+{
+ memset(cell_out, 0, sizeof(*cell_out));
+
+ switch (cell_in->command) {
+ case CELL_CREATED:
+ cell_out->cell_type = CELL_CREATED;
+ cell_out->handshake_len = TAP_ONIONSKIN_REPLY_LEN;
+ memcpy(cell_out->reply, cell_in->payload, TAP_ONIONSKIN_REPLY_LEN);
+ break;
+ case CELL_CREATED_FAST:
+ cell_out->cell_type = CELL_CREATED_FAST;
+ cell_out->handshake_len = CREATED_FAST_LEN;
+ memcpy(cell_out->reply, cell_in->payload, CREATED_FAST_LEN);
+ break;
+ case CELL_CREATED2:
+ {
+ const uint8_t *p = cell_in->payload;
+ cell_out->cell_type = CELL_CREATED2;
+ cell_out->handshake_len = ntohs(get_uint16(p));
+ if (cell_out->handshake_len > CELL_PAYLOAD_SIZE - 2)
+ return -1;
+ memcpy(cell_out->reply, p+2, cell_out->handshake_len);
+ break;
+ }
+ }
+
+ return check_created_cell(cell_out);
+}
+
+/** Helper: return 0 if <b>cell</b> appears valid, -1 otherwise. */
+static int
+check_extend_cell(const extend_cell_t *cell)
+{
+ if (tor_digest_is_zero((const char*)cell->node_id))
+ return -1;
+ /* We don't currently allow EXTEND2 cells without an IPv4 address */
+ if (tor_addr_family(&cell->orport_ipv4.addr) == AF_UNSPEC)
+ return -1;
+ if (cell->create_cell.cell_type == CELL_CREATE) {
+ if (cell->cell_type != RELAY_COMMAND_EXTEND)
+ return -1;
+ } else if (cell->create_cell.cell_type == CELL_CREATE2) {
+ if (cell->cell_type != RELAY_COMMAND_EXTEND2 &&
+ cell->cell_type != RELAY_COMMAND_EXTEND)
+ return -1;
+ } else {
+ /* In particular, no CREATE_FAST cells are allowed */
+ return -1;
+ }
+ if (cell->create_cell.handshake_type == ONION_HANDSHAKE_TYPE_FAST)
+ return -1;
+
+ return check_create_cell(&cell->create_cell, 1);
+}
+
+static int
+extend_cell_from_extend1_cell_body(extend_cell_t *cell_out,
+ const extend1_cell_body_t *cell)
+{
+ tor_assert(cell_out);
+ tor_assert(cell);
+ memset(cell_out, 0, sizeof(*cell_out));
+ tor_addr_make_unspec(&cell_out->orport_ipv4.addr);
+ tor_addr_make_unspec(&cell_out->orport_ipv6.addr);
+
+ cell_out->cell_type = RELAY_COMMAND_EXTEND;
+ tor_addr_from_ipv4h(&cell_out->orport_ipv4.addr, cell->ipv4addr);
+ cell_out->orport_ipv4.port = cell->port;
+ if (tor_memeq(cell->onionskin, NTOR_CREATE_MAGIC, 16)) {
+ cell_out->create_cell.cell_type = CELL_CREATE2;
+ cell_out->create_cell.handshake_type = ONION_HANDSHAKE_TYPE_NTOR;
+ cell_out->create_cell.handshake_len = NTOR_ONIONSKIN_LEN;
+ memcpy(cell_out->create_cell.onionskin, cell->onionskin + 16,
+ NTOR_ONIONSKIN_LEN);
+ } else {
+ cell_out->create_cell.cell_type = CELL_CREATE;
+ cell_out->create_cell.handshake_type = ONION_HANDSHAKE_TYPE_TAP;
+ cell_out->create_cell.handshake_len = TAP_ONIONSKIN_CHALLENGE_LEN;
+ memcpy(cell_out->create_cell.onionskin, cell->onionskin,
+ TAP_ONIONSKIN_CHALLENGE_LEN);
+ }
+ memcpy(cell_out->node_id, cell->identity, DIGEST_LEN);
+ return 0;
+}
+
+static int
+create_cell_from_create2_cell_body(create_cell_t *cell_out,
+ const create2_cell_body_t *cell)
+{
+ tor_assert(cell_out);
+ tor_assert(cell);
+ memset(cell_out, 0, sizeof(create_cell_t));
+ if (BUG(cell->handshake_len > sizeof(cell_out->onionskin))) {
+ /* This should be impossible because there just isn't enough room in the
+ * input cell to make the handshake_len this large and provide a
+ * handshake_data to match. */
+ return -1;
+ }
+
+ cell_out->cell_type = CELL_CREATE2;
+ cell_out->handshake_type = cell->handshake_type;
+ cell_out->handshake_len = cell->handshake_len;
+ memcpy(cell_out->onionskin,
+ create2_cell_body_getconstarray_handshake_data(cell),
+ cell->handshake_len);
+ return 0;
+}
+
+static int
+extend_cell_from_extend2_cell_body(extend_cell_t *cell_out,
+ const extend2_cell_body_t *cell)
+{
+ tor_assert(cell_out);
+ tor_assert(cell);
+ int found_ipv4 = 0, found_ipv6 = 0, found_rsa_id = 0, found_ed_id = 0;
+ memset(cell_out, 0, sizeof(*cell_out));
+ tor_addr_make_unspec(&cell_out->orport_ipv4.addr);
+ tor_addr_make_unspec(&cell_out->orport_ipv6.addr);
+ cell_out->cell_type = RELAY_COMMAND_EXTEND2;
+
+ unsigned i;
+ for (i = 0; i < cell->n_spec; ++i) {
+ const link_specifier_t *ls = extend2_cell_body_getconst_ls(cell, i);
+ switch (ls->ls_type) {
+ case LS_IPV4:
+ if (found_ipv4)
+ continue;
+ found_ipv4 = 1;
+ tor_addr_from_ipv4h(&cell_out->orport_ipv4.addr, ls->un_ipv4_addr);
+ cell_out->orport_ipv4.port = ls->un_ipv4_port;
+ break;
+ case LS_IPV6:
+ if (found_ipv6)
+ continue;
+ found_ipv6 = 1;
+ tor_addr_from_ipv6_bytes(&cell_out->orport_ipv6.addr,
+ (const char *)ls->un_ipv6_addr);
+ cell_out->orport_ipv6.port = ls->un_ipv6_port;
+ break;
+ case LS_LEGACY_ID:
+ if (found_rsa_id)
+ return -1;
+ found_rsa_id = 1;
+ memcpy(cell_out->node_id, ls->un_legacy_id, 20);
+ break;
+ case LS_ED25519_ID:
+ if (found_ed_id)
+ return -1;
+ found_ed_id = 1;
+ memcpy(cell_out->ed_pubkey.pubkey, ls->un_ed25519_id, 32);
+ break;
+ default:
+ /* Ignore this, whatever it is. */
+ break;
+ }
+ }
+
+ if (!found_rsa_id || !found_ipv4) /* These are mandatory */
+ return -1;
+
+ return create_cell_from_create2_cell_body(&cell_out->create_cell,
+ cell->create2);
+}
+
+/** Parse an EXTEND or EXTEND2 cell (according to <b>command</b>) from the
+ * <b>payload_length</b> bytes of <b>payload</b> into <b>cell_out</b>. Return
+ * 0 on success, -1 on failure. */
+int
+extend_cell_parse(extend_cell_t *cell_out, const uint8_t command,
+ const uint8_t *payload, size_t payload_length)
+{
+ tor_assert(cell_out);
+ tor_assert(payload);
+
+ if (payload_length > RELAY_PAYLOAD_SIZE)
+ return -1;
+
+ switch (command) {
+ case RELAY_COMMAND_EXTEND:
+ {
+ extend1_cell_body_t *cell = NULL;
+ if (extend1_cell_body_parse(&cell, payload, payload_length)<0 ||
+ cell == NULL) {
+ if (cell)
+ extend1_cell_body_free(cell);
+ return -1;
+ }
+ int r = extend_cell_from_extend1_cell_body(cell_out, cell);
+ extend1_cell_body_free(cell);
+ if (r < 0)
+ return r;
+ }
+ break;
+ case RELAY_COMMAND_EXTEND2:
+ {
+ extend2_cell_body_t *cell = NULL;
+ if (extend2_cell_body_parse(&cell, payload, payload_length) < 0 ||
+ cell == NULL) {
+ if (cell)
+ extend2_cell_body_free(cell);
+ return -1;
+ }
+ int r = extend_cell_from_extend2_cell_body(cell_out, cell);
+ extend2_cell_body_free(cell);
+ if (r < 0)
+ return r;
+ }
+ break;
+ default:
+ return -1;
+ }
+
+ return check_extend_cell(cell_out);
+}
+
+/** Helper: return 0 if <b>cell</b> appears valid, -1 otherwise. */
+static int
+check_extended_cell(const extended_cell_t *cell)
+{
+ tor_assert(cell);
+ if (cell->created_cell.cell_type == CELL_CREATED) {
+ if (cell->cell_type != RELAY_COMMAND_EXTENDED)
+ return -1;
+ } else if (cell->created_cell.cell_type == CELL_CREATED2) {
+ if (cell->cell_type != RELAY_COMMAND_EXTENDED2)
+ return -1;
+ } else {
+ return -1;
+ }
+
+ return check_created_cell(&cell->created_cell);
+}
+
+/** Parse an EXTENDED or EXTENDED2 cell (according to <b>command</b>) from the
+ * <b>payload_length</b> bytes of <b>payload</b> into <b>cell_out</b>. Return
+ * 0 on success, -1 on failure. */
+int
+extended_cell_parse(extended_cell_t *cell_out,
+ const uint8_t command, const uint8_t *payload,
+ size_t payload_len)
+{
+ tor_assert(cell_out);
+ tor_assert(payload);
+
+ memset(cell_out, 0, sizeof(*cell_out));
+ if (payload_len > RELAY_PAYLOAD_SIZE)
+ return -1;
+
+ switch (command) {
+ case RELAY_COMMAND_EXTENDED:
+ if (payload_len != TAP_ONIONSKIN_REPLY_LEN)
+ return -1;
+ cell_out->cell_type = RELAY_COMMAND_EXTENDED;
+ cell_out->created_cell.cell_type = CELL_CREATED;
+ cell_out->created_cell.handshake_len = TAP_ONIONSKIN_REPLY_LEN;
+ memcpy(cell_out->created_cell.reply, payload, TAP_ONIONSKIN_REPLY_LEN);
+ break;
+ case RELAY_COMMAND_EXTENDED2:
+ {
+ cell_out->cell_type = RELAY_COMMAND_EXTENDED2;
+ cell_out->created_cell.cell_type = CELL_CREATED2;
+ cell_out->created_cell.handshake_len = ntohs(get_uint16(payload));
+ if (cell_out->created_cell.handshake_len > RELAY_PAYLOAD_SIZE - 2 ||
+ cell_out->created_cell.handshake_len > payload_len - 2)
+ return -1;
+ memcpy(cell_out->created_cell.reply, payload+2,
+ cell_out->created_cell.handshake_len);
+ }
+ break;
+ default:
+ return -1;
+ }
+
+ return check_extended_cell(cell_out);
+}
+
+/** Fill <b>cell_out</b> with a correctly formatted version of the
+ * CREATE{,_FAST,2} cell in <b>cell_in</b>. Return 0 on success, -1 on
+ * failure. If <b>relayed</b> is true, this is a cell that we are relaying
+ * rather than one we originated. */
+static int
+create_cell_format_impl(cell_t *cell_out, const create_cell_t *cell_in,
+ int relayed)
+{
+ uint8_t *p;
+ size_t space;
+ if (check_create_cell(cell_in, relayed) < 0)
+ return -1;
+
+ memset(cell_out->payload, 0, sizeof(cell_out->payload));
+ cell_out->command = cell_in->cell_type;
+
+ p = cell_out->payload;
+ space = sizeof(cell_out->payload);
+
+ switch (cell_in->cell_type) {
+ case CELL_CREATE:
+ if (cell_in->handshake_type == ONION_HANDSHAKE_TYPE_NTOR) {
+ memcpy(p, NTOR_CREATE_MAGIC, 16);
+ p += 16;
+ space -= 16;
+ }
+ /* Fall through */
+ case CELL_CREATE_FAST:
+ tor_assert(cell_in->handshake_len <= space);
+ memcpy(p, cell_in->onionskin, cell_in->handshake_len);
+ break;
+ case CELL_CREATE2:
+ tor_assert(cell_in->handshake_len <= sizeof(cell_out->payload)-4);
+ set_uint16(cell_out->payload, htons(cell_in->handshake_type));
+ set_uint16(cell_out->payload+2, htons(cell_in->handshake_len));
+ memcpy(cell_out->payload + 4, cell_in->onionskin, cell_in->handshake_len);
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+create_cell_format(cell_t *cell_out, const create_cell_t *cell_in)
+{
+ return create_cell_format_impl(cell_out, cell_in, 0);
+}
+
+int
+create_cell_format_relayed(cell_t *cell_out, const create_cell_t *cell_in)
+{
+ return create_cell_format_impl(cell_out, cell_in, 1);
+}
+
+/** Fill <b>cell_out</b> with a correctly formatted version of the
+ * CREATED{,_FAST,2} cell in <b>cell_in</b>. Return 0 on success, -1 on
+ * failure. */
+int
+created_cell_format(cell_t *cell_out, const created_cell_t *cell_in)
+{
+ if (check_created_cell(cell_in) < 0)
+ return -1;
+
+ memset(cell_out->payload, 0, sizeof(cell_out->payload));
+ cell_out->command = cell_in->cell_type;
+
+ switch (cell_in->cell_type) {
+ case CELL_CREATED:
+ case CELL_CREATED_FAST:
+ tor_assert(cell_in->handshake_len <= sizeof(cell_out->payload));
+ memcpy(cell_out->payload, cell_in->reply, cell_in->handshake_len);
+ break;
+ case CELL_CREATED2:
+ tor_assert(cell_in->handshake_len <= sizeof(cell_out->payload)-2);
+ set_uint16(cell_out->payload, htons(cell_in->handshake_len));
+ memcpy(cell_out->payload + 2, cell_in->reply, cell_in->handshake_len);
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+/** Return true iff we are configured (by torrc or by the networkstatus
+ * parameters) to use Ed25519 identities in our Extend2 cells. */
+static int
+should_include_ed25519_id_extend_cells(const networkstatus_t *ns,
+ const or_options_t *options)
+{
+ if (options->ExtendByEd25519ID != -1)
+ return options->ExtendByEd25519ID; /* The user has an opinion. */
+
+ return (int) networkstatus_get_param(ns, "ExtendByEd25519ID",
+ 0 /* default */,
+ 0 /* min */,
+ 1 /*max*/);
+}
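+
+/* For example: "ExtendByEd25519ID 1" in the torrc forces Ed25519 link
+ * specifiers on, "ExtendByEd25519ID 0" forces them off, and leaving the
+ * option unset (-1) defers to the "ExtendByEd25519ID" consensus
+ * parameter, which defaults to 0. */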
+
+/** Format the EXTEND{,2} cell in <b>cell_in</b>, storing its relay payload in
+ * <b>payload_out</b>, the number of bytes used in *<b>len_out</b>, and the
+ * relay command in *<b>command_out</b>. The <b>payload_out</b> must have
+ * RELAY_PAYLOAD_SIZE bytes available. Return 0 on success, -1 on failure. */
+int
+extend_cell_format(uint8_t *command_out, uint16_t *len_out,
+ uint8_t *payload_out, const extend_cell_t *cell_in)
+{
+ uint8_t *p;
+ if (check_extend_cell(cell_in) < 0)
+ return -1;
+
+ p = payload_out;
+
+ memset(p, 0, RELAY_PAYLOAD_SIZE);
+
+ switch (cell_in->cell_type) {
+ case RELAY_COMMAND_EXTEND:
+ {
+ *command_out = RELAY_COMMAND_EXTEND;
+ *len_out = 6 + TAP_ONIONSKIN_CHALLENGE_LEN + DIGEST_LEN;
+ set_uint32(p, tor_addr_to_ipv4n(&cell_in->orport_ipv4.addr));
+ set_uint16(p+4, htons(cell_in->orport_ipv4.port));
+ if (cell_in->create_cell.handshake_type == ONION_HANDSHAKE_TYPE_NTOR) {
+ memcpy(p+6, NTOR_CREATE_MAGIC, 16);
+ memcpy(p+22, cell_in->create_cell.onionskin, NTOR_ONIONSKIN_LEN);
+ } else {
+ memcpy(p+6, cell_in->create_cell.onionskin,
+ TAP_ONIONSKIN_CHALLENGE_LEN);
+ }
+ memcpy(p+6+TAP_ONIONSKIN_CHALLENGE_LEN, cell_in->node_id, DIGEST_LEN);
+ }
+ break;
+ case RELAY_COMMAND_EXTEND2:
+ {
+ uint8_t n_specifiers = 2;
+ *command_out = RELAY_COMMAND_EXTEND2;
+ extend2_cell_body_t *cell = extend2_cell_body_new();
+ link_specifier_t *ls;
+ {
+ /* IPv4 specifier first. */
+ ls = link_specifier_new();
+ extend2_cell_body_add_ls(cell, ls);
+ ls->ls_type = LS_IPV4;
+ ls->ls_len = 6;
+ ls->un_ipv4_addr = tor_addr_to_ipv4h(&cell_in->orport_ipv4.addr);
+ ls->un_ipv4_port = cell_in->orport_ipv4.port;
+ }
+ {
+ /* Then RSA id */
+ ls = link_specifier_new();
+ extend2_cell_body_add_ls(cell, ls);
+ ls->ls_type = LS_LEGACY_ID;
+ ls->ls_len = DIGEST_LEN;
+ memcpy(ls->un_legacy_id, cell_in->node_id, DIGEST_LEN);
+ }
+ if (should_include_ed25519_id_extend_cells(NULL, get_options()) &&
+ !ed25519_public_key_is_zero(&cell_in->ed_pubkey)) {
+ /* Then, maybe, the ed25519 id! */
+ ++n_specifiers;
+ ls = link_specifier_new();
+ extend2_cell_body_add_ls(cell, ls);
+ ls->ls_type = LS_ED25519_ID;
+ ls->ls_len = 32;
+ memcpy(ls->un_ed25519_id, cell_in->ed_pubkey.pubkey, 32);
+ }
+ cell->n_spec = n_specifiers;
+
+ /* Now, the handshake */
+ cell->create2 = create2_cell_body_new();
+ cell->create2->handshake_type = cell_in->create_cell.handshake_type;
+ cell->create2->handshake_len = cell_in->create_cell.handshake_len;
+ create2_cell_body_setlen_handshake_data(cell->create2,
+ cell_in->create_cell.handshake_len);
+ memcpy(create2_cell_body_getarray_handshake_data(cell->create2),
+ cell_in->create_cell.onionskin,
+ cell_in->create_cell.handshake_len);
+
+ ssize_t len_encoded = extend2_cell_body_encode(
+ payload_out, RELAY_PAYLOAD_SIZE,
+ cell);
+ extend2_cell_body_free(cell);
+ if (len_encoded < 0 || len_encoded > UINT16_MAX)
+ return -1;
+ *len_out = (uint16_t) len_encoded;
+ }
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
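+
+/* For reference, the legacy EXTEND relay payload built above is:
+ *
+ *   IPv4 address (4 bytes)
+ *   OR port      (2 bytes)
+ *   onionskin    (TAP_ONIONSKIN_CHALLENGE_LEN bytes; for ntor this is
+ *                 NTOR_CREATE_MAGIC followed by the 84-byte onionskin,
+ *                 zero-padded to the same length)
+ *   identity     (DIGEST_LEN bytes)
+ */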
+
+/** Format the EXTENDED{,2} cell in <b>cell_in</b>, storing its relay payload
+ * in <b>payload_out</b>, the number of bytes used in *<b>len_out</b>, and the
+ * relay command in *<b>command_out</b>. The <b>payload_out</b> must have
+ * RELAY_PAYLOAD_SIZE bytes available. Return 0 on success, -1 on failure. */
+int
+extended_cell_format(uint8_t *command_out, uint16_t *len_out,
+ uint8_t *payload_out, const extended_cell_t *cell_in)
+{
+ uint8_t *p;
+ if (check_extended_cell(cell_in) < 0)
+ return -1;
+
+ p = payload_out;
+ memset(p, 0, RELAY_PAYLOAD_SIZE);
+
+ switch (cell_in->cell_type) {
+ case RELAY_COMMAND_EXTENDED:
+ {
+ *command_out = RELAY_COMMAND_EXTENDED;
+ *len_out = TAP_ONIONSKIN_REPLY_LEN;
+ memcpy(payload_out, cell_in->created_cell.reply,
+ TAP_ONIONSKIN_REPLY_LEN);
+ }
+ break;
+ case RELAY_COMMAND_EXTENDED2:
+ {
+ *command_out = RELAY_COMMAND_EXTENDED2;
+ *len_out = 2 + cell_in->created_cell.handshake_len;
+ set_uint16(payload_out, htons(cell_in->created_cell.handshake_len));
+ if (2+cell_in->created_cell.handshake_len > RELAY_PAYLOAD_SIZE)
+ return -1;
+ memcpy(payload_out+2, cell_in->created_cell.reply,
+ cell_in->created_cell.handshake_len);
+ }
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/src/core/crypto/onion.h b/src/core/crypto/onion.h
new file mode 100644
index 0000000000..ff70f299d5
--- /dev/null
+++ b/src/core/crypto/onion.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file onion.h
+ * \brief Header file for onion.c.
+ **/
+
+#ifndef TOR_ONION_H
+#define TOR_ONION_H
+
+struct create_cell_t;
+struct curve25519_keypair_t;
+struct curve25519_public_key_t;
+#include "lib/crypt_ops/crypto_ed25519.h"
+
+int onion_pending_add(or_circuit_t *circ, struct create_cell_t *onionskin);
+or_circuit_t *onion_next_task(struct create_cell_t **onionskin_out);
+int onion_num_pending(uint16_t handshake_type);
+void onion_pending_remove(or_circuit_t *circ);
+void clear_pending_onions(void);
+
+typedef struct server_onion_keys_t {
+ uint8_t my_identity[DIGEST_LEN];
+ crypto_pk_t *onion_key;
+ crypto_pk_t *last_onion_key;
+ struct di_digest256_map_t *curve25519_key_map;
+ struct curve25519_keypair_t *junk_keypair;
+} server_onion_keys_t;
+
+#define MAX_ONIONSKIN_CHALLENGE_LEN 255
+#define MAX_ONIONSKIN_REPLY_LEN 255
+
+server_onion_keys_t *server_onion_keys_new(void);
+void server_onion_keys_free_(server_onion_keys_t *keys);
+#define server_onion_keys_free(keys) \
+ FREE_AND_NULL(server_onion_keys_t, server_onion_keys_free_, (keys))
+
+void onion_handshake_state_release(onion_handshake_state_t *state);
+
+int onion_skin_create(int type,
+ const extend_info_t *node,
+ onion_handshake_state_t *state_out,
+ uint8_t *onion_skin_out);
+int onion_skin_server_handshake(int type,
+ const uint8_t *onion_skin, size_t onionskin_len,
+ const server_onion_keys_t *keys,
+ uint8_t *reply_out,
+ uint8_t *keys_out, size_t key_out_len,
+ uint8_t *rend_nonce_out);
+int onion_skin_client_handshake(int type,
+ const onion_handshake_state_t *handshake_state,
+ const uint8_t *reply, size_t reply_len,
+ uint8_t *keys_out, size_t key_out_len,
+ uint8_t *rend_authenticator_out,
+ const char **msg_out);
+
+/** A parsed CREATE, CREATE_FAST, or CREATE2 cell. */
+typedef struct create_cell_t {
+ /** The cell command. One of CREATE{,_FAST,2} */
+ uint8_t cell_type;
+ /** One of the ONION_HANDSHAKE_TYPE_* values */
+ uint16_t handshake_type;
+ /** The number of bytes used in <b>onionskin</b>. */
+ uint16_t handshake_len;
+ /** The client-side message for the circuit creation handshake. */
+ uint8_t onionskin[CELL_PAYLOAD_SIZE - 4];
+} create_cell_t;
+
+/** A parsed CREATED, CREATED_FAST, or CREATED2 cell. */
+typedef struct created_cell_t {
+ /** The cell command. One of CREATED{,_FAST,2} */
+ uint8_t cell_type;
+ /** The number of bytes used in <b>reply</b>. */
+ uint16_t handshake_len;
+ /** The server-side message for the circuit creation handshake. */
+ uint8_t reply[CELL_PAYLOAD_SIZE - 2];
+} created_cell_t;
+
+/** A parsed RELAY_EXTEND or RELAY_EXTEND2 cell */
+typedef struct extend_cell_t {
+ /** One of RELAY_EXTEND or RELAY_EXTEND2 */
+ uint8_t cell_type;
+ /** An IPv4 address and port for the node we're connecting to. */
+ tor_addr_port_t orport_ipv4;
+ /** An IPv6 address and port for the node we're connecting to. Not currently
+ * used. */
+ tor_addr_port_t orport_ipv6;
+  /** Identity fingerprint of the node we're connecting to. */
+ uint8_t node_id[DIGEST_LEN];
+ /** Ed25519 public identity key. Zero if not set. */
+ struct ed25519_public_key_t ed_pubkey;
+ /** The "create cell" embedded in this extend cell. Note that unlike the
+ * create cells we generate ourself, this once can have a handshake type we
+ * don't recognize. */
+ create_cell_t create_cell;
+} extend_cell_t;
+
+/** A parsed RELAY_EXTENDED or RELAY_EXTENDED2 cell */
+typedef struct extended_cell_t {
+ /** One of RELAY_EXTENDED or RELAY_EXTENDED2. */
+ uint8_t cell_type;
+ /** The "created cell" embedded in this extended cell. */
+ created_cell_t created_cell;
+} extended_cell_t;
+
+void create_cell_init(create_cell_t *cell_out, uint8_t cell_type,
+ uint16_t handshake_type, uint16_t handshake_len,
+ const uint8_t *onionskin);
+int create_cell_parse(create_cell_t *cell_out, const cell_t *cell_in);
+int created_cell_parse(created_cell_t *cell_out, const cell_t *cell_in);
+int extend_cell_parse(extend_cell_t *cell_out, const uint8_t command,
+ const uint8_t *payload_in, size_t payload_len);
+int extended_cell_parse(extended_cell_t *cell_out, const uint8_t command,
+ const uint8_t *payload_in, size_t payload_len);
+
+int create_cell_format(cell_t *cell_out, const create_cell_t *cell_in);
+int create_cell_format_relayed(cell_t *cell_out, const create_cell_t *cell_in);
+int created_cell_format(cell_t *cell_out, const created_cell_t *cell_in);
+int extend_cell_format(uint8_t *command_out, uint16_t *len_out,
+ uint8_t *payload_out, const extend_cell_t *cell_in);
+int extended_cell_format(uint8_t *command_out, uint16_t *len_out,
+ uint8_t *payload_out, const extended_cell_t *cell_in);
+
+#endif /* !defined(TOR_ONION_H) */
diff --git a/src/core/crypto/onion_fast.c b/src/core/crypto/onion_fast.c
new file mode 100644
index 0000000000..6e834ccf95
--- /dev/null
+++ b/src/core/crypto/onion_fast.c
@@ -0,0 +1,144 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file onion_fast.c
+ * \brief Functions to implement the CREATE_FAST circuit handshake.
+ *
+ * The "CREATE_FAST" handshake is an unauthenticated, non-forward-secure
+ * key derivation mechanism based on SHA1. We used to use it for the
+ * first hop of each circuit, since the TAP handshake provided no
+ * additional security beyond the security already provided by the TLS
+ * handshake [*].
+ *
+ * When we switched to ntor, we deprecated CREATE_FAST, since ntor is
+ * stronger than our TLS handshake was, and fast enough to not be worrisome.
+ *
+ * This handshake, like the other circuit-extension handshakes, is
+ * invoked from onion.c.
+ *
+ * [*]Actually, it's possible that TAP _was_ a little better than TLS with
+ * RSA1024 certificates and EDH1024 for forward secrecy, if you
+ * hypothesize an adversary who can compute discrete logarithms on a
+ * small number of targeted DH1024 fields, but who can't break all that
+ * many RSA1024 keys.
+ **/
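+
+/* The exchange, schematically (x and y are DIGEST_LEN random byte
+ * strings; the KDF is the legacy crypto_expand_key_material_TAP()):
+ *
+ *   Client -> server (CREATE_FAST):   x
+ *   Server -> client (CREATED_FAST):  y | H(x|y)
+ *
+ * where H(x|y) is the first DIGEST_LEN bytes of KDF(x|y), and the rest
+ * of the KDF output becomes the shared key material. */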
+
+#include "or/or.h"
+#include "or/onion_fast.h"
+#include "lib/crypt_ops/crypto_hkdf.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "lib/crypt_ops/crypto_util.h"
+
+/** Release all state held in <b>victim</b>. */
+void
+fast_handshake_state_free_(fast_handshake_state_t *victim)
+{
+ if (! victim)
+ return;
+ memwipe(victim, 0, sizeof(fast_handshake_state_t));
+ tor_free(victim);
+}
+
+/** Create the state needed to perform a CREATE_FAST handshake. Return 0
+ * on success, -1 on failure. */
+int
+fast_onionskin_create(fast_handshake_state_t **handshake_state_out,
+ uint8_t *handshake_out)
+{
+ fast_handshake_state_t *s;
+ *handshake_state_out = s = tor_malloc(sizeof(fast_handshake_state_t));
+ crypto_rand((char*)s->state, sizeof(s->state));
+ memcpy(handshake_out, s->state, DIGEST_LEN);
+ return 0;
+}
+
+/** Implement the server side of the CREATE_FAST abbreviated handshake. The
+ * client has provided DIGEST_LEN key bytes in <b>key_in</b> ("x"). We
+ * generate a reply of DIGEST_LEN*2 bytes in <b>handshake_reply_out</b>,
+ * consisting of a new random "y", followed by H(x|y) to check for
+ * correctness. We set <b>key_out_len</b> bytes of key material in
+ * <b>key_out</b>. Return 0 on success, &lt;0 on failure.
+ **/
+int
+fast_server_handshake(const uint8_t *key_in, /* DIGEST_LEN bytes */
+ uint8_t *handshake_reply_out, /* DIGEST_LEN*2 bytes */
+ uint8_t *key_out,
+ size_t key_out_len)
+{
+ uint8_t tmp[DIGEST_LEN+DIGEST_LEN];
+ uint8_t *out = NULL;
+ size_t out_len;
+ int r = -1;
+
+ crypto_rand((char*)handshake_reply_out, DIGEST_LEN);
+
+ memcpy(tmp, key_in, DIGEST_LEN);
+ memcpy(tmp+DIGEST_LEN, handshake_reply_out, DIGEST_LEN);
+ out_len = key_out_len+DIGEST_LEN;
+ out = tor_malloc(out_len);
+ if (BUG(crypto_expand_key_material_TAP(tmp, sizeof(tmp), out, out_len))) {
+ goto done; // LCOV_EXCL_LINE
+ }
+ memcpy(handshake_reply_out+DIGEST_LEN, out, DIGEST_LEN);
+ memcpy(key_out, out+DIGEST_LEN, key_out_len);
+ r = 0;
+ done:
+ memwipe(tmp, 0, sizeof(tmp));
+ memwipe(out, 0, out_len);
+ tor_free(out);
+ return r;
+}
+
+/** Implement the second half of the client side of the CREATE_FAST handshake.
+ * We sent the server <b>handshake_state</b> ("x") already, and the server
+ * told us <b>handshake_reply_out</b> (y|H(x|y)). Make sure that the hash is
+ * correct, and generate key material in <b>key_out</b>. Return 0 on success,
+ * -1 on failure.
+ *
+ * NOTE: The "CREATE_FAST" handshake path is distinguishable from regular
+ * "onionskin" handshakes, and is not secure if an adversary can see or modify
+ * the messages. Therefore, it should only be used by clients, and only as
+ * the first hop of a circuit (since the first hop is already authenticated
+ * and protected by TLS).
+ */
+int
+fast_client_handshake(const fast_handshake_state_t *handshake_state,
+ const uint8_t *handshake_reply_out,/*DIGEST_LEN*2 bytes*/
+ uint8_t *key_out,
+ size_t key_out_len,
+ const char **msg_out)
+{
+ uint8_t tmp[DIGEST_LEN+DIGEST_LEN];
+ uint8_t *out;
+ size_t out_len;
+ int r = -1;
+
+ memcpy(tmp, handshake_state->state, DIGEST_LEN);
+ memcpy(tmp+DIGEST_LEN, handshake_reply_out, DIGEST_LEN);
+ out_len = key_out_len+DIGEST_LEN;
+ out = tor_malloc(out_len);
+ if (BUG(crypto_expand_key_material_TAP(tmp, sizeof(tmp), out, out_len))) {
+ /* LCOV_EXCL_START */
+ if (msg_out)
+ *msg_out = "Failed to expand key material";
+ goto done;
+ /* LCOV_EXCL_STOP */
+ }
+ if (tor_memneq(out, handshake_reply_out+DIGEST_LEN, DIGEST_LEN)) {
+ /* H(K) does *not* match. Something fishy. */
+ if (msg_out)
+ *msg_out = "Digest DOES NOT MATCH on fast handshake. Bug or attack.";
+ goto done;
+ }
+ memcpy(key_out, out+DIGEST_LEN, key_out_len);
+ r = 0;
+ done:
+ memwipe(tmp, 0, sizeof(tmp));
+ memwipe(out, 0, out_len);
+ tor_free(out);
+ return r;
+}
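+
+/* Illustrative round trip (a sketch, omitting error checks; 72 here
+ * stands in for CPATH_KEY_MATERIAL_LEN):
+ *
+ *   fast_handshake_state_t *st = NULL;
+ *   uint8_t create[CREATE_FAST_LEN], created[CREATED_FAST_LEN];
+ *   uint8_t ckeys[72], skeys[72];
+ *   const char *msg = NULL;
+ *
+ *   fast_onionskin_create(&st, create);                   // client
+ *   fast_server_handshake(create, created, skeys, 72);    // server
+ *   fast_client_handshake(st, created, ckeys, 72, &msg);  // client
+ *
+ * On success, ckeys and skeys hold identical key material, and
+ * fast_handshake_state_free(st) releases the client state. */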
diff --git a/src/core/crypto/onion_fast.h b/src/core/crypto/onion_fast.h
new file mode 100644
index 0000000000..a7b6ec53f4
--- /dev/null
+++ b/src/core/crypto/onion_fast.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file onion_fast.h
+ * \brief Header file for onion_fast.c.
+ **/
+
+#ifndef TOR_ONION_FAST_H
+#define TOR_ONION_FAST_H
+
+#define CREATE_FAST_LEN DIGEST_LEN
+#define CREATED_FAST_LEN (DIGEST_LEN*2)
+
+typedef struct fast_handshake_state_t {
+ uint8_t state[DIGEST_LEN];
+} fast_handshake_state_t;
+
+void fast_handshake_state_free_(fast_handshake_state_t *victim);
+#define fast_handshake_state_free(st) \
+ FREE_AND_NULL(fast_handshake_state_t, fast_handshake_state_free_, (st))
+
+int fast_onionskin_create(fast_handshake_state_t **handshake_state_out,
+ uint8_t *handshake_out);
+
+int fast_server_handshake(const uint8_t *message_in,
+ uint8_t *handshake_reply_out,
+ uint8_t *key_out,
+ size_t key_out_len);
+
+int fast_client_handshake(const fast_handshake_state_t *handshake_state,
+ const uint8_t *handshake_reply_out,
+ uint8_t *key_out,
+ size_t key_out_len,
+ const char **msg_out);
+
+#endif /* !defined(TOR_ONION_FAST_H) */
+
diff --git a/src/core/crypto/onion_ntor.c b/src/core/crypto/onion_ntor.c
new file mode 100644
index 0000000000..59c923cb97
--- /dev/null
+++ b/src/core/crypto/onion_ntor.c
@@ -0,0 +1,341 @@
+/* Copyright (c) 2012-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file onion_ntor.c
+ *
+ * \brief Implementation for the ntor handshake.
+ *
+ * The ntor circuit-extension handshake was developed as a replacement
+ * for the old TAP handshake. It uses Elliptic-curve Diffie-Hellman and
+ * a hash function in order to perform a one-way authenticated key
+ * exchange.
+ *
+ * We instantiate ntor with curve25519, HMAC-SHA256, and HKDF.
+ *
+ * This handshake, like the other circuit-extension handshakes, is
+ * invoked from onion.c.
+ */
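+
+/* Message layout, for reference (see NTOR_ONIONSKIN_LEN and
+ * NTOR_REPLY_LEN in onion_ntor.h):
+ *
+ *   Client -> server (84 bytes): ID (20) | B (32) | X (32)
+ *   Server -> client (64 bytes): Y (32) | AUTH (32)
+ *
+ * ID is the server's RSA identity digest, B its ntor onion key, X and Y
+ * are the ephemeral curve25519 public keys, and AUTH is the HMAC-SHA256
+ * transcript authenticator computed below. */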
+
+#include "orconfig.h"
+
+#define ONION_NTOR_PRIVATE
+
+#include "lib/crypt_ops/crypto.h"
+#include "lib/crypt_ops/crypto_digest.h"
+#include "lib/crypt_ops/crypto_hkdf.h"
+#include "lib/crypt_ops/crypto_util.h"
+#include "lib/ctime/di_ops.h"
+#include "lib/log/torlog.h"
+#include "lib/log/util_bug.h"
+#include "or/onion_ntor.h"
+
+#include <string.h>
+
+/** Free storage held in an ntor handshake state. */
+void
+ntor_handshake_state_free_(ntor_handshake_state_t *state)
+{
+ if (!state)
+ return;
+ memwipe(state, 0, sizeof(*state));
+ tor_free(state);
+}
+
+/** Convenience function to represent HMAC_SHA256 as our instantiation of
+ * ntor's "tweaked hash'. Hash the <b>inp_len</b> bytes at <b>inp</b> into
+ * a DIGEST256_LEN-byte digest at <b>out</b>, with the hash changing
+ * depending on the value of <b>tweak</b>. */
+static void
+h_tweak(uint8_t *out,
+ const uint8_t *inp, size_t inp_len,
+ const char *tweak)
+{
+ size_t tweak_len = strlen(tweak);
+ crypto_hmac_sha256((char*)out, tweak, tweak_len, (const char*)inp, inp_len);
+}
+
+/** Wrapper around a set of tweak-values for use with the ntor handshake. */
+typedef struct tweakset_t {
+ const char *t_mac;
+ const char *t_key;
+ const char *t_verify;
+ const char *m_expand;
+} tweakset_t;
+
+/** The tweaks to be used with our handshake. */
+static const tweakset_t proto1_tweaks = {
+#define PROTOID "ntor-curve25519-sha256-1"
+#define PROTOID_LEN 24
+ PROTOID ":mac",
+ PROTOID ":key_extract",
+ PROTOID ":verify",
+ PROTOID ":key_expand"
+};
+
+/** Convenience macro: copy <b>len</b> bytes from <b>inp</b> to <b>ptr</b>,
+ * and advance <b>ptr</b> by the number of bytes copied. */
+#define APPEND(ptr, inp, len) \
+ STMT_BEGIN { \
+ memcpy(ptr, (inp), (len)); \
+ ptr += len; \
+ } STMT_END
+
+/**
+ * Compute the first client-side step of the ntor handshake for communicating
+ * with a server whose DIGEST_LEN-byte server identity is <b>router_id</b>,
+ * and whose onion key is <b>router_key</b>. Store the NTOR_ONIONSKIN_LEN-byte
+ * message in <b>onion_skin_out</b>, and store the handshake state in
+ * *<b>handshake_state_out</b>. Return 0 on success, -1 on failure.
+ */
+int
+onion_skin_ntor_create(const uint8_t *router_id,
+ const curve25519_public_key_t *router_key,
+ ntor_handshake_state_t **handshake_state_out,
+ uint8_t *onion_skin_out)
+{
+ ntor_handshake_state_t *state;
+ uint8_t *op;
+
+ state = tor_malloc_zero(sizeof(ntor_handshake_state_t));
+
+ memcpy(state->router_id, router_id, DIGEST_LEN);
+ memcpy(&state->pubkey_B, router_key, sizeof(curve25519_public_key_t));
+ if (curve25519_secret_key_generate(&state->seckey_x, 0) < 0) {
+ /* LCOV_EXCL_START
+ * Secret key generation should be unable to fail when the key isn't
+ * marked as "extra-strong" */
+ tor_assert_nonfatal_unreached();
+ tor_free(state);
+ return -1;
+ /* LCOV_EXCL_STOP */
+ }
+ curve25519_public_key_generate(&state->pubkey_X, &state->seckey_x);
+
+ op = onion_skin_out;
+ APPEND(op, router_id, DIGEST_LEN);
+ APPEND(op, router_key->public_key, CURVE25519_PUBKEY_LEN);
+ APPEND(op, state->pubkey_X.public_key, CURVE25519_PUBKEY_LEN);
+ tor_assert(op == onion_skin_out + NTOR_ONIONSKIN_LEN);
+
+ *handshake_state_out = state;
+
+ return 0;
+}
+
+#define SERVER_STR "Server"
+#define SERVER_STR_LEN 6
+
+#define SECRET_INPUT_LEN (CURVE25519_PUBKEY_LEN * 3 + \
+ CURVE25519_OUTPUT_LEN * 2 + \
+ DIGEST_LEN + PROTOID_LEN)
+#define AUTH_INPUT_LEN (DIGEST256_LEN + DIGEST_LEN + \
+ CURVE25519_PUBKEY_LEN*3 + \
+ PROTOID_LEN + SERVER_STR_LEN)
+
+/**
+ * Perform the server side of an ntor handshake. Given an
+ * NTOR_ONIONSKIN_LEN-byte message in <b>onion_skin</b>, our own identity
+ * fingerprint as <b>my_node_id</b>, and an associative array mapping public
+ * onion keys to curve25519_keypair_t in <b>private_keys</b>, attempt to
+ * perform the handshake. If the handshake indicates an unrecognized public
+ * key, fall back to <b>junk_keys</b>. Write an NTOR_REPLY_LEN-byte
+ * message to send back to the client into <b>handshake_reply_out</b>, and
+ * generate <b>key_out_len</b> bytes of key material in <b>key_out</b>. Return
+ * 0 on success, -1 on failure.
+ */
+int
+onion_skin_ntor_server_handshake(const uint8_t *onion_skin,
+ const di_digest256_map_t *private_keys,
+ const curve25519_keypair_t *junk_keys,
+ const uint8_t *my_node_id,
+ uint8_t *handshake_reply_out,
+ uint8_t *key_out,
+ size_t key_out_len)
+{
+ const tweakset_t *T = &proto1_tweaks;
+ /* Sensitive stack-allocated material. Kept in an anonymous struct to make
+ * it easy to wipe. */
+ struct {
+ uint8_t secret_input[SECRET_INPUT_LEN];
+ uint8_t auth_input[AUTH_INPUT_LEN];
+ curve25519_public_key_t pubkey_X;
+ curve25519_secret_key_t seckey_y;
+ curve25519_public_key_t pubkey_Y;
+ uint8_t verify[DIGEST256_LEN];
+ } s;
+ uint8_t *si = s.secret_input, *ai = s.auth_input;
+ const curve25519_keypair_t *keypair_bB;
+ int bad;
+
+ /* Decode the onion skin */
+ /* XXXX Does this possible early-return business threaten our security? */
+ if (tor_memneq(onion_skin, my_node_id, DIGEST_LEN))
+ return -1;
+ /* Note that on key-not-found, we go through with this operation anyway,
+ * using "junk_keys". This will result in failed authentication, but won't
+ * leak whether we recognized the key. */
+ keypair_bB = dimap_search(private_keys, onion_skin + DIGEST_LEN,
+ (void*)junk_keys);
+ if (!keypair_bB)
+ return -1;
+
+ memcpy(s.pubkey_X.public_key, onion_skin+DIGEST_LEN+DIGEST256_LEN,
+ CURVE25519_PUBKEY_LEN);
+
+ /* Make y, Y */
+ curve25519_secret_key_generate(&s.seckey_y, 0);
+ curve25519_public_key_generate(&s.pubkey_Y, &s.seckey_y);
+
+ /* NOTE: If we ever use a group other than curve25519, or a different
+ * representation for its points, we may need to perform different or
+ * additional checks on X here and on Y in the client handshake, or lose our
+ * security properties. What checks we need would depend on the properties
+ * of the group and its representation.
+ *
+ * In short: if you use anything other than curve25519, this aspect of the
+ * code will need to be reconsidered carefully. */
+
+ /* build secret_input */
+ curve25519_handshake(si, &s.seckey_y, &s.pubkey_X);
+ bad = safe_mem_is_zero(si, CURVE25519_OUTPUT_LEN);
+ si += CURVE25519_OUTPUT_LEN;
+ curve25519_handshake(si, &keypair_bB->seckey, &s.pubkey_X);
+ bad |= safe_mem_is_zero(si, CURVE25519_OUTPUT_LEN);
+ si += CURVE25519_OUTPUT_LEN;
+
+ APPEND(si, my_node_id, DIGEST_LEN);
+ APPEND(si, keypair_bB->pubkey.public_key, CURVE25519_PUBKEY_LEN);
+ APPEND(si, s.pubkey_X.public_key, CURVE25519_PUBKEY_LEN);
+ APPEND(si, s.pubkey_Y.public_key, CURVE25519_PUBKEY_LEN);
+ APPEND(si, PROTOID, PROTOID_LEN);
+ tor_assert(si == s.secret_input + sizeof(s.secret_input));
+
+ /* Compute hashes of secret_input */
+ h_tweak(s.verify, s.secret_input, sizeof(s.secret_input), T->t_verify);
+
+ /* Compute auth_input */
+ APPEND(ai, s.verify, DIGEST256_LEN);
+ APPEND(ai, my_node_id, DIGEST_LEN);
+ APPEND(ai, keypair_bB->pubkey.public_key, CURVE25519_PUBKEY_LEN);
+ APPEND(ai, s.pubkey_Y.public_key, CURVE25519_PUBKEY_LEN);
+ APPEND(ai, s.pubkey_X.public_key, CURVE25519_PUBKEY_LEN);
+ APPEND(ai, PROTOID, PROTOID_LEN);
+ APPEND(ai, SERVER_STR, SERVER_STR_LEN);
+ tor_assert(ai == s.auth_input + sizeof(s.auth_input));
+
+ /* Build the reply */
+ memcpy(handshake_reply_out, s.pubkey_Y.public_key, CURVE25519_PUBKEY_LEN);
+ h_tweak(handshake_reply_out+CURVE25519_PUBKEY_LEN,
+ s.auth_input, sizeof(s.auth_input),
+ T->t_mac);
+
+ /* Generate the key material */
+ crypto_expand_key_material_rfc5869_sha256(
+ s.secret_input, sizeof(s.secret_input),
+ (const uint8_t*)T->t_key, strlen(T->t_key),
+ (const uint8_t*)T->m_expand, strlen(T->m_expand),
+ key_out, key_out_len);
+
+ /* Wipe all of our local state */
+ memwipe(&s, 0, sizeof(s));
+
+ return bad ? -1 : 0;
+}
+
+/**
+ * Perform the final client side of the ntor handshake, using the state in
+ * <b>handshake_state</b> and the server's NTOR_REPLY_LEN-byte reply in
+ * <b>handshake_reply</b>. Generate <b>key_out_len</b> bytes of key material
+ * in <b>key_out</b>. Return 0 on success, -1 on failure.
+ */
+int
+onion_skin_ntor_client_handshake(
+ const ntor_handshake_state_t *handshake_state,
+ const uint8_t *handshake_reply,
+ uint8_t *key_out,
+ size_t key_out_len,
+ const char **msg_out)
+{
+ const tweakset_t *T = &proto1_tweaks;
+ /* Sensitive stack-allocated material. Kept in an anonymous struct to make
+ * it easy to wipe. */
+ struct {
+ curve25519_public_key_t pubkey_Y;
+ uint8_t secret_input[SECRET_INPUT_LEN];
+ uint8_t verify[DIGEST256_LEN];
+ uint8_t auth_input[AUTH_INPUT_LEN];
+ uint8_t auth[DIGEST256_LEN];
+ } s;
+ uint8_t *ai = s.auth_input, *si = s.secret_input;
+ const uint8_t *auth_candidate;
+ int bad;
+
+ /* Decode input */
+ memcpy(s.pubkey_Y.public_key, handshake_reply, CURVE25519_PUBKEY_LEN);
+ auth_candidate = handshake_reply + CURVE25519_PUBKEY_LEN;
+
+ /* See note in server_handshake above about checking points. The
+ * circumstances under which we'd need to check Y for membership are
+ * different than those under which we'd be checking X. */
+
+ /* Compute secret_input */
+ curve25519_handshake(si, &handshake_state->seckey_x, &s.pubkey_Y);
+ bad = safe_mem_is_zero(si, CURVE25519_OUTPUT_LEN);
+ si += CURVE25519_OUTPUT_LEN;
+ curve25519_handshake(si, &handshake_state->seckey_x,
+ &handshake_state->pubkey_B);
+ bad |= (safe_mem_is_zero(si, CURVE25519_OUTPUT_LEN) << 1);
+ si += CURVE25519_OUTPUT_LEN;
+ APPEND(si, handshake_state->router_id, DIGEST_LEN);
+ APPEND(si, handshake_state->pubkey_B.public_key, CURVE25519_PUBKEY_LEN);
+ APPEND(si, handshake_state->pubkey_X.public_key, CURVE25519_PUBKEY_LEN);
+ APPEND(si, s.pubkey_Y.public_key, CURVE25519_PUBKEY_LEN);
+ APPEND(si, PROTOID, PROTOID_LEN);
+ tor_assert(si == s.secret_input + sizeof(s.secret_input));
+
+ /* Compute verify from secret_input */
+ h_tweak(s.verify, s.secret_input, sizeof(s.secret_input), T->t_verify);
+
+ /* Compute auth_input */
+ APPEND(ai, s.verify, DIGEST256_LEN);
+ APPEND(ai, handshake_state->router_id, DIGEST_LEN);
+ APPEND(ai, handshake_state->pubkey_B.public_key, CURVE25519_PUBKEY_LEN);
+ APPEND(ai, s.pubkey_Y.public_key, CURVE25519_PUBKEY_LEN);
+ APPEND(ai, handshake_state->pubkey_X.public_key, CURVE25519_PUBKEY_LEN);
+ APPEND(ai, PROTOID, PROTOID_LEN);
+ APPEND(ai, SERVER_STR, SERVER_STR_LEN);
+ tor_assert(ai == s.auth_input + sizeof(s.auth_input));
+
+ /* Compute auth */
+ h_tweak(s.auth, s.auth_input, sizeof(s.auth_input), T->t_mac);
+
+ bad |= (tor_memneq(s.auth, auth_candidate, DIGEST256_LEN) << 2);
+
+ crypto_expand_key_material_rfc5869_sha256(
+ s.secret_input, sizeof(s.secret_input),
+ (const uint8_t*)T->t_key, strlen(T->t_key),
+ (const uint8_t*)T->m_expand, strlen(T->m_expand),
+ key_out, key_out_len);
+
+ memwipe(&s, 0, sizeof(s));
+
+ if (bad) {
+ if (bad & 4) {
+ if (msg_out)
+ *msg_out = NULL; /* Don't report this one; we probably just had the
+ * wrong onion key.*/
+ log_fn(LOG_INFO, LD_PROTOCOL,
+ "Invalid result from curve25519 handshake: %d", bad);
+ }
+ if (bad & 3) {
+ if (msg_out)
+ *msg_out = "Zero output from curve25519 handshake";
+ log_fn(LOG_WARN, LD_PROTOCOL,
+ "Invalid result from curve25519 handshake: %d", bad);
+ }
+ }
+
+ return bad ? -1 : 0;
+}
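+
+/* Illustrative round trip (a sketch; the hypothetical node_id, onion_key,
+ * key_map, junk and my_id stand in for the relay's identity digest, its
+ * ntor onion key, the server-side key map, the junk keypair, and the
+ * server's own identity digest):
+ *
+ *   ntor_handshake_state_t *st = NULL;
+ *   uint8_t skin[NTOR_ONIONSKIN_LEN], reply[NTOR_REPLY_LEN];
+ *   uint8_t ckeys[72], skeys[72];
+ *   const char *msg = NULL;
+ *
+ *   onion_skin_ntor_create(node_id, onion_key, &st, skin);
+ *   onion_skin_ntor_server_handshake(skin, key_map, junk, my_id,
+ *                                    reply, skeys, 72);
+ *   onion_skin_ntor_client_handshake(st, reply, ckeys, 72, &msg);
+ *   ntor_handshake_state_free(st);
+ *
+ * On success both sides derive the same 72 bytes of key material. */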
diff --git a/src/core/crypto/onion_ntor.h b/src/core/crypto/onion_ntor.h
new file mode 100644
index 0000000000..0ba4abe49e
--- /dev/null
+++ b/src/core/crypto/onion_ntor.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2012-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef TOR_ONION_NTOR_H
+#define TOR_ONION_NTOR_H
+
+#include "lib/cc/torint.h"
+
+struct di_digest256_map_t;
+struct curve25519_public_key_t;
+struct curve25519_keypair_t;
+
+/** State to be maintained by a client between sending an ntor onionskin
+ * and receiving a reply. */
+typedef struct ntor_handshake_state_t ntor_handshake_state_t;
+
+/** Length of an ntor onionskin, as sent from the client to server. */
+#define NTOR_ONIONSKIN_LEN 84
+/** Length of an ntor reply, as sent from server to client. */
+#define NTOR_REPLY_LEN 64
+
+void ntor_handshake_state_free_(ntor_handshake_state_t *state);
+#define ntor_handshake_state_free(state) \
+ FREE_AND_NULL(ntor_handshake_state_t, ntor_handshake_state_free_, (state))
+
+int onion_skin_ntor_create(const uint8_t *router_id,
+ const struct curve25519_public_key_t *router_key,
+ ntor_handshake_state_t **handshake_state_out,
+ uint8_t *onion_skin_out);
+
+int onion_skin_ntor_server_handshake(const uint8_t *onion_skin,
+ const struct di_digest256_map_t *private_keys,
+ const struct curve25519_keypair_t *junk_keypair,
+ const uint8_t *my_node_id,
+ uint8_t *handshake_reply_out,
+ uint8_t *key_out,
+ size_t key_out_len);
+
+int onion_skin_ntor_client_handshake(
+ const ntor_handshake_state_t *handshake_state,
+ const uint8_t *handshake_reply,
+ uint8_t *key_out,
+ size_t key_out_len,
+ const char **msg_out);
+
+#ifdef ONION_NTOR_PRIVATE
+#include "lib/crypt_ops/crypto_curve25519.h"
+
+/** Storage held by a client while waiting for an ntor reply from a server. */
+struct ntor_handshake_state_t {
+ /** Identity digest of the router we're talking to. */
+ uint8_t router_id[DIGEST_LEN];
+ /** Onion key of the router we're talking to. */
+ curve25519_public_key_t pubkey_B;
+
+ /**
+ * Short-lived keypair for use with this handshake.
+ * @{ */
+ curve25519_secret_key_t seckey_x;
+ curve25519_public_key_t pubkey_X;
+ /** @} */
+};
+#endif /* defined(ONION_NTOR_PRIVATE) */
+
+#endif /* !defined(TOR_ONION_NTOR_H) */
diff --git a/src/core/crypto/onion_tap.c b/src/core/crypto/onion_tap.c
new file mode 100644
index 0000000000..05bcce2e87
--- /dev/null
+++ b/src/core/crypto/onion_tap.c
@@ -0,0 +1,246 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file onion_tap.c
+ * \brief Functions to implement the original Tor circuit extension handshake
+ * (a.k.a. TAP).
+ *
+ * The "TAP" handshake is the first one that was widely used in Tor: It
+ * combines RSA1024-OAEP and AES128-CTR to perform a hybrid encryption over
+ * the first message DH1024 key exchange. (The RSA-encrypted part of the
+ * encryption is authenticated; the AES-encrypted part isn't. This was
+ * not a smart choice.)
+ *
+ * We didn't call it "TAP" ourselves -- Ian Goldberg named it in "On the
+ * Security of the Tor Authentication Protocol". (Spoiler: it's secure, but
+ * its security is kind of fragile and implementation dependent. Never modify
+ * this implementation without reading and understanding that paper at least.)
+ *
+ * We have deprecated TAP since the ntor handshake came into general use.
+ * It is still used for hidden service introduction-point (IP) and
+ * rendezvous-point (RP) connections, however.
+ *
+ * This handshake, like the other circuit-extension handshakes, is
+ * invoked from onion.c.
+ **/
+
+#include "or/or.h"
+#include "or/config.h"
+#include "lib/crypt_ops/crypto_dh.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "lib/crypt_ops/crypto_util.h"
+#include "or/onion_tap.h"
+#include "or/rephist.h"
+
+/*----------------------------------------------------------------------*/
+
+/** Given a router's 128-byte public key,
+ * stores the following in onion_skin_out:
+ * - [42 bytes] OAEP padding
+ * - [16 bytes] Symmetric key for encrypting blob past RSA
+ * - [70 bytes] g^x part 1 (inside the RSA)
+ * - [58 bytes] g^x part 2 (symmetrically encrypted)
+ *
+ * Stores the DH private key into handshake_state_out for later completion
+ * of the handshake.
+ *
+ * The meeting point/cookies and auth are zeroed out for now.
+ */
+int
+onion_skin_TAP_create(crypto_pk_t *dest_router_key,
+ crypto_dh_t **handshake_state_out,
+ char *onion_skin_out) /* TAP_ONIONSKIN_CHALLENGE_LEN bytes */
+{
+ char challenge[DH1024_KEY_LEN];
+ crypto_dh_t *dh = NULL;
+ int dhbytes, pkbytes;
+
+ tor_assert(dest_router_key);
+ tor_assert(handshake_state_out);
+ tor_assert(onion_skin_out);
+ *handshake_state_out = NULL;
+ memset(onion_skin_out, 0, TAP_ONIONSKIN_CHALLENGE_LEN);
+
+ if (!(dh = crypto_dh_new(DH_TYPE_CIRCUIT)))
+ goto err;
+
+ dhbytes = crypto_dh_get_bytes(dh);
+ pkbytes = (int) crypto_pk_keysize(dest_router_key);
+ tor_assert(dhbytes == 128);
+ tor_assert(pkbytes == 128);
+
+ if (crypto_dh_get_public(dh, challenge, dhbytes))
+ goto err;
+
+ /* set meeting point, meeting cookie, etc here. Leave zero for now. */
+ if (crypto_pk_obsolete_public_hybrid_encrypt(dest_router_key, onion_skin_out,
+ TAP_ONIONSKIN_CHALLENGE_LEN,
+ challenge, DH1024_KEY_LEN,
+ PK_PKCS1_OAEP_PADDING, 1)<0)
+ goto err;
+
+ memwipe(challenge, 0, sizeof(challenge));
+ *handshake_state_out = dh;
+
+ return 0;
+ err:
+ /* LCOV_EXCL_START
+ * We only get here if RSA encryption fails or DH keygen fails. Those
+ * shouldn't be possible. */
+ memwipe(challenge, 0, sizeof(challenge));
+ if (dh) crypto_dh_free(dh);
+ return -1;
+ /* LCOV_EXCL_STOP */
+}
+
+/** Given an encrypted DH public key as generated by onion_skin_TAP_create,
+ * and the private key for this onion router, generate the reply (128-byte
+ * DH plus the first 20 bytes of shared key material), and store the
+ * next key_out_len bytes of key material in key_out.
+ */
+int
+onion_skin_TAP_server_handshake(
+ /*TAP_ONIONSKIN_CHALLENGE_LEN*/
+ const char *onion_skin,
+ crypto_pk_t *private_key,
+ crypto_pk_t *prev_private_key,
+ /*TAP_ONIONSKIN_REPLY_LEN*/
+ char *handshake_reply_out,
+ char *key_out,
+ size_t key_out_len)
+{
+ char challenge[TAP_ONIONSKIN_CHALLENGE_LEN];
+ crypto_dh_t *dh = NULL;
+ ssize_t len;
+ char *key_material=NULL;
+ size_t key_material_len=0;
+ int i;
+ crypto_pk_t *k;
+
+ len = -1;
+ for (i=0;i<2;++i) {
+ k = i==0?private_key:prev_private_key;
+ if (!k)
+ break;
+ len = crypto_pk_obsolete_private_hybrid_decrypt(k, challenge,
+ TAP_ONIONSKIN_CHALLENGE_LEN,
+ onion_skin,
+ TAP_ONIONSKIN_CHALLENGE_LEN,
+ PK_PKCS1_OAEP_PADDING,0);
+ if (len>0)
+ break;
+ }
+ if (len<0) {
+ log_info(LD_PROTOCOL,
+ "Couldn't decrypt onionskin: client may be using old onion key");
+ goto err;
+ } else if (len != DH1024_KEY_LEN) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Unexpected onionskin length after decryption: %ld",
+ (long)len);
+ goto err;
+ }
+
+ dh = crypto_dh_new(DH_TYPE_CIRCUIT);
+ if (!dh) {
+ /* LCOV_EXCL_START
+ * Failure to allocate a DH key should be impossible.
+ */
+ log_warn(LD_BUG, "Couldn't allocate DH key");
+ goto err;
+ /* LCOV_EXCL_STOP */
+ }
+ if (crypto_dh_get_public(dh, handshake_reply_out, DH1024_KEY_LEN)) {
+ /* LCOV_EXCL_START
+ * This can only fail if the length of the key we just allocated is too
+ * big. That should be impossible. */
+ log_info(LD_GENERAL, "crypto_dh_get_public failed.");
+ goto err;
+ /* LCOV_EXCL_STOP */
+ }
+
+ key_material_len = DIGEST_LEN+key_out_len;
+ key_material = tor_malloc(key_material_len);
+ len = crypto_dh_compute_secret(LOG_PROTOCOL_WARN, dh, challenge,
+ DH1024_KEY_LEN, key_material,
+ key_material_len);
+ if (len < 0) {
+ log_info(LD_GENERAL, "crypto_dh_compute_secret failed.");
+ goto err;
+ }
+
+ /* send back H(K|0) as proof that we learned K. */
+ memcpy(handshake_reply_out+DH1024_KEY_LEN, key_material, DIGEST_LEN);
+
+ /* use the rest of the key material for our shared keys, digests, etc */
+ memcpy(key_out, key_material+DIGEST_LEN, key_out_len);
+
+ memwipe(challenge, 0, sizeof(challenge));
+ memwipe(key_material, 0, key_material_len);
+ tor_free(key_material);
+ crypto_dh_free(dh);
+ return 0;
+ err:
+ memwipe(challenge, 0, sizeof(challenge));
+ if (key_material) {
+ memwipe(key_material, 0, key_material_len);
+ tor_free(key_material);
+ }
+ if (dh) crypto_dh_free(dh);
+
+ return -1;
+}
+
+/** Finish the client side of the DH handshake.
+ * Given the 128-byte DH reply + 20-byte hash as generated by
+ * onion_skin_TAP_server_handshake and the handshake state generated by
+ * onion_skin_TAP_create, verify H(K) with the first 20 bytes of shared
+ * key material, then generate key_out_len more bytes of shared key
+ * material and store them in key_out.
+ *
+ * After the invocation, call crypto_dh_free on handshake_state.
+ */
+int
+onion_skin_TAP_client_handshake(crypto_dh_t *handshake_state,
+ const char *handshake_reply, /* TAP_ONIONSKIN_REPLY_LEN bytes */
+ char *key_out,
+ size_t key_out_len,
+ const char **msg_out)
+{
+ ssize_t len;
+ char *key_material=NULL;
+ size_t key_material_len;
+ tor_assert(crypto_dh_get_bytes(handshake_state) == DH1024_KEY_LEN);
+
+ key_material_len = DIGEST_LEN + key_out_len;
+ key_material = tor_malloc(key_material_len);
+ len = crypto_dh_compute_secret(LOG_PROTOCOL_WARN, handshake_state,
+ handshake_reply, DH1024_KEY_LEN, key_material,
+ key_material_len);
+ if (len < 0) {
+ if (msg_out)
+ *msg_out = "DH computation failed.";
+ goto err;
+ }
+
+ if (tor_memneq(key_material, handshake_reply+DH1024_KEY_LEN, DIGEST_LEN)) {
+ /* H(K) does *not* match. Something fishy. */
+ if (msg_out)
+ *msg_out = "Digest DOES NOT MATCH on onion handshake. Bug or attack.";
+ goto err;
+ }
+
+ /* use the rest of the key material for our shared keys, digests, etc */
+ memcpy(key_out, key_material+DIGEST_LEN, key_out_len);
+
+ memwipe(key_material, 0, key_material_len);
+ tor_free(key_material);
+ return 0;
+ err:
+ memwipe(key_material, 0, key_material_len);
+ tor_free(key_material);
+ return -1;
+}
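+
+/* For reference, the TAP reply verified above is laid out as
+ * TAP_ONIONSKIN_REPLY_LEN == DH1024_KEY_LEN + DIGEST_LEN bytes:
+ *
+ *   g^y  (128 bytes)  server's DH public value
+ *   H(K) (20 bytes)   first DIGEST_LEN bytes of the derived key
+ *                     material, proving the server learned K
+ */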
diff --git a/src/core/crypto/onion_tap.h b/src/core/crypto/onion_tap.h
new file mode 100644
index 0000000000..9a3df684d6
--- /dev/null
+++ b/src/core/crypto/onion_tap.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file onion_tap.h
+ * \brief Header file for onion_tap.c.
+ **/
+
+#ifndef TOR_ONION_TAP_H
+#define TOR_ONION_TAP_H
+
+#define TAP_ONIONSKIN_CHALLENGE_LEN (PKCS1_OAEP_PADDING_OVERHEAD+\
+ CIPHER_KEY_LEN+\
+ DH1024_KEY_LEN)
+#define TAP_ONIONSKIN_REPLY_LEN (DH1024_KEY_LEN+DIGEST_LEN)
+
+struct crypto_dh_t;
+struct crypto_pk_t;
+
+int onion_skin_TAP_create(struct crypto_pk_t *router_key,
+ struct crypto_dh_t **handshake_state_out,
+ char *onion_skin_out);
+
+int onion_skin_TAP_server_handshake(const char *onion_skin,
+ struct crypto_pk_t *private_key,
+ struct crypto_pk_t *prev_private_key,
+ char *handshake_reply_out,
+ char *key_out,
+ size_t key_out_len);
+
+int onion_skin_TAP_client_handshake(struct crypto_dh_t *handshake_state,
+ const char *handshake_reply,
+ char *key_out,
+ size_t key_out_len,
+ const char **msg_out);
+
+#endif /* !defined(TOR_ONION_TAP_H) */
diff --git a/src/core/crypto/relay_crypto.c b/src/core/crypto/relay_crypto.c
new file mode 100644
index 0000000000..1fcfae0b3a
--- /dev/null
+++ b/src/core/crypto/relay_crypto.c
@@ -0,0 +1,332 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#include "or/or.h"
+#include "or/circuitlist.h"
+#include "or/config.h"
+#include "lib/crypt_ops/crypto.h"
+#include "lib/crypt_ops/crypto_util.h"
+#include "or/hs_ntor.h" // for HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN
+#include "or/relay.h"
+#include "or/relay_crypto.h"
+
+#include "or/cell_st.h"
+#include "or/or_circuit_st.h"
+#include "or/origin_circuit_st.h"
+
+/** Update <b>digest</b> from the payload of <b>cell</b>, and store the
+ * resulting integrity part in <b>cell</b>'s relay header.
+ */
+static void
+relay_set_digest(crypto_digest_t *digest, cell_t *cell)
+{
+ char integrity[4];
+ relay_header_t rh;
+
+ crypto_digest_add_bytes(digest, (char*)cell->payload, CELL_PAYLOAD_SIZE);
+ crypto_digest_get_digest(digest, integrity, 4);
+// log_fn(LOG_DEBUG,"Putting digest of %u %u %u %u into relay cell.",
+// integrity[0], integrity[1], integrity[2], integrity[3]);
+ relay_header_unpack(&rh, cell->payload);
+ memcpy(rh.integrity, integrity, 4);
+ relay_header_pack(cell->payload, &rh);
+}
+
+/** Does the digest for this circuit indicate that this cell is for us?
+ *
+ * Update digest from the payload of cell (with the integrity part set
+ * to 0). If the integrity part is valid, return 1, else restore digest
+ * and cell to their original state and return 0.
+ */
+static int
+relay_digest_matches(crypto_digest_t *digest, cell_t *cell)
+{
+ uint32_t received_integrity, calculated_integrity;
+ relay_header_t rh;
+ crypto_digest_checkpoint_t backup_digest;
+
+ crypto_digest_checkpoint(&backup_digest, digest);
+
+ relay_header_unpack(&rh, cell->payload);
+ memcpy(&received_integrity, rh.integrity, 4);
+ memset(rh.integrity, 0, 4);
+ relay_header_pack(cell->payload, &rh);
+
+// log_fn(LOG_DEBUG,"Reading digest of %u %u %u %u from relay cell.",
+// received_integrity[0], received_integrity[1],
+// received_integrity[2], received_integrity[3]);
+
+ crypto_digest_add_bytes(digest, (char*) cell->payload, CELL_PAYLOAD_SIZE);
+ crypto_digest_get_digest(digest, (char*) &calculated_integrity, 4);
+
+ int rv = 1;
+
+ if (calculated_integrity != received_integrity) {
+// log_fn(LOG_INFO,"Recognized=0 but bad digest. Not recognizing.");
+// (%d vs %d).", received_integrity, calculated_integrity);
+ /* restore digest to its old form */
+ crypto_digest_restore(digest, &backup_digest);
+ /* restore the relay header */
+ memcpy(rh.integrity, &received_integrity, 4);
+ relay_header_pack(cell->payload, &rh);
+ rv = 0;
+ }
+
+ memwipe(&backup_digest, 0, sizeof(backup_digest));
+ return rv;
+}
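+
+/* Note the checkpoint/restore dance above: it lets us test a cell
+ * against the running digest without committing to it, so a cell that
+ * turns out not to be ours leaves both the digest and the cell exactly
+ * as they were. */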
+
+/** Apply <b>cipher</b> to CELL_PAYLOAD_SIZE bytes of <b>in</b>
+ * (in place).
+ *
+ * Note that we use the same operation for encrypting and for decrypting.
+ */
+static void
+relay_crypt_one_payload(crypto_cipher_t *cipher, uint8_t *in)
+{
+ crypto_cipher_crypt_inplace(cipher, (char*) in, CELL_PAYLOAD_SIZE);
+}
+
+/** Do the appropriate en/decryptions for <b>cell</b> arriving on
+ * <b>circ</b> in direction <b>cell_direction</b>.
+ *
+ * If cell_direction == CELL_DIRECTION_IN:
+ * - If we're at the origin (we're the OP), for hops 1..N,
+ * decrypt cell. If recognized, stop.
+ * - Else (we're not the OP), encrypt one hop. Cell is not recognized.
+ *
+ * If cell_direction == CELL_DIRECTION_OUT:
+ * - decrypt one hop. Check if recognized.
+ *
+ * If cell is recognized, set *recognized to 1, and set
+ * *layer_hint to the hop that recognized it.
+ *
+ * Return -1 to indicate that we should mark the circuit for close,
+ * else return 0.
+ */
+int
+relay_decrypt_cell(circuit_t *circ, cell_t *cell,
+ cell_direction_t cell_direction,
+ crypt_path_t **layer_hint, char *recognized)
+{
+ relay_header_t rh;
+
+ tor_assert(circ);
+ tor_assert(cell);
+ tor_assert(recognized);
+ tor_assert(cell_direction == CELL_DIRECTION_IN ||
+ cell_direction == CELL_DIRECTION_OUT);
+
+ if (cell_direction == CELL_DIRECTION_IN) {
+ if (CIRCUIT_IS_ORIGIN(circ)) { /* We're at the beginning of the circuit.
+ * We'll want to do layered decrypts. */
+ crypt_path_t *thishop, *cpath = TO_ORIGIN_CIRCUIT(circ)->cpath;
+ thishop = cpath;
+ if (thishop->state != CPATH_STATE_OPEN) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Relay cell before first created cell? Closing.");
+ return -1;
+ }
+ do { /* Remember: cpath is in forward order, that is, first hop first. */
+ tor_assert(thishop);
+
+ /* decrypt one layer */
+ relay_crypt_one_payload(thishop->crypto.b_crypto, cell->payload);
+
+ relay_header_unpack(&rh, cell->payload);
+ if (rh.recognized == 0) {
+ /* it's possibly recognized. have to check digest to be sure. */
+ if (relay_digest_matches(thishop->crypto.b_digest, cell)) {
+ *recognized = 1;
+ *layer_hint = thishop;
+ return 0;
+ }
+ }
+
+ thishop = thishop->next;
+ } while (thishop != cpath && thishop->state == CPATH_STATE_OPEN);
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Incoming cell at client not recognized. Closing.");
+ return -1;
+ } else {
+ relay_crypto_t *crypto = &TO_OR_CIRCUIT(circ)->crypto;
+ /* We're in the middle. Encrypt one layer. */
+ relay_crypt_one_payload(crypto->b_crypto, cell->payload);
+ }
+ } else /* cell_direction == CELL_DIRECTION_OUT */ {
+ /* We're in the middle. Decrypt one layer. */
+ relay_crypto_t *crypto = &TO_OR_CIRCUIT(circ)->crypto;
+
+ relay_crypt_one_payload(crypto->f_crypto, cell->payload);
+
+ relay_header_unpack(&rh, cell->payload);
+ if (rh.recognized == 0) {
+ /* it's possibly recognized. have to check digest to be sure. */
+ if (relay_digest_matches(crypto->f_digest, cell)) {
+ *recognized = 1;
+ return 0;
+ }
+ }
+ }
+ return 0;
+}
+
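+/* Illustrative sketch (hypothetical caller, for exposition only): how
+ * circuit code might invoke relay_decrypt_cell() and honor its return
+ * conventions. */
+#if 0
+static void
+example_handle_relay_cell(circuit_t *circ, cell_t *cell,
+                          cell_direction_t direction)
+{
+  char recognized = 0;
+  crypt_path_t *layer_hint = NULL;
+  if (relay_decrypt_cell(circ, cell, direction,
+                         &layer_hint, &recognized) < 0) {
+    circuit_mark_for_close(circ, END_CIRC_REASON_INTERNAL);
+    return;
+  }
+  if (recognized) {
+    /* The cell is addressed to us; at the origin, layer_hint tells us
+     * which hop sent it. */
+  } else {
+    /* Not for us: relay the re-crypted cell to the adjacent hop. */
+  }
+}
+#endif /* 0 */
+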
+/**
+ * Encrypt a cell <b>cell</b> that we are creating, to be sent outbound on
+ * <b>circ</b>, adding one layer of encryption for every hop out to (and
+ * including) the hop corresponding to <b>layer_hint</b>.
+ *
+ * The integrity and recognized fields of <b>cell</b>'s relay header must
+ * be set to zero before calling.
+ */
+void
+relay_encrypt_cell_outbound(cell_t *cell,
+ origin_circuit_t *circ,
+ crypt_path_t *layer_hint)
+{
+  crypt_path_t *thishop; /* cursor for the repeated crypts */
+ relay_set_digest(layer_hint->crypto.f_digest, cell);
+
+ thishop = layer_hint;
+ /* moving from farthest to nearest hop */
+ do {
+ tor_assert(thishop);
+ log_debug(LD_OR,"encrypting a layer of the relay cell.");
+ relay_crypt_one_payload(thishop->crypto.f_crypto, cell->payload);
+
+ thishop = thishop->prev;
+ } while (thishop != circ->cpath->prev);
+}
+
+/**
+ * Encrypt a cell <b>cell</b> that we are creating, to be sent inbound on
+ * <b>or_circ</b> toward the origin.
+ *
+ * The integrity and recognized fields of <b>cell</b>'s relay header must
+ * be set to zero before calling.
+ */
+void
+relay_encrypt_cell_inbound(cell_t *cell,
+ or_circuit_t *or_circ)
+{
+ relay_set_digest(or_circ->crypto.b_digest, cell);
+ /* encrypt one layer */
+ relay_crypt_one_payload(or_circ->crypto.b_crypto, cell->payload);
+}
+
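+/* Illustrative sketch (hypothetical caller, for exposition only): a client
+ * preparing a relay cell for its second hop.  As documented above, the
+ * recognized and integrity fields must already be zero. */
+#if 0
+static void
+example_encrypt_for_hop2(origin_circuit_t *circ, cell_t *cell)
+{
+  crypt_path_t *hop2 = circ->cpath->next; /* cpath is in forward order */
+  relay_encrypt_cell_outbound(cell, circ, hop2);
+  /* ...then hand the cell to the channel toward the first hop. */
+}
+#endif /* 0 */
+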
+/**
+ * Release all storage held inside <b>crypto</b>, but do not free
+ * <b>crypto</b> itself: it lives inside another object.
+ */
+void
+relay_crypto_clear(relay_crypto_t *crypto)
+{
+ if (BUG(!crypto))
+ return;
+ crypto_cipher_free(crypto->f_crypto);
+ crypto_cipher_free(crypto->b_crypto);
+ crypto_digest_free(crypto->f_digest);
+ crypto_digest_free(crypto->b_digest);
+}
+
+/** Initialize <b>crypto</b> from the key material in <b>key_data</b>.
+ *
+ * If <b>is_hs_v3</b> is set, this crypto state will be used for next-gen
+ * hidden service circuits, and <b>key_data</b> must be at least
+ * HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN bytes in length.
+ *
+ * If <b>is_hs_v3</b> is not set, <b>key_data</b> must contain
+ * CPATH_KEY_MATERIAL_LEN bytes, which are used as follows:
+ *   - 20 to initialize f_digest
+ *   - 20 to initialize b_digest
+ *   - 16 to key f_crypto
+ *   - 16 to key b_crypto
+ *
+ * (If <b>reverse</b> is true, then f_XX and b_XX are swapped.)
+ *
+ * Return 0 if init was successful, else -1 if it failed.
+ */
+int
+relay_crypto_init(relay_crypto_t *crypto,
+ const char *key_data, size_t key_data_len,
+ int reverse, int is_hs_v3)
+{
+ crypto_digest_t *tmp_digest;
+ crypto_cipher_t *tmp_crypto;
+ size_t digest_len = 0;
+ size_t cipher_key_len = 0;
+
+ tor_assert(crypto);
+ tor_assert(key_data);
+ tor_assert(!(crypto->f_crypto || crypto->b_crypto ||
+ crypto->f_digest || crypto->b_digest));
+
+ /* Basic key size validation */
+ if (is_hs_v3 && BUG(key_data_len != HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN)) {
+ goto err;
+ } else if (!is_hs_v3 && BUG(key_data_len != CPATH_KEY_MATERIAL_LEN)) {
+ goto err;
+ }
+
+  /* If we are using this crypto for next-gen onion services, use SHA3-256;
+     otherwise use good ol' SHA1. */
+ if (is_hs_v3) {
+ digest_len = DIGEST256_LEN;
+ cipher_key_len = CIPHER256_KEY_LEN;
+ crypto->f_digest = crypto_digest256_new(DIGEST_SHA3_256);
+ crypto->b_digest = crypto_digest256_new(DIGEST_SHA3_256);
+ } else {
+ digest_len = DIGEST_LEN;
+ cipher_key_len = CIPHER_KEY_LEN;
+ crypto->f_digest = crypto_digest_new();
+ crypto->b_digest = crypto_digest_new();
+ }
+
+ tor_assert(digest_len != 0);
+ tor_assert(cipher_key_len != 0);
+ const int cipher_key_bits = (int) cipher_key_len * 8;
+
+ crypto_digest_add_bytes(crypto->f_digest, key_data, digest_len);
+ crypto_digest_add_bytes(crypto->b_digest, key_data+digest_len, digest_len);
+
+ crypto->f_crypto = crypto_cipher_new_with_bits(key_data+(2*digest_len),
+ cipher_key_bits);
+ if (!crypto->f_crypto) {
+ log_warn(LD_BUG,"Forward cipher initialization failed.");
+ goto err;
+ }
+
+ crypto->b_crypto = crypto_cipher_new_with_bits(
+ key_data+(2*digest_len)+cipher_key_len,
+ cipher_key_bits);
+ if (!crypto->b_crypto) {
+ log_warn(LD_BUG,"Backward cipher initialization failed.");
+ goto err;
+ }
+
+ if (reverse) {
+ tmp_digest = crypto->f_digest;
+ crypto->f_digest = crypto->b_digest;
+ crypto->b_digest = tmp_digest;
+ tmp_crypto = crypto->f_crypto;
+ crypto->f_crypto = crypto->b_crypto;
+ crypto->b_crypto = tmp_crypto;
+ }
+
+ return 0;
+ err:
+ relay_crypto_clear(crypto);
+ return -1;
+}
+
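+/* Illustrative sketch (for exposition only; the "keys" buffer is
+ * hypothetical): initializing forward/backward crypto for an ordinary
+ * (non-hs-v3) hop from CPATH_KEY_MATERIAL_LEN = 20+20+16+16 bytes of KDF
+ * output, per the layout documented above. */
+#if 0
+static int
+example_init_hop_crypto(relay_crypto_t *crypto,
+                        const char keys[CPATH_KEY_MATERIAL_LEN])
+{
+  /* reverse=0: the first digest/cipher keys become f_digest/f_crypto. */
+  return relay_crypto_init(crypto, keys, CPATH_KEY_MATERIAL_LEN,
+                           0 /* reverse */, 0 /* is_hs_v3 */);
+}
+#endif /* 0 */
+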
+/** Assert that <b>crypto</b> is valid and set. */
+void
+relay_crypto_assert_ok(const relay_crypto_t *crypto)
+{
+ tor_assert(crypto->f_crypto);
+ tor_assert(crypto->b_crypto);
+ tor_assert(crypto->f_digest);
+ tor_assert(crypto->b_digest);
+}
diff --git a/src/core/crypto/relay_crypto.h b/src/core/crypto/relay_crypto.h
new file mode 100644
index 0000000000..67da93344f
--- /dev/null
+++ b/src/core/crypto/relay_crypto.h
@@ -0,0 +1,31 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file relay_crypto.h
+ * \brief Header file for relay_crypto.c.
+ **/
+
+#ifndef TOR_RELAY_CRYPTO_H
+#define TOR_RELAY_CRYPTO_H
+
+int relay_crypto_init(relay_crypto_t *crypto,
+ const char *key_data, size_t key_data_len,
+ int reverse, int is_hs_v3);
+
+int relay_decrypt_cell(circuit_t *circ, cell_t *cell,
+ cell_direction_t cell_direction,
+ crypt_path_t **layer_hint, char *recognized);
+void relay_encrypt_cell_outbound(cell_t *cell, origin_circuit_t *circ,
+                                 crypt_path_t *layer_hint);
+void relay_encrypt_cell_inbound(cell_t *cell, or_circuit_t *or_circ);
+
+void relay_crypto_clear(relay_crypto_t *crypto);
+
+void relay_crypto_assert_ok(const relay_crypto_t *crypto);
+
+#endif /* !defined(TOR_RELAY_CRYPTO_H) */
+
diff --git a/src/core/mainloop/connection.c b/src/core/mainloop/connection.c
new file mode 100644
index 0000000000..6fd8fbf771
--- /dev/null
+++ b/src/core/mainloop/connection.c
@@ -0,0 +1,5331 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file connection.c
+ * \brief General high-level functions to handle reading and writing
+ * on connections.
+ *
+ * Each connection (ideally) represents a TLS connection, a TCP socket, a unix
+ * socket, or a UDP socket on which reads and writes can occur. (But see
+ * connection_edge.c for cases where connections can also represent streams
+ * that do not have a corresponding socket.)
+ *
+ * The module implements the abstract type, connection_t. The subtypes are:
+ * <ul>
+ * <li>listener_connection_t, implemented here in connection.c
+ * <li>dir_connection_t, implemented in directory.c
+ * <li>or_connection_t, implemented in connection_or.c
+ * <li>edge_connection_t, implemented in connection_edge.c, along with
+ * its subtype(s):
+ * <ul><li>entry_connection_t, also implemented in connection_edge.c
+ * </ul>
+ * <li>control_connection_t, implemented in control.c
+ * </ul>
+ *
+ * The base type implemented in this module is responsible for basic
+ * rate limiting, flow control, and marshalling bytes onto and off of the
+ * network (either directly or via TLS).
+ *
+ * Connections are registered with the main loop with connection_add(). As
+ * they become able to read or write register the fact with the event main
+ * loop by calling connection_watch_events(), connection_start_reading(), or
+ * connection_start_writing(). When they no longer want to read or write,
+ * they call connection_stop_reading() or connection_stop_writing().
+ *
+ * To queue data to be written on a connection, call
+ * connection_buf_add(). When data arrives, the
+ * connection_process_inbuf() callback is invoked, which dispatches to a
+ * type-specific function (such as connection_edge_process_inbuf() for
+ * example). Connection types that need notice of when data has been written
+ * receive notification via connection_flushed_some() and
+ * connection_finished_flushing(). These functions all delegate to
+ * type-specific implementations.
+ *
+ * Additionally, beyond the core of connection_t, this module also implements:
+ * <ul>
+ * <li>Listeners, which wait for incoming sockets and launch connections
+ * <li>Outgoing SOCKS proxy support
+ * <li>Outgoing HTTP proxy support
+ * <li>An out-of-sockets handler for dealing with socket exhaustion
+ * </ul>
+ **/
+
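+/* Illustrative sketch (hypothetical flow, for exposition only): the
+ * lifecycle described above, in miniature:
+ *
+ *   connection_t *conn = connection_new(CONN_TYPE_DIR, AF_INET);
+ *   connection_add(conn);                 // register with the main loop
+ *   connection_start_reading(conn);       // wake us when bytes arrive
+ *   connection_buf_add(data, data_len, conn); // queue outgoing bytes
+ *   ...
+ *   connection_mark_for_close(conn);      // reaped by conn_close_if_marked()
+ */
+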
+#define CONNECTION_PRIVATE
+#include "or/or.h"
+#include "or/bridges.h"
+#include "lib/container/buffers.h"
+#include "lib/tls/buffers_tls.h"
+/*
+ * Define this so we get channel internal functions, since we're implementing
+ * part of a subclass (channel_tls_t).
+ */
+#define TOR_CHANNEL_INTERNAL_
+#include "lib/err/backtrace.h"
+#include "or/channel.h"
+#include "or/channeltls.h"
+#include "or/circuitbuild.h"
+#include "or/circuitlist.h"
+#include "or/circuituse.h"
+#include "or/config.h"
+#include "or/connection.h"
+#include "or/connection_edge.h"
+#include "or/connection_or.h"
+#include "or/control.h"
+#include "lib/crypt_ops/crypto_util.h"
+#include "or/directory.h"
+#include "or/dirserv.h"
+#include "or/dns.h"
+#include "or/dnsserv.h"
+#include "or/dos.h"
+#include "or/entrynodes.h"
+#include "or/ext_orport.h"
+#include "or/geoip.h"
+#include "or/main.h"
+#include "or/hibernate.h"
+#include "or/hs_common.h"
+#include "or/hs_ident.h"
+#include "or/nodelist.h"
+#include "or/proto_http.h"
+#include "or/proto_socks.h"
+#include "or/policies.h"
+#include "or/reasons.h"
+#include "or/relay.h"
+#include "or/rendclient.h"
+#include "or/rendcommon.h"
+#include "or/rephist.h"
+#include "or/router.h"
+#include "or/routerlist.h"
+#include "or/transports.h"
+#include "or/routerparse.h"
+#include "lib/sandbox/sandbox.h"
+#include "lib/net/buffers_net.h"
+#include "lib/tls/tortls.h"
+#include "lib/evloop/compat_libevent.h"
+#include "lib/compress/compress.h"
+
+#ifdef HAVE_PWD_H
+#include <pwd.h>
+#endif
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+#ifdef HAVE_SYS_UN_H
+#include <sys/socket.h>
+#include <sys/un.h>
+#endif
+
+#include "or/dir_connection_st.h"
+#include "or/control_connection_st.h"
+#include "or/entry_connection_st.h"
+#include "or/listener_connection_st.h"
+#include "or/or_connection_st.h"
+#include "or/port_cfg_st.h"
+#include "or/routerinfo_st.h"
+#include "or/socks_request_st.h"
+
+static connection_t *connection_listener_new(
+ const struct sockaddr *listensockaddr,
+ socklen_t listensocklen, int type,
+ const char *address,
+ const port_cfg_t *portcfg);
+static void connection_init(time_t now, connection_t *conn, int type,
+ int socket_family);
+static int connection_handle_listener_read(connection_t *conn, int new_type);
+static int connection_finished_flushing(connection_t *conn);
+static int connection_flushed_some(connection_t *conn);
+static int connection_finished_connecting(connection_t *conn);
+static int connection_reached_eof(connection_t *conn);
+static int connection_buf_read_from_socket(connection_t *conn,
+ ssize_t *max_to_read,
+ int *socket_error);
+static int connection_process_inbuf(connection_t *conn, int package_partial);
+static void client_check_address_changed(tor_socket_t sock);
+static void set_constrained_socket_buffers(tor_socket_t sock, int size);
+
+static const char *connection_proxy_state_to_string(int state);
+static int connection_read_https_proxy_response(connection_t *conn);
+static void connection_send_socks5_connect(connection_t *conn);
+static const char *proxy_type_to_string(int proxy_type);
+static int get_proxy_type(void);
+const tor_addr_t *conn_get_outbound_address(sa_family_t family,
+ const or_options_t *options, unsigned int conn_type);
+static void reenable_blocked_connection_init(const or_options_t *options);
+static void reenable_blocked_connection_schedule(void);
+
+/** The last IPv4 address that our network interface seemed to be bound
+ * to. We use this as one way to detect when our IP changes.
+ *
+ * XXXX+ We should really use the entire list of interfaces here.
+ **/
+static tor_addr_t *last_interface_ipv4 = NULL;
+/** As <b>last_interface_ipv4</b>, but for the last IPv6 address our
+ * network interface seemed to be bound to. */
+static tor_addr_t *last_interface_ipv6 = NULL;
+/** A list of tor_addr_t for addresses we've used in outgoing connections.
+ * Used to detect IP address changes. */
+static smartlist_t *outgoing_addrs = NULL;
+
+#define CASE_ANY_LISTENER_TYPE \
+ case CONN_TYPE_OR_LISTENER: \
+ case CONN_TYPE_EXT_OR_LISTENER: \
+ case CONN_TYPE_AP_LISTENER: \
+ case CONN_TYPE_DIR_LISTENER: \
+ case CONN_TYPE_CONTROL_LISTENER: \
+ case CONN_TYPE_AP_TRANS_LISTENER: \
+ case CONN_TYPE_AP_NATD_LISTENER: \
+ case CONN_TYPE_AP_DNS_LISTENER: \
+ case CONN_TYPE_AP_HTTP_CONNECT_LISTENER
+
+/**************************************************************/
+
+/** Convert a connection_t* to a listener_connection_t*; assert if the cast
+ * is invalid. */
+listener_connection_t *
+TO_LISTENER_CONN(connection_t *c)
+{
+ tor_assert(c->magic == LISTENER_CONNECTION_MAGIC);
+ return DOWNCAST(listener_connection_t, c);
+}
+
+/** Return the number of bytes stored on <b>conn</b>'s inbuf, or 0 if it
+ * has no inbuf. */
+size_t
+connection_get_inbuf_len(connection_t *conn)
+{
+  return conn->inbuf ? buf_datalen(conn->inbuf) : 0;
+}
+
+/** Return the number of bytes stored on <b>conn</b>'s outbuf, or 0 if it
+ * has no outbuf. */
+size_t
+connection_get_outbuf_len(connection_t *conn)
+{
+  return conn->outbuf ? buf_datalen(conn->outbuf) : 0;
+}
+
+/**
+ * Return the human-readable name for the connection type <b>type</b>
+ */
+const char *
+conn_type_to_string(int type)
+{
+ static char buf[64];
+ switch (type) {
+ case CONN_TYPE_OR_LISTENER: return "OR listener";
+ case CONN_TYPE_OR: return "OR";
+ case CONN_TYPE_EXIT: return "Exit";
+ case CONN_TYPE_AP_LISTENER: return "Socks listener";
+ case CONN_TYPE_AP_TRANS_LISTENER:
+ return "Transparent pf/netfilter listener";
+ case CONN_TYPE_AP_NATD_LISTENER: return "Transparent natd listener";
+ case CONN_TYPE_AP_DNS_LISTENER: return "DNS listener";
+ case CONN_TYPE_AP: return "Socks";
+ case CONN_TYPE_DIR_LISTENER: return "Directory listener";
+ case CONN_TYPE_DIR: return "Directory";
+ case CONN_TYPE_CONTROL_LISTENER: return "Control listener";
+ case CONN_TYPE_CONTROL: return "Control";
+ case CONN_TYPE_EXT_OR: return "Extended OR";
+ case CONN_TYPE_EXT_OR_LISTENER: return "Extended OR listener";
+ case CONN_TYPE_AP_HTTP_CONNECT_LISTENER: return "HTTP tunnel listener";
+ default:
+ log_warn(LD_BUG, "unknown connection type %d", type);
+ tor_snprintf(buf, sizeof(buf), "unknown [%d]", type);
+ return buf;
+ }
+}
+
+/**
+ * Return the human-readable name for the connection state <b>state</b>
+ * for the connection type <b>type</b>
+ */
+const char *
+conn_state_to_string(int type, int state)
+{
+ static char buf[96];
+ switch (type) {
+ CASE_ANY_LISTENER_TYPE:
+ if (state == LISTENER_STATE_READY)
+ return "ready";
+ break;
+ case CONN_TYPE_OR:
+ switch (state) {
+ case OR_CONN_STATE_CONNECTING: return "connect()ing";
+ case OR_CONN_STATE_PROXY_HANDSHAKING: return "handshaking (proxy)";
+ case OR_CONN_STATE_TLS_HANDSHAKING: return "handshaking (TLS)";
+ case OR_CONN_STATE_TLS_CLIENT_RENEGOTIATING:
+ return "renegotiating (TLS, v2 handshake)";
+ case OR_CONN_STATE_TLS_SERVER_RENEGOTIATING:
+ return "waiting for renegotiation or V3 handshake";
+ case OR_CONN_STATE_OR_HANDSHAKING_V2:
+ return "handshaking (Tor, v2 handshake)";
+ case OR_CONN_STATE_OR_HANDSHAKING_V3:
+ return "handshaking (Tor, v3 handshake)";
+ case OR_CONN_STATE_OPEN: return "open";
+ }
+ break;
+ case CONN_TYPE_EXT_OR:
+ switch (state) {
+ case EXT_OR_CONN_STATE_AUTH_WAIT_AUTH_TYPE:
+ return "waiting for authentication type";
+ case EXT_OR_CONN_STATE_AUTH_WAIT_CLIENT_NONCE:
+ return "waiting for client nonce";
+ case EXT_OR_CONN_STATE_AUTH_WAIT_CLIENT_HASH:
+ return "waiting for client hash";
+ case EXT_OR_CONN_STATE_OPEN: return "open";
+ case EXT_OR_CONN_STATE_FLUSHING: return "flushing final OKAY";
+ }
+ break;
+ case CONN_TYPE_EXIT:
+ switch (state) {
+ case EXIT_CONN_STATE_RESOLVING: return "waiting for dest info";
+ case EXIT_CONN_STATE_CONNECTING: return "connecting";
+ case EXIT_CONN_STATE_OPEN: return "open";
+ case EXIT_CONN_STATE_RESOLVEFAILED: return "resolve failed";
+ }
+ break;
+ case CONN_TYPE_AP:
+ switch (state) {
+ case AP_CONN_STATE_SOCKS_WAIT: return "waiting for socks info";
+ case AP_CONN_STATE_NATD_WAIT: return "waiting for natd dest info";
+ case AP_CONN_STATE_RENDDESC_WAIT: return "waiting for rendezvous desc";
+ case AP_CONN_STATE_CONTROLLER_WAIT: return "waiting for controller";
+ case AP_CONN_STATE_CIRCUIT_WAIT: return "waiting for circuit";
+ case AP_CONN_STATE_CONNECT_WAIT: return "waiting for connect response";
+ case AP_CONN_STATE_RESOLVE_WAIT: return "waiting for resolve response";
+ case AP_CONN_STATE_OPEN: return "open";
+ }
+ break;
+ case CONN_TYPE_DIR:
+ switch (state) {
+ case DIR_CONN_STATE_CONNECTING: return "connecting";
+ case DIR_CONN_STATE_CLIENT_SENDING: return "client sending";
+ case DIR_CONN_STATE_CLIENT_READING: return "client reading";
+ case DIR_CONN_STATE_CLIENT_FINISHED: return "client finished";
+ case DIR_CONN_STATE_SERVER_COMMAND_WAIT: return "waiting for command";
+ case DIR_CONN_STATE_SERVER_WRITING: return "writing";
+ }
+ break;
+ case CONN_TYPE_CONTROL:
+ switch (state) {
+ case CONTROL_CONN_STATE_OPEN: return "open (protocol v1)";
+ case CONTROL_CONN_STATE_NEEDAUTH:
+ return "waiting for authentication (protocol v1)";
+ }
+ break;
+ }
+
+ log_warn(LD_BUG, "unknown connection state %d (type %d)", state, type);
+ tor_snprintf(buf, sizeof(buf),
+ "unknown state [%d] on unknown [%s] connection",
+ state, conn_type_to_string(type));
+ return buf;
+}
+
+/** Allocate and return a new dir_connection_t, initialized as by
+ * connection_init(). */
+dir_connection_t *
+dir_connection_new(int socket_family)
+{
+ dir_connection_t *dir_conn = tor_malloc_zero(sizeof(dir_connection_t));
+ connection_init(time(NULL), TO_CONN(dir_conn), CONN_TYPE_DIR, socket_family);
+ return dir_conn;
+}
+
+/** Allocate and return a new or_connection_t, initialized as by
+ * connection_init().
+ *
+ * Initialize active_circuit_pqueue.
+ *
+ * Set active_circuit_pqueue_last_recalibrated to current cell_ewma tick.
+ */
+or_connection_t *
+or_connection_new(int type, int socket_family)
+{
+ or_connection_t *or_conn = tor_malloc_zero(sizeof(or_connection_t));
+ time_t now = time(NULL);
+ tor_assert(type == CONN_TYPE_OR || type == CONN_TYPE_EXT_OR);
+ connection_init(now, TO_CONN(or_conn), type, socket_family);
+
+ connection_or_set_canonical(or_conn, 0);
+
+ if (type == CONN_TYPE_EXT_OR)
+ connection_or_set_ext_or_identifier(or_conn);
+
+ return or_conn;
+}
+
+/** Allocate and return a new entry_connection_t, initialized as by
+ * connection_init().
+ *
+ * Allocate space to store the socks_request.
+ */
+entry_connection_t *
+entry_connection_new(int type, int socket_family)
+{
+ entry_connection_t *entry_conn = tor_malloc_zero(sizeof(entry_connection_t));
+ tor_assert(type == CONN_TYPE_AP);
+ connection_init(time(NULL), ENTRY_TO_CONN(entry_conn), type, socket_family);
+ entry_conn->socks_request = socks_request_new();
+ /* If this is coming from a listener, we'll set it up based on the listener
+ * in a little while. Otherwise, we're doing this as a linked connection
+ * of some kind, and we should set it up here based on the socket family */
+ if (socket_family == AF_INET)
+ entry_conn->entry_cfg.ipv4_traffic = 1;
+ else if (socket_family == AF_INET6)
+ entry_conn->entry_cfg.ipv6_traffic = 1;
+ return entry_conn;
+}
+
+/** Allocate and return a new edge_connection_t, initialized as by
+ * connection_init(). */
+edge_connection_t *
+edge_connection_new(int type, int socket_family)
+{
+ edge_connection_t *edge_conn = tor_malloc_zero(sizeof(edge_connection_t));
+ tor_assert(type == CONN_TYPE_EXIT);
+ connection_init(time(NULL), TO_CONN(edge_conn), type, socket_family);
+ return edge_conn;
+}
+
+/** Allocate and return a new control_connection_t, initialized as by
+ * connection_init(). */
+control_connection_t *
+control_connection_new(int socket_family)
+{
+ control_connection_t *control_conn =
+ tor_malloc_zero(sizeof(control_connection_t));
+ connection_init(time(NULL),
+ TO_CONN(control_conn), CONN_TYPE_CONTROL, socket_family);
+ return control_conn;
+}
+
+/** Allocate and return a new listener_connection_t, initialized as by
+ * connection_init(). */
+listener_connection_t *
+listener_connection_new(int type, int socket_family)
+{
+ listener_connection_t *listener_conn =
+ tor_malloc_zero(sizeof(listener_connection_t));
+ connection_init(time(NULL), TO_CONN(listener_conn), type, socket_family);
+ return listener_conn;
+}
+
+/** Allocate, initialize, and return a new connection_t subtype of <b>type</b>
+ * to make or receive connections of address family <b>socket_family</b>. The
+ * type should be one of the CONN_TYPE_* constants. */
+connection_t *
+connection_new(int type, int socket_family)
+{
+ switch (type) {
+ case CONN_TYPE_OR:
+ case CONN_TYPE_EXT_OR:
+ return TO_CONN(or_connection_new(type, socket_family));
+
+ case CONN_TYPE_EXIT:
+ return TO_CONN(edge_connection_new(type, socket_family));
+
+ case CONN_TYPE_AP:
+ return ENTRY_TO_CONN(entry_connection_new(type, socket_family));
+
+ case CONN_TYPE_DIR:
+ return TO_CONN(dir_connection_new(socket_family));
+
+ case CONN_TYPE_CONTROL:
+ return TO_CONN(control_connection_new(socket_family));
+
+ CASE_ANY_LISTENER_TYPE:
+ return TO_CONN(listener_connection_new(type, socket_family));
+
+ default: {
+ connection_t *conn = tor_malloc_zero(sizeof(connection_t));
+ connection_init(time(NULL), conn, type, socket_family);
+ return conn;
+ }
+ }
+}
+
+/** Initialize <b>conn</b>. (You must call connection_add() to link it into
+ * the main array.)
+ *
+ * Set conn-\>magic to the correct value.
+ *
+ * Set conn-\>type to <b>type</b>. Set conn-\>s and conn-\>conn_array_index to
+ * -1 to signify they are not yet assigned.
+ *
+ * Initialize conn's timestamps to now.
+ */
+static void
+connection_init(time_t now, connection_t *conn, int type, int socket_family)
+{
+ static uint64_t n_connections_allocated = 1;
+
+ switch (type) {
+ case CONN_TYPE_OR:
+ case CONN_TYPE_EXT_OR:
+ conn->magic = OR_CONNECTION_MAGIC;
+ break;
+ case CONN_TYPE_EXIT:
+ conn->magic = EDGE_CONNECTION_MAGIC;
+ break;
+ case CONN_TYPE_AP:
+ conn->magic = ENTRY_CONNECTION_MAGIC;
+ break;
+ case CONN_TYPE_DIR:
+ conn->magic = DIR_CONNECTION_MAGIC;
+ break;
+ case CONN_TYPE_CONTROL:
+ conn->magic = CONTROL_CONNECTION_MAGIC;
+ break;
+ CASE_ANY_LISTENER_TYPE:
+ conn->magic = LISTENER_CONNECTION_MAGIC;
+ break;
+ default:
+ conn->magic = BASE_CONNECTION_MAGIC;
+ break;
+ }
+
+ conn->s = TOR_INVALID_SOCKET; /* give it a default of 'not used' */
+ conn->conn_array_index = -1; /* also default to 'not used' */
+ conn->global_identifier = n_connections_allocated++;
+
+ conn->type = type;
+ conn->socket_family = socket_family;
+ if (!connection_is_listener(conn)) {
+ /* listeners never use their buf */
+ conn->inbuf = buf_new();
+ conn->outbuf = buf_new();
+ }
+
+ conn->timestamp_created = now;
+ conn->timestamp_last_read_allowed = now;
+ conn->timestamp_last_write_allowed = now;
+}
+
+/** Create a link between <b>conn_a</b> and <b>conn_b</b>. */
+void
+connection_link_connections(connection_t *conn_a, connection_t *conn_b)
+{
+ tor_assert(! SOCKET_OK(conn_a->s));
+ tor_assert(! SOCKET_OK(conn_b->s));
+
+ conn_a->linked = 1;
+ conn_b->linked = 1;
+ conn_a->linked_conn = conn_b;
+ conn_b->linked_conn = conn_a;
+}
+
+/** Return true iff the provided connection listener type supports AF_UNIX
+ * sockets. */
+int
+conn_listener_type_supports_af_unix(int type)
+{
+ /* For now only control ports or SOCKS ports can be Unix domain sockets
+ * and listeners at the same time */
+ switch (type) {
+ case CONN_TYPE_CONTROL_LISTENER:
+ case CONN_TYPE_AP_LISTENER:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/** Deallocate memory used by <b>conn</b>: free its buffers if it has any,
+ * close its socket if it is still open, and free its type-specific state.
+ */
+STATIC void
+connection_free_minimal(connection_t *conn)
+{
+ void *mem;
+ size_t memlen;
+ if (!conn)
+ return;
+
+ switch (conn->type) {
+ case CONN_TYPE_OR:
+ case CONN_TYPE_EXT_OR:
+ tor_assert(conn->magic == OR_CONNECTION_MAGIC);
+ mem = TO_OR_CONN(conn);
+ memlen = sizeof(or_connection_t);
+ break;
+ case CONN_TYPE_AP:
+ tor_assert(conn->magic == ENTRY_CONNECTION_MAGIC);
+ mem = TO_ENTRY_CONN(conn);
+ memlen = sizeof(entry_connection_t);
+ break;
+ case CONN_TYPE_EXIT:
+ tor_assert(conn->magic == EDGE_CONNECTION_MAGIC);
+ mem = TO_EDGE_CONN(conn);
+ memlen = sizeof(edge_connection_t);
+ break;
+ case CONN_TYPE_DIR:
+ tor_assert(conn->magic == DIR_CONNECTION_MAGIC);
+ mem = TO_DIR_CONN(conn);
+ memlen = sizeof(dir_connection_t);
+ break;
+ case CONN_TYPE_CONTROL:
+ tor_assert(conn->magic == CONTROL_CONNECTION_MAGIC);
+ mem = TO_CONTROL_CONN(conn);
+ memlen = sizeof(control_connection_t);
+ break;
+ CASE_ANY_LISTENER_TYPE:
+ tor_assert(conn->magic == LISTENER_CONNECTION_MAGIC);
+ mem = TO_LISTENER_CONN(conn);
+ memlen = sizeof(listener_connection_t);
+ break;
+ default:
+ tor_assert(conn->magic == BASE_CONNECTION_MAGIC);
+ mem = conn;
+ memlen = sizeof(connection_t);
+ break;
+ }
+
+ if (conn->linked) {
+ log_info(LD_GENERAL, "Freeing linked %s connection [%s] with %d "
+ "bytes on inbuf, %d on outbuf.",
+ conn_type_to_string(conn->type),
+ conn_state_to_string(conn->type, conn->state),
+ (int)connection_get_inbuf_len(conn),
+ (int)connection_get_outbuf_len(conn));
+ }
+
+ if (!connection_is_listener(conn)) {
+ buf_free(conn->inbuf);
+ buf_free(conn->outbuf);
+ } else {
+ if (conn->socket_family == AF_UNIX) {
+ /* For now only control and SOCKS ports can be Unix domain sockets
+ * and listeners at the same time */
+ tor_assert(conn_listener_type_supports_af_unix(conn->type));
+
+ if (unlink(conn->address) < 0 && errno != ENOENT) {
+ log_warn(LD_NET, "Could not unlink %s: %s", conn->address,
+ strerror(errno));
+ }
+ }
+ }
+
+ tor_free(conn->address);
+
+ if (connection_speaks_cells(conn)) {
+ or_connection_t *or_conn = TO_OR_CONN(conn);
+ tor_tls_free(or_conn->tls);
+ or_conn->tls = NULL;
+ or_handshake_state_free(or_conn->handshake_state);
+ or_conn->handshake_state = NULL;
+ tor_free(or_conn->nickname);
+ if (or_conn->chan) {
+ /* Owww, this shouldn't happen, but... */
+ log_info(LD_CHANNEL,
+ "Freeing orconn at %p, saw channel %p with ID "
+ "%"PRIu64 " left un-NULLed",
+ or_conn, TLS_CHAN_TO_BASE(or_conn->chan),
+                 TLS_CHAN_TO_BASE(or_conn->chan)->global_identifier);
+ if (!CHANNEL_FINISHED(TLS_CHAN_TO_BASE(or_conn->chan))) {
+ channel_close_for_error(TLS_CHAN_TO_BASE(or_conn->chan));
+ }
+
+ or_conn->chan->conn = NULL;
+ or_conn->chan = NULL;
+ }
+ }
+ if (conn->type == CONN_TYPE_AP) {
+ entry_connection_t *entry_conn = TO_ENTRY_CONN(conn);
+ tor_free(entry_conn->chosen_exit_name);
+ tor_free(entry_conn->original_dest_address);
+ if (entry_conn->socks_request)
+ socks_request_free(entry_conn->socks_request);
+ if (entry_conn->pending_optimistic_data) {
+ buf_free(entry_conn->pending_optimistic_data);
+ }
+ if (entry_conn->sending_optimistic_data) {
+ buf_free(entry_conn->sending_optimistic_data);
+ }
+ }
+ if (CONN_IS_EDGE(conn)) {
+ rend_data_free(TO_EDGE_CONN(conn)->rend_data);
+ hs_ident_edge_conn_free(TO_EDGE_CONN(conn)->hs_ident);
+ }
+ if (conn->type == CONN_TYPE_CONTROL) {
+ control_connection_t *control_conn = TO_CONTROL_CONN(conn);
+ tor_free(control_conn->safecookie_client_hash);
+ tor_free(control_conn->incoming_cmd);
+ if (control_conn->ephemeral_onion_services) {
+ SMARTLIST_FOREACH(control_conn->ephemeral_onion_services, char *, cp, {
+ memwipe(cp, 0, strlen(cp));
+ tor_free(cp);
+ });
+ smartlist_free(control_conn->ephemeral_onion_services);
+ }
+ }
+
+ /* Probably already freed by connection_free. */
+ tor_event_free(conn->read_event);
+ tor_event_free(conn->write_event);
+ conn->read_event = conn->write_event = NULL;
+
+ if (conn->type == CONN_TYPE_DIR) {
+ dir_connection_t *dir_conn = TO_DIR_CONN(conn);
+ tor_free(dir_conn->requested_resource);
+
+ tor_compress_free(dir_conn->compress_state);
+ if (dir_conn->spool) {
+ SMARTLIST_FOREACH(dir_conn->spool, spooled_resource_t *, spooled,
+ spooled_resource_free(spooled));
+ smartlist_free(dir_conn->spool);
+ }
+
+ rend_data_free(dir_conn->rend_data);
+ hs_ident_dir_conn_free(dir_conn->hs_ident);
+ if (dir_conn->guard_state) {
+ /* Cancel before freeing, if it's still there. */
+ entry_guard_cancel(&dir_conn->guard_state);
+ }
+ circuit_guard_state_free(dir_conn->guard_state);
+ }
+
+ if (SOCKET_OK(conn->s)) {
+ log_debug(LD_NET,"closing fd %d.",(int)conn->s);
+ tor_close_socket(conn->s);
+ conn->s = TOR_INVALID_SOCKET;
+ }
+
+ if (conn->type == CONN_TYPE_OR &&
+ !tor_digest_is_zero(TO_OR_CONN(conn)->identity_digest)) {
+ log_warn(LD_BUG, "called on OR conn with non-zeroed identity_digest");
+ connection_or_clear_identity(TO_OR_CONN(conn));
+ }
+ if (conn->type == CONN_TYPE_OR || conn->type == CONN_TYPE_EXT_OR) {
+ connection_or_remove_from_ext_or_id_map(TO_OR_CONN(conn));
+ tor_free(TO_OR_CONN(conn)->ext_or_conn_id);
+ tor_free(TO_OR_CONN(conn)->ext_or_auth_correct_client_hash);
+ tor_free(TO_OR_CONN(conn)->ext_or_transport);
+ }
+
+ memwipe(mem, 0xCC, memlen); /* poison memory */
+ tor_free(mem);
+}
+
+/** Make sure <b>conn</b> isn't in any of the global conn lists; then free it.
+ */
+MOCK_IMPL(void,
+connection_free_,(connection_t *conn))
+{
+ if (!conn)
+ return;
+ tor_assert(!connection_is_on_closeable_list(conn));
+ tor_assert(!connection_in_array(conn));
+ if (BUG(conn->linked_conn)) {
+ conn->linked_conn->linked_conn = NULL;
+ if (! conn->linked_conn->marked_for_close &&
+ conn->linked_conn->reading_from_linked_conn)
+ connection_start_reading(conn->linked_conn);
+ conn->linked_conn = NULL;
+ }
+ if (connection_speaks_cells(conn)) {
+ if (!tor_digest_is_zero(TO_OR_CONN(conn)->identity_digest)) {
+ connection_or_clear_identity(TO_OR_CONN(conn));
+ }
+ }
+ if (conn->type == CONN_TYPE_CONTROL) {
+ connection_control_closed(TO_CONTROL_CONN(conn));
+ }
+#if 1
+ /* DEBUGGING */
+ if (conn->type == CONN_TYPE_AP) {
+ connection_ap_warn_and_unmark_if_pending_circ(TO_ENTRY_CONN(conn),
+ "connection_free");
+ }
+#endif /* 1 */
+
+  /* Notify the circuit-creation DoS mitigation subsystem that an OR client
+   * connection has been closed, but only if we were tracking it. */
+ if (conn->type == CONN_TYPE_OR) {
+ dos_close_client_conn(TO_OR_CONN(conn));
+ }
+
+ connection_unregister_events(conn);
+ connection_free_minimal(conn);
+}
+
+/**
+ * Called when we're about to finally unlink and free a connection:
+ * perform necessary accounting and cleanup
+ * - Directory conns that failed to fetch a rendezvous descriptor
+ * need to inform pending rendezvous streams.
+ * - OR conns need to call rep_hist_note_*() to record status.
+ * - AP conns need to send a socks reject if necessary.
+ * - Exit conns need to call connection_dns_remove() if necessary.
+ * - AP and Exit conns need to send an end cell if they can.
+ * - DNS conns need to fail any resolves that are pending on them.
+ * - OR and edge connections need to be unlinked from circuits.
+ */
+void
+connection_about_to_close_connection(connection_t *conn)
+{
+ tor_assert(conn->marked_for_close);
+
+ switch (conn->type) {
+ case CONN_TYPE_DIR:
+ connection_dir_about_to_close(TO_DIR_CONN(conn));
+ break;
+ case CONN_TYPE_OR:
+ case CONN_TYPE_EXT_OR:
+ connection_or_about_to_close(TO_OR_CONN(conn));
+ break;
+ case CONN_TYPE_AP:
+ connection_ap_about_to_close(TO_ENTRY_CONN(conn));
+ break;
+ case CONN_TYPE_EXIT:
+ connection_exit_about_to_close(TO_EDGE_CONN(conn));
+ break;
+ }
+}
+
+/** Return true iff connection_close_immediate() has been called on this
+ * connection. */
+#define CONN_IS_CLOSED(c) \
+  ((c)->linked ? ((c)->linked_conn_is_closed) : (! SOCKET_OK((c)->s)))
+
+/** Close the underlying socket for <b>conn</b>, so we don't try to
+ * flush it. Must be used in conjunction with (right before)
+ * connection_mark_for_close().
+ */
+void
+connection_close_immediate(connection_t *conn)
+{
+ assert_connection_ok(conn,0);
+ if (CONN_IS_CLOSED(conn)) {
+ log_err(LD_BUG,"Attempt to close already-closed connection.");
+ tor_fragile_assert();
+ return;
+ }
+ if (conn->outbuf_flushlen) {
+ log_info(LD_NET,"fd %d, type %s, state %s, %d bytes on outbuf.",
+ (int)conn->s, conn_type_to_string(conn->type),
+ conn_state_to_string(conn->type, conn->state),
+ (int)conn->outbuf_flushlen);
+ }
+
+ connection_unregister_events(conn);
+
+ /* Prevent the event from getting unblocked. */
+ conn->read_blocked_on_bw = 0;
+ conn->write_blocked_on_bw = 0;
+
+ if (SOCKET_OK(conn->s))
+ tor_close_socket(conn->s);
+ conn->s = TOR_INVALID_SOCKET;
+ if (conn->linked)
+ conn->linked_conn_is_closed = 1;
+ if (conn->outbuf)
+ buf_clear(conn->outbuf);
+ conn->outbuf_flushlen = 0;
+}
+
+/** Mark <b>conn</b> to be closed next time we loop through
+ * conn_close_if_marked() in main.c. */
+void
+connection_mark_for_close_(connection_t *conn, int line, const char *file)
+{
+ assert_connection_ok(conn,0);
+ tor_assert(line);
+ tor_assert(line < 1<<16); /* marked_for_close can only fit a uint16_t. */
+ tor_assert(file);
+
+ if (conn->type == CONN_TYPE_OR) {
+ /*
+ * An or_connection should have been closed through one of the channel-
+ * aware functions in connection_or.c. We'll assume this is an error
+ * close and do that, and log a bug warning.
+ */
+ log_warn(LD_CHANNEL | LD_BUG,
+ "Something tried to close an or_connection_t without going "
+ "through channels at %s:%d",
+ file, line);
+ connection_or_close_for_error(TO_OR_CONN(conn), 0);
+ } else {
+ /* Pass it down to the real function */
+ connection_mark_for_close_internal_(conn, line, file);
+ }
+}
+
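+/* Illustrative sketch (hypothetical helper, for exposition only): honoring
+ * the rule above that OR connections must be closed through the channel
+ * layer, while other connections are simply marked. */
+#if 0
+static void
+example_close_any_conn(connection_t *conn)
+{
+  if (conn->type == CONN_TYPE_OR)
+    connection_or_close_normally(TO_OR_CONN(conn), 0); /* channel-aware */
+  else
+    connection_mark_for_close(conn);
+}
+#endif /* 0 */
+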
+/** Mark <b>conn</b> to be closed next time we loop through
+ * conn_close_if_marked() in main.c; the _internal version bypasses the
+ * CONN_TYPE_OR checks. Call this only when you are sure that, if this is
+ * an or_connection_t, the controlling channel has been notified (e.g. with
+ * connection_or_notify_error()), or when you actually are the
+ * connection_or_close_for_error() or connection_or_close_normally()
+ * function. For all other cases, use connection_mark_and_flush() instead,
+ * which checks for or_connection_t properly. See below.
+ */
+MOCK_IMPL(void,
+connection_mark_for_close_internal_, (connection_t *conn,
+ int line, const char *file))
+{
+ assert_connection_ok(conn,0);
+ tor_assert(line);
+ tor_assert(line < 1<<16); /* marked_for_close can only fit a uint16_t. */
+ tor_assert(file);
+
+ if (conn->marked_for_close) {
+ log_warn(LD_BUG,"Duplicate call to connection_mark_for_close at %s:%d"
+ " (first at %s:%d)", file, line, conn->marked_for_close_file,
+ conn->marked_for_close);
+ tor_fragile_assert();
+ return;
+ }
+
+ if (conn->type == CONN_TYPE_OR) {
+ /*
+ * Bad news if this happens without telling the controlling channel; do
+ * this so we can find things that call this wrongly when the asserts hit.
+ */
+ log_debug(LD_CHANNEL,
+ "Calling connection_mark_for_close_internal_() on an OR conn "
+ "at %s:%d",
+ file, line);
+ }
+
+ conn->marked_for_close = line;
+ conn->marked_for_close_file = file;
+ add_connection_to_closeable_list(conn);
+
+ /* in case we're going to be held-open-til-flushed, reset
+ * the number of seconds since last successful write, so
+ * we get our whole 15 seconds */
+ conn->timestamp_last_write_allowed = time(NULL);
+}
+
+/** Find each connection that has hold_open_until_flushed set to
+ * 1 but hasn't written in the past 15 seconds, and set
+ * hold_open_until_flushed to 0. This means it will get cleaned
+ * up in the next loop through close_if_marked() in main.c.
+ */
+void
+connection_expire_held_open(void)
+{
+ time_t now;
+ smartlist_t *conns = get_connection_array();
+
+ now = time(NULL);
+
+ SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
+ /* If we've been holding the connection open, but we haven't written
+ * for 15 seconds...
+ */
+ if (conn->hold_open_until_flushed) {
+ tor_assert(conn->marked_for_close);
+ if (now - conn->timestamp_last_write_allowed >= 15) {
+ int severity;
+ if (conn->type == CONN_TYPE_EXIT ||
+ (conn->type == CONN_TYPE_DIR &&
+ conn->purpose == DIR_PURPOSE_SERVER))
+ severity = LOG_INFO;
+ else
+ severity = LOG_NOTICE;
+ log_fn(severity, LD_NET,
+ "Giving up on marked_for_close conn that's been flushing "
+ "for 15s (fd %d, type %s, state %s).",
+ (int)conn->s, conn_type_to_string(conn->type),
+ conn_state_to_string(conn->type, conn->state));
+ conn->hold_open_until_flushed = 0;
+ }
+ }
+ } SMARTLIST_FOREACH_END(conn);
+}
+
+#if defined(HAVE_SYS_UN_H) || defined(RUNNING_DOXYGEN)
+/** Create an AF_UNIX listenaddr struct.
+ * <b>listenaddress</b> provides the path to the Unix socket.
+ *
+ * Eventually <b>listenaddress</b> will also optionally contain user, group,
+ * and file permissions for the new socket, but it does not yet. XXX
+ * Also, since we do not create the socket here, that information would not
+ * help us here anyway.
+ *
+ * If not NULL <b>readable_address</b> will contain a copy of the path part of
+ * <b>listenaddress</b>.
+ *
+ * The listenaddr struct has to be freed by the caller.
+ */
+static struct sockaddr_un *
+create_unix_sockaddr(const char *listenaddress, char **readable_address,
+ socklen_t *len_out)
+{
+ struct sockaddr_un *sockaddr = NULL;
+
+ sockaddr = tor_malloc_zero(sizeof(struct sockaddr_un));
+ sockaddr->sun_family = AF_UNIX;
+ if (strlcpy(sockaddr->sun_path, listenaddress, sizeof(sockaddr->sun_path))
+ >= sizeof(sockaddr->sun_path)) {
+ log_warn(LD_CONFIG, "Unix socket path '%s' is too long to fit.",
+ escaped(listenaddress));
+ tor_free(sockaddr);
+ return NULL;
+ }
+
+ if (readable_address)
+ *readable_address = tor_strdup(listenaddress);
+
+ *len_out = sizeof(struct sockaddr_un);
+ return sockaddr;
+}
+#else /* !(defined(HAVE_SYS_UN_H) || defined(RUNNING_DOXYGEN)) */
+static struct sockaddr *
+create_unix_sockaddr(const char *listenaddress, char **readable_address,
+ socklen_t *len_out)
+{
+ (void)listenaddress;
+ (void)readable_address;
+ log_fn(LOG_ERR, LD_BUG,
+ "Unix domain sockets not supported, yet we tried to create one.");
+ *len_out = 0;
+ tor_fragile_assert();
+ return NULL;
+}
+#endif /* defined(HAVE_SYS_UN_H) || defined(RUNNING_DOXYGEN) */
+
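+/* Illustrative sketch (hypothetical path, for exposition only): building an
+ * AF_UNIX listen address for a control socket with create_unix_sockaddr().
+ * The path "/var/run/tor/control" is an assumption for the example. */
+#if 0
+static struct sockaddr_un *
+example_control_socket_sockaddr(socklen_t *len_out)
+{
+  char *readable = NULL;
+  struct sockaddr_un *sa =
+    create_unix_sockaddr("/var/run/tor/control", &readable, len_out);
+  tor_free(readable); /* we only wanted the sockaddr here */
+  return sa;
+}
+#endif /* 0 */
+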
+/** Warn that an accept or a connect has failed because we're running out of
+ * TCP sockets we can use on the current system. Rate-limit these warnings so
+ * that we don't spam the log. */
+static void
+warn_too_many_conns(void)
+{
+#define WARN_TOO_MANY_CONNS_INTERVAL (6*60*60)
+ static ratelim_t last_warned = RATELIM_INIT(WARN_TOO_MANY_CONNS_INTERVAL);
+ char *m;
+ if ((m = rate_limit_log(&last_warned, approx_time()))) {
+ int n_conns = get_n_open_sockets();
+ log_warn(LD_NET,"Failing because we have %d connections already. Please "
+ "read doc/TUNING for guidance.%s", n_conns, m);
+ tor_free(m);
+ control_event_general_status(LOG_WARN, "TOO_MANY_CONNECTIONS CURRENT=%d",
+ n_conns);
+ }
+}
+
+#ifdef HAVE_SYS_UN_H
+
+#define UNIX_SOCKET_PURPOSE_CONTROL_SOCKET 0
+#define UNIX_SOCKET_PURPOSE_SOCKS_SOCKET 1
+
+/** Return 1 if <b>purpose</b> is a Unix socket purpose that we know how to
+ * handle; otherwise return 0. */
+static int
+is_valid_unix_socket_purpose(int purpose)
+{
+ int valid = 0;
+
+ switch (purpose) {
+ case UNIX_SOCKET_PURPOSE_CONTROL_SOCKET:
+ case UNIX_SOCKET_PURPOSE_SOCKS_SOCKET:
+ valid = 1;
+ break;
+ }
+
+ return valid;
+}
+
+/** Return a string description of a unix socket purpose */
+static const char *
+unix_socket_purpose_to_string(int purpose)
+{
+ const char *s = "unknown-purpose socket";
+
+ switch (purpose) {
+ case UNIX_SOCKET_PURPOSE_CONTROL_SOCKET:
+ s = "control socket";
+ break;
+ case UNIX_SOCKET_PURPOSE_SOCKS_SOCKET:
+ s = "SOCKS socket";
+ break;
+ }
+
+ return s;
+}
+
+/** Check whether we should be willing to open an AF_UNIX socket in
+ * <b>path</b>. Return 0 if we should go ahead and -1 if we shouldn't. */
+static int
+check_location_for_unix_socket(const or_options_t *options, const char *path,
+ int purpose, const port_cfg_t *port)
+{
+ int r = -1;
+ char *p = NULL;
+
+ tor_assert(is_valid_unix_socket_purpose(purpose));
+
+ p = tor_strdup(path);
+ cpd_check_t flags = CPD_CHECK_MODE_ONLY;
+ if (get_parent_directory(p)<0 || p[0] != '/') {
+ log_warn(LD_GENERAL, "Bad unix socket address '%s'. Tor does not support "
+ "relative paths for unix sockets.", path);
+ goto done;
+ }
+
+ if (port->is_world_writable) {
+ /* World-writable sockets can go anywhere. */
+ r = 0;
+ goto done;
+ }
+
+ if (port->is_group_writable) {
+ flags |= CPD_GROUP_OK;
+ }
+
+ if (port->relax_dirmode_check) {
+ flags |= CPD_RELAX_DIRMODE_CHECK;
+ }
+
+ if (check_private_dir(p, flags, options->User) < 0) {
+ char *escpath, *escdir;
+ escpath = esc_for_log(path);
+ escdir = esc_for_log(p);
+ log_warn(LD_GENERAL, "Before Tor can create a %s in %s, the directory "
+ "%s needs to exist, and to be accessible only by the user%s "
+ "account that is running Tor. (On some Unix systems, anybody "
+ "who can list a socket can connect to it, so Tor is being "
+ "careful.)",
+ unix_socket_purpose_to_string(purpose), escpath, escdir,
+ port->is_group_writable ? " and group" : "");
+ tor_free(escpath);
+ tor_free(escdir);
+ goto done;
+ }
+
+ r = 0;
+ done:
+ tor_free(p);
+ return r;
+}
+#endif /* defined(HAVE_SYS_UN_H) */
+
+/** Tell the TCP stack that it shouldn't wait for a long time after
+ * <b>sock</b> has closed before reusing its port. Return 0 on success,
+ * -1 on failure. */
+static int
+make_socket_reuseable(tor_socket_t sock)
+{
+#ifdef _WIN32
+ (void) sock;
+ return 0;
+#else
+ int one=1;
+
+ /* REUSEADDR on normal places means you can rebind to the port
+ * right after somebody else has let it go. But REUSEADDR on win32
+ * means you can bind to the port _even when somebody else
+ * already has it bound_. So, don't do that on Win32. */
+ if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (void*) &one,
+ (socklen_t)sizeof(one)) == -1) {
+ return -1;
+ }
+ return 0;
+#endif /* defined(_WIN32) */
+}
+
+#ifdef _WIN32
+/** Tell the Windows TCP stack to prevent other applications from binding to
+ * (and stealing traffic from) tor's open ports. Return 0 on success, -1 on
+ * failure. */
+static int
+make_win32_socket_exclusive(tor_socket_t sock)
+{
+#ifdef SO_EXCLUSIVEADDRUSE
+ int one=1;
+
+ /* Any socket that sets REUSEADDR on win32 can bind to a port _even when
+ * somebody else already has it bound_, and _even if the original socket
+ * didn't set REUSEADDR_. Use EXCLUSIVEADDRUSE to prevent this port-stealing
+ * on win32. */
+ if (setsockopt(sock, SOL_SOCKET, SO_EXCLUSIVEADDRUSE, (void*) &one,
+ (socklen_t)sizeof(one))) {
+ return -1;
+ }
+ return 0;
+#else /* !(defined(SO_EXCLUSIVEADDRUSE)) */
+ (void) sock;
+ return 0;
+#endif /* defined(SO_EXCLUSIVEADDRUSE) */
+}
+#endif /* defined(_WIN32) */
+
+/** Max backlog to pass to listen(). We start at INT_MAX, and lower it to
+ * SOMAXCONN if the kernel rejects INT_MAX (see tor_listen() below). */
+static int listen_limit = INT_MAX;
+
+/** Listen on <b>fd</b> with an appropriate backlog. Return as for listen(). */
+static int
+tor_listen(tor_socket_t fd)
+{
+ int r;
+
+ if ((r = listen(fd, listen_limit)) < 0) {
+ if (listen_limit == SOMAXCONN)
+ return r;
+ if ((r = listen(fd, SOMAXCONN)) == 0) {
+ listen_limit = SOMAXCONN;
+ log_warn(LD_NET, "Setting listen backlog to INT_MAX connections "
+ "didn't work, but SOMAXCONN did. Lowering backlog limit.");
+ }
+ }
+ return r;
+}
+
+/** Bind a new non-blocking socket listening to the socket described
+ * by <b>listensockaddr</b>.
+ *
+ * <b>address</b> is only used for logging purposes and to set the address
+ * field of the new connection.
+ */
+static connection_t *
+connection_listener_new(const struct sockaddr *listensockaddr,
+ socklen_t socklen,
+ int type, const char *address,
+ const port_cfg_t *port_cfg)
+{
+ listener_connection_t *lis_conn;
+ connection_t *conn = NULL;
+ tor_socket_t s = TOR_INVALID_SOCKET; /* the socket we're going to make */
+ or_options_t const *options = get_options();
+ (void) options; /* Windows doesn't use this. */
+#if defined(HAVE_PWD_H) && defined(HAVE_SYS_UN_H)
+ const struct passwd *pw = NULL;
+#endif
+ uint16_t usePort = 0, gotPort = 0;
+ int start_reading = 0;
+ static int global_next_session_group = SESSION_GROUP_FIRST_AUTO;
+ tor_addr_t addr;
+ int exhaustion = 0;
+
+ if (listensockaddr->sa_family == AF_INET ||
+ listensockaddr->sa_family == AF_INET6) {
+ int is_stream = (type != CONN_TYPE_AP_DNS_LISTENER);
+ if (is_stream)
+ start_reading = 1;
+
+ tor_addr_from_sockaddr(&addr, listensockaddr, &usePort);
+ log_notice(LD_NET, "Opening %s on %s",
+ conn_type_to_string(type), fmt_addrport(&addr, usePort));
+
+ s = tor_open_socket_nonblocking(tor_addr_family(&addr),
+ is_stream ? SOCK_STREAM : SOCK_DGRAM,
+ is_stream ? IPPROTO_TCP: IPPROTO_UDP);
+ if (!SOCKET_OK(s)) {
+ int e = tor_socket_errno(s);
+ if (ERRNO_IS_RESOURCE_LIMIT(e)) {
+ warn_too_many_conns();
+ /*
+ * We'll call the OOS handler at the error exit, so set the
+ * exhaustion flag for it.
+ */
+ exhaustion = 1;
+ } else {
+ log_warn(LD_NET, "Socket creation failed: %s",
+ tor_socket_strerror(e));
+ }
+ goto err;
+ }
+
+ if (make_socket_reuseable(s) < 0) {
+ log_warn(LD_NET, "Error setting SO_REUSEADDR flag on %s: %s",
+ conn_type_to_string(type),
+ tor_socket_strerror(errno));
+ }
+
+#ifdef _WIN32
+ if (make_win32_socket_exclusive(s) < 0) {
+ log_warn(LD_NET, "Error setting SO_EXCLUSIVEADDRUSE flag on %s: %s",
+ conn_type_to_string(type),
+ tor_socket_strerror(errno));
+ }
+#endif /* defined(_WIN32) */
+
+#if defined(USE_TRANSPARENT) && defined(IP_TRANSPARENT)
+ if (options->TransProxyType_parsed == TPT_TPROXY &&
+ type == CONN_TYPE_AP_TRANS_LISTENER) {
+ int one = 1;
+ if (setsockopt(s, SOL_IP, IP_TRANSPARENT, (void*)&one,
+ (socklen_t)sizeof(one)) < 0) {
+ const char *extra = "";
+ int e = tor_socket_errno(s);
+ if (e == EPERM)
+ extra = "TransTPROXY requires root privileges or similar"
+ " capabilities.";
+ log_warn(LD_NET, "Error setting IP_TRANSPARENT flag: %s.%s",
+ tor_socket_strerror(e), extra);
+ }
+ }
+#endif /* defined(USE_TRANSPARENT) && defined(IP_TRANSPARENT) */
+
+#ifdef IPV6_V6ONLY
+ if (listensockaddr->sa_family == AF_INET6) {
+ int one = 1;
+ /* We need to set IPV6_V6ONLY so that this socket can't get used for
+ * IPv4 connections. */
+ if (setsockopt(s,IPPROTO_IPV6, IPV6_V6ONLY,
+ (void*)&one, (socklen_t)sizeof(one)) < 0) {
+ int e = tor_socket_errno(s);
+ log_warn(LD_NET, "Error setting IPV6_V6ONLY flag: %s",
+ tor_socket_strerror(e));
+ /* Keep going; probably not harmful. */
+ }
+ }
+#endif /* defined(IPV6_V6ONLY) */
+
+ if (bind(s,listensockaddr,socklen) < 0) {
+ const char *helpfulhint = "";
+ int e = tor_socket_errno(s);
+ if (ERRNO_IS_EADDRINUSE(e))
+ helpfulhint = ". Is Tor already running?";
+ log_warn(LD_NET, "Could not bind to %s:%u: %s%s", address, usePort,
+ tor_socket_strerror(e), helpfulhint);
+ goto err;
+ }
+
+ if (is_stream) {
+ if (tor_listen(s) < 0) {
+ log_warn(LD_NET, "Could not listen on %s:%u: %s", address, usePort,
+ tor_socket_strerror(tor_socket_errno(s)));
+ goto err;
+ }
+ }
+
+ if (usePort != 0) {
+ gotPort = usePort;
+ } else {
+ tor_addr_t addr2;
+ struct sockaddr_storage ss;
+ socklen_t ss_len=sizeof(ss);
+ if (getsockname(s, (struct sockaddr*)&ss, &ss_len)<0) {
+ log_warn(LD_NET, "getsockname() couldn't learn address for %s: %s",
+ conn_type_to_string(type),
+ tor_socket_strerror(tor_socket_errno(s)));
+ gotPort = 0;
+ }
+ tor_addr_from_sockaddr(&addr2, (struct sockaddr*)&ss, &gotPort);
+ }
+#ifdef HAVE_SYS_UN_H
+ /*
+ * AF_UNIX generic setup stuff
+ */
+ } else if (listensockaddr->sa_family == AF_UNIX) {
+ /* We want to start reading for both AF_UNIX cases */
+ start_reading = 1;
+
+ tor_assert(conn_listener_type_supports_af_unix(type));
+
+ if (check_location_for_unix_socket(options, address,
+ (type == CONN_TYPE_CONTROL_LISTENER) ?
+ UNIX_SOCKET_PURPOSE_CONTROL_SOCKET :
+ UNIX_SOCKET_PURPOSE_SOCKS_SOCKET, port_cfg) < 0) {
+ goto err;
+ }
+
+ log_notice(LD_NET, "Opening %s on %s",
+ conn_type_to_string(type), address);
+
+ tor_addr_make_unspec(&addr);
+
+ if (unlink(address) < 0 && errno != ENOENT) {
+ log_warn(LD_NET, "Could not unlink %s: %s", address,
+ strerror(errno));
+ goto err;
+ }
+
+ s = tor_open_socket_nonblocking(AF_UNIX, SOCK_STREAM, 0);
+ if (! SOCKET_OK(s)) {
+ int e = tor_socket_errno(s);
+ if (ERRNO_IS_RESOURCE_LIMIT(e)) {
+ warn_too_many_conns();
+ /*
+ * We'll call the OOS handler at the error exit, so set the
+ * exhaustion flag for it.
+ */
+ exhaustion = 1;
+ } else {
+ log_warn(LD_NET,"Socket creation failed: %s.", strerror(e));
+ }
+ goto err;
+ }
+
+ if (bind(s, listensockaddr,
+ (socklen_t)sizeof(struct sockaddr_un)) == -1) {
+ log_warn(LD_NET,"Bind to %s failed: %s.", address,
+ tor_socket_strerror(tor_socket_errno(s)));
+ goto err;
+ }
+
+#ifdef HAVE_PWD_H
+ if (options->User) {
+ pw = tor_getpwnam(options->User);
+ struct stat st;
+ if (pw == NULL) {
+ log_warn(LD_NET,"Unable to chown() %s socket: user %s not found.",
+ address, options->User);
+ goto err;
+ } else if (fstat(s, &st) == 0 &&
+ st.st_uid == pw->pw_uid && st.st_gid == pw->pw_gid) {
+ /* No change needed */
+ } else if (chown(sandbox_intern_string(address),
+ pw->pw_uid, pw->pw_gid) < 0) {
+ log_warn(LD_NET,"Unable to chown() %s socket: %s.",
+ address, strerror(errno));
+ goto err;
+ }
+ }
+#endif /* defined(HAVE_PWD_H) */
+
+ {
+ unsigned mode;
+ const char *status;
+ struct stat st;
+ if (port_cfg->is_world_writable) {
+ mode = 0666;
+ status = "world-writable";
+ } else if (port_cfg->is_group_writable) {
+ mode = 0660;
+ status = "group-writable";
+ } else {
+ mode = 0600;
+ status = "private";
+ }
+ /* We need to use chmod; fchmod doesn't work on sockets on all
+ * platforms. */
+ if (fstat(s, &st) == 0 && (st.st_mode & 0777) == mode) {
+ /* no change needed */
+ } else if (chmod(sandbox_intern_string(address), mode) < 0) {
+ log_warn(LD_FS,"Unable to make %s %s.", address, status);
+ goto err;
+ }
+ }
+
+ if (listen(s, SOMAXCONN) < 0) {
+ log_warn(LD_NET, "Could not listen on %s: %s", address,
+ tor_socket_strerror(tor_socket_errno(s)));
+ goto err;
+ }
+#endif /* defined(HAVE_SYS_UN_H) */
+ } else {
+ log_err(LD_BUG, "Got unexpected address family %d.",
+ listensockaddr->sa_family);
+ tor_assert(0);
+ }
+
+ lis_conn = listener_connection_new(type, listensockaddr->sa_family);
+ conn = TO_CONN(lis_conn);
+ conn->socket_family = listensockaddr->sa_family;
+ conn->s = s;
+ s = TOR_INVALID_SOCKET; /* Prevent double-close */
+ conn->address = tor_strdup(address);
+ conn->port = gotPort;
+ tor_addr_copy(&conn->addr, &addr);
+
+ memcpy(&lis_conn->entry_cfg, &port_cfg->entry_cfg, sizeof(entry_port_cfg_t));
+
+ if (port_cfg->entry_cfg.isolation_flags) {
+ lis_conn->entry_cfg.isolation_flags = port_cfg->entry_cfg.isolation_flags;
+ if (port_cfg->entry_cfg.session_group >= 0) {
+ lis_conn->entry_cfg.session_group = port_cfg->entry_cfg.session_group;
+ } else {
+ /* This can wrap after around INT_MAX listeners are opened. But I don't
+ * believe that matters, since you would need to open a ridiculous
+ * number of listeners while keeping the early ones open before you ever
+ * hit this. An OR with a dozen ports open, for example, would have to
+ * close and re-open its listeners every second for 4 years nonstop.
+ */
+ lis_conn->entry_cfg.session_group = global_next_session_group--;
+ }
+ }
+
+ if (type != CONN_TYPE_AP_LISTENER) {
+ lis_conn->entry_cfg.ipv4_traffic = 1;
+ lis_conn->entry_cfg.ipv6_traffic = 1;
+ lis_conn->entry_cfg.prefer_ipv6 = 0;
+ }
+
+ if (connection_add(conn) < 0) { /* no space, forget it */
+ log_warn(LD_NET,"connection_add for listener failed. Giving up.");
+ goto err;
+ }
+
+ log_fn(usePort==gotPort ? LOG_DEBUG : LOG_NOTICE, LD_NET,
+ "%s listening on port %u.",
+ conn_type_to_string(type), gotPort);
+
+ conn->state = LISTENER_STATE_READY;
+ if (start_reading) {
+ connection_start_reading(conn);
+ } else {
+ tor_assert(type == CONN_TYPE_AP_DNS_LISTENER);
+ dnsserv_configure_listener(conn);
+ }
+
+ /*
+ * Normal exit; call the OOS handler since connection count just changed;
+ * the exhaustion flag will always be zero here though.
+ */
+ connection_check_oos(get_n_open_sockets(), 0);
+
+ return conn;
+
+ err:
+ if (SOCKET_OK(s))
+ tor_close_socket(s);
+ if (conn)
+ connection_free(conn);
+
+ /* Call the OOS handler, indicate if we saw an exhaustion-related error */
+ connection_check_oos(get_n_open_sockets(), exhaustion);
+
+ return NULL;
+}
+
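+/* Illustrative sketch (hypothetical values, for exposition only): opening a
+ * SOCKS listener on 127.0.0.1:9050 via connection_listener_new(). */
+#if 0
+static connection_t *
+example_open_socks_listener(const port_cfg_t *port_cfg)
+{
+  struct sockaddr_in sin;
+  memset(&sin, 0, sizeof(sin));
+  sin.sin_family = AF_INET;
+  sin.sin_addr.s_addr = htonl(0x7f000001); /* 127.0.0.1 */
+  sin.sin_port = htons(9050);
+  return connection_listener_new((const struct sockaddr *)&sin, sizeof(sin),
+                                 CONN_TYPE_AP_LISTENER, "127.0.0.1",
+                                 port_cfg);
+}
+#endif /* 0 */
+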
+/** Do basic sanity checking on the address we got back from a newly
+ * accepted socket. Return 0 if it looks ok, else return -1.
+ *
+ * Notably, some TCP stacks can erroneously have accept() return successfully
+ * with a socklen of 0, when the client sends an RST before the accept call
+ * (as nmap does). We want to detect that, and not go on with the connection.
+ */
+static int
+check_sockaddr(const struct sockaddr *sa, int len, int level)
+{
+ int ok = 1;
+
+ if (sa->sa_family == AF_INET) {
+ struct sockaddr_in *sin=(struct sockaddr_in*)sa;
+ if (len != sizeof(struct sockaddr_in)) {
+ log_fn(level, LD_NET, "Length of address not as expected: %d vs %d",
+ len,(int)sizeof(struct sockaddr_in));
+ ok = 0;
+ }
+ if (sin->sin_addr.s_addr == 0 || sin->sin_port == 0) {
+ log_fn(level, LD_NET,
+ "Address for new connection has address/port equal to zero.");
+ ok = 0;
+ }
+ } else if (sa->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6=(struct sockaddr_in6*)sa;
+ if (len != sizeof(struct sockaddr_in6)) {
+ log_fn(level, LD_NET, "Length of address not as expected: %d vs %d",
+ len,(int)sizeof(struct sockaddr_in6));
+ ok = 0;
+ }
+ if (tor_mem_is_zero((void*)sin6->sin6_addr.s6_addr, 16) ||
+ sin6->sin6_port == 0) {
+ log_fn(level, LD_NET,
+ "Address for new connection has address/port equal to zero.");
+ ok = 0;
+ }
+ } else if (sa->sa_family == AF_UNIX) {
+ ok = 1;
+ } else {
+ ok = 0;
+ }
+ return ok ? 0 : -1;
+}
+
+/** Check whether the socket family from an accepted socket <b>got</b> is the
+ * same as the one that <b>listener</b> is waiting for. If it isn't, log
+ * a useful message and return -1. Else return 0.
+ *
+ * This is annoying, but can apparently happen on some Darwins. */
+static int
+check_sockaddr_family_match(sa_family_t got, connection_t *listener)
+{
+ if (got != listener->socket_family) {
+ log_info(LD_BUG, "A listener connection returned a socket with a "
+ "mismatched family. %s for addr_family %d gave us a socket "
+ "with address family %d. Dropping.",
+ conn_type_to_string(listener->type),
+ (int)listener->socket_family,
+ (int)got);
+ return -1;
+ }
+ return 0;
+}
+
+/** The listener connection <b>conn</b> told poll() it wanted to read.
+ * Call accept() on conn-\>s, and add the new connection if necessary.
+ */
+static int
+connection_handle_listener_read(connection_t *conn, int new_type)
+{
+ tor_socket_t news; /* the new socket */
+ connection_t *newconn = 0;
+ /* information about the remote peer when connecting to other routers */
+ struct sockaddr_storage addrbuf;
+ struct sockaddr *remote = (struct sockaddr*)&addrbuf;
+ /* length of the remote address. Must be whatever accept() needs. */
+ socklen_t remotelen = (socklen_t)sizeof(addrbuf);
+ const or_options_t *options = get_options();
+
+ tor_assert((size_t)remotelen >= sizeof(struct sockaddr_in));
+ memset(&addrbuf, 0, sizeof(addrbuf));
+
+ news = tor_accept_socket_nonblocking(conn->s,remote,&remotelen);
+ if (!SOCKET_OK(news)) { /* accept() error */
+ int e = tor_socket_errno(conn->s);
+ if (ERRNO_IS_ACCEPT_EAGAIN(e)) {
+ /*
+ * they hung up before we could accept(). that's fine.
+ *
+ * give the OOS handler a chance to run though
+ */
+ connection_check_oos(get_n_open_sockets(), 0);
+ return 0;
+ } else if (ERRNO_IS_RESOURCE_LIMIT(e)) {
+ warn_too_many_conns();
+ /* Exhaustion; tell the OOS handler */
+ connection_check_oos(get_n_open_sockets(), 1);
+ return 0;
+ }
+ /* else there was a real error. */
+ log_warn(LD_NET,"accept() failed: %s. Closing listener.",
+ tor_socket_strerror(e));
+ connection_mark_for_close(conn);
+ /* Tell the OOS handler about this too */
+ connection_check_oos(get_n_open_sockets(), 0);
+ return -1;
+ }
+ log_debug(LD_NET,
+ "Connection accepted on socket %d (child of fd %d).",
+ (int)news,(int)conn->s);
+
+ /* We accepted a new conn; run OOS handler */
+ connection_check_oos(get_n_open_sockets(), 0);
+
+ if (make_socket_reuseable(news) < 0) {
+ if (tor_socket_errno(news) == EINVAL) {
+ /* This can happen on OSX if we get a badly timed shutdown. */
+ log_debug(LD_NET, "make_socket_reuseable returned EINVAL");
+ } else {
+ log_warn(LD_NET, "Error setting SO_REUSEADDR flag on %s: %s",
+ conn_type_to_string(new_type),
+ tor_socket_strerror(errno));
+ }
+ tor_close_socket(news);
+ return 0;
+ }
+
+ if (options->ConstrainedSockets)
+ set_constrained_socket_buffers(news, (int)options->ConstrainedSockSize);
+
+ if (check_sockaddr_family_match(remote->sa_family, conn) < 0) {
+ tor_close_socket(news);
+ return 0;
+ }
+
+ if (conn->socket_family == AF_INET || conn->socket_family == AF_INET6 ||
+ (conn->socket_family == AF_UNIX && new_type == CONN_TYPE_AP)) {
+ tor_addr_t addr;
+ uint16_t port;
+ if (check_sockaddr(remote, remotelen, LOG_INFO)<0) {
+ log_info(LD_NET,
+ "accept() returned a strange address; closing connection.");
+ tor_close_socket(news);
+ return 0;
+ }
+
+ tor_addr_from_sockaddr(&addr, remote, &port);
+
+ /* process entrance policies here, before we even create the connection */
+ if (new_type == CONN_TYPE_AP) {
+ /* check sockspolicy to see if we should accept it */
+ if (socks_policy_permits_address(&addr) == 0) {
+ log_notice(LD_APP,
+ "Denying socks connection from untrusted address %s.",
+ fmt_and_decorate_addr(&addr));
+ tor_close_socket(news);
+ return 0;
+ }
+ }
+ if (new_type == CONN_TYPE_DIR) {
+ /* check dirpolicy to see if we should accept it */
+ if (dir_policy_permits_address(&addr) == 0) {
+ log_notice(LD_DIRSERV,"Denying dir connection from address %s.",
+ fmt_and_decorate_addr(&addr));
+ tor_close_socket(news);
+ return 0;
+ }
+ }
+ if (new_type == CONN_TYPE_OR) {
+ /* Assess with the connection DoS mitigation subsystem if this address
+ * can open a new connection. */
+ if (dos_conn_addr_get_defense_type(&addr) == DOS_CONN_DEFENSE_CLOSE) {
+ tor_close_socket(news);
+ return 0;
+ }
+ }
+
+ newconn = connection_new(new_type, conn->socket_family);
+ newconn->s = news;
+
+ /* remember the remote address */
+ tor_addr_copy(&newconn->addr, &addr);
+ if (new_type == CONN_TYPE_AP && conn->socket_family == AF_UNIX) {
+ newconn->port = 0;
+ newconn->address = tor_strdup(conn->address);
+ } else {
+ newconn->port = port;
+ newconn->address = tor_addr_to_str_dup(&addr);
+ }
+
+ if (new_type == CONN_TYPE_AP && conn->socket_family != AF_UNIX) {
+ log_info(LD_NET, "New SOCKS connection opened from %s.",
+ fmt_and_decorate_addr(&addr));
+ }
+ if (new_type == CONN_TYPE_AP && conn->socket_family == AF_UNIX) {
+ log_info(LD_NET, "New SOCKS AF_UNIX connection opened");
+ }
+ if (new_type == CONN_TYPE_CONTROL) {
+ log_notice(LD_CONTROL, "New control connection opened from %s.",
+ fmt_and_decorate_addr(&addr));
+ }
+
+ } else if (conn->socket_family == AF_UNIX && conn->type != CONN_TYPE_AP) {
+ tor_assert(conn->type == CONN_TYPE_CONTROL_LISTENER);
+ tor_assert(new_type == CONN_TYPE_CONTROL);
+ log_notice(LD_CONTROL, "New control connection opened.");
+
+ newconn = connection_new(new_type, conn->socket_family);
+ newconn->s = news;
+
+ /* remember the remote address -- do we have anything sane to put here? */
+ tor_addr_make_unspec(&newconn->addr);
+ newconn->port = 1;
+ newconn->address = tor_strdup(conn->address);
+ } else {
+ tor_assert(0);
+ }
+
+ if (connection_add(newconn) < 0) { /* no space, forget it */
+ connection_free(newconn);
+ return 0; /* no need to tear down the parent */
+ }
+
+ if (connection_init_accepted_conn(newconn, TO_LISTENER_CONN(conn)) < 0) {
+ if (! newconn->marked_for_close)
+ connection_mark_for_close(newconn);
+ return 0;
+ }
+ return 0;
+}
+
+/** Initialize states for newly accepted connection <b>conn</b>.
+ *
+ * If conn is an OR, start the TLS handshake.
+ *
+ * If conn is a transparent AP, get its original destination
+ * and place it in circuit_wait.
+ *
+ * The <b>listener</b> parameter is only used for AP connections.
+ */
+int
+connection_init_accepted_conn(connection_t *conn,
+ const listener_connection_t *listener)
+{
+ int rv;
+
+ connection_start_reading(conn);
+
+ switch (conn->type) {
+ case CONN_TYPE_EXT_OR:
+ /* Initiate Extended ORPort authentication. */
+ return connection_ext_or_start_auth(TO_OR_CONN(conn));
+ case CONN_TYPE_OR:
+ control_event_or_conn_status(TO_OR_CONN(conn), OR_CONN_EVENT_NEW, 0);
+ rv = connection_tls_start_handshake(TO_OR_CONN(conn), 1);
+ if (rv < 0) {
+ connection_or_close_for_error(TO_OR_CONN(conn), 0);
+ }
+ return rv;
+ case CONN_TYPE_AP:
+ memcpy(&TO_ENTRY_CONN(conn)->entry_cfg, &listener->entry_cfg,
+ sizeof(entry_port_cfg_t));
+ TO_ENTRY_CONN(conn)->nym_epoch = get_signewnym_epoch();
+ TO_ENTRY_CONN(conn)->socks_request->listener_type = listener->base_.type;
+
+ switch (TO_CONN(listener)->type) {
+ case CONN_TYPE_AP_LISTENER:
+ conn->state = AP_CONN_STATE_SOCKS_WAIT;
+ TO_ENTRY_CONN(conn)->socks_request->socks_prefer_no_auth =
+ listener->entry_cfg.socks_prefer_no_auth;
+ break;
+ case CONN_TYPE_AP_TRANS_LISTENER:
+ TO_ENTRY_CONN(conn)->is_transparent_ap = 1;
+ /* XXXX028 -- is this correct still, with the addition of
+ * pending_entry_connections ? */
+ conn->state = AP_CONN_STATE_CIRCUIT_WAIT;
+ return connection_ap_process_transparent(TO_ENTRY_CONN(conn));
+ case CONN_TYPE_AP_NATD_LISTENER:
+ TO_ENTRY_CONN(conn)->is_transparent_ap = 1;
+ conn->state = AP_CONN_STATE_NATD_WAIT;
+ break;
+ case CONN_TYPE_AP_HTTP_CONNECT_LISTENER:
+ conn->state = AP_CONN_STATE_HTTP_CONNECT_WAIT;
+ }
+ break;
+ case CONN_TYPE_DIR:
+ conn->purpose = DIR_PURPOSE_SERVER;
+ conn->state = DIR_CONN_STATE_SERVER_COMMAND_WAIT;
+ break;
+ case CONN_TYPE_CONTROL:
+ conn->state = CONTROL_CONN_STATE_NEEDAUTH;
+ break;
+ }
+ return 0;
+}
+
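+/* Summary sketch of the initial states assigned above:
+ *
+ *   CONN_TYPE_AP_LISTENER              -> AP_CONN_STATE_SOCKS_WAIT
+ *   CONN_TYPE_AP_TRANS_LISTENER        -> AP_CONN_STATE_CIRCUIT_WAIT
+ *   CONN_TYPE_AP_NATD_LISTENER         -> AP_CONN_STATE_NATD_WAIT
+ *   CONN_TYPE_AP_HTTP_CONNECT_LISTENER -> AP_CONN_STATE_HTTP_CONNECT_WAIT
+ *   CONN_TYPE_DIR                      -> DIR_CONN_STATE_SERVER_COMMAND_WAIT
+ *   CONN_TYPE_CONTROL                  -> CONTROL_CONN_STATE_NEEDAUTH
+ */
+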
+/** Take conn, make a nonblocking socket; try to connect to
+ * sa, binding to bindaddr if sa is not localhost. If fail, return -1 and if
+ * applicable put your best guess about errno into *<b>socket_error</b>.
+ * If connected return 1, if EAGAIN return 0.
+ */
+MOCK_IMPL(STATIC int,
+connection_connect_sockaddr,(connection_t *conn,
+ const struct sockaddr *sa,
+ socklen_t sa_len,
+ const struct sockaddr *bindaddr,
+ socklen_t bindaddr_len,
+ int *socket_error))
+{
+ tor_socket_t s;
+ int inprogress = 0;
+ const or_options_t *options = get_options();
+
+ tor_assert(conn);
+ tor_assert(sa);
+ tor_assert(socket_error);
+
+ if (net_is_completely_disabled()) {
+ /* We should never even try to connect anyplace if the network is
+ * completely shut off.
+ *
+ * (We don't check net_is_disabled() here, since we still sometimes
+ * want to open connections when we're in soft hibernation.)
+ */
+ static ratelim_t disablenet_violated = RATELIM_INIT(30*60);
+ *socket_error = SOCK_ERRNO(ENETUNREACH);
+ log_fn_ratelim(&disablenet_violated, LOG_WARN, LD_BUG,
+ "Tried to open a socket with DisableNetwork set.");
+ tor_fragile_assert();
+ return -1;
+ }
+
+ const int protocol_family = sa->sa_family;
+ const int proto = (sa->sa_family == AF_INET6 ||
+ sa->sa_family == AF_INET) ? IPPROTO_TCP : 0;
+
+ s = tor_open_socket_nonblocking(protocol_family, SOCK_STREAM, proto);
+ if (! SOCKET_OK(s)) {
+ /*
+ * Early OOS handler calls; it matters if it's an exhaustion-related
+ * error or not.
+ */
+ *socket_error = tor_socket_errno(s);
+ if (ERRNO_IS_RESOURCE_LIMIT(*socket_error)) {
+ warn_too_many_conns();
+ connection_check_oos(get_n_open_sockets(), 1);
+ } else {
+ log_warn(LD_NET,"Error creating network socket: %s",
+ tor_socket_strerror(*socket_error));
+ connection_check_oos(get_n_open_sockets(), 0);
+ }
+ return -1;
+ }
+
+ if (make_socket_reuseable(s) < 0) {
+ log_warn(LD_NET, "Error setting SO_REUSEADDR flag on new connection: %s",
+ tor_socket_strerror(errno));
+ }
+
+ /*
+ * We've got the socket open; give the OOS handler a chance to check
+ * against configured maximum socket number, but tell it no exhaustion
+ * failure.
+ */
+ connection_check_oos(get_n_open_sockets(), 0);
+
+ if (bindaddr && bind(s, bindaddr, bindaddr_len) < 0) {
+ *socket_error = tor_socket_errno(s);
+ log_warn(LD_NET,"Error binding network socket: %s",
+ tor_socket_strerror(*socket_error));
+ tor_close_socket(s);
+ return -1;
+ }
+
+ tor_assert(options);
+ if (options->ConstrainedSockets)
+ set_constrained_socket_buffers(s, (int)options->ConstrainedSockSize);
+
+ if (connect(s, sa, sa_len) < 0) {
+ int e = tor_socket_errno(s);
+ if (!ERRNO_IS_CONN_EINPROGRESS(e)) {
+ /* yuck. kill it. */
+ *socket_error = e;
+ log_info(LD_NET,
+ "connect() to socket failed: %s",
+ tor_socket_strerror(e));
+ tor_close_socket(s);
+ return -1;
+ } else {
+ inprogress = 1;
+ }
+ }
+
+ /* it succeeded. we're connected. */
+ log_fn(inprogress ? LOG_DEBUG : LOG_INFO, LD_NET,
+ "Connection to socket %s (sock "TOR_SOCKET_T_FORMAT").",
+ inprogress ? "in progress" : "established", s);
+ conn->s = s;
+ if (connection_add_connecting(conn) < 0) {
+ /* no space, forget it */
+ *socket_error = SOCK_ERRNO(ENOBUFS);
+ return -1;
+ }
+
+ return inprogress ? 0 : 1;
+}
+
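+/* Example (sketch, with a hypothetical caller): the tri-state return value
+ * above is consumed roughly like this.
+ *
+ *   int socket_error = 0;
+ *   switch (connection_connect_sockaddr(conn, sa, sa_len,
+ *                                       NULL, 0, &socket_error)) {
+ *   case -1: // hard failure; socket_error holds our best errno guess
+ *     break;
+ *   case 0:  // connect() still in progress; wait for conn->s to become
+ *            // writable before proceeding
+ *     break;
+ *   case 1:  // connected immediately
+ *     break;
+ *   }
+ */
+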
+/* Log a message if connection attempt is made when IPv4 or IPv6 is disabled.
+ * Log a less severe message if we couldn't conform to ClientPreferIPv6ORPort
+ * or ClientPreferIPv6DirPort. */
+static void
+connection_connect_log_client_use_ip_version(const connection_t *conn)
+{
+ const or_options_t *options = get_options();
+
+ /* Only clients care about ClientUseIPv4/6, bail out early on servers, and
+ * on connections we don't care about */
+ if (server_mode(options) || !conn || conn->type == CONN_TYPE_EXIT) {
+ return;
+ }
+
+ /* We're only prepared to log OR and DIR connections here */
+ if (conn->type != CONN_TYPE_OR && conn->type != CONN_TYPE_DIR) {
+ return;
+ }
+
+ const int must_ipv4 = !fascist_firewall_use_ipv6(options);
+ const int must_ipv6 = (options->ClientUseIPv4 == 0);
+ const int pref_ipv6 = (conn->type == CONN_TYPE_OR
+ ? fascist_firewall_prefer_ipv6_orport(options)
+ : fascist_firewall_prefer_ipv6_dirport(options));
+ tor_addr_t real_addr;
+ tor_addr_make_null(&real_addr, AF_UNSPEC);
+
+ /* OR conns keep the original address in real_addr, as addr gets overwritten
+ * with the descriptor address */
+ if (conn->type == CONN_TYPE_OR) {
+ const or_connection_t *or_conn = TO_OR_CONN((connection_t *)conn);
+ tor_addr_copy(&real_addr, &or_conn->real_addr);
+ } else if (conn->type == CONN_TYPE_DIR) {
+ tor_addr_copy(&real_addr, &conn->addr);
+ }
+
+ /* Check if we broke a mandatory address family restriction */
+ if ((must_ipv4 && tor_addr_family(&real_addr) == AF_INET6)
+ || (must_ipv6 && tor_addr_family(&real_addr) == AF_INET)) {
+ static int logged_backtrace = 0;
+ log_info(LD_BUG, "Outgoing %s connection to %s violated ClientUseIPv%s 0.",
+ conn->type == CONN_TYPE_OR ? "OR" : "Dir",
+ fmt_addr(&real_addr),
+ options->ClientUseIPv4 == 0 ? "4" : "6");
+ if (!logged_backtrace) {
+ log_backtrace(LOG_INFO, LD_BUG, "Address came from");
+ logged_backtrace = 1;
+ }
+ }
+
+ /* Bridges are allowed to break IPv4/IPv6 ORPort preferences to connect to
+ * the node's configured address when ClientPreferIPv6ORPort is auto */
+ if (options->UseBridges && conn->type == CONN_TYPE_OR
+ && options->ClientPreferIPv6ORPort == -1) {
+ return;
+ }
+
+ /* Check if we couldn't satisfy an address family preference */
+ if ((!pref_ipv6 && tor_addr_family(&real_addr) == AF_INET6)
+ || (pref_ipv6 && tor_addr_family(&real_addr) == AF_INET)) {
+ log_info(LD_NET, "Outgoing connection to %s doesn't satisfy "
+ "ClientPreferIPv6%sPort %d, with ClientUseIPv4 %d, and "
+ "fascist_firewall_use_ipv6 %d (ClientUseIPv6 %d and UseBridges "
+ "%d).",
+ fmt_addr(&real_addr),
+ conn->type == CONN_TYPE_OR ? "OR" : "Dir",
+ conn->type == CONN_TYPE_OR ? options->ClientPreferIPv6ORPort
+ : options->ClientPreferIPv6DirPort,
+ options->ClientUseIPv4, fascist_firewall_use_ipv6(options),
+ options->ClientUseIPv6, options->UseBridges);
+ }
+}
+
+/** Retrieve the outbound address depending on the protocol (IPv4 or IPv6)
+ * and the connection type (relay, exit, ...)
+ * Return a socket address or NULL in case nothing is configured.
+ **/
+const tor_addr_t *
+conn_get_outbound_address(sa_family_t family,
+ const or_options_t *options, unsigned int conn_type)
+{
+ const tor_addr_t *ext_addr = NULL;
+
+ int fam_index;
+ switch (family) {
+ case AF_INET:
+ fam_index = 0;
+ break;
+ case AF_INET6:
+ fam_index = 1;
+ break;
+ default:
+ return NULL;
+ }
+
+ // If an exit connection, use the exit address (if present)
+ if (conn_type == CONN_TYPE_EXIT) {
+ if (!tor_addr_is_null(
+ &options->OutboundBindAddresses[OUTBOUND_ADDR_EXIT][fam_index])) {
+ ext_addr = &options->OutboundBindAddresses[OUTBOUND_ADDR_EXIT]
+ [fam_index];
+ } else if (!tor_addr_is_null(
+ &options->OutboundBindAddresses[OUTBOUND_ADDR_EXIT_AND_OR]
+ [fam_index])) {
+ ext_addr = &options->OutboundBindAddresses[OUTBOUND_ADDR_EXIT_AND_OR]
+ [fam_index];
+ }
+ } else { // All non-exit connections
+ if (!tor_addr_is_null(
+ &options->OutboundBindAddresses[OUTBOUND_ADDR_OR][fam_index])) {
+ ext_addr = &options->OutboundBindAddresses[OUTBOUND_ADDR_OR]
+ [fam_index];
+ } else if (!tor_addr_is_null(
+ &options->OutboundBindAddresses[OUTBOUND_ADDR_EXIT_AND_OR]
+ [fam_index])) {
+ ext_addr = &options->OutboundBindAddresses[OUTBOUND_ADDR_EXIT_AND_OR]
+ [fam_index];
+ }
+ }
+ return ext_addr;
+}
+
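+/* Example (sketch; illustrative documentation-range addresses, and assuming
+ * OutboundBindAddress fills the OUTBOUND_ADDR_EXIT_AND_OR slot while
+ * OutboundBindAddressExit fills OUTBOUND_ADDR_EXIT): with
+ *
+ *   OutboundBindAddress 198.51.100.5
+ *   OutboundBindAddressExit 203.0.113.9
+ *
+ * an AF_INET exit connection binds to 203.0.113.9, while OR and directory
+ * connections fall through to 198.51.100.5. */
+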
+/** Take conn, make a nonblocking socket; try to connect to
+ * addr:port (port arrives in *host order*). If fail, return -1 and if
+ * applicable put your best guess about errno into *<b>socket_error</b>.
+ * Else assign s to conn-\>s: if connected return 1, if EAGAIN return 0.
+ *
+ * addr:port can be different from conn->addr:conn->port if connecting through
+ * a proxy.
+ *
+ * address is used to make the logs useful.
+ *
+ * On success, add conn to the list of polled connections.
+ */
+int
+connection_connect(connection_t *conn, const char *address,
+ const tor_addr_t *addr, uint16_t port, int *socket_error)
+{
+ struct sockaddr_storage addrbuf;
+ struct sockaddr_storage bind_addr_ss;
+ struct sockaddr *bind_addr = NULL;
+ struct sockaddr *dest_addr;
+ int dest_addr_len, bind_addr_len = 0;
+
+ /* Log if we didn't stick to ClientUseIPv4/6 or ClientPreferIPv6OR/DirPort
+ */
+ connection_connect_log_client_use_ip_version(conn);
+
+ if (!tor_addr_is_loopback(addr)) {
+ const tor_addr_t *ext_addr = NULL;
+ ext_addr = conn_get_outbound_address(tor_addr_family(addr), get_options(),
+ conn->type);
+ if (ext_addr) {
+ memset(&bind_addr_ss, 0, sizeof(bind_addr_ss));
+ bind_addr_len = tor_addr_to_sockaddr(ext_addr, 0,
+ (struct sockaddr *) &bind_addr_ss,
+ sizeof(bind_addr_ss));
+ if (bind_addr_len == 0) {
+ log_warn(LD_NET,
+ "Error converting OutboundBindAddress %s into sockaddr. "
+ "Ignoring.", fmt_and_decorate_addr(ext_addr));
+ } else {
+ bind_addr = (struct sockaddr *)&bind_addr_ss;
+ }
+ }
+ }
+
+ memset(&addrbuf,0,sizeof(addrbuf));
+ dest_addr = (struct sockaddr*) &addrbuf;
+ dest_addr_len = tor_addr_to_sockaddr(addr, port, dest_addr, sizeof(addrbuf));
+ tor_assert(dest_addr_len > 0);
+
+ log_debug(LD_NET, "Connecting to %s:%u.",
+ escaped_safe_str_client(address), port);
+
+ return connection_connect_sockaddr(conn, dest_addr, dest_addr_len,
+ bind_addr, bind_addr_len, socket_error);
+}
+
+#ifdef HAVE_SYS_UN_H
+
+/** Take conn, make a nonblocking socket; try to connect to
+ * an AF_UNIX socket at socket_path. If fail, return -1 and if applicable
+ * put your best guess about errno into *<b>socket_error</b>. Else assign s
+ * to conn-\>s: if connected return 1, if EAGAIN return 0.
+ *
+ * On success, add conn to the list of polled connections.
+ */
+int
+connection_connect_unix(connection_t *conn, const char *socket_path,
+ int *socket_error)
+{
+ struct sockaddr_un dest_addr;
+
+ tor_assert(socket_path);
+
+ /* Check that we'll be able to fit it into dest_addr later */
+ if (strlen(socket_path) + 1 > sizeof(dest_addr.sun_path)) {
+ log_warn(LD_NET,
+ "Path %s is too long for an AF_UNIX socket",
+ escaped_safe_str_client(socket_path));
+ *socket_error = SOCK_ERRNO(ENAMETOOLONG);
+ return -1;
+ }
+
+ memset(&dest_addr, 0, sizeof(dest_addr));
+ dest_addr.sun_family = AF_UNIX;
+ strlcpy(dest_addr.sun_path, socket_path, sizeof(dest_addr.sun_path));
+
+ log_debug(LD_NET,
+ "Connecting to AF_UNIX socket at %s.",
+ escaped_safe_str_client(socket_path));
+
+ return connection_connect_sockaddr(conn,
+ (struct sockaddr *)&dest_addr, sizeof(dest_addr),
+ NULL, 0, socket_error);
+}
+
+#endif /* defined(HAVE_SYS_UN_H) */
+
+/** Convert state number to string representation for logging purposes.
+ */
+static const char *
+connection_proxy_state_to_string(int state)
+{
+ static const char *unknown = "???";
+ static const char *states[] = {
+ "PROXY_NONE",
+ "PROXY_INFANT",
+ "PROXY_HTTPS_WANT_CONNECT_OK",
+ "PROXY_SOCKS4_WANT_CONNECT_OK",
+ "PROXY_SOCKS5_WANT_AUTH_METHOD_NONE",
+ "PROXY_SOCKS5_WANT_AUTH_METHOD_RFC1929",
+ "PROXY_SOCKS5_WANT_AUTH_RFC1929_OK",
+ "PROXY_SOCKS5_WANT_CONNECT_OK",
+ "PROXY_CONNECTED",
+ };
+
+ if (state < PROXY_NONE || state > PROXY_CONNECTED)
+ return unknown;
+
+ return states[state];
+}
+
+/** Returns the global proxy type used by tor. Use this function for
+ * logging or high-level purposes, don't use it to fill the
+ * <b>proxy_type</b> field of or_connection_t; use the actual proxy
+ * protocol instead.*/
+static int
+get_proxy_type(void)
+{
+ const or_options_t *options = get_options();
+
+ if (options->ClientTransportPlugin)
+ return PROXY_PLUGGABLE;
+ else if (options->HTTPSProxy)
+ return PROXY_CONNECT;
+ else if (options->Socks4Proxy)
+ return PROXY_SOCKS4;
+ else if (options->Socks5Proxy)
+ return PROXY_SOCKS5;
+ else
+ return PROXY_NONE;
+}
+
+/* One byte for the version, one for the command, two for the
+ port, and four for the addr... and, one more for the
+ username NUL: */
+#define SOCKS4_STANDARD_BUFFER_SIZE (1 + 1 + 2 + 4 + 1)
+
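+/* Example (sketch): the SOCKS4 CONNECT request built below, for an
+ * illustrative target 192.0.2.1:443 with an empty userid, is laid out as
+ *
+ *   0x04                  version
+ *   0x01                  SOCKS_COMMAND_CONNECT
+ *   0x01 0xBB             port 443, network byte order
+ *   0xC0 0x00 0x02 0x01   192.0.2.1
+ *   0x00                  userid terminator (empty userid)
+ *
+ * which is exactly SOCKS4_STANDARD_BUFFER_SIZE (9) bytes. */
+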
+/** Write a proxy request of <b>type</b> (socks4, socks5, https) to conn
+ * for conn->addr:conn->port, authenticating with the auth details given
+ * in the configuration (if available). SOCKS 5 and HTTP CONNECT proxies
+ * support authentication.
+ *
+ * Returns -1 if conn->addr is incompatible with the proxy protocol, and
+ * 0 otherwise.
+ *
+ * Use connection_read_proxy_handshake() to complete the handshake.
+ */
+int
+connection_proxy_connect(connection_t *conn, int type)
+{
+ const or_options_t *options;
+
+ tor_assert(conn);
+
+ options = get_options();
+
+ switch (type) {
+ case PROXY_CONNECT: {
+ char buf[1024];
+ char *base64_authenticator=NULL;
+ const char *authenticator = options->HTTPSProxyAuthenticator;
+
+ /* Send HTTP CONNECT and authentication (if available) in
+ * one request */
+
+ if (authenticator) {
+ base64_authenticator = alloc_http_authenticator(authenticator);
+ if (!base64_authenticator)
+ log_warn(LD_OR, "Encoding https authenticator failed");
+ }
+
+ if (base64_authenticator) {
+ const char *addrport = fmt_addrport(&conn->addr, conn->port);
+ tor_snprintf(buf, sizeof(buf), "CONNECT %s HTTP/1.1\r\n"
+ "Host: %s\r\n"
+ "Proxy-Authorization: Basic %s\r\n\r\n",
+ addrport,
+ addrport,
+ base64_authenticator);
+ tor_free(base64_authenticator);
+ } else {
+ tor_snprintf(buf, sizeof(buf), "CONNECT %s HTTP/1.0\r\n\r\n",
+ fmt_addrport(&conn->addr, conn->port));
+ }
+
+ connection_buf_add(buf, strlen(buf), conn);
+ conn->proxy_state = PROXY_HTTPS_WANT_CONNECT_OK;
+ break;
+ }
+
+ case PROXY_SOCKS4: {
+ unsigned char *buf;
+ uint16_t portn;
+ uint32_t ip4addr;
+ size_t buf_size = 0;
+ char *socks_args_string = NULL;
+
+ /* Send a SOCKS4 connect request */
+
+ if (tor_addr_family(&conn->addr) != AF_INET) {
+ log_warn(LD_NET, "SOCKS4 client is incompatible with IPv6");
+ return -1;
+ }
+
+ { /* If we are here because we are trying to connect to a
+ pluggable transport proxy, check if we have any SOCKS
+ arguments to transmit. If we do, compress all arguments to
+ a single string in 'socks_args_string': */
+
+ if (get_proxy_type() == PROXY_PLUGGABLE) {
+ socks_args_string =
+ pt_get_socks_args_for_proxy_addrport(&conn->addr, conn->port);
+ if (socks_args_string)
+ log_debug(LD_NET, "Sending out '%s' as our SOCKS argument string.",
+ socks_args_string);
+ }
+ }
+
+ { /* Figure out the buffer size we need for the SOCKS message: */
+
+ buf_size = SOCKS4_STANDARD_BUFFER_SIZE;
+
+ /* If we have a SOCKS argument string, consider its size when
+ calculating the buffer size: */
+ if (socks_args_string)
+ buf_size += strlen(socks_args_string);
+ }
+
+ buf = tor_malloc_zero(buf_size);
+
+ ip4addr = tor_addr_to_ipv4n(&conn->addr);
+ portn = htons(conn->port);
+
+ buf[0] = 4; /* version */
+ buf[1] = SOCKS_COMMAND_CONNECT; /* command */
+ memcpy(buf + 2, &portn, 2); /* port */
+ memcpy(buf + 4, &ip4addr, 4); /* addr */
+
+ /* Next packet field is the userid. If we have pluggable
+ transport SOCKS arguments, we have to embed them
+ there. Otherwise, we use an empty userid. */
+ if (socks_args_string) { /* place the SOCKS args string: */
+ tor_assert(strlen(socks_args_string) > 0);
+ tor_assert(buf_size >=
+ SOCKS4_STANDARD_BUFFER_SIZE + strlen(socks_args_string));
+ strlcpy((char *)buf + 8, socks_args_string, buf_size - 8);
+ tor_free(socks_args_string);
+ } else {
+ buf[8] = 0; /* no userid */
+ }
+
+ connection_buf_add((char *)buf, buf_size, conn);
+ tor_free(buf);
+
+ conn->proxy_state = PROXY_SOCKS4_WANT_CONNECT_OK;
+ break;
+ }
+
+ case PROXY_SOCKS5: {
+ unsigned char buf[4]; /* fields: vers, num methods, method list */
+
+ /* Send a SOCKS5 greeting (connect request must wait) */
+
+ buf[0] = 5; /* version */
+
+ /* We have to use SOCKS5 authentication, if we have a
+ Socks5ProxyUsername or if we want to pass arguments to our
+ pluggable transport proxy: */
+ if ((options->Socks5ProxyUsername) ||
+ (get_proxy_type() == PROXY_PLUGGABLE &&
+ (get_socks_args_by_bridge_addrport(&conn->addr, conn->port)))) {
+ /* number of auth methods */
+ buf[1] = 2;
+ buf[2] = 0x00; /* no authentication */
+ buf[3] = 0x02; /* rfc1929 Username/Passwd auth */
+ conn->proxy_state = PROXY_SOCKS5_WANT_AUTH_METHOD_RFC1929;
+ } else {
+ buf[1] = 1;
+ buf[2] = 0x00; /* no authentication */
+ conn->proxy_state = PROXY_SOCKS5_WANT_AUTH_METHOD_NONE;
+ }
+
+ connection_buf_add((char *)buf, 2 + buf[1], conn);
+ break;
+ }
+
+ default:
+ log_err(LD_BUG, "Invalid proxy protocol, %d", type);
+ tor_fragile_assert();
+ return -1;
+ }
+
+ log_debug(LD_NET, "set state %s",
+ connection_proxy_state_to_string(conn->proxy_state));
+
+ return 0;
+}
+
+/** Read conn's inbuf. If the http response from the proxy is all
+ * here, make sure it's good news, then return 1. If it's bad news,
+ * return -1. Else return 0 and hope for better luck next time.
+ */
+static int
+connection_read_https_proxy_response(connection_t *conn)
+{
+ char *headers;
+ char *reason=NULL;
+ int status_code;
+ time_t date_header;
+
+ switch (fetch_from_buf_http(conn->inbuf,
+ &headers, MAX_HEADERS_SIZE,
+ NULL, NULL, 10000, 0)) {
+ case -1: /* overflow */
+ log_warn(LD_PROTOCOL,
+ "Your https proxy sent back an oversized response. Closing.");
+ return -1;
+ case 0:
+ log_info(LD_NET,"https proxy response not all here yet. Waiting.");
+ return 0;
+ /* case 1, fall through */
+ }
+
+ if (parse_http_response(headers, &status_code, &date_header,
+ NULL, &reason) < 0) {
+ log_warn(LD_NET,
+ "Unparseable headers from proxy (connecting to '%s'). Closing.",
+ conn->address);
+ tor_free(headers);
+ return -1;
+ }
+ tor_free(headers);
+ if (!reason) reason = tor_strdup("[no reason given]");
+
+ if (status_code == 200) {
+ log_info(LD_NET,
+ "HTTPS connect to '%s' successful! (200 %s) Starting TLS.",
+ conn->address, escaped(reason));
+ tor_free(reason);
+ return 1;
+ }
+ /* else, bad news on the status code */
+ switch (status_code) {
+ case 403:
+ log_warn(LD_NET,
+ "The https proxy refused to allow connection to %s "
+ "(status code %d, %s). Closing.",
+ conn->address, status_code, escaped(reason));
+ break;
+ default:
+ log_warn(LD_NET,
+ "The https proxy sent back an unexpected status code %d (%s). "
+ "Closing.",
+ status_code, escaped(reason));
+ break;
+ }
+ tor_free(reason);
+ return -1;
+}
+
+/** Send SOCKS5 CONNECT command to <b>conn</b>, copying <b>conn->addr</b>
+ * and <b>conn->port</b> into the request.
+ */
+static void
+connection_send_socks5_connect(connection_t *conn)
+{
+ unsigned char buf[1024];
+ size_t reqsize = 6;
+ uint16_t port = htons(conn->port);
+
+ buf[0] = 5; /* version */
+ buf[1] = SOCKS_COMMAND_CONNECT; /* command */
+ buf[2] = 0; /* reserved */
+
+ if (tor_addr_family(&conn->addr) == AF_INET) {
+ uint32_t addr = tor_addr_to_ipv4n(&conn->addr);
+
+ buf[3] = 1;
+ reqsize += 4;
+ memcpy(buf + 4, &addr, 4);
+ memcpy(buf + 8, &port, 2);
+ } else { /* AF_INET6 */
+ buf[3] = 4;
+ reqsize += 16;
+ memcpy(buf + 4, tor_addr_to_in6_addr8(&conn->addr), 16);
+ memcpy(buf + 20, &port, 2);
+ }
+
+ connection_buf_add((char *)buf, reqsize, conn);
+
+ conn->proxy_state = PROXY_SOCKS5_WANT_CONNECT_OK;
+}
+
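+/* Example (sketch): for an illustrative IPv4 target 192.0.2.1:443, the
+ * request built above is the 10-byte sequence
+ *
+ *   0x05 0x01 0x00 0x01   version, CONNECT, reserved, address type IPv4
+ *   0xC0 0x00 0x02 0x01   192.0.2.1
+ *   0x01 0xBB             port 443, network byte order
+ *
+ * reqsize starts at 6 (version..address type, plus the 2-byte port) and
+ * grows by 4 for IPv4 or 16 for IPv6. */
+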
+/** Wrapper around fetch_from_buf_socks_client: see that function
+ * for documentation of its behavior. */
+static int
+connection_fetch_from_buf_socks_client(connection_t *conn,
+ int state, char **reason)
+{
+ return fetch_from_buf_socks_client(conn->inbuf, state, reason);
+}
+
+/** Call this from connection_*_process_inbuf() to advance the proxy
+ * handshake.
+ *
+ * No matter what proxy protocol is used, if this function returns 1, the
+ * handshake is complete, and the data remaining on inbuf may contain the
+ * start of the communication with the requested server.
+ *
+ * Returns 0 if the current buffer contains an incomplete response, and -1
+ * on error.
+ */
+int
+connection_read_proxy_handshake(connection_t *conn)
+{
+ int ret = 0;
+ char *reason = NULL;
+
+ log_debug(LD_NET, "enter state %s",
+ connection_proxy_state_to_string(conn->proxy_state));
+
+ switch (conn->proxy_state) {
+ case PROXY_HTTPS_WANT_CONNECT_OK:
+ ret = connection_read_https_proxy_response(conn);
+ if (ret == 1)
+ conn->proxy_state = PROXY_CONNECTED;
+ break;
+
+ case PROXY_SOCKS4_WANT_CONNECT_OK:
+ ret = connection_fetch_from_buf_socks_client(conn,
+ conn->proxy_state,
+ &reason);
+ if (ret == 1)
+ conn->proxy_state = PROXY_CONNECTED;
+ break;
+
+ case PROXY_SOCKS5_WANT_AUTH_METHOD_NONE:
+ ret = connection_fetch_from_buf_socks_client(conn,
+ conn->proxy_state,
+ &reason);
+ /* no auth needed, do connect */
+ if (ret == 1) {
+ connection_send_socks5_connect(conn);
+ ret = 0;
+ }
+ break;
+
+ case PROXY_SOCKS5_WANT_AUTH_METHOD_RFC1929:
+ ret = connection_fetch_from_buf_socks_client(conn,
+ conn->proxy_state,
+ &reason);
+
+ /* send auth if needed, otherwise do connect */
+ if (ret == 1) {
+ connection_send_socks5_connect(conn);
+ ret = 0;
+ } else if (ret == 2) {
+ unsigned char buf[1024];
+ size_t reqsize, usize, psize;
+ const char *user, *pass;
+ char *socks_args_string = NULL;
+
+ if (get_proxy_type() == PROXY_PLUGGABLE) {
+ socks_args_string =
+ pt_get_socks_args_for_proxy_addrport(&conn->addr, conn->port);
+ if (!socks_args_string) {
+ log_warn(LD_NET, "Could not create SOCKS args string.");
+ ret = -1;
+ break;
+ }
+
+ log_debug(LD_NET, "SOCKS5 arguments: %s", socks_args_string);
+ tor_assert(strlen(socks_args_string) > 0);
+ tor_assert(strlen(socks_args_string) <= MAX_SOCKS5_AUTH_SIZE_TOTAL);
+
+ if (strlen(socks_args_string) > MAX_SOCKS5_AUTH_FIELD_SIZE) {
+ user = socks_args_string;
+ usize = MAX_SOCKS5_AUTH_FIELD_SIZE;
+ pass = socks_args_string + MAX_SOCKS5_AUTH_FIELD_SIZE;
+ psize = strlen(socks_args_string) - MAX_SOCKS5_AUTH_FIELD_SIZE;
+ } else {
+ user = socks_args_string;
+ usize = strlen(socks_args_string);
+ pass = "\0";
+ psize = 1;
+ }
+ } else if (get_options()->Socks5ProxyUsername) {
+ user = get_options()->Socks5ProxyUsername;
+ pass = get_options()->Socks5ProxyPassword;
+ tor_assert(user && pass);
+ usize = strlen(user);
+ psize = strlen(pass);
+ } else {
+ log_err(LD_BUG, "We entered %s for no reason!", __func__);
+ tor_fragile_assert();
+ ret = -1;
+ break;
+ }
+
+ /* Username and password lengths should have been checked
+ above and during torrc parsing. */
+ tor_assert(usize <= MAX_SOCKS5_AUTH_FIELD_SIZE &&
+ psize <= MAX_SOCKS5_AUTH_FIELD_SIZE);
+ reqsize = 3 + usize + psize;
+
+ buf[0] = 1; /* negotiation version */
+ buf[1] = usize;
+ memcpy(buf + 2, user, usize);
+ buf[2 + usize] = psize;
+ memcpy(buf + 3 + usize, pass, psize);
+
+ if (socks_args_string)
+ tor_free(socks_args_string);
+
+ connection_buf_add((char *)buf, reqsize, conn);
+
+ conn->proxy_state = PROXY_SOCKS5_WANT_AUTH_RFC1929_OK;
+ ret = 0;
+ }
+ break;
+
+ case PROXY_SOCKS5_WANT_AUTH_RFC1929_OK:
+ ret = connection_fetch_from_buf_socks_client(conn,
+ conn->proxy_state,
+ &reason);
+ /* send the connect request */
+ if (ret == 1) {
+ connection_send_socks5_connect(conn);
+ ret = 0;
+ }
+ break;
+
+ case PROXY_SOCKS5_WANT_CONNECT_OK:
+ ret = connection_fetch_from_buf_socks_client(conn,
+ conn->proxy_state,
+ &reason);
+ if (ret == 1)
+ conn->proxy_state = PROXY_CONNECTED;
+ break;
+
+ default:
+ log_err(LD_BUG, "Invalid proxy_state for reading, %d",
+ conn->proxy_state);
+ tor_fragile_assert();
+ ret = -1;
+ break;
+ }
+
+ log_debug(LD_NET, "leaving state %s",
+ connection_proxy_state_to_string(conn->proxy_state));
+
+ if (ret < 0) {
+ if (reason) {
+ log_warn(LD_NET, "Proxy Client: unable to connect to %s:%d (%s)",
+ conn->address, conn->port, escaped(reason));
+ tor_free(reason);
+ } else {
+ log_warn(LD_NET, "Proxy Client: unable to connect to %s:%d",
+ conn->address, conn->port);
+ }
+ } else if (ret == 1) {
+ log_info(LD_NET, "Proxy Client: connection to %s:%d successful",
+ conn->address, conn->port);
+ }
+
+ return ret;
+}
+
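+/* Sketch of the proxy handshake state progression handled above, assuming
+ * each response arrives complete and successful:
+ *
+ *   HTTPS:  PROXY_HTTPS_WANT_CONNECT_OK  -> PROXY_CONNECTED
+ *   SOCKS4: PROXY_SOCKS4_WANT_CONNECT_OK -> PROXY_CONNECTED
+ *   SOCKS5: WANT_AUTH_METHOD_NONE -> WANT_CONNECT_OK -> PROXY_CONNECTED
+ *   SOCKS5 (with RFC1929 auth offered):
+ *           WANT_AUTH_METHOD_RFC1929 -> WANT_AUTH_RFC1929_OK
+ *             -> WANT_CONNECT_OK -> PROXY_CONNECTED
+ *           (or straight to WANT_CONNECT_OK if the server picks no-auth)
+ */
+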
+/** Given a list of listener connections in <b>old_conns</b>, and list of
+ * port_cfg_t entries in <b>ports</b>, open a new listener for every port in
+ * <b>ports</b> that does not already have a listener in <b>old_conns</b>.
+ *
+ * Remove from <b>old_conns</b> every connection that has a corresponding
+ * entry in <b>ports</b>. Add to <b>new_conns</b> every connection we
+ * launch.
+ *
+ * If <b>control_listeners_only</b> is true, then we only open control
+ * listeners, and we do not remove any noncontrol listeners from old_conns.
+ *
+ * Return 0 on success, -1 on failure.
+ **/
+static int
+retry_listener_ports(smartlist_t *old_conns,
+ const smartlist_t *ports,
+ smartlist_t *new_conns,
+ int control_listeners_only)
+{
+ smartlist_t *launch = smartlist_new();
+ int r = 0;
+
+ if (control_listeners_only) {
+ SMARTLIST_FOREACH(ports, port_cfg_t *, p, {
+ if (p->type == CONN_TYPE_CONTROL_LISTENER)
+ smartlist_add(launch, p);
+ });
+ } else {
+ smartlist_add_all(launch, ports);
+ }
+
+ /* Iterate through old_conns, comparing it to launch: remove from both lists
+ * each pair of elements that corresponds to the same port. */
+ SMARTLIST_FOREACH_BEGIN(old_conns, connection_t *, conn) {
+ const port_cfg_t *found_port = NULL;
+
+ /* Okay, so this is a listener. Is it configured? */
+ SMARTLIST_FOREACH_BEGIN(launch, const port_cfg_t *, wanted) {
+ if (conn->type != wanted->type)
+ continue;
+ if ((conn->socket_family != AF_UNIX && wanted->is_unix_addr) ||
+ (conn->socket_family == AF_UNIX && ! wanted->is_unix_addr))
+ continue;
+
+ if (wanted->server_cfg.no_listen)
+ continue; /* We don't want to open a listener for this one */
+
+ if (wanted->is_unix_addr) {
+ if (conn->socket_family == AF_UNIX &&
+ !strcmp(wanted->unix_addr, conn->address)) {
+ found_port = wanted;
+ break;
+ }
+ } else {
+ int port_matches;
+ if (wanted->port == CFG_AUTO_PORT) {
+ port_matches = 1;
+ } else {
+ port_matches = (wanted->port == conn->port);
+ }
+ if (port_matches && tor_addr_eq(&wanted->addr, &conn->addr)) {
+ found_port = wanted;
+ break;
+ }
+ }
+ } SMARTLIST_FOREACH_END(wanted);
+
+ if (found_port) {
+ /* This listener is already running; we don't need to launch it. */
+ //log_debug(LD_NET, "Already have %s on %s:%d",
+ // conn_type_to_string(found_port->type), conn->address, conn->port);
+ smartlist_remove(launch, found_port);
+ /* And we can remove the connection from old_conns too. */
+ SMARTLIST_DEL_CURRENT(old_conns, conn);
+ }
+ } SMARTLIST_FOREACH_END(conn);
+
+ /* Now open all the listeners that are configured but not opened. */
+ SMARTLIST_FOREACH_BEGIN(launch, const port_cfg_t *, port) {
+ struct sockaddr *listensockaddr;
+ socklen_t listensocklen = 0;
+ char *address=NULL;
+ connection_t *conn;
+ int real_port = port->port == CFG_AUTO_PORT ? 0 : port->port;
+ tor_assert(real_port <= UINT16_MAX);
+ if (port->server_cfg.no_listen)
+ continue;
+
+#ifndef _WIN32
+ /* We don't need to be root to create a UNIX socket, so defer until after
+ * setuid. */
+ const or_options_t *options = get_options();
+ if (port->is_unix_addr && !geteuid() && (options->User) &&
+ strcmp(options->User, "root"))
+ continue;
+#endif /* !defined(_WIN32) */
+
+ if (port->is_unix_addr) {
+ listensockaddr = (struct sockaddr *)
+ create_unix_sockaddr(port->unix_addr,
+ &address, &listensocklen);
+ } else {
+ listensockaddr = tor_malloc(sizeof(struct sockaddr_storage));
+ listensocklen = tor_addr_to_sockaddr(&port->addr,
+ real_port,
+ listensockaddr,
+ sizeof(struct sockaddr_storage));
+ address = tor_addr_to_str_dup(&port->addr);
+ }
+
+ if (listensockaddr) {
+ conn = connection_listener_new(listensockaddr, listensocklen,
+ port->type, address, port);
+ tor_free(listensockaddr);
+ tor_free(address);
+ } else {
+ conn = NULL;
+ }
+
+ if (!conn) {
+ r = -1;
+ } else {
+ if (new_conns)
+ smartlist_add(new_conns, conn);
+ }
+ } SMARTLIST_FOREACH_END(port);
+
+ smartlist_free(launch);
+
+ return r;
+}
+
+/** Launch listeners for each port you should have open. Only launch
+ * listeners who are not already open, and only close listeners we no longer
+ * want.
+ *
+ * Add all old conns that should be closed to <b>replaced_conns</b>.
+ * Add all new connections to <b>new_conns</b>.
+ *
+ * If <b>close_all_noncontrol</b> is true, then we only open control
+ * listeners, and we close all other listeners.
+ */
+int
+retry_all_listeners(smartlist_t *replaced_conns,
+ smartlist_t *new_conns, int close_all_noncontrol)
+{
+ smartlist_t *listeners = smartlist_new();
+ const or_options_t *options = get_options();
+ int retval = 0;
+ const uint16_t old_or_port = router_get_advertised_or_port(options);
+ const uint16_t old_or_port_ipv6 =
+ router_get_advertised_or_port_by_af(options,AF_INET6);
+ const uint16_t old_dir_port = router_get_advertised_dir_port(options, 0);
+
+ SMARTLIST_FOREACH_BEGIN(get_connection_array(), connection_t *, conn) {
+ if (connection_is_listener(conn) && !conn->marked_for_close)
+ smartlist_add(listeners, conn);
+ } SMARTLIST_FOREACH_END(conn);
+
+ if (retry_listener_ports(listeners,
+ get_configured_ports(),
+ new_conns,
+ close_all_noncontrol) < 0)
+ retval = -1;
+
+ /* Any members that were still in 'listeners' don't correspond to
+ * any configured port. Kill 'em. */
+ SMARTLIST_FOREACH_BEGIN(listeners, connection_t *, conn) {
+ log_notice(LD_NET, "Closing no-longer-configured %s on %s:%d",
+ conn_type_to_string(conn->type), conn->address, conn->port);
+ if (replaced_conns) {
+ smartlist_add(replaced_conns, conn);
+ } else {
+ connection_close_immediate(conn);
+ connection_mark_for_close(conn);
+ }
+ } SMARTLIST_FOREACH_END(conn);
+
+ smartlist_free(listeners);
+
+ if (old_or_port != router_get_advertised_or_port(options) ||
+ old_or_port_ipv6 != router_get_advertised_or_port_by_af(options,
+ AF_INET6) ||
+ old_dir_port != router_get_advertised_dir_port(options, 0)) {
+ /* Our chosen ORPort or DirPort is not what it used to be: the
+ * descriptor we had (if any) should be regenerated. (We won't
+ * automatically notice this because of changes in the option,
+ * since the value could be "auto".) */
+ mark_my_descriptor_dirty("Chosen Or/DirPort changed");
+ }
+
+ return retval;
+}
+
+/** Mark every listener of type other than CONTROL_LISTENER to be closed. */
+void
+connection_mark_all_noncontrol_listeners(void)
+{
+ SMARTLIST_FOREACH_BEGIN(get_connection_array(), connection_t *, conn) {
+ if (conn->marked_for_close)
+ continue;
+ if (conn->type == CONN_TYPE_CONTROL_LISTENER)
+ continue;
+ if (connection_is_listener(conn))
+ connection_mark_for_close(conn);
+ } SMARTLIST_FOREACH_END(conn);
+}
+
+/** Mark every external connection not used for controllers for close. */
+void
+connection_mark_all_noncontrol_connections(void)
+{
+ SMARTLIST_FOREACH_BEGIN(get_connection_array(), connection_t *, conn) {
+ if (conn->marked_for_close)
+ continue;
+ switch (conn->type) {
+ case CONN_TYPE_CONTROL_LISTENER:
+ case CONN_TYPE_CONTROL:
+ break;
+ case CONN_TYPE_AP:
+ connection_mark_unattached_ap(TO_ENTRY_CONN(conn),
+ END_STREAM_REASON_HIBERNATING);
+ break;
+ case CONN_TYPE_OR:
+ {
+ or_connection_t *orconn = TO_OR_CONN(conn);
+ if (orconn->chan) {
+ connection_or_close_normally(orconn, 0);
+ } else {
+ /*
+ * There should have been one, but mark for close and hope
+ * for the best..
+ */
+ connection_mark_for_close(conn);
+ }
+ }
+ break;
+ default:
+ connection_mark_for_close(conn);
+ break;
+ }
+ } SMARTLIST_FOREACH_END(conn);
+}
+
+/** Return 1 if we should apply rate limiting to <b>conn</b>, and 0
+ * otherwise.
+ * Right now this just checks if it's an internal IP address or an
+ * internal connection. We also should, but don't, check if the connection
+ * uses pluggable transports, since we should then limit it even if it
+ * comes from an internal IP address. */
+static int
+connection_is_rate_limited(connection_t *conn)
+{
+ const or_options_t *options = get_options();
+ if (conn->linked)
+ return 0; /* Internal connection */
+ else if (! options->CountPrivateBandwidth &&
+ (tor_addr_family(&conn->addr) == AF_UNSPEC || /* no address */
+ tor_addr_family(&conn->addr) == AF_UNIX || /* no address */
+ tor_addr_is_internal(&conn->addr, 0)))
+ return 0; /* Internal address */
+ else
+ return 1;
+}
+
+/** When was either global write bucket last empty? If this was recent, then
+ * we're probably low on bandwidth, and we should be stingy with our bandwidth
+ * usage. */
+static time_t write_buckets_last_empty_at = -100;
+
+/** How many seconds of no active local circuits will make the
+ * connection revert to the "relayed" bandwidth class? */
+#define CLIENT_IDLE_TIME_FOR_PRIORITY 30
+
+/** Return 1 if <b>conn</b> should use tokens from the "relayed"
+ * bandwidth rates, else 0. Currently, only OR conns with bandwidth
+ * class 1, and directory conns that are serving data out, count.
+ */
+static int
+connection_counts_as_relayed_traffic(connection_t *conn, time_t now)
+{
+ if (conn->type == CONN_TYPE_OR &&
+ connection_or_client_used(TO_OR_CONN(conn)) +
+ CLIENT_IDLE_TIME_FOR_PRIORITY < now)
+ return 1;
+ if (conn->type == CONN_TYPE_DIR && DIR_CONN_IS_SERVER(conn))
+ return 1;
+ return 0;
+}
+
+/** Helper function to decide how many bytes out of <b>global_bucket_val</b>
+ * we're willing to use for this transaction. <b>base</b> is the size
+ * of a cell on the network; <b>priority</b> says whether we should
+ * write many of them or just a few; and <b>conn_bucket</b> (if
+ * non-negative) provides an upper limit for our answer. */
+static ssize_t
+connection_bucket_get_share(int base, int priority,
+ ssize_t global_bucket_val, ssize_t conn_bucket)
+{
+ ssize_t at_most;
+ ssize_t num_bytes_high = (priority ? 32 : 16) * base;
+ ssize_t num_bytes_low = (priority ? 4 : 2) * base;
+
+ /* Do a rudimentary limiting so one circuit can't hog a connection.
+ * Pick at most 32 cells, at least 4 cells if possible, and if we're in
+ * the middle pick 1/8 of the available bandwidth. */
+ at_most = global_bucket_val / 8;
+ at_most -= (at_most % base); /* round down */
+ if (at_most > num_bytes_high) /* 16 KB, or 8 KB for low-priority */
+ at_most = num_bytes_high;
+ else if (at_most < num_bytes_low) /* 2 KB, or 1 KB for low-priority */
+ at_most = num_bytes_low;
+
+ if (at_most > global_bucket_val)
+ at_most = global_bucket_val;
+
+ if (conn_bucket >= 0 && at_most > conn_bucket)
+ at_most = conn_bucket;
+
+ if (at_most < 0)
+ return 0;
+ return at_most;
+}
+
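+/* Worked example (illustrative numbers): with base = 514 (the wide-circ-id
+ * cell size), priority = 1, global_bucket_val = 100000 and no per-conn
+ * limit, we start from 100000/8 = 12500, round down to a cell multiple
+ * (24 * 514 = 12336), and since that lies between the 4-cell floor (2056)
+ * and the 32-cell ceiling (16448), the share is 12336 bytes. */
+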
+/** How many bytes at most can we read onto this connection? */
+static ssize_t
+connection_bucket_read_limit(connection_t *conn, time_t now)
+{
+ int base = RELAY_PAYLOAD_SIZE;
+ int priority = conn->type != CONN_TYPE_DIR;
+ ssize_t conn_bucket = -1;
+ size_t global_bucket_val = token_bucket_rw_get_read(&global_bucket);
+
+ if (connection_speaks_cells(conn)) {
+ or_connection_t *or_conn = TO_OR_CONN(conn);
+ if (conn->state == OR_CONN_STATE_OPEN)
+ conn_bucket = token_bucket_rw_get_read(&or_conn->bucket);
+ base = get_cell_network_size(or_conn->wide_circ_ids);
+ }
+
+ if (!connection_is_rate_limited(conn)) {
+ /* be willing to read on local conns even if our buckets are empty */
+ return conn_bucket>=0 ? conn_bucket : 1<<14;
+ }
+
+ if (connection_counts_as_relayed_traffic(conn, now)) {
+ size_t relayed = token_bucket_rw_get_read(&global_relayed_bucket);
+ global_bucket_val = MIN(global_bucket_val, relayed);
+ }
+
+ return connection_bucket_get_share(base, priority,
+ global_bucket_val, conn_bucket);
+}
+
+/** How many bytes at most can we write onto this connection? */
+ssize_t
+connection_bucket_write_limit(connection_t *conn, time_t now)
+{
+ int base = RELAY_PAYLOAD_SIZE;
+ int priority = conn->type != CONN_TYPE_DIR;
+ size_t conn_bucket = conn->outbuf_flushlen;
+ size_t global_bucket_val = token_bucket_rw_get_write(&global_bucket);
+
+ if (!connection_is_rate_limited(conn)) {
+ /* be willing to write to local conns even if our buckets are empty */
+ return conn->outbuf_flushlen;
+ }
+
+ if (connection_speaks_cells(conn)) {
+ /* use the per-conn write limit if it's lower */
+ or_connection_t *or_conn = TO_OR_CONN(conn);
+ if (conn->state == OR_CONN_STATE_OPEN)
+ conn_bucket = MIN(conn_bucket,
+ token_bucket_rw_get_write(&or_conn->bucket));
+ base = get_cell_network_size(or_conn->wide_circ_ids);
+ }
+
+ if (connection_counts_as_relayed_traffic(conn, now)) {
+ size_t relayed = token_bucket_rw_get_write(&global_relayed_bucket);
+ global_bucket_val = MIN(global_bucket_val, relayed);
+ }
+
+ return connection_bucket_get_share(base, priority,
+ global_bucket_val, conn_bucket);
+}
+
+/** Return 1 if the global write buckets are low enough that we
+ * shouldn't send <b>attempt</b> bytes of low-priority directory stuff
+ * out to <b>conn</b>. Else return 0.
+
+ *
+ * and 2 for v2 requests and later (statuses and descriptors).
+ *
+ * There are a lot of parameters we could use here:
+ * - global_relayed_write_bucket. Low is bad.
+ * - global_write_bucket. Low is bad.
+ * - bandwidthrate. Low is bad.
+ * - bandwidthburst. Not a big factor?
+ * - attempt. High is bad.
+ * - total bytes queued on outbufs. High is bad. But I'm wary of
+ * using this, since a few slow-flushing queues will pump up the
+ * number without meaning what we meant to mean. What we really
+ * mean is "total directory bytes added to outbufs recently", but
+ * that's harder to quantify and harder to keep track of.
+ */
+int
+global_write_bucket_low(connection_t *conn, size_t attempt, int priority)
+{
+ size_t smaller_bucket =
+ MIN(token_bucket_rw_get_write(&global_bucket),
+ token_bucket_rw_get_write(&global_relayed_bucket));
+ if (authdir_mode(get_options()) && priority>1)
+ return 0; /* there's always room to answer v2 if we're an auth dir */
+
+ if (!connection_is_rate_limited(conn))
+ return 0; /* local conns don't get limited */
+
+ if (smaller_bucket < attempt)
+ return 1; /* not enough space no matter the priority */
+
+ {
+ const time_t diff = approx_time() - write_buckets_last_empty_at;
+ if (diff <= 1)
+ return 1; /* we're already hitting our limits, no more please */
+ }
+
+ if (priority == 1) { /* old-style v1 query */
+ /* Could we handle *two* of these requests within the next two seconds? */
+ const or_options_t *options = get_options();
+ size_t can_write = (size_t) (smaller_bucket
+ + 2*(options->RelayBandwidthRate ? options->RelayBandwidthRate :
+ options->BandwidthRate));
+ if (can_write < 2*attempt)
+ return 1;
+ } else { /* v2 query */
+ /* no further constraints yet */
+ }
+ return 0;
+}
+
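+/* Worked example (illustrative numbers): a priority-1 attempt of 15000
+ * bytes with smaller_bucket = 20000 passes the simple space check, but
+ * with BandwidthRate = 4000 and no RelayBandwidthRate (and assuming the
+ * buckets were not empty within the last second), the two-request test
+ * computes can_write = 20000 + 2*4000 = 28000 < 2*15000 = 30000, so we
+ * report the buckets as too low. */
+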
+/** When did we last tell the accounting subsystem about transmitted
+ * bandwidth? */
+static time_t last_recorded_accounting_at = 0;
+
+/** Helper: adjusts our bandwidth history and informs the controller as
+ * appropriate, given that we have just read <b>num_read</b> bytes and written
+ * <b>num_written</b> bytes on <b>conn</b>. */
+static void
+record_num_bytes_transferred_impl(connection_t *conn,
+ time_t now, size_t num_read, size_t num_written)
+{
+ /* Count bytes of answering direct and tunneled directory requests */
+ if (conn->type == CONN_TYPE_DIR && conn->purpose == DIR_PURPOSE_SERVER) {
+ if (num_read > 0)
+ rep_hist_note_dir_bytes_read(num_read, now);
+ if (num_written > 0)
+ rep_hist_note_dir_bytes_written(num_written, now);
+ }
+
+ if (!connection_is_rate_limited(conn))
+ return; /* local IPs are free */
+
+ if (conn->type == CONN_TYPE_OR)
+ rep_hist_note_or_conn_bytes(conn->global_identifier, num_read,
+ num_written, now);
+
+ if (num_read > 0) {
+ rep_hist_note_bytes_read(num_read, now);
+ }
+ if (num_written > 0) {
+ rep_hist_note_bytes_written(num_written, now);
+ }
+ if (conn->type == CONN_TYPE_EXIT)
+ rep_hist_note_exit_bytes(conn->port, num_written, num_read);
+
+ /* Remember these bytes towards statistics. */
+ stats_increment_bytes_read_and_written(num_read, num_written);
+
+ /* Remember these bytes towards accounting. */
+ if (accounting_is_enabled(get_options())) {
+ if (now > last_recorded_accounting_at && last_recorded_accounting_at) {
+ accounting_add_bytes(num_read, num_written,
+ (int)(now - last_recorded_accounting_at));
+ } else {
+ accounting_add_bytes(num_read, num_written, 0);
+ }
+ last_recorded_accounting_at = now;
+ }
+}
+
+/** We just read <b>num_read</b> and wrote <b>num_written</b> bytes
+ * onto <b>conn</b>. Decrement buckets appropriately. */
+static void
+connection_buckets_decrement(connection_t *conn, time_t now,
+ size_t num_read, size_t num_written)
+{
+ if (num_written >= INT_MAX || num_read >= INT_MAX) {
+ log_err(LD_BUG, "Value out of range. num_read=%lu, num_written=%lu, "
+ "connection type=%s, state=%s",
+ (unsigned long)num_read, (unsigned long)num_written,
+ conn_type_to_string(conn->type),
+ conn_state_to_string(conn->type, conn->state));
+ tor_assert_nonfatal_unreached();
+ if (num_written >= INT_MAX)
+ num_written = 1;
+ if (num_read >= INT_MAX)
+ num_read = 1;
+ }
+
+ record_num_bytes_transferred_impl(conn, now, num_read, num_written);
+
+ if (!connection_is_rate_limited(conn))
+ return; /* local IPs are free */
+
+ unsigned flags = 0;
+ if (connection_counts_as_relayed_traffic(conn, now)) {
+ flags = token_bucket_rw_dec(&global_relayed_bucket, num_read, num_written);
+ }
+ flags |= token_bucket_rw_dec(&global_bucket, num_read, num_written);
+
+ if (flags & TB_WRITE) {
+ write_buckets_last_empty_at = now;
+ }
+ if (connection_speaks_cells(conn) && conn->state == OR_CONN_STATE_OPEN) {
+ or_connection_t *or_conn = TO_OR_CONN(conn);
+ token_bucket_rw_dec(&or_conn->bucket, num_read, num_written);
+ }
+}
+
+/**
+ * Mark <b>conn</b> as needing to stop reading because bandwidth has been
+ * exhausted. If <b>is_global_bw</b>, it is closing because global bandwidth
+ * limit has been exhausted. Otherwise, it is closing because its own
+ * bandwidth limit has been exhausted.
+ */
+void
+connection_read_bw_exhausted(connection_t *conn, bool is_global_bw)
+{
+ (void)is_global_bw;
+ conn->read_blocked_on_bw = 1;
+ connection_stop_reading(conn);
+ reenable_blocked_connection_schedule();
+}
+
+/**
+ * Mark <b>conn</b> as needing to stop writing because write bandwidth has
+ * been exhausted. If <b>is_global_bw</b>, it is blocked because the global
+ * bandwidth limit has been exhausted. Otherwise, it is blocked because its
+ * own bandwidth limit has been exhausted.
+ */
+void
+connection_write_bw_exhausted(connection_t *conn, bool is_global_bw)
+{
+ (void)is_global_bw;
+ conn->write_blocked_on_bw = 1;
+ connection_stop_writing(conn);
+ reenable_blocked_connection_schedule();
+}
+
+/** If we have exhausted our global buckets, or the buckets for conn,
+ * stop reading. */
+void
+connection_consider_empty_read_buckets(connection_t *conn)
+{
+ const char *reason;
+
+ if (!connection_is_rate_limited(conn))
+ return; /* Always okay. */
+
+ int is_global = 1;
+
+ if (token_bucket_rw_get_read(&global_bucket) <= 0) {
+ reason = "global read bucket exhausted. Pausing.";
+ } else if (connection_counts_as_relayed_traffic(conn, approx_time()) &&
+ token_bucket_rw_get_read(&global_relayed_bucket) <= 0) {
+ reason = "global relayed read bucket exhausted. Pausing.";
+ } else if (connection_speaks_cells(conn) &&
+ conn->state == OR_CONN_STATE_OPEN &&
+ token_bucket_rw_get_read(&TO_OR_CONN(conn)->bucket) <= 0) {
+ reason = "connection read bucket exhausted. Pausing.";
+ is_global = false;
+ } else
+ return; /* all good, no need to stop it */
+
+ LOG_FN_CONN(conn, (LOG_DEBUG, LD_NET, "%s", reason));
+ connection_read_bw_exhausted(conn, is_global);
+}
+
+/** If we have exhausted our global buckets, or the buckets for conn,
+ * stop writing. */
+void
+connection_consider_empty_write_buckets(connection_t *conn)
+{
+ const char *reason;
+
+ if (!connection_is_rate_limited(conn))
+ return; /* Always okay. */
+
+ bool is_global = true;
+ if (token_bucket_rw_get_write(&global_bucket) <= 0) {
+ reason = "global write bucket exhausted. Pausing.";
+ } else if (connection_counts_as_relayed_traffic(conn, approx_time()) &&
+ token_bucket_rw_get_write(&global_relayed_bucket) <= 0) {
+ reason = "global relayed write bucket exhausted. Pausing.";
+ } else if (connection_speaks_cells(conn) &&
+ conn->state == OR_CONN_STATE_OPEN &&
+ token_bucket_rw_get_write(&TO_OR_CONN(conn)->bucket) <= 0) {
+ reason = "connection write bucket exhausted. Pausing.";
+ is_global = false;
+ } else
+ return; /* all good, no need to stop it */
+
+ LOG_FN_CONN(conn, (LOG_DEBUG, LD_NET, "%s", reason));
+ connection_write_bw_exhausted(conn, is_global);
+}
+
+/** Initialize the global buckets to the values configured in the
+ * options */
+void
+connection_bucket_init(void)
+{
+ const or_options_t *options = get_options();
+ const uint32_t now_ts = monotime_coarse_get_stamp();
+ token_bucket_rw_init(&global_bucket,
+ (int32_t)options->BandwidthRate,
+ (int32_t)options->BandwidthBurst,
+ now_ts);
+ if (options->RelayBandwidthRate) {
+ token_bucket_rw_init(&global_relayed_bucket,
+ (int32_t)options->RelayBandwidthRate,
+ (int32_t)options->RelayBandwidthBurst,
+ now_ts);
+ } else {
+ token_bucket_rw_init(&global_relayed_bucket,
+ (int32_t)options->BandwidthRate,
+ (int32_t)options->BandwidthBurst,
+ now_ts);
+ }
+
+ reenable_blocked_connection_init(options);
+}
+
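+/* Example (sketch, illustrative torrc values): with
+ *
+ *   BandwidthRate 1 MB
+ *   BandwidthBurst 2 MB
+ *
+ * and no RelayBandwidthRate set, both global_bucket and
+ * global_relayed_bucket refill at roughly 1 MB/s up to a 2 MB burst, so
+ * relayed traffic shares the same limits as everything else. */
+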
+/** Update the global connection bucket settings to a new value. */
+void
+connection_bucket_adjust(const or_options_t *options)
+{
+ token_bucket_rw_adjust(&global_bucket,
+ (int32_t)options->BandwidthRate,
+ (int32_t)options->BandwidthBurst);
+ if (options->RelayBandwidthRate) {
+ token_bucket_rw_adjust(&global_relayed_bucket,
+ (int32_t)options->RelayBandwidthRate,
+ (int32_t)options->RelayBandwidthBurst);
+ } else {
+ token_bucket_rw_adjust(&global_relayed_bucket,
+ (int32_t)options->BandwidthRate,
+ (int32_t)options->BandwidthBurst);
+ }
+}
+
+/**
+ * Cached value of the last coarse-timestamp when we refilled the
+ * global buckets.
+ */
+static uint32_t last_refilled_global_buckets_ts=0;
+/**
+ * Refill the token buckets for a single connection <b>conn</b>, and the
+ * global token buckets as appropriate. Requires that <b>now_ts</b> is
+ * the time in coarse timestamp units.
+ */
+static void
+connection_bucket_refill_single(connection_t *conn, uint32_t now_ts)
+{
+ /* Note that we only check for equality here: the underlying
+ * token bucket functions can handle moving backwards in time if they
+ * need to. */
+ if (now_ts != last_refilled_global_buckets_ts) {
+ token_bucket_rw_refill(&global_bucket, now_ts);
+ token_bucket_rw_refill(&global_relayed_bucket, now_ts);
+ last_refilled_global_buckets_ts = now_ts;
+ }
+
+ if (connection_speaks_cells(conn) && conn->state == OR_CONN_STATE_OPEN) {
+ or_connection_t *or_conn = TO_OR_CONN(conn);
+ token_bucket_rw_refill(&or_conn->bucket, now_ts);
+ }
+}
+
+/**
+ * Event to re-enable all connections that were previously blocked on read or
+ * write.
+ */
+static mainloop_event_t *reenable_blocked_connections_ev = NULL;
+
+/** True iff reenable_blocked_connections_ev is currently scheduled. */
+static int reenable_blocked_connections_is_scheduled = 0;
+
+/** Delay after which to run reenable_blocked_connections_ev. */
+static struct timeval reenable_blocked_connections_delay;
+
+/**
+ * Re-enable all connections that were previously blocked on read or write.
+ * This event is scheduled after enough time has elapsed to be sure
+ * that the buckets will refill when the connections have something to do.
+ */
+static void
+reenable_blocked_connections_cb(mainloop_event_t *ev, void *arg)
+{
+ (void)ev;
+ (void)arg;
+ SMARTLIST_FOREACH_BEGIN(get_connection_array(), connection_t *, conn) {
+ if (conn->read_blocked_on_bw == 1) {
+ connection_start_reading(conn);
+ conn->read_blocked_on_bw = 0;
+ }
+ if (conn->write_blocked_on_bw == 1) {
+ connection_start_writing(conn);
+ conn->write_blocked_on_bw = 0;
+ }
+ } SMARTLIST_FOREACH_END(conn);
+
+ reenable_blocked_connections_is_scheduled = 0;
+}
+
+/**
+ * Initialize the mainloop event that we use to wake up connections that
+ * find themselves blocked on bandwidth.
+ */
+static void
+reenable_blocked_connection_init(const or_options_t *options)
+{
+ if (! reenable_blocked_connections_ev) {
+ reenable_blocked_connections_ev =
+ mainloop_event_new(reenable_blocked_connections_cb, NULL);
+ reenable_blocked_connections_is_scheduled = 0;
+ }
+ time_t sec = options->TokenBucketRefillInterval / 1000;
+ int msec = (options->TokenBucketRefillInterval % 1000);
+ reenable_blocked_connections_delay.tv_sec = sec;
+ reenable_blocked_connections_delay.tv_usec = msec * 1000;
+}
+
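+/* Example (illustrative value): TokenBucketRefillInterval = 100 (msec)
+ * yields sec = 0 and msec = 100, i.e. a re-enable delay of
+ * { tv_sec = 0, tv_usec = 100000 }. */
+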
+/**
+ * Called when we have blocked a connection for being low on bandwidth:
+ * schedule an event to reenable such connections, if it is not already
+ * scheduled.
+ */
+static void
+reenable_blocked_connection_schedule(void)
+{
+ if (reenable_blocked_connections_is_scheduled)
+ return;
+ if (BUG(reenable_blocked_connections_ev == NULL)) {
+ reenable_blocked_connection_init(get_options());
+ }
+ mainloop_event_schedule(reenable_blocked_connections_ev,
+ &reenable_blocked_connections_delay);
+ reenable_blocked_connections_is_scheduled = 1;
+}
+
+/** Read bytes from conn-\>s and process them.
+ *
+ * It calls connection_buf_read_from_socket() to bring in any new bytes,
+ * and then calls connection_process_inbuf() to process them.
+ *
+ * Mark the connection and return -1 if you want to close it, else
+ * return 0.
+ */
+static int
+connection_handle_read_impl(connection_t *conn)
+{
+ ssize_t max_to_read=-1, try_to_read;
+ size_t before, n_read = 0;
+ int socket_error = 0;
+
+ if (conn->marked_for_close)
+ return 0; /* do nothing */
+
+ conn->timestamp_last_read_allowed = approx_time();
+
+ connection_bucket_refill_single(conn, monotime_coarse_get_stamp());
+
+ switch (conn->type) {
+ case CONN_TYPE_OR_LISTENER:
+ return connection_handle_listener_read(conn, CONN_TYPE_OR);
+ case CONN_TYPE_EXT_OR_LISTENER:
+ return connection_handle_listener_read(conn, CONN_TYPE_EXT_OR);
+ case CONN_TYPE_AP_LISTENER:
+ case CONN_TYPE_AP_TRANS_LISTENER:
+ case CONN_TYPE_AP_NATD_LISTENER:
+ case CONN_TYPE_AP_HTTP_CONNECT_LISTENER:
+ return connection_handle_listener_read(conn, CONN_TYPE_AP);
+ case CONN_TYPE_DIR_LISTENER:
+ return connection_handle_listener_read(conn, CONN_TYPE_DIR);
+ case CONN_TYPE_CONTROL_LISTENER:
+ return connection_handle_listener_read(conn, CONN_TYPE_CONTROL);
+ case CONN_TYPE_AP_DNS_LISTENER:
+ /* This should never happen; eventdns.c handles the reads here. */
+ tor_fragile_assert();
+ return 0;
+ }
+
+ loop_again:
+ try_to_read = max_to_read;
+ tor_assert(!conn->marked_for_close);
+
+ before = buf_datalen(conn->inbuf);
+ if (connection_buf_read_from_socket(conn, &max_to_read, &socket_error) < 0) {
+ /* There's a read error; kill the connection.*/
+ if (conn->type == CONN_TYPE_OR) {
+ connection_or_notify_error(TO_OR_CONN(conn),
+ socket_error != 0 ?
+ errno_to_orconn_end_reason(socket_error) :
+ END_OR_CONN_REASON_CONNRESET,
+ socket_error != 0 ?
+ tor_socket_strerror(socket_error) :
+ "(unknown, errno was 0)");
+ }
+ if (CONN_IS_EDGE(conn)) {
+ edge_connection_t *edge_conn = TO_EDGE_CONN(conn);
+ connection_edge_end_errno(edge_conn);
+ if (conn->type == CONN_TYPE_AP && TO_ENTRY_CONN(conn)->socks_request) {
+ /* broken, don't send a socks reply back */
+ TO_ENTRY_CONN(conn)->socks_request->has_finished = 1;
+ }
+ }
+ connection_close_immediate(conn); /* Don't flush; connection is dead. */
+ /*
+ * This can bypass normal channel checking since we did
+ * connection_or_notify_error() above.
+ */
+ connection_mark_for_close_internal(conn);
+ return -1;
+ }
+ n_read += buf_datalen(conn->inbuf) - before;
+ if (CONN_IS_EDGE(conn) && try_to_read != max_to_read) {
+ /* instruct it not to try to package partial cells. */
+ if (connection_process_inbuf(conn, 0) < 0) {
+ return -1;
+ }
+ if (!conn->marked_for_close &&
+ connection_is_reading(conn) &&
+ !conn->inbuf_reached_eof &&
+ max_to_read > 0)
+ goto loop_again; /* try reading again, in case more is here now */
+ }
+ /* one last try, packaging partial cells and all. */
+ if (!conn->marked_for_close &&
+ connection_process_inbuf(conn, 1) < 0) {
+ return -1;
+ }
+ if (conn->linked_conn) {
+ /* The other side's handle_write() will never actually get called, so
+     * we need to invoke the appropriate callbacks ourselves. */
+ connection_t *linked = conn->linked_conn;
+
+ if (n_read) {
+ /* Probably a no-op, since linked conns typically don't count for
+ * bandwidth rate limiting. But do it anyway so we can keep stats
+ * accurately. Note that since we read the bytes from conn, and
+ * we're writing the bytes onto the linked connection, we count
+ * these as <i>written</i> bytes. */
+ connection_buckets_decrement(linked, approx_time(), 0, n_read);
+
+ if (connection_flushed_some(linked) < 0)
+ connection_mark_for_close(linked);
+ if (!connection_wants_to_flush(linked))
+ connection_finished_flushing(linked);
+ }
+
+ if (!buf_datalen(linked->outbuf) && conn->active_on_link)
+ connection_stop_reading_from_linked_conn(conn);
+ }
+ /* If we hit the EOF, call connection_reached_eof(). */
+ if (!conn->marked_for_close &&
+ conn->inbuf_reached_eof &&
+ connection_reached_eof(conn) < 0) {
+ return -1;
+ }
+ return 0;
+}
+
+/** Wrapper for connection_handle_read_impl(): update the current time, then
+ * read and process new bytes from <b>conn</b>. Return -1 if conn should be
+ * closed, else return 0. */
+int
+connection_handle_read(connection_t *conn)
+{
+ int res;
+ update_current_time(time(NULL));
+ res = connection_handle_read_impl(conn);
+ return res;
+}
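+
+/*
+ * Illustrative sketch (an assumption, not part of this change): the
+ * mainloop's read callback is expected to drive this function roughly as
+ * follows; the callback name here is hypothetical.
+ *
+ *   static void
+ *   example_read_callback(evutil_socket_t fd, short event, void *arg)
+ *   {
+ *     connection_t *conn = arg;
+ *     (void)fd; (void)event;
+ *     if (connection_handle_read(conn) < 0) {
+ *       // conn was marked for close inside connection_handle_read()
+ *     }
+ *   }
+ */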
+
+/** Pull in new bytes from conn-\>s or conn-\>linked_conn onto conn-\>inbuf,
+ * either directly or via TLS. Reduce the token buckets by the number of bytes
+ * read.
+ *
+ * If *max_to_read is -1, then decide it ourselves, else go with the
+ * value passed to us. When returning, if it's changed, subtract the
+ * number of bytes we read from *max_to_read.
+ *
+ * Return -1 if we want to break conn, else return 0.
+ */
+static int
+connection_buf_read_from_socket(connection_t *conn, ssize_t *max_to_read,
+ int *socket_error)
+{
+ int result;
+ ssize_t at_most = *max_to_read;
+ size_t slack_in_buf, more_to_read;
+ size_t n_read = 0, n_written = 0;
+
+ if (at_most == -1) { /* we need to initialize it */
+ /* how many bytes are we allowed to read? */
+ at_most = connection_bucket_read_limit(conn, approx_time());
+ }
+
+ slack_in_buf = buf_slack(conn->inbuf);
+ again:
+ if ((size_t)at_most > slack_in_buf && slack_in_buf >= 1024) {
+ more_to_read = at_most - slack_in_buf;
+ at_most = slack_in_buf;
+ } else {
+ more_to_read = 0;
+ }
+
+ if (connection_speaks_cells(conn) &&
+ conn->state > OR_CONN_STATE_PROXY_HANDSHAKING) {
+ int pending;
+ or_connection_t *or_conn = TO_OR_CONN(conn);
+ size_t initial_size;
+ if (conn->state == OR_CONN_STATE_TLS_HANDSHAKING ||
+ conn->state == OR_CONN_STATE_TLS_CLIENT_RENEGOTIATING) {
+ /* continue handshaking even if global token bucket is empty */
+ return connection_tls_continue_handshake(or_conn);
+ }
+
+ log_debug(LD_NET,
+ "%d: starting, inbuf_datalen %ld (%d pending in tls object)."
+ " at_most %ld.",
+ (int)conn->s,(long)buf_datalen(conn->inbuf),
+ tor_tls_get_pending_bytes(or_conn->tls), (long)at_most);
+
+ initial_size = buf_datalen(conn->inbuf);
+ /* else open, or closing */
+ result = buf_read_from_tls(conn->inbuf, or_conn->tls, at_most);
+ if (TOR_TLS_IS_ERROR(result) || result == TOR_TLS_CLOSE)
+ or_conn->tls_error = result;
+ else
+ or_conn->tls_error = 0;
+
+ switch (result) {
+ case TOR_TLS_CLOSE:
+ case TOR_TLS_ERROR_IO:
+ log_debug(LD_NET,"TLS connection closed %son read. Closing. "
+ "(Nickname %s, address %s)",
+ result == TOR_TLS_CLOSE ? "cleanly " : "",
+ or_conn->nickname ? or_conn->nickname : "not set",
+ conn->address);
+ return result;
+ CASE_TOR_TLS_ERROR_ANY_NONIO:
+ log_debug(LD_NET,"tls error [%s]. breaking (nickname %s, address %s).",
+ tor_tls_err_to_string(result),
+ or_conn->nickname ? or_conn->nickname : "not set",
+ conn->address);
+ return result;
+ case TOR_TLS_WANTWRITE:
+ connection_start_writing(conn);
+ return 0;
+ case TOR_TLS_WANTREAD:
+ if (conn->in_connection_handle_write) {
+ /* We've been invoked from connection_handle_write, because we're
+ * waiting for a TLS renegotiation, the renegotiation started, and
+ * SSL_read returned WANTWRITE. But now SSL_read is saying WANTREAD
+ * again. Stop waiting for write events now, or else we'll
+ * busy-loop until data arrives for us to read. */
+ connection_stop_writing(conn);
+ if (!connection_is_reading(conn))
+ connection_start_reading(conn);
+ }
+ /* we're already reading, one hopes */
+ break;
+ case TOR_TLS_DONE: /* no data read, so nothing to process */
+ break; /* so we call bucket_decrement below */
+ default:
+ break;
+ }
+ pending = tor_tls_get_pending_bytes(or_conn->tls);
+ if (pending) {
+ /* If we have any pending bytes, we read them now. This *can*
+ * take us over our read allotment, but really we shouldn't be
+ * believing that SSL bytes are the same as TCP bytes anyway. */
+ int r2 = buf_read_from_tls(conn->inbuf, or_conn->tls, pending);
+ if (BUG(r2<0)) {
+ log_warn(LD_BUG, "apparently, reading pending bytes can fail.");
+ return -1;
+ }
+ }
+ result = (int)(buf_datalen(conn->inbuf)-initial_size);
+ tor_tls_get_n_raw_bytes(or_conn->tls, &n_read, &n_written);
+ log_debug(LD_GENERAL, "After TLS read of %d: %ld read, %ld written",
+ result, (long)n_read, (long)n_written);
+ } else if (conn->linked) {
+ if (conn->linked_conn) {
+ result = buf_move_to_buf(conn->inbuf, conn->linked_conn->outbuf,
+ &conn->linked_conn->outbuf_flushlen);
+ } else {
+ result = 0;
+ }
+ //log_notice(LD_GENERAL, "Moved %d bytes on an internal link!", result);
+ /* If the other side has disappeared, or if it's been marked for close and
+ * we flushed its outbuf, then we should set our inbuf_reached_eof. */
+ if (!conn->linked_conn ||
+ (conn->linked_conn->marked_for_close &&
+ buf_datalen(conn->linked_conn->outbuf) == 0))
+ conn->inbuf_reached_eof = 1;
+
+ n_read = (size_t) result;
+ } else {
+ /* !connection_speaks_cells, !conn->linked_conn. */
+ int reached_eof = 0;
+ CONN_LOG_PROTECT(conn,
+ result = buf_read_from_socket(conn->inbuf, conn->s,
+ at_most,
+ &reached_eof,
+ socket_error));
+ if (reached_eof)
+ conn->inbuf_reached_eof = 1;
+
+// log_fn(LOG_DEBUG,"read_to_buf returned %d.",read_result);
+
+ if (result < 0)
+ return -1;
+ n_read = (size_t) result;
+ }
+
+ if (n_read > 0) {
+ /* change *max_to_read */
+ *max_to_read = at_most - n_read;
+
+ /* Update edge_conn->n_read */
+ if (conn->type == CONN_TYPE_AP) {
+ edge_connection_t *edge_conn = TO_EDGE_CONN(conn);
+
+ /* Check for overflow: */
+ if (PREDICT_LIKELY(UINT32_MAX - edge_conn->n_read > n_read))
+ edge_conn->n_read += (int)n_read;
+ else
+ edge_conn->n_read = UINT32_MAX;
+ }
+
+ /* If CONN_BW events are enabled, update conn->n_read_conn_bw for
+ * OR/DIR/EXIT connections, checking for overflow. */
+ if (get_options()->TestingEnableConnBwEvent &&
+ (conn->type == CONN_TYPE_OR ||
+ conn->type == CONN_TYPE_DIR ||
+ conn->type == CONN_TYPE_EXIT)) {
+ if (PREDICT_LIKELY(UINT32_MAX - conn->n_read_conn_bw > n_read))
+ conn->n_read_conn_bw += (int)n_read;
+ else
+ conn->n_read_conn_bw = UINT32_MAX;
+ }
+ }
+
+ connection_buckets_decrement(conn, approx_time(), n_read, n_written);
+
+ if (more_to_read && result == at_most) {
+ slack_in_buf = buf_slack(conn->inbuf);
+ at_most = more_to_read;
+ goto again;
+ }
+
+ /* Call even if result is 0, since the global read bucket may
+ * have reached 0 on a different conn, and this connection needs to
+ * know to stop reading. */
+ connection_consider_empty_read_buckets(conn);
+ if (n_written > 0 && connection_is_writing(conn))
+ connection_consider_empty_write_buckets(conn);
+
+ return 0;
+}
+
+/** A pass-through to fetch_from_buf. */
+int
+connection_buf_get_bytes(char *string, size_t len, connection_t *conn)
+{
+ return buf_get_bytes(conn->inbuf, string, len);
+}
+
+/** As buf_get_line(), but read from a connection's input buffer. */
+int
+connection_buf_get_line(connection_t *conn, char *data,
+ size_t *data_len)
+{
+ return buf_get_line(conn->inbuf, data, data_len);
+}
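+
+/*
+ * Illustrative sketch: fetching one NUL-terminated line from a
+ * connection's inbuf. The buffer size and the exact return-value
+ * conventions of buf_get_line() are assumptions here.
+ *
+ *   char line[512];
+ *   size_t len = sizeof(line);
+ *   switch (connection_buf_get_line(conn, line, &len)) {
+ *     case 1:  // got a whole line; len bytes are now in line[]
+ *       break;
+ *     case 0:  // no complete line on the inbuf yet; try again later
+ *       break;
+ *     default: // error, e.g. the line did not fit in the buffer
+ *       break;
+ *   }
+ */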
+
+/** As fetch_from_buf_http, but fetches from a connection's input buffer_t as
+ * appropriate. */
+int
+connection_fetch_from_buf_http(connection_t *conn,
+ char **headers_out, size_t max_headerlen,
+ char **body_out, size_t *body_used,
+ size_t max_bodylen, int force_complete)
+{
+ return fetch_from_buf_http(conn->inbuf, headers_out, max_headerlen,
+ body_out, body_used, max_bodylen, force_complete);
+}
+
+/** Return true iff <b>conn</b> has bytes waiting on its outbuf that it
+ * wants to flush. */
+int
+connection_wants_to_flush(connection_t *conn)
+{
+ return conn->outbuf_flushlen > 0;
+}
+
+/** Are there too many bytes on edge connection <b>conn</b>'s outbuf to
+ * send back a relay-level sendme yet? Return 1 if so, 0 if not. Used by
+ * connection_edge_consider_sending_sendme().
+ */
+int
+connection_outbuf_too_full(connection_t *conn)
+{
+ return (conn->outbuf_flushlen > 10*CELL_PAYLOAD_SIZE);
+}
+
+/**
+ * On Windows Vista and Windows 7, tune the send buffer size according to a
+ * hint from the OS.
+ *
+ * This should help fix slow upload rates.
+ */
+static void
+update_send_buffer_size(tor_socket_t sock)
+{
+#ifdef _WIN32
+ /* We only do this on Vista and 7, because earlier versions of Windows
+ * don't have the SIO_IDEAL_SEND_BACKLOG_QUERY functionality, and on
+ * later versions it isn't necessary. */
+ static int isVistaOr7 = -1;
+ if (isVistaOr7 == -1) {
+ isVistaOr7 = 0;
+ OSVERSIONINFO osvi = { 0 };
+ osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+ GetVersionEx(&osvi);
+ if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion < 2)
+ isVistaOr7 = 1;
+ }
+ if (!isVistaOr7)
+ return;
+ if (get_options()->ConstrainedSockets)
+ return;
+ ULONG isb = 0;
+ DWORD bytesReturned = 0;
+ if (!WSAIoctl(sock, SIO_IDEAL_SEND_BACKLOG_QUERY, NULL, 0,
+ &isb, sizeof(isb), &bytesReturned, NULL, NULL)) {
+ setsockopt(sock, SOL_SOCKET, SO_SNDBUF, (const char*)&isb, sizeof(isb));
+ }
+#else
+ (void) sock;
+#endif
+}
+
+/** Try to flush more bytes onto <b>conn</b>-\>s.
+ *
+ * This function gets called either from conn_write_callback() in main.c
+ * when libevent tells us that conn wants to write, or below
+ * from connection_buf_add() when an entire TLS record is ready.
+ *
+ * Update <b>conn</b>-\>timestamp_last_write_allowed to now, and call
+ * buf_flush_to_socket or buf_flush_to_tls appropriately. If it succeeds and
+ * there are no more bytes on <b>conn</b>-\>outbuf, then call
+ * connection_finished_flushing on it too.
+ *
+ * If <b>force</b>, then write as many bytes as possible, ignoring bandwidth
+ * limits. (Used for flushing messages to controller connections on fatal
+ * errors.)
+ *
+ * Mark the connection and return -1 if you want to close it, else
+ * return 0.
+ */
+static int
+connection_handle_write_impl(connection_t *conn, int force)
+{
+ int e;
+ socklen_t len=(socklen_t)sizeof(e);
+ int result;
+ ssize_t max_to_write;
+ time_t now = approx_time();
+ size_t n_read = 0, n_written = 0;
+ int dont_stop_writing = 0;
+
+ tor_assert(!connection_is_listener(conn));
+
+ if (conn->marked_for_close || !SOCKET_OK(conn->s))
+ return 0; /* do nothing */
+
+ if (conn->in_flushed_some) {
+ log_warn(LD_BUG, "called recursively from inside conn->in_flushed_some");
+ return 0;
+ }
+
+ conn->timestamp_last_write_allowed = now;
+
+ connection_bucket_refill_single(conn, monotime_coarse_get_stamp());
+
+ /* Sometimes, "writable" means "connected". */
+ if (connection_state_is_connecting(conn)) {
+ if (getsockopt(conn->s, SOL_SOCKET, SO_ERROR, (void*)&e, &len) < 0) {
+ log_warn(LD_BUG, "getsockopt() syscall failed");
+ if (conn->type == CONN_TYPE_OR) {
+ or_connection_t *orconn = TO_OR_CONN(conn);
+ connection_or_close_for_error(orconn, 0);
+ } else {
+ if (CONN_IS_EDGE(conn)) {
+ connection_edge_end_errno(TO_EDGE_CONN(conn));
+ }
+ connection_mark_for_close(conn);
+ }
+ return -1;
+ }
+ if (e) {
+      /* some sort of error, but maybe just in progress still */
+ if (!ERRNO_IS_CONN_EINPROGRESS(e)) {
+ log_info(LD_NET,"in-progress connect failed. Removing. (%s)",
+ tor_socket_strerror(e));
+ if (CONN_IS_EDGE(conn))
+ connection_edge_end_errno(TO_EDGE_CONN(conn));
+ if (conn->type == CONN_TYPE_OR)
+ connection_or_notify_error(TO_OR_CONN(conn),
+ errno_to_orconn_end_reason(e),
+ tor_socket_strerror(e));
+
+ connection_close_immediate(conn);
+ /*
+ * This can bypass normal channel checking since we did
+ * connection_or_notify_error() above.
+ */
+ connection_mark_for_close_internal(conn);
+ return -1;
+ } else {
+ return 0; /* no change, see if next time is better */
+ }
+ }
+ /* The connection is successful. */
+ if (connection_finished_connecting(conn)<0)
+ return -1;
+ }
+
+ max_to_write = force ? (ssize_t)conn->outbuf_flushlen
+ : connection_bucket_write_limit(conn, now);
+
+ if (connection_speaks_cells(conn) &&
+ conn->state > OR_CONN_STATE_PROXY_HANDSHAKING) {
+ or_connection_t *or_conn = TO_OR_CONN(conn);
+ size_t initial_size;
+ if (conn->state == OR_CONN_STATE_TLS_HANDSHAKING ||
+ conn->state == OR_CONN_STATE_TLS_CLIENT_RENEGOTIATING) {
+ connection_stop_writing(conn);
+ if (connection_tls_continue_handshake(or_conn) < 0) {
+ /* Don't flush; connection is dead. */
+ connection_or_notify_error(or_conn,
+ END_OR_CONN_REASON_MISC,
+ "TLS error in connection_tls_"
+ "continue_handshake()");
+ connection_close_immediate(conn);
+ /*
+ * This can bypass normal channel checking since we did
+ * connection_or_notify_error() above.
+ */
+ connection_mark_for_close_internal(conn);
+ return -1;
+ }
+ return 0;
+ } else if (conn->state == OR_CONN_STATE_TLS_SERVER_RENEGOTIATING) {
+ return connection_handle_read(conn);
+ }
+
+ /* else open, or closing */
+ initial_size = buf_datalen(conn->outbuf);
+ result = buf_flush_to_tls(conn->outbuf, or_conn->tls,
+ max_to_write, &conn->outbuf_flushlen);
+
+ if (result >= 0)
+ update_send_buffer_size(conn->s);
+
+ /* If we just flushed the last bytes, tell the channel on the
+ * or_conn to check if it needs to geoip_change_dirreq_state() */
+ /* XXXX move this to flushed_some or finished_flushing -NM */
+ if (buf_datalen(conn->outbuf) == 0 && or_conn->chan)
+ channel_notify_flushed(TLS_CHAN_TO_BASE(or_conn->chan));
+
+ switch (result) {
+ CASE_TOR_TLS_ERROR_ANY:
+ case TOR_TLS_CLOSE:
+ log_info(LD_NET, result != TOR_TLS_CLOSE ?
+ "tls error. breaking.":"TLS connection closed on flush");
+ /* Don't flush; connection is dead. */
+ connection_or_notify_error(or_conn,
+ END_OR_CONN_REASON_MISC,
+ result != TOR_TLS_CLOSE ?
+                                 "TLS error during flush" :
+ "TLS closed during flush");
+ connection_close_immediate(conn);
+ /*
+ * This can bypass normal channel checking since we did
+ * connection_or_notify_error() above.
+ */
+ connection_mark_for_close_internal(conn);
+ return -1;
+ case TOR_TLS_WANTWRITE:
+ log_debug(LD_NET,"wanted write.");
+ /* we're already writing */
+ dont_stop_writing = 1;
+ break;
+ case TOR_TLS_WANTREAD:
+ /* Make sure to avoid a loop if the receive buckets are empty. */
+ log_debug(LD_NET,"wanted read.");
+ if (!connection_is_reading(conn)) {
+ connection_write_bw_exhausted(conn, true);
+ /* we'll start reading again when we get more tokens in our
+ * read bucket; then we'll start writing again too.
+ */
+ }
+ /* else no problem, we're already reading */
+ return 0;
+ /* case TOR_TLS_DONE:
+ * for TOR_TLS_DONE, fall through to check if the flushlen
+ * is empty, so we can stop writing.
+ */
+ }
+
+ tor_tls_get_n_raw_bytes(or_conn->tls, &n_read, &n_written);
+ log_debug(LD_GENERAL, "After TLS write of %d: %ld read, %ld written",
+ result, (long)n_read, (long)n_written);
+ or_conn->bytes_xmitted += result;
+ or_conn->bytes_xmitted_by_tls += n_written;
+ /* So we notice bytes were written even on error */
+ /* XXXX This cast is safe since we can never write INT_MAX bytes in a
+ * single set of TLS operations. But it looks kinda ugly. If we refactor
+ * the *_buf_tls functions, we should make them return ssize_t or size_t
+ * or something. */
+ result = (int)(initial_size-buf_datalen(conn->outbuf));
+ } else {
+ CONN_LOG_PROTECT(conn,
+ result = buf_flush_to_socket(conn->outbuf, conn->s,
+ max_to_write, &conn->outbuf_flushlen));
+ if (result < 0) {
+ if (CONN_IS_EDGE(conn))
+ connection_edge_end_errno(TO_EDGE_CONN(conn));
+ if (conn->type == CONN_TYPE_AP) {
+ /* writing failed; we couldn't send a SOCKS reply if we wanted to */
+ TO_ENTRY_CONN(conn)->socks_request->has_finished = 1;
+ }
+
+ connection_close_immediate(conn); /* Don't flush; connection is dead. */
+ connection_mark_for_close(conn);
+ return -1;
+ }
+ update_send_buffer_size(conn->s);
+ n_written = (size_t) result;
+ }
+
+ if (n_written && conn->type == CONN_TYPE_AP) {
+ edge_connection_t *edge_conn = TO_EDGE_CONN(conn);
+
+ /* Check for overflow: */
+ if (PREDICT_LIKELY(UINT32_MAX - edge_conn->n_written > n_written))
+ edge_conn->n_written += (int)n_written;
+ else
+ edge_conn->n_written = UINT32_MAX;
+ }
+
+ /* If CONN_BW events are enabled, update conn->n_written_conn_bw for
+ * OR/DIR/EXIT connections, checking for overflow. */
+ if (n_written && get_options()->TestingEnableConnBwEvent &&
+ (conn->type == CONN_TYPE_OR ||
+ conn->type == CONN_TYPE_DIR ||
+ conn->type == CONN_TYPE_EXIT)) {
+ if (PREDICT_LIKELY(UINT32_MAX - conn->n_written_conn_bw > n_written))
+ conn->n_written_conn_bw += (int)n_written;
+ else
+ conn->n_written_conn_bw = UINT32_MAX;
+ }
+
+ connection_buckets_decrement(conn, approx_time(), n_read, n_written);
+
+ if (result > 0) {
+ /* If we wrote any bytes from our buffer, then call the appropriate
+ * functions. */
+ if (connection_flushed_some(conn) < 0) {
+ if (connection_speaks_cells(conn)) {
+ connection_or_notify_error(TO_OR_CONN(conn),
+ END_OR_CONN_REASON_MISC,
+ "Got error back from "
+ "connection_flushed_some()");
+ }
+
+ /*
+ * This can bypass normal channel checking since we did
+ * connection_or_notify_error() above.
+ */
+ connection_mark_for_close_internal(conn);
+ }
+ }
+
+ if (!connection_wants_to_flush(conn) &&
+ !dont_stop_writing) { /* it's done flushing */
+ if (connection_finished_flushing(conn) < 0) {
+ /* already marked */
+ return -1;
+ }
+ return 0;
+ }
+
+ /* Call even if result is 0, since the global write bucket may
+ * have reached 0 on a different conn, and this connection needs to
+ * know to stop writing. */
+ connection_consider_empty_write_buckets(conn);
+ if (n_read > 0 && connection_is_reading(conn))
+ connection_consider_empty_read_buckets(conn);
+
+ return 0;
+}
+
+/** Wrapper for connection_handle_write_impl(): update the current time, and
+ * set conn-\>in_connection_handle_write for the duration of the call.
+ * Return -1 if conn should be closed, else return 0. */
+int
+connection_handle_write(connection_t *conn, int force)
+{
+ int res;
+ update_current_time(time(NULL));
+ conn->in_connection_handle_write = 1;
+ res = connection_handle_write_impl(conn, force);
+ conn->in_connection_handle_write = 0;
+ return res;
+}
+
+/**
+ * Try to flush data that's waiting for a write on <b>conn</b>. Return
+ * -1 on failure, 0 on success.
+ *
+ * Don't use this function for regular writing; the buffers
+ * system should be good enough at scheduling writes there. Instead, this
+ * function is for cases when we're about to exit or something and we want
+ * to report it right away.
+ */
+int
+connection_flush(connection_t *conn)
+{
+ return connection_handle_write(conn, 1);
+}
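+
+/*
+ * Illustrative sketch: per the comment above, a plausible "about to exit"
+ * use is pushing one last message to a controller and flushing it right
+ * away. The message variables are placeholders.
+ *
+ *   connection_buf_add(msg, msg_len, conn);
+ *   if (connection_flush(conn) < 0) {
+ *     // the connection is dead; nothing more can be delivered
+ *   }
+ */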
+
+/** Helper for connection_write_to_buf_impl and connection_write_buf_to_buf:
+ *
+ * Return true iff it is okay to queue bytes on <b>conn</b>'s outbuf for
+ * writing.
+ */
+static int
+connection_may_write_to_buf(connection_t *conn)
+{
+ /* if it's marked for close, only allow write if we mean to flush it */
+ if (conn->marked_for_close && !conn->hold_open_until_flushed)
+ return 0;
+
+ return 1;
+}
+
+/** Helper for connection_write_to_buf_impl and connection_write_buf_to_buf:
+ *
+ * Called when an attempt to add bytes on <b>conn</b>'s outbuf has failed;
+ * mark the connection and warn as appropriate.
+ */
+static void
+connection_write_to_buf_failed(connection_t *conn)
+{
+ if (CONN_IS_EDGE(conn)) {
+ /* if it failed, it means we have our package/delivery windows set
+ wrong compared to our max outbuf size. close the whole circuit. */
+ log_warn(LD_NET,
+ "write_to_buf failed. Closing circuit (fd %d).", (int)conn->s);
+ circuit_mark_for_close(circuit_get_by_edge_conn(TO_EDGE_CONN(conn)),
+ END_CIRC_REASON_INTERNAL);
+ } else if (conn->type == CONN_TYPE_OR) {
+ or_connection_t *orconn = TO_OR_CONN(conn);
+ log_warn(LD_NET,
+ "write_to_buf failed on an orconn; notifying of error "
+ "(fd %d)", (int)(conn->s));
+ connection_or_close_for_error(orconn, 0);
+ } else {
+ log_warn(LD_NET,
+ "write_to_buf failed. Closing connection (fd %d).",
+ (int)conn->s);
+ connection_mark_for_close(conn);
+ }
+}
+
+/** Helper for connection_write_to_buf_impl and connection_write_buf_to_buf:
+ *
+ * Called when an attempt to add bytes on <b>conn</b>'s outbuf has succeeded:
+ * record the number of bytes added.
+ */
+static void
+connection_write_to_buf_commit(connection_t *conn, size_t len)
+{
+ /* If we receive optimistic data in the EXIT_CONN_STATE_RESOLVING
+ * state, we don't want to try to write it right away, since
+ * conn->write_event won't be set yet. Otherwise, write data from
+ * this conn as the socket is available. */
+ if (conn->write_event) {
+ connection_start_writing(conn);
+ }
+ conn->outbuf_flushlen += len;
+}
+
+/** Append <b>len</b> bytes of <b>string</b> onto <b>conn</b>'s
+ * outbuf, and ask it to start writing.
+ *
+ * If <b>zlib</b> is nonzero, this is a directory connection that should get
+ * its contents compressed or decompressed as they're written. If zlib is
+ * negative, this is the last data to be compressed, and the connection's zlib
+ * state should be flushed.
+ */
+MOCK_IMPL(void,
+connection_write_to_buf_impl_,(const char *string, size_t len,
+ connection_t *conn, int zlib))
+{
+ /* XXXX This function really needs to return -1 on failure. */
+ int r;
+ if (!len && !(zlib<0))
+ return;
+
+ if (!connection_may_write_to_buf(conn))
+ return;
+
+ size_t written;
+
+ if (zlib) {
+ size_t old_datalen = buf_datalen(conn->outbuf);
+ dir_connection_t *dir_conn = TO_DIR_CONN(conn);
+ int done = zlib < 0;
+ CONN_LOG_PROTECT(conn, r = buf_add_compress(conn->outbuf,
+ dir_conn->compress_state,
+ string, len, done));
+ written = buf_datalen(conn->outbuf) - old_datalen;
+ } else {
+ CONN_LOG_PROTECT(conn, r = buf_add(conn->outbuf, string, len));
+ written = len;
+ }
+ if (r < 0) {
+ connection_write_to_buf_failed(conn);
+ return;
+ }
+ connection_write_to_buf_commit(conn, written);
+}
+
+void
+connection_buf_add_compress(const char *string, size_t len,
+ dir_connection_t *conn, int done)
+{
+ connection_write_to_buf_impl_(string, len, TO_CONN(conn), done ? -1 : 1);
+}
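+
+/*
+ * Illustrative sketch: streaming a compressed directory response. Every
+ * chunk is written with done == 0; a final zero-length call with
+ * done == 1 flushes the compression state (the zlib < 0 path above).
+ * The chunk variables are placeholders.
+ *
+ *   connection_buf_add_compress(chunk, chunk_len, dir_conn, 0);
+ *   // ... more chunks ...
+ *   connection_buf_add_compress("", 0, dir_conn, 1);  // flush the stream
+ */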
+
+/**
+ * Add all bytes from <b>buf</b> to <b>conn</b>'s outbuf, draining them
+ * from <b>buf</b>. (If the connection is marked and will soon be closed,
+ * nothing is drained.)
+ */
+void
+connection_buf_add_buf(connection_t *conn, buf_t *buf)
+{
+ tor_assert(conn);
+ tor_assert(buf);
+ size_t len = buf_datalen(buf);
+ if (len == 0)
+ return;
+
+ if (!connection_may_write_to_buf(conn))
+ return;
+
+ buf_move_all(conn->outbuf, buf);
+ connection_write_to_buf_commit(conn, len);
+}
+
+#define CONN_GET_ALL_TEMPLATE(var, test) \
+ STMT_BEGIN \
+ smartlist_t *conns = get_connection_array(); \
+ smartlist_t *ret_conns = smartlist_new(); \
+ SMARTLIST_FOREACH_BEGIN(conns, connection_t *, var) { \
+ if (var && (test) && !var->marked_for_close) \
+ smartlist_add(ret_conns, var); \
+ } SMARTLIST_FOREACH_END(var); \
+ return ret_conns; \
+ STMT_END
+
+/* Return a list of connections that aren't marked for close and that match
+ * the given type and state. The list can be empty and must be freed using
+ * smartlist_free(). The caller does NOT have ownership of the objects in the
+ * list so it must not free them nor reference them as they can disappear. */
+smartlist_t *
+connection_list_by_type_state(int type, int state)
+{
+ CONN_GET_ALL_TEMPLATE(conn, (conn->type == type && conn->state == state));
+}
+
+/* Return a list of connections that aren't marked for close and that match
+ * the given type and purpose. The list can be empty and must be freed using
+ * smartlist_free(). The caller does NOT have ownership of the objects in the
+ * list so it must not free them nor reference them as they can disappear. */
+smartlist_t *
+connection_list_by_type_purpose(int type, int purpose)
+{
+ CONN_GET_ALL_TEMPLATE(conn,
+ (conn->type == type && conn->purpose == purpose));
+}
+
+/** Return a connection_t * from get_connection_array() that satisfies test on
+ * var, and that is not marked for close. */
+#define CONN_GET_TEMPLATE(var, test) \
+ STMT_BEGIN \
+ smartlist_t *conns = get_connection_array(); \
+ SMARTLIST_FOREACH(conns, connection_t *, var, \
+ { \
+ if (var && (test) && !var->marked_for_close) \
+ return var; \
+ }); \
+ return NULL; \
+ STMT_END
+
+/** Return a connection with given type, address, port, and purpose;
+ * or NULL if no such connection exists (or if all such connections are marked
+ * for close). */
+MOCK_IMPL(connection_t *,
+connection_get_by_type_addr_port_purpose,(int type,
+ const tor_addr_t *addr, uint16_t port,
+ int purpose))
+{
+ CONN_GET_TEMPLATE(conn,
+ (conn->type == type &&
+ tor_addr_eq(&conn->addr, addr) &&
+ conn->port == port &&
+ conn->purpose == purpose));
+}
+
+/** Return the stream with id <b>id</b> if it is not already marked for
+ * close.
+ */
+connection_t *
+connection_get_by_global_id(uint64_t id)
+{
+ CONN_GET_TEMPLATE(conn, conn->global_identifier == id);
+}
+
+/** Return a connection of type <b>type</b> that is not marked for close.
+ */
+connection_t *
+connection_get_by_type(int type)
+{
+ CONN_GET_TEMPLATE(conn, conn->type == type);
+}
+
+/** Return a connection of type <b>type</b> that is in state <b>state</b>,
+ * and that is not marked for close.
+ */
+connection_t *
+connection_get_by_type_state(int type, int state)
+{
+ CONN_GET_TEMPLATE(conn, conn->type == type && conn->state == state);
+}
+
+/** Return a connection of type <b>type</b> that has rendquery equal
+ * to <b>rendquery</b>, and that is not marked for close. If state
+ * is non-zero, conn must be of that state too.
+ */
+connection_t *
+connection_get_by_type_state_rendquery(int type, int state,
+ const char *rendquery)
+{
+ tor_assert(type == CONN_TYPE_DIR ||
+ type == CONN_TYPE_AP || type == CONN_TYPE_EXIT);
+ tor_assert(rendquery);
+
+ CONN_GET_TEMPLATE(conn,
+ (conn->type == type &&
+ (!state || state == conn->state)) &&
+ (
+ (type == CONN_TYPE_DIR &&
+ TO_DIR_CONN(conn)->rend_data &&
+ !rend_cmp_service_ids(rendquery,
+ rend_data_get_address(TO_DIR_CONN(conn)->rend_data)))
+ ||
+ (CONN_IS_EDGE(conn) &&
+ TO_EDGE_CONN(conn)->rend_data &&
+ !rend_cmp_service_ids(rendquery,
+ rend_data_get_address(TO_EDGE_CONN(conn)->rend_data)))
+ ));
+}
+
+/** Return a new smartlist of dir_connection_t * from get_connection_array()
+ * that satisfy conn_test on connection_t *conn_var, and dirconn_test on
+ * dir_connection_t *dirconn_var. conn_var must be of CONN_TYPE_DIR and not
+ * marked for close to be included in the list. */
+#define DIR_CONN_LIST_TEMPLATE(conn_var, conn_test, \
+ dirconn_var, dirconn_test) \
+ STMT_BEGIN \
+ smartlist_t *conns = get_connection_array(); \
+ smartlist_t *dir_conns = smartlist_new(); \
+ SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn_var) { \
+ if (conn_var && (conn_test) \
+ && conn_var->type == CONN_TYPE_DIR \
+ && !conn_var->marked_for_close) { \
+ dir_connection_t *dirconn_var = TO_DIR_CONN(conn_var); \
+ if (dirconn_var && (dirconn_test)) { \
+ smartlist_add(dir_conns, dirconn_var); \
+ } \
+ } \
+ } SMARTLIST_FOREACH_END(conn_var); \
+ return dir_conns; \
+ STMT_END
+
+/** Return a list of directory connections that are fetching the item
+ * described by <b>purpose</b>/<b>resource</b>. If there are none,
+ * return an empty list. This list must be freed using smartlist_free,
+ * but the pointers in it must not be freed.
+ * Note that this list should not be cached, as the pointers in it can be
+ * freed if their connections close. */
+smartlist_t *
+connection_dir_list_by_purpose_and_resource(
+ int purpose,
+ const char *resource)
+{
+ DIR_CONN_LIST_TEMPLATE(conn,
+ conn->purpose == purpose,
+ dirconn,
+ 0 == strcmp_opt(resource,
+ dirconn->requested_resource));
+}
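+
+/*
+ * Illustrative sketch: consuming the returned list while honoring the
+ * ownership rules above -- free the list, never its members, and don't
+ * cache the pointers. The purpose/resource values are placeholders.
+ *
+ *   smartlist_t *dl =
+ *     connection_dir_list_by_purpose_and_resource(purpose, resource);
+ *   SMARTLIST_FOREACH(dl, dir_connection_t *, dc, {
+ *     // inspect dc here; do not free it
+ *   });
+ *   smartlist_free(dl);  // frees the list only, not the connections
+ */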
+
+/** Return a list of directory connections that are fetching the item
+ * described by <b>purpose</b>/<b>resource</b>/<b>state</b>. If there are
+ * none, return an empty list. This list must be freed using smartlist_free,
+ * but the pointers in it must not be freed.
+ * Note that this list should not be cached, as the pointers in it can be
+ * freed if their connections close. */
+smartlist_t *
+connection_dir_list_by_purpose_resource_and_state(
+ int purpose,
+ const char *resource,
+ int state)
+{
+ DIR_CONN_LIST_TEMPLATE(conn,
+ conn->purpose == purpose && conn->state == state,
+ dirconn,
+ 0 == strcmp_opt(resource,
+ dirconn->requested_resource));
+}
+
+#undef DIR_CONN_LIST_TEMPLATE
+
+/** Return an arbitrary active OR connection that isn't <b>this_conn</b>.
+ *
+ * We use this to guess if we should tell the controller that we
+ * didn't manage to connect to any of our bridges. */
+static connection_t *
+connection_get_another_active_or_conn(const or_connection_t *this_conn)
+{
+ CONN_GET_TEMPLATE(conn,
+ conn != TO_CONN(this_conn) && conn->type == CONN_TYPE_OR);
+}
+
+/** Return 1 if there are any active OR connections apart from
+ * <b>this_conn</b>.
+ *
+ * We use this to guess if we should tell the controller that we
+ * didn't manage to connect to any of our bridges. */
+int
+any_other_active_or_conns(const or_connection_t *this_conn)
+{
+ connection_t *conn = connection_get_another_active_or_conn(this_conn);
+ if (conn != NULL) {
+ log_debug(LD_DIR, "%s: Found an OR connection: %s",
+ __func__, conn->address);
+ return 1;
+ }
+
+ return 0;
+}
+
+#undef CONN_GET_TEMPLATE
+
+/** Return 1 if <b>conn</b> is a listener conn, else return 0. */
+int
+connection_is_listener(connection_t *conn)
+{
+ if (conn->type == CONN_TYPE_OR_LISTENER ||
+ conn->type == CONN_TYPE_EXT_OR_LISTENER ||
+ conn->type == CONN_TYPE_AP_LISTENER ||
+ conn->type == CONN_TYPE_AP_TRANS_LISTENER ||
+ conn->type == CONN_TYPE_AP_DNS_LISTENER ||
+ conn->type == CONN_TYPE_AP_NATD_LISTENER ||
+ conn->type == CONN_TYPE_AP_HTTP_CONNECT_LISTENER ||
+ conn->type == CONN_TYPE_DIR_LISTENER ||
+ conn->type == CONN_TYPE_CONTROL_LISTENER)
+ return 1;
+ return 0;
+}
+
+/** Return 1 if <b>conn</b> is in state "open" and is not marked
+ * for close, else return 0.
+ */
+int
+connection_state_is_open(connection_t *conn)
+{
+ tor_assert(conn);
+
+ if (conn->marked_for_close)
+ return 0;
+
+ if ((conn->type == CONN_TYPE_OR && conn->state == OR_CONN_STATE_OPEN) ||
+ (conn->type == CONN_TYPE_EXT_OR) ||
+ (conn->type == CONN_TYPE_AP && conn->state == AP_CONN_STATE_OPEN) ||
+ (conn->type == CONN_TYPE_EXIT && conn->state == EXIT_CONN_STATE_OPEN) ||
+ (conn->type == CONN_TYPE_CONTROL &&
+ conn->state == CONTROL_CONN_STATE_OPEN))
+ return 1;
+
+ return 0;
+}
+
+/** Return 1 if conn is in 'connecting' state, else return 0. */
+int
+connection_state_is_connecting(connection_t *conn)
+{
+ tor_assert(conn);
+
+ if (conn->marked_for_close)
+ return 0;
+ switch (conn->type)
+ {
+ case CONN_TYPE_OR:
+ return conn->state == OR_CONN_STATE_CONNECTING;
+ case CONN_TYPE_EXIT:
+ return conn->state == EXIT_CONN_STATE_CONNECTING;
+ case CONN_TYPE_DIR:
+ return conn->state == DIR_CONN_STATE_CONNECTING;
+ }
+
+ return 0;
+}
+
+/** Allocate a base64'ed authenticator for use in HTTP or HTTPS
+ * auth, based on the input string <b>authenticator</b>. Return the
+ * new string on success, else return NULL. */
+char *
+alloc_http_authenticator(const char *authenticator)
+{
+ /* an authenticator in Basic authentication
+ * is just the string "username:password" */
+ const size_t authenticator_length = strlen(authenticator);
+ const size_t base64_authenticator_length =
+ base64_encode_size(authenticator_length, 0) + 1;
+ char *base64_authenticator = tor_malloc(base64_authenticator_length);
+ if (base64_encode(base64_authenticator, base64_authenticator_length,
+ authenticator, authenticator_length, 0) < 0) {
+ tor_free(base64_authenticator); /* free and set to null */
+ }
+ return base64_authenticator;
+}
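+
+/*
+ * Worked example: for the Basic-auth credentials "user:pass" (a
+ * placeholder), this returns the base64 string "dXNlcjpwYXNz", ready to
+ * be embedded in a header such as
+ * "Proxy-Authorization: Basic dXNlcjpwYXNz".
+ */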
+
+/** Given a socket handle, check whether the local address (sockname) of the
+ * socket is one that we've connected from before. If so, double-check
+ * whether our address has changed and we need to generate keys. If we do,
+ * call init_keys().
+ */
+static void
+client_check_address_changed(tor_socket_t sock)
+{
+ tor_addr_t out_addr, iface_addr;
+ tor_addr_t **last_interface_ip_ptr;
+ sa_family_t family;
+
+ if (!outgoing_addrs)
+ outgoing_addrs = smartlist_new();
+
+ if (tor_addr_from_getsockname(&out_addr, sock) < 0) {
+ int e = tor_socket_errno(sock);
+ log_warn(LD_NET, "getsockname() to check for address change failed: %s",
+ tor_socket_strerror(e));
+ return;
+ }
+ family = tor_addr_family(&out_addr);
+
+ if (family == AF_INET)
+ last_interface_ip_ptr = &last_interface_ipv4;
+ else if (family == AF_INET6)
+ last_interface_ip_ptr = &last_interface_ipv6;
+ else
+ return;
+
+ if (! *last_interface_ip_ptr) {
+ tor_addr_t *a = tor_malloc_zero(sizeof(tor_addr_t));
+ if (get_interface_address6(LOG_INFO, family, a)==0) {
+ *last_interface_ip_ptr = a;
+ } else {
+ tor_free(a);
+ }
+ }
+
+ /* If we've used this address previously, we're okay. */
+ SMARTLIST_FOREACH(outgoing_addrs, const tor_addr_t *, a_ptr,
+ if (tor_addr_eq(a_ptr, &out_addr))
+ return;
+ );
+
+ /* Uh-oh. We haven't connected from this address before. Has the interface
+ * address changed? */
+ if (get_interface_address6(LOG_INFO, family, &iface_addr)<0)
+ return;
+
+ if (tor_addr_eq(&iface_addr, *last_interface_ip_ptr)) {
+ /* Nope, it hasn't changed. Add this address to the list. */
+ smartlist_add(outgoing_addrs, tor_memdup(&out_addr, sizeof(tor_addr_t)));
+ } else {
+ /* The interface changed. We're a client, so we need to regenerate our
+ * keys. First, reset the state. */
+ log_notice(LD_NET, "Our IP address has changed. Rotating keys...");
+ tor_addr_copy(*last_interface_ip_ptr, &iface_addr);
+ SMARTLIST_FOREACH(outgoing_addrs, tor_addr_t*, a_ptr, tor_free(a_ptr));
+ smartlist_clear(outgoing_addrs);
+ smartlist_add(outgoing_addrs, tor_memdup(&out_addr, sizeof(tor_addr_t)));
+ /* We'll need to resolve ourselves again. */
+ reset_last_resolved_addr();
+ /* Okay, now change our keys. */
+ ip_address_changed(1);
+ }
+}
+
+/** Some systems have limited system buffers for recv and xmit on
+ * sockets allocated in a virtual server or similar environment. For a Tor
+ * server this can produce the "Error creating network socket: No buffer
+ * space available" error once all available TCP buffer space is consumed.
+ * This function attempts to constrain the buffers allocated for the socket
+ * to the desired size to stay below system TCP buffer limits.
+ */
+static void
+set_constrained_socket_buffers(tor_socket_t sock, int size)
+{
+ void *sz = (void*)&size;
+ socklen_t sz_sz = (socklen_t) sizeof(size);
+ if (setsockopt(sock, SOL_SOCKET, SO_SNDBUF, sz, sz_sz) < 0) {
+ int e = tor_socket_errno(sock);
+ log_warn(LD_NET, "setsockopt() to constrain send "
+ "buffer to %d bytes failed: %s", size, tor_socket_strerror(e));
+ }
+ if (setsockopt(sock, SOL_SOCKET, SO_RCVBUF, sz, sz_sz) < 0) {
+ int e = tor_socket_errno(sock);
+ log_warn(LD_NET, "setsockopt() to constrain recv "
+ "buffer to %d bytes failed: %s", size, tor_socket_strerror(e));
+ }
+}
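+
+/*
+ * Illustrative sketch (an assumption about the call site): a caller
+ * creating a socket under ConstrainedSockets would apply the configured
+ * size right after socket creation, along the lines of:
+ *
+ *   if (get_options()->ConstrainedSockets)
+ *     set_constrained_socket_buffers(sock,
+ *                                    (int)get_options()->ConstrainedSockSize);
+ */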
+
+/** Process new bytes that have arrived on conn-\>inbuf.
+ *
+ * This function just passes conn to the connection-specific
+ * connection_*_process_inbuf() function. It also passes in
+ * package_partial if wanted.
+ */
+static int
+connection_process_inbuf(connection_t *conn, int package_partial)
+{
+ tor_assert(conn);
+
+ switch (conn->type) {
+ case CONN_TYPE_OR:
+ return connection_or_process_inbuf(TO_OR_CONN(conn));
+ case CONN_TYPE_EXT_OR:
+ return connection_ext_or_process_inbuf(TO_OR_CONN(conn));
+ case CONN_TYPE_EXIT:
+ case CONN_TYPE_AP:
+ return connection_edge_process_inbuf(TO_EDGE_CONN(conn),
+ package_partial);
+ case CONN_TYPE_DIR:
+ return connection_dir_process_inbuf(TO_DIR_CONN(conn));
+ case CONN_TYPE_CONTROL:
+ return connection_control_process_inbuf(TO_CONTROL_CONN(conn));
+ default:
+ log_err(LD_BUG,"got unexpected conn type %d.", conn->type);
+ tor_fragile_assert();
+ return -1;
+ }
+}
+
+/** Called whenever we've written data on a connection. */
+static int
+connection_flushed_some(connection_t *conn)
+{
+ int r = 0;
+ tor_assert(!conn->in_flushed_some);
+ conn->in_flushed_some = 1;
+ if (conn->type == CONN_TYPE_DIR &&
+ conn->state == DIR_CONN_STATE_SERVER_WRITING) {
+ r = connection_dirserv_flushed_some(TO_DIR_CONN(conn));
+ } else if (conn->type == CONN_TYPE_OR) {
+ r = connection_or_flushed_some(TO_OR_CONN(conn));
+ } else if (CONN_IS_EDGE(conn)) {
+ r = connection_edge_flushed_some(TO_EDGE_CONN(conn));
+ }
+ conn->in_flushed_some = 0;
+ return r;
+}
+
+/** We just finished flushing bytes to the appropriate low-level network
+ * layer,
+ * and there are no more bytes remaining in conn-\>outbuf or
+ * conn-\>tls to be flushed.
+ *
+ * This function just passes conn to the connection-specific
+ * connection_*_finished_flushing() function.
+ */
+static int
+connection_finished_flushing(connection_t *conn)
+{
+ tor_assert(conn);
+
+ /* If the connection is closed, don't try to do anything more here. */
+ if (CONN_IS_CLOSED(conn))
+ return 0;
+
+// log_fn(LOG_DEBUG,"entered. Socket %u.", conn->s);
+
+ connection_stop_writing(conn);
+
+ switch (conn->type) {
+ case CONN_TYPE_OR:
+ return connection_or_finished_flushing(TO_OR_CONN(conn));
+ case CONN_TYPE_EXT_OR:
+ return connection_ext_or_finished_flushing(TO_OR_CONN(conn));
+ case CONN_TYPE_AP:
+ case CONN_TYPE_EXIT:
+ return connection_edge_finished_flushing(TO_EDGE_CONN(conn));
+ case CONN_TYPE_DIR:
+ return connection_dir_finished_flushing(TO_DIR_CONN(conn));
+ case CONN_TYPE_CONTROL:
+ return connection_control_finished_flushing(TO_CONTROL_CONN(conn));
+ default:
+ log_err(LD_BUG,"got unexpected conn type %d.", conn->type);
+ tor_fragile_assert();
+ return -1;
+ }
+}
+
+/** Called when our attempt to connect() to another server has just
+ * succeeded.
+ *
+ * This function just passes conn to the connection-specific
+ * connection_*_finished_connecting() function.
+ */
+static int
+connection_finished_connecting(connection_t *conn)
+{
+ tor_assert(conn);
+
+ if (!server_mode(get_options())) {
+ /* See whether getsockname() says our address changed. We need to do this
+ * now that the connection has finished, because getsockname() on Windows
+ * won't work until then. */
+ client_check_address_changed(conn->s);
+ }
+
+ switch (conn->type)
+ {
+ case CONN_TYPE_OR:
+ return connection_or_finished_connecting(TO_OR_CONN(conn));
+ case CONN_TYPE_EXIT:
+ return connection_edge_finished_connecting(TO_EDGE_CONN(conn));
+ case CONN_TYPE_DIR:
+ return connection_dir_finished_connecting(TO_DIR_CONN(conn));
+ default:
+ log_err(LD_BUG,"got unexpected conn type %d.", conn->type);
+ tor_fragile_assert();
+ return -1;
+ }
+}
+
+/** Callback: invoked when a connection reaches an EOF event. */
+static int
+connection_reached_eof(connection_t *conn)
+{
+ switch (conn->type) {
+ case CONN_TYPE_OR:
+ case CONN_TYPE_EXT_OR:
+ return connection_or_reached_eof(TO_OR_CONN(conn));
+ case CONN_TYPE_AP:
+ case CONN_TYPE_EXIT:
+ return connection_edge_reached_eof(TO_EDGE_CONN(conn));
+ case CONN_TYPE_DIR:
+ return connection_dir_reached_eof(TO_DIR_CONN(conn));
+ case CONN_TYPE_CONTROL:
+ return connection_control_reached_eof(TO_CONTROL_CONN(conn));
+ default:
+ log_err(LD_BUG,"got unexpected conn type %d.", conn->type);
+ tor_fragile_assert();
+ return -1;
+ }
+}
+
+/** Comparator for the two-orconn case in OOS victim sort */
+static int
+oos_victim_comparator_for_orconns(or_connection_t *a, or_connection_t *b)
+{
+ int a_circs, b_circs;
+ /* Fewer circuits == higher priority for OOS kill, sort earlier */
+
+ a_circs = connection_or_get_num_circuits(a);
+ b_circs = connection_or_get_num_circuits(b);
+
+ if (a_circs < b_circs) return 1;
+ else if (a_circs > b_circs) return -1;
+ else return 0;
+}
+
+/** Sort comparator for OOS victims; better targets sort before worse
+ * ones. */
+static int
+oos_victim_comparator(const void **a_v, const void **b_v)
+{
+ connection_t *a = NULL, *b = NULL;
+
+ /* Get connection pointers out */
+
+ a = (connection_t *)(*a_v);
+ b = (connection_t *)(*b_v);
+
+ tor_assert(a != NULL);
+ tor_assert(b != NULL);
+
+ /*
+ * We always prefer orconns as victims currently; we won't even see
+ * these non-orconn cases, but if we do, sort them after orconns.
+ */
+ if (a->type == CONN_TYPE_OR && b->type == CONN_TYPE_OR) {
+ return oos_victim_comparator_for_orconns(TO_OR_CONN(a), TO_OR_CONN(b));
+ } else {
+ /*
+ * One isn't an orconn; if one is, it goes first. We currently have no
+ * opinions about cases where neither is an orconn.
+ */
+ if (a->type == CONN_TYPE_OR) return -1;
+ else if (b->type == CONN_TYPE_OR) return 1;
+ else return 0;
+ }
+}
+
+/** Pick n victim connections for the OOS handler and return them in a
+ * smartlist.
+ */
+MOCK_IMPL(STATIC smartlist_t *,
+pick_oos_victims, (int n))
+{
+ smartlist_t *eligible = NULL, *victims = NULL;
+ smartlist_t *conns;
+ int conn_counts_by_type[CONN_TYPE_MAX_ + 1], i;
+
+ /*
+ * Big damn assumption (someone improve this someday!):
+ *
+ * Socket exhaustion normally happens on high-volume relays, and so
+ * most of the connections involved are orconns. We should pick victims
+ * by assembling a list of all orconns, and sorting them in order of
+ * how much 'damage' by some metric we'd be doing by dropping them.
+ *
+ * If we move on from orconns, we should probably think about incoming
+ * directory connections next, or exit connections. Things we should
+ * probably never kill are controller connections and listeners.
+ *
+ * This function will count how many connections of different types
+ * exist and log it for purposes of gathering data on typical OOS
+ * situations to guide future improvements.
+ */
+
+ /* First, get the connection array */
+ conns = get_connection_array();
+ /*
+ * Iterate it and pick out eligible connection types, and log some stats
+ * along the way.
+ */
+ eligible = smartlist_new();
+ memset(conn_counts_by_type, 0, sizeof(conn_counts_by_type));
+ SMARTLIST_FOREACH_BEGIN(conns, connection_t *, c) {
+ /* Bump the counter */
+ tor_assert(c->type <= CONN_TYPE_MAX_);
+ ++(conn_counts_by_type[c->type]);
+
+ /* Skip anything without a socket we can free */
+ if (!(SOCKET_OK(c->s))) {
+ continue;
+ }
+
+ /* Skip anything we would count as moribund */
+ if (connection_is_moribund(c)) {
+ continue;
+ }
+
+ switch (c->type) {
+ case CONN_TYPE_OR:
+ /* We've got an orconn, it's eligible to be OOSed */
+ smartlist_add(eligible, c);
+ break;
+ default:
+ /* We don't know what to do with it, ignore it */
+ break;
+ }
+ } SMARTLIST_FOREACH_END(c);
+
+ /* Log some stats */
+ if (smartlist_len(conns) > 0) {
+ /* At least one counter must be non-zero */
+ log_info(LD_NET, "Some stats on conn types seen during OOS follow");
+ for (i = CONN_TYPE_MIN_; i <= CONN_TYPE_MAX_; ++i) {
+ /* Did we see any? */
+ if (conn_counts_by_type[i] > 0) {
+ log_info(LD_NET, "%s: %d conns",
+ conn_type_to_string(i),
+ conn_counts_by_type[i]);
+ }
+ }
+ log_info(LD_NET, "Done with OOS conn type stats");
+ }
+
+ /* Did we find more eligible targets than we want to kill? */
+ if (smartlist_len(eligible) > n) {
+ /* Sort the list in order of target preference */
+ smartlist_sort(eligible, oos_victim_comparator);
+ /* Pick first n as victims */
+ victims = smartlist_new();
+ for (i = 0; i < n; ++i) {
+ smartlist_add(victims, smartlist_get(eligible, i));
+ }
+ /* Free the original list */
+ smartlist_free(eligible);
+ } else {
+ /* No, we can just call them all victims */
+ victims = eligible;
+ }
+
+ return victims;
+}
+
+/** Kill a list of connections for the OOS handler. */
+MOCK_IMPL(STATIC void,
+kill_conn_list_for_oos, (smartlist_t *conns))
+{
+ if (!conns) return;
+
+ SMARTLIST_FOREACH_BEGIN(conns, connection_t *, c) {
+ /* Make sure the channel layer gets told about orconns */
+ if (c->type == CONN_TYPE_OR) {
+ connection_or_close_for_error(TO_OR_CONN(c), 1);
+ } else {
+ connection_mark_for_close(c);
+ }
+ } SMARTLIST_FOREACH_END(c);
+
+ log_notice(LD_NET,
+ "OOS handler marked %d connections",
+ smartlist_len(conns));
+}
+
+/** Check if a connection is on the way out so the OOS handler doesn't try
+ * to kill more than it needs. */
+int
+connection_is_moribund(connection_t *conn)
+{
+ if (conn != NULL &&
+ (conn->conn_array_index < 0 ||
+ conn->marked_for_close)) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/** Out-of-Sockets handler; n_socks is the current number of open
+ * sockets, and failed is non-zero if a socket exhaustion related
+ * error immediately preceded this call. This is where to do
+ * connection-killing heuristics as needed.
+ */
+void
+connection_check_oos(int n_socks, int failed)
+{
+ int target_n_socks = 0, moribund_socks, socks_to_kill;
+ smartlist_t *conns;
+
+ /* Early exit: is OOS checking disabled? */
+ if (get_options()->DisableOOSCheck) {
+ return;
+ }
+
+ /* Sanity-check args */
+ tor_assert(n_socks >= 0);
+
+ /*
+ * Make some log noise; keep it at debug level since this gets a chance
+ * to run on every connection attempt.
+ */
+ log_debug(LD_NET,
+ "Running the OOS handler (%d open sockets, %s)",
+ n_socks, (failed != 0) ? "exhaustion seen" : "no exhaustion");
+
+ /*
+ * Check if we're really handling an OOS condition, and if so decide how
+   * many sockets we want to get down to. Be sure to check that the threshold
+   * is nonzero first; it's possible for this to be called a few
+ * times before we've finished reading the config.
+ */
+ if (n_socks >= get_options()->ConnLimit_high_thresh &&
+ get_options()->ConnLimit_high_thresh != 0 &&
+ get_options()->ConnLimit_ != 0) {
+ /* Try to get down to the low threshold */
+ target_n_socks = get_options()->ConnLimit_low_thresh;
+ log_notice(LD_NET,
+ "Current number of sockets %d is greater than configured "
+ "limit %d; OOS handler trying to get down to %d",
+ n_socks, get_options()->ConnLimit_high_thresh,
+ target_n_socks);
+ } else if (failed) {
+ /*
+ * If we're not at the limit but we hit a socket exhaustion error, try to
+     * drop some (but not as aggressively as ConnLimit_low_thresh, which is
+ * 3/4 of ConnLimit_)
+ */
+ target_n_socks = (n_socks * 9) / 10;
+ log_notice(LD_NET,
+ "We saw socket exhaustion at %d open sockets; OOS handler "
+ "trying to get down to %d",
+ n_socks, target_n_socks);
+ }
+
+ if (target_n_socks > 0) {
+ /*
+ * It's an OOS!
+ *
+     * Count moribund sockets; it's important that anything we decide
+     * to get rid of here but don't immediately close gets counted as moribund
+ * on subsequent invocations so we don't try to kill too many things if
+ * connection_check_oos() gets called multiple times.
+ */
+ moribund_socks = connection_count_moribund();
+
+ if (moribund_socks < n_socks - target_n_socks) {
+ socks_to_kill = n_socks - target_n_socks - moribund_socks;
+
+ conns = pick_oos_victims(socks_to_kill);
+ if (conns) {
+ kill_conn_list_for_oos(conns);
+ log_notice(LD_NET,
+ "OOS handler killed %d conns", smartlist_len(conns));
+ smartlist_free(conns);
+ } else {
+ log_notice(LD_NET, "OOS handler failed to pick any victim conns");
+ }
+ } else {
+ log_notice(LD_NET,
+ "Not killing any sockets for OOS because there are %d "
+ "already moribund, and we only want to eliminate %d",
+ moribund_socks, n_socks - target_n_socks);
+ }
+ }
+}
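+
+/*
+ * Worked example (illustrative numbers): with ConnLimit_high_thresh =
+ * 1000, ConnLimit_low_thresh = 750, and n_socks = 1200, the first branch
+ * sets target_n_socks = 750; if 30 sockets are already moribund, we ask
+ * pick_oos_victims() for 1200 - 750 - 30 = 420 victims. With n_socks =
+ * 800 and failed set, the second branch gives target_n_socks =
+ * (800 * 9) / 10 = 720 instead.
+ */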
+
+/** Log how many bytes are used by buffers of different kinds and sizes. */
+void
+connection_dump_buffer_mem_stats(int severity)
+{
+ uint64_t used_by_type[CONN_TYPE_MAX_+1];
+ uint64_t alloc_by_type[CONN_TYPE_MAX_+1];
+ int n_conns_by_type[CONN_TYPE_MAX_+1];
+ uint64_t total_alloc = 0;
+ uint64_t total_used = 0;
+ int i;
+ smartlist_t *conns = get_connection_array();
+
+ memset(used_by_type, 0, sizeof(used_by_type));
+ memset(alloc_by_type, 0, sizeof(alloc_by_type));
+ memset(n_conns_by_type, 0, sizeof(n_conns_by_type));
+
+ SMARTLIST_FOREACH_BEGIN(conns, connection_t *, c) {
+ int tp = c->type;
+ ++n_conns_by_type[tp];
+ if (c->inbuf) {
+ used_by_type[tp] += buf_datalen(c->inbuf);
+ alloc_by_type[tp] += buf_allocation(c->inbuf);
+ }
+ if (c->outbuf) {
+ used_by_type[tp] += buf_datalen(c->outbuf);
+ alloc_by_type[tp] += buf_allocation(c->outbuf);
+ }
+ } SMARTLIST_FOREACH_END(c);
+ for (i=0; i <= CONN_TYPE_MAX_; ++i) {
+ total_used += used_by_type[i];
+ total_alloc += alloc_by_type[i];
+ }
+
+ tor_log(severity, LD_GENERAL,
+ "In buffers for %d connections: %"PRIu64" used/%"PRIu64" allocated",
+ smartlist_len(conns),
+          total_used, total_alloc);
+ for (i=CONN_TYPE_MIN_; i <= CONN_TYPE_MAX_; ++i) {
+ if (!n_conns_by_type[i])
+ continue;
+ tor_log(severity, LD_GENERAL,
+ " For %d %s connections: %"PRIu64" used/%"PRIu64" allocated",
+ n_conns_by_type[i], conn_type_to_string(i),
+            used_by_type[i], alloc_by_type[i]);
+ }
+}
+
+/** Verify that connection <b>conn</b> has all of its invariants
+ * correct. Trigger an assert if anything is invalid.
+ */
+void
+assert_connection_ok(connection_t *conn, time_t now)
+{
+ (void) now; /* XXXX unused. */
+ tor_assert(conn);
+ tor_assert(conn->type >= CONN_TYPE_MIN_);
+ tor_assert(conn->type <= CONN_TYPE_MAX_);
+
+ switch (conn->type) {
+ case CONN_TYPE_OR:
+ case CONN_TYPE_EXT_OR:
+ tor_assert(conn->magic == OR_CONNECTION_MAGIC);
+ break;
+ case CONN_TYPE_AP:
+ tor_assert(conn->magic == ENTRY_CONNECTION_MAGIC);
+ break;
+ case CONN_TYPE_EXIT:
+ tor_assert(conn->magic == EDGE_CONNECTION_MAGIC);
+ break;
+ case CONN_TYPE_DIR:
+ tor_assert(conn->magic == DIR_CONNECTION_MAGIC);
+ break;
+ case CONN_TYPE_CONTROL:
+ tor_assert(conn->magic == CONTROL_CONNECTION_MAGIC);
+ break;
+ CASE_ANY_LISTENER_TYPE:
+ tor_assert(conn->magic == LISTENER_CONNECTION_MAGIC);
+ break;
+ default:
+ tor_assert(conn->magic == BASE_CONNECTION_MAGIC);
+ break;
+ }
+
+ if (conn->linked_conn) {
+ tor_assert(conn->linked_conn->linked_conn == conn);
+ tor_assert(conn->linked);
+ }
+ if (conn->linked)
+ tor_assert(!SOCKET_OK(conn->s));
+
+ if (conn->outbuf_flushlen > 0) {
+ /* With optimistic data, we may have queued data in
+ * EXIT_CONN_STATE_RESOLVING while the conn is not yet marked to writing.
+ * */
+ tor_assert((conn->type == CONN_TYPE_EXIT &&
+ conn->state == EXIT_CONN_STATE_RESOLVING) ||
+ connection_is_writing(conn) ||
+ conn->write_blocked_on_bw ||
+ (CONN_IS_EDGE(conn) &&
+ TO_EDGE_CONN(conn)->edge_blocked_on_circ));
+ }
+
+ if (conn->hold_open_until_flushed)
+ tor_assert(conn->marked_for_close);
+
+ /* XXXX check: read_blocked_on_bw, write_blocked_on_bw, s, conn_array_index,
+ * marked_for_close. */
+
+ /* buffers */
+ if (conn->inbuf)
+ buf_assert_ok(conn->inbuf);
+ if (conn->outbuf)
+ buf_assert_ok(conn->outbuf);
+
+ if (conn->type == CONN_TYPE_OR) {
+ or_connection_t *or_conn = TO_OR_CONN(conn);
+ if (conn->state == OR_CONN_STATE_OPEN) {
+ /* tor_assert(conn->bandwidth > 0); */
+ /* the above isn't necessarily true: if we just did a TLS
+ * handshake but we didn't recognize the other peer, or it
+ * gave a bad cert/etc, then we won't have assigned bandwidth,
+ * yet it will be open. -RD
+ */
+// tor_assert(conn->read_bucket >= 0);
+ }
+// tor_assert(conn->addr && conn->port);
+ tor_assert(conn->address);
+ if (conn->state > OR_CONN_STATE_PROXY_HANDSHAKING)
+ tor_assert(or_conn->tls);
+ }
+
+ if (CONN_IS_EDGE(conn)) {
+ /* XXX unchecked: package window, deliver window. */
+ if (conn->type == CONN_TYPE_AP) {
+ entry_connection_t *entry_conn = TO_ENTRY_CONN(conn);
+ if (entry_conn->chosen_exit_optional || entry_conn->chosen_exit_retries)
+ tor_assert(entry_conn->chosen_exit_name);
+
+ tor_assert(entry_conn->socks_request);
+ if (conn->state == AP_CONN_STATE_OPEN) {
+ tor_assert(entry_conn->socks_request->has_finished);
+ if (!conn->marked_for_close) {
+ tor_assert(ENTRY_TO_EDGE_CONN(entry_conn)->cpath_layer);
+ assert_cpath_layer_ok(ENTRY_TO_EDGE_CONN(entry_conn)->cpath_layer);
+ }
+ }
+ }
+ if (conn->type == CONN_TYPE_EXIT) {
+ tor_assert(conn->purpose == EXIT_PURPOSE_CONNECT ||
+ conn->purpose == EXIT_PURPOSE_RESOLVE);
+ }
+ } else if (conn->type == CONN_TYPE_DIR) {
+ } else {
+ /* Purpose is only used for dir and exit types currently */
+ tor_assert(!conn->purpose);
+ }
+
+ switch (conn->type)
+ {
+ CASE_ANY_LISTENER_TYPE:
+ tor_assert(conn->state == LISTENER_STATE_READY);
+ break;
+ case CONN_TYPE_OR:
+ tor_assert(conn->state >= OR_CONN_STATE_MIN_);
+ tor_assert(conn->state <= OR_CONN_STATE_MAX_);
+ break;
+ case CONN_TYPE_EXT_OR:
+ tor_assert(conn->state >= EXT_OR_CONN_STATE_MIN_);
+ tor_assert(conn->state <= EXT_OR_CONN_STATE_MAX_);
+ break;
+ case CONN_TYPE_EXIT:
+ tor_assert(conn->state >= EXIT_CONN_STATE_MIN_);
+ tor_assert(conn->state <= EXIT_CONN_STATE_MAX_);
+ tor_assert(conn->purpose >= EXIT_PURPOSE_MIN_);
+ tor_assert(conn->purpose <= EXIT_PURPOSE_MAX_);
+ break;
+ case CONN_TYPE_AP:
+ tor_assert(conn->state >= AP_CONN_STATE_MIN_);
+ tor_assert(conn->state <= AP_CONN_STATE_MAX_);
+ tor_assert(TO_ENTRY_CONN(conn)->socks_request);
+ break;
+ case CONN_TYPE_DIR:
+ tor_assert(conn->state >= DIR_CONN_STATE_MIN_);
+ tor_assert(conn->state <= DIR_CONN_STATE_MAX_);
+ tor_assert(conn->purpose >= DIR_PURPOSE_MIN_);
+ tor_assert(conn->purpose <= DIR_PURPOSE_MAX_);
+ break;
+ case CONN_TYPE_CONTROL:
+ tor_assert(conn->state >= CONTROL_CONN_STATE_MIN_);
+ tor_assert(conn->state <= CONTROL_CONN_STATE_MAX_);
+ break;
+ default:
+ tor_assert(0);
+ }
+}
+
+/** Fills <b>addr</b>, <b>port</b>, and <b>proxy_type</b> with the details of
+ * the global proxy server we are using, if any.
+ * <b>conn</b> contains the connection we are using the proxy for.
+ *
+ * Return 0 on success, -1 on failure.
+ */
+int
+get_proxy_addrport(tor_addr_t *addr, uint16_t *port, int *proxy_type,
+ const connection_t *conn)
+{
+ const or_options_t *options = get_options();
+
+  /* Client Transport Plugins can use another proxy, but that should be hidden
+   * from the rest of tor (the plugin is responsible for dealing with the
+   * proxy itself), so check for one first. Then check the rest of the proxy
+   * types, so that the config may contain unused ClientTransportPlugin
+   * entries.
+   */
+ if (options->ClientTransportPlugin) {
+ const transport_t *transport = NULL;
+ int r;
+ r = get_transport_by_bridge_addrport(&conn->addr, conn->port, &transport);
+ if (r<0)
+ return -1;
+ if (transport) { /* transport found */
+ tor_addr_copy(addr, &transport->addr);
+ *port = transport->port;
+ *proxy_type = transport->socks_version;
+ return 0;
+ }
+
+ /* Unused ClientTransportPlugin. */
+ }
+
+ if (options->HTTPSProxy) {
+ tor_addr_copy(addr, &options->HTTPSProxyAddr);
+ *port = options->HTTPSProxyPort;
+ *proxy_type = PROXY_CONNECT;
+ return 0;
+ } else if (options->Socks4Proxy) {
+ tor_addr_copy(addr, &options->Socks4ProxyAddr);
+ *port = options->Socks4ProxyPort;
+ *proxy_type = PROXY_SOCKS4;
+ return 0;
+ } else if (options->Socks5Proxy) {
+ tor_addr_copy(addr, &options->Socks5ProxyAddr);
+ *port = options->Socks5ProxyPort;
+ *proxy_type = PROXY_SOCKS5;
+ return 0;
+ }
+
+ tor_addr_make_unspec(addr);
+ *port = 0;
+ *proxy_type = PROXY_NONE;
+ return 0;
+}
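+
+/*
+ * Illustrative sketch: how a caller can distinguish "no proxy" from an
+ * actual proxy configuration. Per the code above, success with
+ * proxy_type == PROXY_NONE means no proxy applies to this connection.
+ *
+ *   tor_addr_t addr;
+ *   uint16_t port;
+ *   int proxy_type;
+ *   if (get_proxy_addrport(&addr, &port, &proxy_type, conn) < 0) {
+ *     // couldn't look up the pluggable transport for this bridge
+ *   } else if (proxy_type != PROXY_NONE) {
+ *     // connect through the proxy at addr:port
+ *   }
+ */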
+
+/** Log a failed connection to a proxy server.
+ * <b>conn</b> is the connection we use the proxy server for. */
+void
+log_failed_proxy_connection(connection_t *conn)
+{
+ tor_addr_t proxy_addr;
+ uint16_t proxy_port;
+ int proxy_type;
+
+ if (get_proxy_addrport(&proxy_addr, &proxy_port, &proxy_type, conn) != 0)
+ return; /* if we have no proxy set up, leave this function. */
+
+ log_warn(LD_NET,
+ "The connection to the %s proxy server at %s just failed. "
+ "Make sure that the proxy server is up and running.",
+ proxy_type_to_string(proxy_type),
+ fmt_addrport(&proxy_addr, proxy_port));
+}
+
+/** Return string representation of <b>proxy_type</b>. */
+static const char *
+proxy_type_to_string(int proxy_type)
+{
+ switch (proxy_type) {
+ case PROXY_CONNECT: return "HTTP";
+ case PROXY_SOCKS4: return "SOCKS4";
+ case PROXY_SOCKS5: return "SOCKS5";
+ case PROXY_PLUGGABLE: return "pluggable transports SOCKS";
+ case PROXY_NONE: return "NULL";
+ default: tor_assert(0);
+ }
+ return NULL; /*Unreached*/
+}
+
+/** Call connection_free_minimal() on every connection in our array, and
+ * release all storage held by connection.c.
+ *
+ * We skip the checks in connection_free(), because at shutdown they
+ * would fail.
+ */
+void
+connection_free_all(void)
+{
+ smartlist_t *conns = get_connection_array();
+
+ /* We don't want to log any messages to controllers. */
+ SMARTLIST_FOREACH(conns, connection_t *, conn,
+ if (conn->type == CONN_TYPE_CONTROL)
+ TO_CONTROL_CONN(conn)->event_mask = 0);
+
+ control_update_global_event_mask();
+
+ /* Unlink everything from the identity map. */
+ connection_or_clear_identity_map();
+ connection_or_clear_ext_or_id_map();
+
+ /* Clear out our list of broken connections */
+ clear_broken_connection_map(0);
+
+ SMARTLIST_FOREACH(conns, connection_t *, conn,
+ connection_free_minimal(conn));
+
+ if (outgoing_addrs) {
+ SMARTLIST_FOREACH(outgoing_addrs, tor_addr_t *, addr, tor_free(addr));
+ smartlist_free(outgoing_addrs);
+ outgoing_addrs = NULL;
+ }
+
+ tor_free(last_interface_ipv4);
+ tor_free(last_interface_ipv6);
+ last_recorded_accounting_at = 0;
+
+ mainloop_event_free(reenable_blocked_connections_ev);
+ reenable_blocked_connections_is_scheduled = 0;
+ memset(&reenable_blocked_connections_delay, 0, sizeof(struct timeval));
+}
+
+/** Log a warning, and possibly emit a control event, that <b>received</b>
+ * arrived with a skewed time of <b>apparent_skew</b> seconds.
+ * <b>trusted</b> indicates that <b>source</b> is one we have more faith in,
+ * so the warning deserves higher severity.
+ */
+MOCK_IMPL(void,
+clock_skew_warning, (const connection_t *conn, long apparent_skew, int trusted,
+ log_domain_mask_t domain, const char *received,
+ const char *source))
+{
+ char dbuf[64];
+ char *ext_source = NULL, *warn = NULL;
+ format_time_interval(dbuf, sizeof(dbuf), apparent_skew);
+ if (conn)
+ tor_asprintf(&ext_source, "%s:%s:%d", source, conn->address, conn->port);
+ else
+ ext_source = tor_strdup(source);
+ log_fn(trusted ? LOG_WARN : LOG_INFO, domain,
+ "Received %s with skewed time (%s): "
+ "It seems that our clock is %s by %s, or that theirs is %s%s. "
+ "Tor requires an accurate clock to work: please check your time, "
+ "timezone, and date settings.", received, ext_source,
+ apparent_skew > 0 ? "ahead" : "behind", dbuf,
+ apparent_skew > 0 ? "behind" : "ahead",
+ (!conn || trusted) ? "" : ", or they are sending us the wrong time");
+ if (trusted) {
+ control_event_general_status(LOG_WARN, "CLOCK_SKEW SKEW=%ld SOURCE=%s",
+ apparent_skew, ext_source);
+ tor_asprintf(&warn, "Clock skew %ld in %s from %s", apparent_skew,
+ received, source);
+ control_event_bootstrap_problem(warn, "CLOCK_SKEW", conn, 1);
+ }
+ tor_free(warn);
+ tor_free(ext_source);
+}
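+
+/* An illustrative call, with hypothetical values: a directory response
+ * suggesting that our clock is an hour ahead might be reported as
+ *
+ *   clock_skew_warning(TO_CONN(conn), 3600, 1, LD_HTTP,
+ *                      "directory", "DIRSERV");
+ */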
diff --git a/src/core/mainloop/connection.h b/src/core/mainloop/connection.h
new file mode 100644
index 0000000000..3419ee65e8
--- /dev/null
+++ b/src/core/mainloop/connection.h
@@ -0,0 +1,343 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file connection.h
+ * \brief Header file for connection.c.
+ **/
+
+#ifndef TOR_CONNECTION_H
+#define TOR_CONNECTION_H
+
+listener_connection_t *TO_LISTENER_CONN(connection_t *);
+
+struct buf_t;
+
+#define CONN_TYPE_MIN_ 3
+/** Type for sockets listening for OR connections. */
+#define CONN_TYPE_OR_LISTENER 3
+/** A bidirectional TLS connection transmitting a sequence of cells.
+ * May be from an OR to an OR, or from an OP to an OR. */
+#define CONN_TYPE_OR 4
+/** A TCP connection from an onion router to a stream's destination. */
+#define CONN_TYPE_EXIT 5
+/** Type for sockets listening for SOCKS connections. */
+#define CONN_TYPE_AP_LISTENER 6
+/** A SOCKS proxy connection from the user application to the onion
+ * proxy. */
+#define CONN_TYPE_AP 7
+/** Type for sockets listening for HTTP connections to the directory server. */
+#define CONN_TYPE_DIR_LISTENER 8
+/** Type for HTTP connections to the directory server. */
+#define CONN_TYPE_DIR 9
+/* Type 10 is unused. */
+/** Type for listening for connections from user interface process. */
+#define CONN_TYPE_CONTROL_LISTENER 11
+/** Type for connections from user interface process. */
+#define CONN_TYPE_CONTROL 12
+/** Type for sockets listening for transparent connections redirected by pf or
+ * netfilter. */
+#define CONN_TYPE_AP_TRANS_LISTENER 13
+/** Type for sockets listening for transparent connections redirected by
+ * natd. */
+#define CONN_TYPE_AP_NATD_LISTENER 14
+/** Type for sockets listening for DNS requests. */
+#define CONN_TYPE_AP_DNS_LISTENER 15
+
+/** Type for connections from the Extended ORPort. */
+#define CONN_TYPE_EXT_OR 16
+/** Type for sockets listening for Extended ORPort connections. */
+#define CONN_TYPE_EXT_OR_LISTENER 17
+/** Type for sockets listening for HTTP CONNECT tunnel connections. */
+#define CONN_TYPE_AP_HTTP_CONNECT_LISTENER 18
+
+#define CONN_TYPE_MAX_ 19
+/* !!!! If CONN_TYPE_MAX_ is ever over 31, we must grow the type field in
+ * connection_t. */
+
+/* Proxy client handshake states */
+/* We use a proxy but we haven't even connected to it yet. */
+#define PROXY_INFANT 1
+/* We use an HTTP proxy and we've sent the CONNECT command. */
+#define PROXY_HTTPS_WANT_CONNECT_OK 2
+/* We use a SOCKS4 proxy and we've sent the CONNECT command. */
+#define PROXY_SOCKS4_WANT_CONNECT_OK 3
+/* We use a SOCKS5 proxy and we try to negotiate without
+   any authentication. */
+#define PROXY_SOCKS5_WANT_AUTH_METHOD_NONE 4
+/* We use a SOCKS5 proxy and we try to negotiate with
+   Username/Password authentication. */
+#define PROXY_SOCKS5_WANT_AUTH_METHOD_RFC1929 5
+/* We use a SOCKS5 proxy and we just sent our credentials. */
+#define PROXY_SOCKS5_WANT_AUTH_RFC1929_OK 6
+/* We use a SOCKS5 proxy and we just sent our CONNECT command. */
+#define PROXY_SOCKS5_WANT_CONNECT_OK 7
+/* We use a proxy and we CONNECTed successfully! */
+#define PROXY_CONNECTED 8
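+
+/* A sketch of the expected progression through these states for a SOCKS5
+ * proxy that requires username/password authentication; the other proxy
+ * types skip the authentication steps:
+ *
+ *   PROXY_INFANT
+ *    -> PROXY_SOCKS5_WANT_AUTH_METHOD_RFC1929   (method list sent)
+ *    -> PROXY_SOCKS5_WANT_AUTH_RFC1929_OK       (credentials sent)
+ *    -> PROXY_SOCKS5_WANT_CONNECT_OK            (CONNECT sent)
+ *    -> PROXY_CONNECTED
+ */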
+
+/** State for any listener connection. */
+#define LISTENER_STATE_READY 0
+
+const char *conn_type_to_string(int type);
+const char *conn_state_to_string(int type, int state);
+int conn_listener_type_supports_af_unix(int type);
+
+dir_connection_t *dir_connection_new(int socket_family);
+or_connection_t *or_connection_new(int type, int socket_family);
+edge_connection_t *edge_connection_new(int type, int socket_family);
+entry_connection_t *entry_connection_new(int type, int socket_family);
+control_connection_t *control_connection_new(int socket_family);
+listener_connection_t *listener_connection_new(int type, int socket_family);
+connection_t *connection_new(int type, int socket_family);
+int connection_init_accepted_conn(connection_t *conn,
+ const listener_connection_t *listener);
+void connection_link_connections(connection_t *conn_a, connection_t *conn_b);
+MOCK_DECL(void,connection_free_,(connection_t *conn));
+#define connection_free(conn) \
+ FREE_AND_NULL(connection_t, connection_free_, (conn))
+void connection_free_all(void);
+void connection_about_to_close_connection(connection_t *conn);
+void connection_close_immediate(connection_t *conn);
+void connection_mark_for_close_(connection_t *conn,
+ int line, const char *file);
+MOCK_DECL(void, connection_mark_for_close_internal_,
+ (connection_t *conn, int line, const char *file));
+
+#define connection_mark_for_close(c) \
+ connection_mark_for_close_((c), __LINE__, SHORT_FILE__)
+#define connection_mark_for_close_internal(c) \
+ connection_mark_for_close_internal_((c), __LINE__, SHORT_FILE__)
+
+/**
+ * Mark 'c' for close, but try to hold it open until all the data is written.
+ * This uses the _internal version of connection_mark_for_close, so only call
+ * it when you are sure that, if this is an or_connection_t, the controlling
+ * channel has been notified (e.g. with connection_or_notify_error()), or
+ * when you actually are connection_or_close_for_error() or
+ * connection_or_close_normally(). In all other cases, use
+ * connection_mark_and_flush() below, which checks for or_connection_t
+ * properly.
+ */
+#define connection_mark_and_flush_internal_(c,line,file) \
+ do { \
+ connection_t *tmp_conn__ = (c); \
+ connection_mark_for_close_internal_(tmp_conn__, (line), (file)); \
+ tmp_conn__->hold_open_until_flushed = 1; \
+ } while (0)
+
+#define connection_mark_and_flush_internal(c) \
+ connection_mark_and_flush_internal_((c), __LINE__, SHORT_FILE__)
+
+/**
+ * Mark 'c' for close, but try to hold it open until all the data is written.
+ */
+#define connection_mark_and_flush_(c,line,file) \
+ do { \
+ connection_t *tmp_conn_ = (c); \
+ if (tmp_conn_->type == CONN_TYPE_OR) { \
+ log_warn(LD_CHANNEL | LD_BUG, \
+ "Something tried to close (and flush) an or_connection_t" \
+ " without going through channels at %s:%d", \
+ file, line); \
+ connection_or_close_for_error(TO_OR_CONN(tmp_conn_), 1); \
+ } else { \
+ connection_mark_and_flush_internal_(c, line, file); \
+ } \
+ } while (0)
+
+#define connection_mark_and_flush(c) \
+ connection_mark_and_flush_((c), __LINE__, SHORT_FILE__)
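+
+/* An illustrative sketch: an edge connection that still has reply data
+ * buffered could be closed with
+ *
+ *   connection_buf_add(reply, reply_len, conn);
+ *   connection_mark_and_flush(conn);
+ *
+ * so that the connection is closed only once its outbuf has drained. */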
+
+void connection_expire_held_open(void);
+
+int connection_connect(connection_t *conn, const char *address,
+ const tor_addr_t *addr,
+ uint16_t port, int *socket_error);
+
+#ifdef HAVE_SYS_UN_H
+
+int connection_connect_unix(connection_t *conn, const char *socket_path,
+ int *socket_error);
+
+#endif /* defined(HAVE_SYS_UN_H) */
+
+/** Maximum size of information that we can fit into SOCKS5 username
+ or password fields. */
+#define MAX_SOCKS5_AUTH_FIELD_SIZE 255
+
+/** Total maximum size of information that we can fit into SOCKS5
+ username and password fields. */
+#define MAX_SOCKS5_AUTH_SIZE_TOTAL (2*MAX_SOCKS5_AUTH_FIELD_SIZE)
+
+int connection_proxy_connect(connection_t *conn, int type);
+int connection_read_proxy_handshake(connection_t *conn);
+void log_failed_proxy_connection(connection_t *conn);
+int get_proxy_addrport(tor_addr_t *addr, uint16_t *port, int *proxy_type,
+ const connection_t *conn);
+
+int retry_all_listeners(smartlist_t *replaced_conns,
+ smartlist_t *new_conns,
+ int close_all_noncontrol);
+
+void connection_mark_all_noncontrol_listeners(void);
+void connection_mark_all_noncontrol_connections(void);
+
+ssize_t connection_bucket_write_limit(connection_t *conn, time_t now);
+int global_write_bucket_low(connection_t *conn, size_t attempt, int priority);
+void connection_bucket_init(void);
+void connection_bucket_adjust(const or_options_t *options);
+void connection_bucket_refill_all(time_t now,
+ uint32_t now_ts);
+void connection_read_bw_exhausted(connection_t *conn, bool is_global_bw);
+void connection_write_bw_exhausted(connection_t *conn, bool is_global_bw);
+void connection_consider_empty_read_buckets(connection_t *conn);
+void connection_consider_empty_write_buckets(connection_t *conn);
+
+int connection_handle_read(connection_t *conn);
+
+int connection_buf_get_bytes(char *string, size_t len, connection_t *conn);
+int connection_buf_get_line(connection_t *conn, char *data,
+ size_t *data_len);
+int connection_fetch_from_buf_http(connection_t *conn,
+ char **headers_out, size_t max_headerlen,
+ char **body_out, size_t *body_used,
+ size_t max_bodylen, int force_complete);
+
+int connection_wants_to_flush(connection_t *conn);
+int connection_outbuf_too_full(connection_t *conn);
+int connection_handle_write(connection_t *conn, int force);
+int connection_flush(connection_t *conn);
+
+MOCK_DECL(void, connection_write_to_buf_impl_,
+ (const char *string, size_t len, connection_t *conn, int zlib));
+/** Append <b>len</b> bytes of <b>string</b> to <b>conn</b>'s outbuf. */
+static void connection_buf_add(const char *string, size_t len,
+ connection_t *conn);
+static inline void
+connection_buf_add(const char *string, size_t len, connection_t *conn)
+{
+ connection_write_to_buf_impl_(string, len, conn, 0);
+}
+void connection_buf_add_compress(const char *string, size_t len,
+ dir_connection_t *conn, int done);
+void connection_buf_add_buf(connection_t *conn, struct buf_t *buf);
+
+size_t connection_get_inbuf_len(connection_t *conn);
+size_t connection_get_outbuf_len(connection_t *conn);
+connection_t *connection_get_by_global_id(uint64_t id);
+
+connection_t *connection_get_by_type(int type);
+MOCK_DECL(connection_t *,connection_get_by_type_addr_port_purpose,(int type,
+ const tor_addr_t *addr,
+ uint16_t port, int purpose));
+connection_t *connection_get_by_type_state(int type, int state);
+connection_t *connection_get_by_type_state_rendquery(int type, int state,
+ const char *rendquery);
+smartlist_t *connection_list_by_type_state(int type, int state);
+smartlist_t *connection_list_by_type_purpose(int type, int purpose);
+smartlist_t *connection_dir_list_by_purpose_and_resource(
+ int purpose,
+ const char *resource);
+smartlist_t *connection_dir_list_by_purpose_resource_and_state(
+ int purpose,
+ const char *resource,
+ int state);
+
+#define CONN_LEN_AND_FREE_TEMPLATE(sl) \
+ STMT_BEGIN \
+ int len = smartlist_len(sl); \
+ smartlist_free(sl); \
+ return len; \
+ STMT_END
+
+/** Return a count of directory connections that are fetching the item
+ * described by <b>purpose</b>/<b>resource</b>. */
+static inline int
+connection_dir_count_by_purpose_and_resource(
+ int purpose,
+ const char *resource)
+{
+ smartlist_t *conns = connection_dir_list_by_purpose_and_resource(
+ purpose,
+ resource);
+ CONN_LEN_AND_FREE_TEMPLATE(conns);
+}
+
+/** Return a count of directory connections that are fetching the item
+ * described by <b>purpose</b>/<b>resource</b>/<b>state</b>. */
+static inline int
+connection_dir_count_by_purpose_resource_and_state(
+ int purpose,
+ const char *resource,
+ int state)
+{
+ smartlist_t *conns =
+ connection_dir_list_by_purpose_resource_and_state(
+ purpose,
+ resource,
+ state);
+ CONN_LEN_AND_FREE_TEMPLATE(conns);
+}
+
+#undef CONN_LEN_AND_FREE_TEMPLATE
+
+int any_other_active_or_conns(const or_connection_t *this_conn);
+
+/* || 0 is for -Wparentheses-equality (-Wall?) appeasement under clang */
+#define connection_speaks_cells(conn) (((conn)->type == CONN_TYPE_OR) || 0)
+int connection_is_listener(connection_t *conn);
+int connection_state_is_open(connection_t *conn);
+int connection_state_is_connecting(connection_t *conn);
+
+char *alloc_http_authenticator(const char *authenticator);
+
+void assert_connection_ok(connection_t *conn, time_t now);
+int connection_or_nonopen_was_started_here(or_connection_t *conn);
+void connection_dump_buffer_mem_stats(int severity);
+
+MOCK_DECL(void, clock_skew_warning,
+ (const connection_t *conn, long apparent_skew, int trusted,
+ log_domain_mask_t domain, const char *received,
+ const char *source));
+
+int connection_is_moribund(connection_t *conn);
+void connection_check_oos(int n_socks, int failed);
+
+/** Execute the statement <b>stmt</b>, which may log events concerning the
+ * connection <b>conn</b>. To prevent infinite loops, disable log messages
+ * being sent to controllers if <b>conn</b> is a control connection.
+ *
+ * Stmt must not contain any return or goto statements.
+ */
+#define CONN_LOG_PROTECT(conn, stmt) \
+ STMT_BEGIN \
+ int _log_conn_is_control; \
+ tor_assert(conn); \
+ _log_conn_is_control = (conn->type == CONN_TYPE_CONTROL); \
+ if (_log_conn_is_control) \
+ disable_control_logging(); \
+ STMT_BEGIN stmt; STMT_END; \
+ if (_log_conn_is_control) \
+ enable_control_logging(); \
+ STMT_END
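+
+/* For example, flushing a marked connection wraps the write so that any
+ * log messages the flush generates are not echoed back into the control
+ * connection being flushed:
+ *
+ *   CONN_LOG_PROTECT(conn, retval = connection_handle_write(conn, 1));
+ */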
+
+#ifdef CONNECTION_PRIVATE
+STATIC void connection_free_minimal(connection_t *conn);
+
+/* Used only by connection.c and test*.c */
+MOCK_DECL(STATIC int,connection_connect_sockaddr,
+ (connection_t *conn,
+ const struct sockaddr *sa,
+ socklen_t sa_len,
+ const struct sockaddr *bindaddr,
+ socklen_t bindaddr_len,
+ int *socket_error));
+MOCK_DECL(STATIC void, kill_conn_list_for_oos, (smartlist_t *conns));
+MOCK_DECL(STATIC smartlist_t *, pick_oos_victims, (int n));
+
+#endif /* defined(CONNECTION_PRIVATE) */
+
+#endif /* !defined(TOR_CONNECTION_H) */
diff --git a/src/core/mainloop/cpuworker.c b/src/core/mainloop/cpuworker.c
new file mode 100644
index 0000000000..8b58e4c68c
--- /dev/null
+++ b/src/core/mainloop/cpuworker.c
@@ -0,0 +1,599 @@
+/* Copyright (c) 2003-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file cpuworker.c
+ * \brief Uses the workqueue/threadpool code to farm CPU-intensive activities
+ * out to subprocesses.
+ *
+ * The multithreading backend for this module is in workqueue.c; this module
+ * specializes workqueue.c.
+ *
+ * Right now, we use this infrastructure
+ * <ul><li>for processing onionskins in onion.c
+ * <li>for compressing consensuses in consdiffmgr.c,
+ * <li>and for calculating diffs and compressing them in consdiffmgr.c.
+ * </ul>
+ **/
+#include "or/or.h"
+#include "or/channel.h"
+#include "or/circuitbuild.h"
+#include "or/circuitlist.h"
+#include "or/connection_or.h"
+#include "or/config.h"
+#include "or/cpuworker.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "lib/crypt_ops/crypto_util.h"
+#include "or/main.h"
+#include "or/onion.h"
+#include "or/rephist.h"
+#include "or/router.h"
+#include "lib/evloop/workqueue.h"
+
+#include "or/or_circuit_st.h"
+#include "lib/intmath/weakrng.h"
+
+static void queue_pending_tasks(void);
+
+typedef struct worker_state_s {
+ int generation;
+ server_onion_keys_t *onion_keys;
+} worker_state_t;
+
+static void *
+worker_state_new(void *arg)
+{
+ worker_state_t *ws;
+ (void)arg;
+ ws = tor_malloc_zero(sizeof(worker_state_t));
+ ws->onion_keys = server_onion_keys_new();
+ return ws;
+}
+
+#define worker_state_free(ws) \
+ FREE_AND_NULL(worker_state_t, worker_state_free_, (ws))
+
+static void
+worker_state_free_(worker_state_t *ws)
+{
+ if (!ws)
+ return;
+ server_onion_keys_free(ws->onion_keys);
+ tor_free(ws);
+}
+
+static void
+worker_state_free_void(void *arg)
+{
+ worker_state_free_(arg);
+}
+
+static replyqueue_t *replyqueue = NULL;
+static threadpool_t *threadpool = NULL;
+
+static tor_weak_rng_t request_sample_rng = TOR_WEAK_RNG_INIT;
+
+static int total_pending_tasks = 0;
+static int max_pending_tasks = 128;
+
+/** Initialize the cpuworker subsystem. It is OK to call this more than once
+ * during Tor's lifetime.
+ */
+void
+cpu_init(void)
+{
+ if (!replyqueue) {
+ replyqueue = replyqueue_new(0);
+ }
+ if (!threadpool) {
+ /*
+ In our threadpool implementation, half the threads are permissive and
+ half are strict (when it comes to running lower-priority tasks). So we
+ always make sure we have at least two threads, so that there will be at
+ least one thread of each kind.
+ */
+ const int n_threads = get_num_cpus(get_options()) + 1;
+ threadpool = threadpool_new(n_threads,
+ replyqueue,
+ worker_state_new,
+ worker_state_free_void,
+ NULL);
+
+ int r = threadpool_register_reply_event(threadpool, NULL);
+
+ tor_assert(r == 0);
+ }
+
+ /* Total voodoo. Can we make this more sensible? */
+ max_pending_tasks = get_num_cpus(get_options()) * 64;
+ crypto_seed_weak_rng(&request_sample_rng);
+}
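+
+/* For instance, if get_num_cpus() reports 4, cpu_init() creates 5 worker
+ * threads and allows up to 4 * 64 = 256 tasks in flight; any further
+ * onionskins wait in the onion.c queue until a slot frees up. */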
+
+/** Magic numbers to make sure our cpuworker_requests don't grow any
+ * mis-framing bugs. */
+#define CPUWORKER_REQUEST_MAGIC 0xda4afeed
+#define CPUWORKER_REPLY_MAGIC 0x5eedf00d
+
+/** A request sent to a cpuworker. */
+typedef struct cpuworker_request_t {
+ /** Magic number; must be CPUWORKER_REQUEST_MAGIC. */
+ uint32_t magic;
+
+ /** Flag: Are we timing this request? */
+ unsigned timed : 1;
+ /** If we're timing this request, when was it sent to the cpuworker? */
+ struct timeval started_at;
+
+ /** A create cell for the cpuworker to process. */
+ create_cell_t create_cell;
+
+ /* Turn the above into a tagged union if needed. */
+} cpuworker_request_t;
+
+/** A reply sent by a cpuworker. */
+typedef struct cpuworker_reply_t {
+ /** Magic number; must be CPUWORKER_REPLY_MAGIC. */
+ uint32_t magic;
+
+ /** True iff we got a successful request. */
+ uint8_t success;
+
+ /** Are we timing this request? */
+ unsigned int timed : 1;
+ /** What handshake type was the request? (Used for timing) */
+ uint16_t handshake_type;
+ /** When did we send the request to the cpuworker? */
+ struct timeval started_at;
+ /** Once the cpuworker received the request, how many microseconds did it
+   * take? (This shouldn't overflow; 4 billion microseconds is over an hour,
+ * and we'll never have an onion handshake that takes so long.) */
+ uint32_t n_usec;
+
+ /** Output of processing a create cell
+ *
+ * @{
+ */
+ /** The created cell to send back. */
+ created_cell_t created_cell;
+ /** The keys to use on this circuit. */
+ uint8_t keys[CPATH_KEY_MATERIAL_LEN];
+ /** Input to use for authenticating introduce1 cells. */
+ uint8_t rend_auth_material[DIGEST_LEN];
+} cpuworker_reply_t;
+
+typedef struct cpuworker_job_u {
+ or_circuit_t *circ;
+ union {
+ cpuworker_request_t request;
+ cpuworker_reply_t reply;
+ } u;
+} cpuworker_job_t;
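+
+/* The same job allocation travels in both directions: the main thread
+ * fills in u.request, the worker thread overwrites it with u.reply, and
+ * cpuworker_onion_handshake_replyfn() reads the reply back on the main
+ * thread. */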
+
+static workqueue_reply_t
+update_state_threadfn(void *state_, void *work_)
+{
+ worker_state_t *state = state_;
+ worker_state_t *update = work_;
+ server_onion_keys_free(state->onion_keys);
+ state->onion_keys = update->onion_keys;
+ update->onion_keys = NULL;
+ worker_state_free(update);
+ ++state->generation;
+ return WQ_RPL_REPLY;
+}
+
+/** Called when the onion key has changed: queue a state update so that
+ * every CPU worker regenerates its copy of the onion keys.
+ */
+void
+cpuworkers_rotate_keyinfo(void)
+{
+ if (!threadpool) {
+ /* If we're a client, then we won't have cpuworkers, and we won't need
+ * to tell them to rotate their state.
+ */
+ return;
+ }
+ if (threadpool_queue_update(threadpool,
+ worker_state_new,
+ update_state_threadfn,
+ worker_state_free_void,
+ NULL)) {
+ log_warn(LD_OR, "Failed to queue key update for worker threads.");
+ }
+}
+
+/** Indexed by handshake type: how many onionskins have we processed and
+ * counted of that type? */
+static uint64_t onionskins_n_processed[MAX_ONION_HANDSHAKE_TYPE+1];
+/** Indexed by handshake type, corresponding to the onionskins counted in
+ * onionskins_n_processed: how many microseconds have we spent in cpuworkers
+ * processing that kind of onionskin? */
+static uint64_t onionskins_usec_internal[MAX_ONION_HANDSHAKE_TYPE+1];
+/** Indexed by handshake type, corresponding to onionskins counted in
+ * onionskins_n_processed: how many microseconds have we spent waiting for
+ * cpuworkers to give us answers for that kind of onionskin?
+ */
+static uint64_t onionskins_usec_roundtrip[MAX_ONION_HANDSHAKE_TYPE+1];
+
+/** If any onionskin takes longer than this, we clip them to this
+ * time. (microseconds) */
+#define MAX_BELIEVABLE_ONIONSKIN_DELAY (2*1000*1000)
+
+/** Return true iff we'd like to measure a handshake of type
+ * <b>onionskin_type</b>. Call only from the main thread. */
+static int
+should_time_request(uint16_t onionskin_type)
+{
+ /* If we've never heard of this type, we shouldn't even be here. */
+ if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE)
+ return 0;
+ /* Measure the first N handshakes of each type, to ensure we have a
+ * sample */
+ if (onionskins_n_processed[onionskin_type] < 4096)
+ return 1;
+  /* Otherwise, measure with P=1/128. We avoid doing this for every
+ * handshake, since the measurement itself can take a little time. */
+ return tor_weak_random_one_in_n(&request_sample_rng, 128);
+}
+
+/** Return an estimate of how many microseconds we will need for a single
+ * cpuworker to process <b>n_requests</b> onionskins of type
+ * <b>onionskin_type</b>. */
+uint64_t
+estimated_usec_for_onionskins(uint32_t n_requests, uint16_t onionskin_type)
+{
+ if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE) /* should be impossible */
+ return 1000 * (uint64_t)n_requests;
+ if (PREDICT_UNLIKELY(onionskins_n_processed[onionskin_type] < 100)) {
+    /* Until we have 100 data points, just assume everything takes 1 msec. */
+ return 1000 * (uint64_t)n_requests;
+ } else {
+ /* This can't overflow: we'll never have more than 500000 onionskins
+ * measured in onionskin_usec_internal, and they won't take anything near
+ * 1 sec each, and we won't have anything like 1 million queued
+ * onionskins. But that's 5e5 * 1e6 * 1e6, which is still less than
+ * UINT64_MAX. */
+ return (onionskins_usec_internal[onionskin_type] * n_requests) /
+ onionskins_n_processed[onionskin_type];
+ }
+}
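+
+/* Worked example: if 2000 handshakes of some type have been measured at a
+ * total of 500000 usec of worker time, the estimate for 8 more requests is
+ * (500000 * 8) / 2000 = 2000 usec. */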
+
+/** Compute the absolute and relative overhead of using the cpuworker
+ * framework for onionskins of type <b>onionskin_type</b>.*/
+static int
+get_overhead_for_onionskins(uint32_t *usec_out, double *frac_out,
+ uint16_t onionskin_type)
+{
+ uint64_t overhead;
+
+ *usec_out = 0;
+ *frac_out = 0.0;
+
+ if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE) /* should be impossible */
+ return -1;
+ if (onionskins_n_processed[onionskin_type] == 0 ||
+ onionskins_usec_internal[onionskin_type] == 0 ||
+ onionskins_usec_roundtrip[onionskin_type] == 0)
+ return -1;
+
+ overhead = onionskins_usec_roundtrip[onionskin_type] -
+ onionskins_usec_internal[onionskin_type];
+
+ *usec_out = (uint32_t)(overhead / onionskins_n_processed[onionskin_type]);
+ *frac_out = ((double)overhead) / onionskins_usec_internal[onionskin_type];
+
+ return 0;
+}
+
+/** If we've measured overhead for onionskins of type <b>onionskin_type</b>,
+ * log it. */
+void
+cpuworker_log_onionskin_overhead(int severity, int onionskin_type,
+ const char *onionskin_type_name)
+{
+ uint32_t overhead;
+ double relative_overhead;
+ int r;
+
+ r = get_overhead_for_onionskins(&overhead, &relative_overhead,
+ onionskin_type);
+ if (!overhead || r<0)
+ return;
+
+ log_fn(severity, LD_OR,
+ "%s onionskins have averaged %u usec overhead (%.2f%%) in "
+ "cpuworker code ",
+ onionskin_type_name, (unsigned)overhead, relative_overhead*100);
+}
+
+/** Handle a reply from the worker threads. */
+static void
+cpuworker_onion_handshake_replyfn(void *work_)
+{
+ cpuworker_job_t *job = work_;
+ cpuworker_reply_t rpl;
+ or_circuit_t *circ = NULL;
+
+ tor_assert(total_pending_tasks > 0);
+ --total_pending_tasks;
+
+ /* Could avoid this, but doesn't matter. */
+ memcpy(&rpl, &job->u.reply, sizeof(rpl));
+
+ tor_assert(rpl.magic == CPUWORKER_REPLY_MAGIC);
+
+ if (rpl.timed && rpl.success &&
+ rpl.handshake_type <= MAX_ONION_HANDSHAKE_TYPE) {
+ /* Time how long this request took. The handshake_type check should be
+ needless, but let's leave it in to be safe. */
+ struct timeval tv_end, tv_diff;
+ int64_t usec_roundtrip;
+ tor_gettimeofday(&tv_end);
+ timersub(&tv_end, &rpl.started_at, &tv_diff);
+ usec_roundtrip = ((int64_t)tv_diff.tv_sec)*1000000 + tv_diff.tv_usec;
+ if (usec_roundtrip >= 0 &&
+ usec_roundtrip < MAX_BELIEVABLE_ONIONSKIN_DELAY) {
+ ++onionskins_n_processed[rpl.handshake_type];
+ onionskins_usec_internal[rpl.handshake_type] += rpl.n_usec;
+ onionskins_usec_roundtrip[rpl.handshake_type] += usec_roundtrip;
+ if (onionskins_n_processed[rpl.handshake_type] >= 500000) {
+ /* Scale down every 500000 handshakes. On a busy server, that's
+ * less impressive than it sounds. */
+ onionskins_n_processed[rpl.handshake_type] /= 2;
+ onionskins_usec_internal[rpl.handshake_type] /= 2;
+ onionskins_usec_roundtrip[rpl.handshake_type] /= 2;
+ }
+ }
+ }
+
+ circ = job->circ;
+
+ log_debug(LD_OR,
+ "Unpacking cpuworker reply %p, circ=%p, success=%d",
+ job, circ, rpl.success);
+
+ if (circ->base_.magic == DEAD_CIRCUIT_MAGIC) {
+ /* The circuit was supposed to get freed while the reply was
+ * pending. Instead, it got left for us to free so that we wouldn't freak
+ * out when the job->circ field wound up pointing to nothing. */
+ log_debug(LD_OR, "Circuit died while reply was pending. Freeing memory.");
+ circ->base_.magic = 0;
+ tor_free(circ);
+ goto done_processing;
+ }
+
+ circ->workqueue_entry = NULL;
+
+ if (TO_CIRCUIT(circ)->marked_for_close) {
+ /* We already marked this circuit; we can't call it open. */
+ log_debug(LD_OR,"circuit is already marked.");
+ goto done_processing;
+ }
+
+ if (rpl.success == 0) {
+ log_debug(LD_OR,
+ "decoding onionskin failed. "
+ "(Old key or bad software.) Closing.");
+ circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_TORPROTOCOL);
+ goto done_processing;
+ }
+
+ if (onionskin_answer(circ,
+ &rpl.created_cell,
+ (const char*)rpl.keys, sizeof(rpl.keys),
+ rpl.rend_auth_material) < 0) {
+ log_warn(LD_OR,"onionskin_answer failed. Closing.");
+ circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_INTERNAL);
+ goto done_processing;
+ }
+ log_debug(LD_OR,"onionskin_answer succeeded. Yay.");
+
+ done_processing:
+ memwipe(&rpl, 0, sizeof(rpl));
+ memwipe(job, 0, sizeof(*job));
+ tor_free(job);
+ queue_pending_tasks();
+}
+
+/** Implementation function for onion handshake requests. */
+static workqueue_reply_t
+cpuworker_onion_handshake_threadfn(void *state_, void *work_)
+{
+ worker_state_t *state = state_;
+ cpuworker_job_t *job = work_;
+
+ /* variables for onion processing */
+ server_onion_keys_t *onion_keys = state->onion_keys;
+ cpuworker_request_t req;
+ cpuworker_reply_t rpl;
+
+ memcpy(&req, &job->u.request, sizeof(req));
+
+ tor_assert(req.magic == CPUWORKER_REQUEST_MAGIC);
+ memset(&rpl, 0, sizeof(rpl));
+
+ const create_cell_t *cc = &req.create_cell;
+ created_cell_t *cell_out = &rpl.created_cell;
+ struct timeval tv_start = {0,0}, tv_end;
+ int n;
+ rpl.timed = req.timed;
+ rpl.started_at = req.started_at;
+ rpl.handshake_type = cc->handshake_type;
+ if (req.timed)
+ tor_gettimeofday(&tv_start);
+ n = onion_skin_server_handshake(cc->handshake_type,
+ cc->onionskin, cc->handshake_len,
+ onion_keys,
+ cell_out->reply,
+ rpl.keys, CPATH_KEY_MATERIAL_LEN,
+ rpl.rend_auth_material);
+ if (n < 0) {
+ /* failure */
+ log_debug(LD_OR,"onion_skin_server_handshake failed.");
+ memset(&rpl, 0, sizeof(rpl));
+ rpl.success = 0;
+ } else {
+ /* success */
+ log_debug(LD_OR,"onion_skin_server_handshake succeeded.");
+ cell_out->handshake_len = n;
+ switch (cc->cell_type) {
+ case CELL_CREATE:
+ cell_out->cell_type = CELL_CREATED; break;
+ case CELL_CREATE2:
+ cell_out->cell_type = CELL_CREATED2; break;
+ case CELL_CREATE_FAST:
+ cell_out->cell_type = CELL_CREATED_FAST; break;
+ default:
+ tor_assert(0);
+ return WQ_RPL_SHUTDOWN;
+ }
+ rpl.success = 1;
+ }
+ rpl.magic = CPUWORKER_REPLY_MAGIC;
+ if (req.timed) {
+ struct timeval tv_diff;
+ int64_t usec;
+ tor_gettimeofday(&tv_end);
+ timersub(&tv_end, &tv_start, &tv_diff);
+ usec = ((int64_t)tv_diff.tv_sec)*1000000 + tv_diff.tv_usec;
+ if (usec < 0 || usec > MAX_BELIEVABLE_ONIONSKIN_DELAY)
+ rpl.n_usec = MAX_BELIEVABLE_ONIONSKIN_DELAY;
+ else
+ rpl.n_usec = (uint32_t) usec;
+ }
+
+ memcpy(&job->u.reply, &rpl, sizeof(rpl));
+
+ memwipe(&req, 0, sizeof(req));
+  memwipe(&rpl, 0, sizeof(rpl));
+ return WQ_RPL_REPLY;
+}
+
+/** Take pending tasks from the queue and assign them to cpuworkers. */
+static void
+queue_pending_tasks(void)
+{
+ or_circuit_t *circ;
+ create_cell_t *onionskin = NULL;
+
+ while (total_pending_tasks < max_pending_tasks) {
+ circ = onion_next_task(&onionskin);
+
+ if (!circ)
+ return;
+
+ if (assign_onionskin_to_cpuworker(circ, onionskin) < 0)
+ log_info(LD_OR,"assign_to_cpuworker failed. Ignoring.");
+ }
+}
+
+/** Queue <b>fn</b> as work in the cpuworker threadpool at <b>priority</b>;
+ * call <b>reply_fn</b> on the main thread once it finishes. Return the new
+ * workqueue entry, or NULL on failure. */
+MOCK_IMPL(workqueue_entry_t *,
+cpuworker_queue_work,(workqueue_priority_t priority,
+ workqueue_reply_t (*fn)(void *, void *),
+ void (*reply_fn)(void *),
+ void *arg))
+{
+ tor_assert(threadpool);
+
+ return threadpool_queue_work_priority(threadpool,
+ priority,
+ fn,
+ reply_fn,
+ arg);
+}
+
+/** Try to tell a cpuworker to perform the public key operations necessary to
+ * respond to <b>onionskin</b> for the circuit <b>circ</b>.
+ *
+ * Return 0 if we successfully assign the task, or -1 on failure.
+ */
+int
+assign_onionskin_to_cpuworker(or_circuit_t *circ,
+ create_cell_t *onionskin)
+{
+ workqueue_entry_t *queue_entry;
+ cpuworker_job_t *job;
+ cpuworker_request_t req;
+ int should_time;
+
+ tor_assert(threadpool);
+
+ if (!circ->p_chan) {
+ log_info(LD_OR,"circ->p_chan gone. Failing circ.");
+ tor_free(onionskin);
+ return -1;
+ }
+
+ if (total_pending_tasks >= max_pending_tasks) {
+ log_debug(LD_OR,"No idle cpuworkers. Queuing.");
+ if (onion_pending_add(circ, onionskin) < 0) {
+ tor_free(onionskin);
+ return -1;
+ }
+ return 0;
+ }
+
+ if (!channel_is_client(circ->p_chan))
+ rep_hist_note_circuit_handshake_assigned(onionskin->handshake_type);
+
+ should_time = should_time_request(onionskin->handshake_type);
+ memset(&req, 0, sizeof(req));
+ req.magic = CPUWORKER_REQUEST_MAGIC;
+ req.timed = should_time;
+
+ memcpy(&req.create_cell, onionskin, sizeof(create_cell_t));
+
+ tor_free(onionskin);
+
+ if (should_time)
+ tor_gettimeofday(&req.started_at);
+
+ job = tor_malloc_zero(sizeof(cpuworker_job_t));
+ job->circ = circ;
+ memcpy(&job->u.request, &req, sizeof(req));
+ memwipe(&req, 0, sizeof(req));
+
+ ++total_pending_tasks;
+ queue_entry = threadpool_queue_work_priority(threadpool,
+ WQ_PRI_HIGH,
+ cpuworker_onion_handshake_threadfn,
+ cpuworker_onion_handshake_replyfn,
+ job);
+ if (!queue_entry) {
+ log_warn(LD_BUG, "Couldn't queue work on threadpool");
+ tor_free(job);
+ return -1;
+ }
+
+ log_debug(LD_OR, "Queued task %p (qe=%p, circ=%p)",
+ job, queue_entry, job->circ);
+
+ circ->workqueue_entry = queue_entry;
+
+ return 0;
+}
+
+/** If <b>circ</b> has a pending handshake that hasn't been processed yet,
+ * remove it from the worker queue. */
+void
+cpuworker_cancel_circ_handshake(or_circuit_t *circ)
+{
+ cpuworker_job_t *job;
+ if (circ->workqueue_entry == NULL)
+ return;
+
+ job = workqueue_entry_cancel(circ->workqueue_entry);
+ if (job) {
+ /* It successfully cancelled. */
+ memwipe(job, 0xe0, sizeof(*job));
+ tor_free(job);
+ tor_assert(total_pending_tasks > 0);
+ --total_pending_tasks;
+    /* (If cancellation had failed and job were NULL, this would be done
+     * in cpuworker_onion_handshake_replyfn instead.) */
+ circ->workqueue_entry = NULL;
+ }
+}
diff --git a/src/core/mainloop/cpuworker.h b/src/core/mainloop/cpuworker.h
new file mode 100644
index 0000000000..50812b2dab
--- /dev/null
+++ b/src/core/mainloop/cpuworker.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file cpuworker.h
+ * \brief Header file for cpuworker.c.
+ **/
+
+#ifndef TOR_CPUWORKER_H
+#define TOR_CPUWORKER_H
+
+void cpu_init(void);
+void cpuworkers_rotate_keyinfo(void);
+struct workqueue_entry_s;
+enum workqueue_reply_t;
+enum workqueue_priority_t;
+MOCK_DECL(struct workqueue_entry_s *, cpuworker_queue_work, (
+ enum workqueue_priority_t priority,
+ enum workqueue_reply_t (*fn)(void *, void *),
+ void (*reply_fn)(void *),
+ void *arg));
+
+struct create_cell_t;
+int assign_onionskin_to_cpuworker(or_circuit_t *circ,
+ struct create_cell_t *onionskin);
+
+uint64_t estimated_usec_for_onionskins(uint32_t n_requests,
+ uint16_t onionskin_type);
+void cpuworker_log_onionskin_overhead(int severity, int onionskin_type,
+ const char *onionskin_type_name);
+void cpuworker_cancel_circ_handshake(or_circuit_t *circ);
+
+#endif /* !defined(TOR_CPUWORKER_H) */
+
diff --git a/src/core/mainloop/main.c b/src/core/mainloop/main.c
new file mode 100644
index 0000000000..2a57aa587a
--- /dev/null
+++ b/src/core/mainloop/main.c
@@ -0,0 +1,4311 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file main.c
+ * \brief Toplevel module. Handles signals, multiplexes between
+ * connections, implements main loop, and drives scheduled events.
+ *
+ * For the main loop itself, see run_main_loop_once(). It invokes the rest of
+ * Tor mostly through Libevent callbacks. Libevent callbacks can happen when
+ * a timer elapses, a signal is received, a socket is ready to read or write,
+ * or an event is manually activated.
+ *
+ * Most events in Tor are driven from these callbacks:
+ * <ul>
+ * <li>conn_read_callback() and conn_write_callback() here, which are
+ * invoked when a socket is ready to read or write respectively.
+ * <li>signal_callback(), which handles incoming signals.
+ * </ul>
+ * Other events are used for specific purposes, or for building more complex
+ * control structures. If you search for usage of tor_libevent_new(), you
+ * will find all the events that we construct in Tor.
+ *
+ * Tor has numerous housekeeping operations that need to happen
+ * regularly. They are handled in different ways:
+ * <ul>
+ * <li>The most frequent operations are handled after every read or write
+ * event, at the end of connection_handle_read() and
+ * connection_handle_write().
+ *
+ * <li>The next most frequent operations happen after each invocation of the
+ * main loop, in run_main_loop_once().
+ *
+ * <li>Once per second, we run all of the operations listed in
+ *   second_elapsed_callback(), and in its child, run_scheduled_events().
+ *
+ * <li>More infrequent operations take place based on the periodic event
+ *   driver in periodic.c. These are stored in the periodic_events[]
+ * table.
+ * </ul>
+ *
+ **/
+
+#define MAIN_PRIVATE
+#include "or/or.h"
+#include "or/addressmap.h"
+#include "lib/err/backtrace.h"
+#include "or/bridges.h"
+#include "lib/container/buffers.h"
+#include "lib/tls/buffers_tls.h"
+#include "or/channel.h"
+#include "or/channeltls.h"
+#include "or/channelpadding.h"
+#include "or/circuitbuild.h"
+#include "or/circuitlist.h"
+#include "or/circuituse.h"
+#include "or/circuitmux_ewma.h"
+#include "or/command.h"
+#include "lib/compress/compress.h"
+#include "or/config.h"
+#include "or/confparse.h"
+#include "or/connection.h"
+#include "or/connection_edge.h"
+#include "or/connection_or.h"
+#include "or/consdiffmgr.h"
+#include "or/control.h"
+#include "or/cpuworker.h"
+#include "lib/crypt_ops/crypto_s2k.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "or/directory.h"
+#include "or/dirserv.h"
+#include "or/dns.h"
+#include "or/dnsserv.h"
+#include "or/dos.h"
+#include "or/entrynodes.h"
+#include "or/geoip.h"
+#include "or/hibernate.h"
+#include "or/hs_cache.h"
+#include "or/hs_circuitmap.h"
+#include "or/hs_client.h"
+#include "or/keypin.h"
+#include "or/main.h"
+#include "or/microdesc.h"
+#include "or/networkstatus.h"
+#include "or/nodelist.h"
+#include "app/ntmain.h"
+#include "or/onion.h"
+#include "or/periodic.h"
+#include "or/policies.h"
+#include "or/protover.h"
+#include "or/transports.h"
+#include "or/relay.h"
+#include "or/rendclient.h"
+#include "or/rendcommon.h"
+#include "or/rendservice.h"
+#include "or/rephist.h"
+#include "or/router.h"
+#include "or/routerkeys.h"
+#include "or/routerlist.h"
+#include "or/routerparse.h"
+#include "or/scheduler.h"
+#include "or/statefile.h"
+#include "or/status.h"
+#include "or/tor_api.h"
+#include "or/tor_api_internal.h"
+#include "lib/process/waitpid.h"
+#include "or/ext_orport.h"
+#include "lib/memarea/memarea.h"
+#include "lib/meminfo/meminfo.h"
+#include "lib/osinfo/uname.h"
+#include "lib/sandbox/sandbox.h"
+#include "lib/fs/lockfile.h"
+#include "lib/net/buffers_net.h"
+#include "lib/tls/tortls.h"
+#include "lib/evloop/compat_libevent.h"
+#include "lib/encoding/confline.h"
+#include "lib/evloop/timers.h"
+
+#include <event2/event.h>
+
+#include "or/dirauth/dirvote.h"
+#include "or/dirauth/mode.h"
+#include "or/dirauth/shared_random.h"
+
+#include "or/cell_st.h"
+#include "or/entry_connection_st.h"
+#include "or/networkstatus_st.h"
+#include "or/or_connection_st.h"
+#include "or/or_state_st.h"
+#include "or/port_cfg_st.h"
+#include "or/routerinfo_st.h"
+#include "or/socks_request_st.h"
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifdef HAVE_SYSTEMD
+# if defined(__COVERITY__) && !defined(__INCLUDE_LEVEL__)
+/* Systemd's use of gcc's __INCLUDE_LEVEL__ extension macro appears to confuse
+ * Coverity. Here's a kludge to unconfuse it.
+ */
+# define __INCLUDE_LEVEL__ 2
+#endif /* defined(__COVERITY__) && !defined(__INCLUDE_LEVEL__) */
+#include <systemd/sd-daemon.h>
+#endif /* defined(HAVE_SYSTEMD) */
+
+void evdns_shutdown(int);
+
+#ifdef HAVE_RUST
+// helper function defined in Rust to output a log message indicating if tor is
+// running with Rust enabled. See src/rust/tor_util
+void rust_log_welcome_string(void);
+#endif
+
+/********* PROTOTYPES **********/
+
+static void dumpmemusage(int severity);
+static void dumpstats(int severity); /* log stats */
+static void conn_read_callback(evutil_socket_t fd, short event, void *_conn);
+static void conn_write_callback(evutil_socket_t fd, short event, void *_conn);
+static void second_elapsed_callback(periodic_timer_t *timer, void *args);
+static int conn_close_if_marked(int i);
+static void connection_start_reading_from_linked_conn(connection_t *conn);
+static int connection_should_read_from_linked_conn(connection_t *conn);
+static int run_main_loop_until_done(void);
+static void process_signal(int sig);
+static void shutdown_did_not_work_callback(evutil_socket_t fd, short event,
+ void *arg) ATTR_NORETURN;
+
+/********* START VARIABLES **********/
+
+/* Token bucket for all traffic. */
+token_bucket_rw_t global_bucket;
+
+/* Token bucket for relayed traffic. */
+token_bucket_rw_t global_relayed_bucket;
+
+/* XXX we might want to keep stats about global_relayed_*_bucket too. Or not.*/
+/** How many bytes have we read since we started the process? */
+static uint64_t stats_n_bytes_read = 0;
+/** How many bytes have we written since we started the process? */
+static uint64_t stats_n_bytes_written = 0;
+/** What time did this process start up? */
+time_t time_of_process_start = 0;
+/** How many seconds have we been running? */
+static long stats_n_seconds_working = 0;
+/** How many times have we returned from the main loop successfully? */
+static uint64_t stats_n_main_loop_successes = 0;
+/** How many times have we received an error from the main loop? */
+static uint64_t stats_n_main_loop_errors = 0;
+/** How many times have we returned from the main loop with no events. */
+static uint64_t stats_n_main_loop_idle = 0;
+
+/** How often will we honor SIGNEWNYM requests? */
+#define MAX_SIGNEWNYM_RATE 10
+/** When did we last process a SIGNEWNYM request? */
+static time_t time_of_last_signewnym = 0;
+/** Is there a signewnym request we're currently waiting to handle? */
+static int signewnym_is_pending = 0;
+/** Mainloop event for the deferred signewnym call. */
+static mainloop_event_t *handle_deferred_signewnym_ev = NULL;
+/** How many times have we called newnym? */
+static unsigned newnym_epoch = 0;
+
+/** Smartlist of all open connections. */
+STATIC smartlist_t *connection_array = NULL;
+/** List of connections that have been marked for close and need to be freed
+ * and removed from connection_array. */
+static smartlist_t *closeable_connection_lst = NULL;
+/** List of linked connections that are currently reading data into their
+ * inbuf from their partner's outbuf. */
+static smartlist_t *active_linked_connection_lst = NULL;
+/** Flag: Set to true iff we entered the current libevent main loop via
+ * <b>loop_once</b>. If so, there's no need to trigger a loopexit in order
+ * to handle linked connections. */
+static int called_loop_once = 0;
+/** Flag: if true, it's time to shut down, so the main loop should exit as
+ * soon as possible.
+ */
+static int main_loop_should_exit = 0;
+/** The return value that the main loop should yield when it exits, if
+ * main_loop_should_exit is true.
+ */
+static int main_loop_exit_value = 0;
+
+/** We set this to 1 when we've opened a circuit, so we can print a log
+ * entry to inform the user that Tor is working. We set it to 0 when
+ * we think the fact that we once opened a circuit doesn't mean we can do so
+ * any longer (a big time jump happened, we notice our directory is
+ * heinously out-of-date, etc.).
+ */
+static int can_complete_circuits = 0;
+
+/** How often do we check for router descriptors that we should download
+ * when we have too little directory info? */
+#define GREEDY_DESCRIPTOR_RETRY_INTERVAL (10)
+/** How often do we check for router descriptors that we should download
+ * when we have enough directory info? */
+#define LAZY_DESCRIPTOR_RETRY_INTERVAL (60)
+
+/** Decides our behavior when no logs are configured, or before any
+ * logs have been configured. For 0, we log notice to stdout as normal.
+ * For 1, we log warnings only. For 2, we log nothing.
+ */
+int quiet_level = 0;
+
+/********* END VARIABLES ************/
+
+/****************************************************************************
+ *
+ * This section contains accessors and other methods on the connection_array
+ * variables (which are global within this file and unavailable outside it).
+ *
+ ****************************************************************************/
+
+/** Return 1 if we have successfully built a circuit, and nothing has changed
+ * to make us think that maybe we can't.
+ */
+int
+have_completed_a_circuit(void)
+{
+ return can_complete_circuits;
+}
+
+/** Note that we have successfully built a circuit, so that reachability
+ * testing and introduction points and so on may be attempted. */
+void
+note_that_we_completed_a_circuit(void)
+{
+ can_complete_circuits = 1;
+}
+
+/** Note that something has happened (like a clock jump, or DisableNetwork) to
+ * make us think that maybe we can't complete circuits. */
+void
+note_that_we_maybe_cant_complete_circuits(void)
+{
+ can_complete_circuits = 0;
+}
+
+/** Add <b>conn</b> to the array of connections that we can poll on. The
+ * connection's socket must be set; the connection starts out
+ * non-reading and non-writing.
+ */
+int
+connection_add_impl(connection_t *conn, int is_connecting)
+{
+ tor_assert(conn);
+ tor_assert(SOCKET_OK(conn->s) ||
+ conn->linked ||
+ (conn->type == CONN_TYPE_AP &&
+ TO_EDGE_CONN(conn)->is_dns_request));
+
+ tor_assert(conn->conn_array_index == -1); /* can only connection_add once */
+ conn->conn_array_index = smartlist_len(connection_array);
+ smartlist_add(connection_array, conn);
+
+ (void) is_connecting;
+
+ if (SOCKET_OK(conn->s) || conn->linked) {
+ conn->read_event = tor_event_new(tor_libevent_get_base(),
+ conn->s, EV_READ|EV_PERSIST, conn_read_callback, conn);
+ conn->write_event = tor_event_new(tor_libevent_get_base(),
+ conn->s, EV_WRITE|EV_PERSIST, conn_write_callback, conn);
+ /* XXXX CHECK FOR NULL RETURN! */
+ }
+
+ log_debug(LD_NET,"new conn type %s, socket %d, address %s, n_conns %d.",
+ conn_type_to_string(conn->type), (int)conn->s, conn->address,
+ smartlist_len(connection_array));
+
+ return 0;
+}
+
+/** Tell libevent that we don't care about <b>conn</b> any more. */
+void
+connection_unregister_events(connection_t *conn)
+{
+ if (conn->read_event) {
+ if (event_del(conn->read_event))
+ log_warn(LD_BUG, "Error removing read event for %d", (int)conn->s);
+ tor_free(conn->read_event);
+ }
+ if (conn->write_event) {
+ if (event_del(conn->write_event))
+ log_warn(LD_BUG, "Error removing write event for %d", (int)conn->s);
+ tor_free(conn->write_event);
+ }
+ if (conn->type == CONN_TYPE_AP_DNS_LISTENER) {
+ dnsserv_close_listener(conn);
+ }
+}
+
+/** Remove the connection from the global list, and remove the
+ * corresponding poll entry. Calling this function will shift the last
+ * connection (if any) into the position occupied by conn.
+ */
+int
+connection_remove(connection_t *conn)
+{
+ int current_index;
+ connection_t *tmp;
+
+ tor_assert(conn);
+
+ log_debug(LD_NET,"removing socket %d (type %s), n_conns now %d",
+ (int)conn->s, conn_type_to_string(conn->type),
+ smartlist_len(connection_array));
+
+ if (conn->type == CONN_TYPE_AP && conn->socket_family == AF_UNIX) {
+ log_info(LD_NET, "Closing SOCKS Unix socket connection");
+ }
+
+ control_event_conn_bandwidth(conn);
+
+ tor_assert(conn->conn_array_index >= 0);
+ current_index = conn->conn_array_index;
+ connection_unregister_events(conn); /* This is redundant, but cheap. */
+ if (current_index == smartlist_len(connection_array)-1) { /* at the end */
+ smartlist_del(connection_array, current_index);
+ return 0;
+ }
+
+ /* replace this one with the one at the end */
+ smartlist_del(connection_array, current_index);
+ tmp = smartlist_get(connection_array, current_index);
+ tmp->conn_array_index = current_index;
+
+ return 0;
+}
+
+/** If <b>conn</b> is an edge conn, remove it from the list
+ * of conn's on this circuit. If it's not an edge conn,
+ * flush and send destroys for all circuits on this conn.
+ *
+ * Remove it from connection_array (if applicable) and
+ * from closeable_connection_list.
+ *
+ * Then free it.
+ */
+static void
+connection_unlink(connection_t *conn)
+{
+ connection_about_to_close_connection(conn);
+ if (conn->conn_array_index >= 0) {
+ connection_remove(conn);
+ }
+ if (conn->linked_conn) {
+ conn->linked_conn->linked_conn = NULL;
+ if (! conn->linked_conn->marked_for_close &&
+ conn->linked_conn->reading_from_linked_conn)
+ connection_start_reading(conn->linked_conn);
+ conn->linked_conn = NULL;
+ }
+ smartlist_remove(closeable_connection_lst, conn);
+ smartlist_remove(active_linked_connection_lst, conn);
+ if (conn->type == CONN_TYPE_EXIT) {
+ assert_connection_edge_not_dns_pending(TO_EDGE_CONN(conn));
+ }
+ if (conn->type == CONN_TYPE_OR) {
+ if (!tor_digest_is_zero(TO_OR_CONN(conn)->identity_digest))
+ connection_or_clear_identity(TO_OR_CONN(conn));
+    /* connection_unlink() can only get called if the connection
+     * was already on the closeable list, and it got there by
+     * connection_mark_for_close(), which was called from
+     * connection_or_close_normally() or
+     * connection_or_close_for_error(). So the channel should
+     * already be in CHANNEL_STATE_CLOSING, and
+     * connection_about_to_close_connection() went to
+     * connection_or_about_to_close(), which called channel_closed()
+     * to notify the channel_t layer and close the channel. There is
+     * nothing more to do here for the channel associated with an
+     * orconn.
+     */
+ }
+ connection_free(conn);
+}
+
+/**
+ * Callback: used to activate read events for all linked connections, so
+ * libevent knows to call their read callbacks. This callback runs as a
+ * postloop event, so that the events _it_ activates don't happen until
+ * Libevent has a chance to check for other events.
+ */
+static void
+schedule_active_linked_connections_cb(mainloop_event_t *event, void *arg)
+{
+ (void)event;
+ (void)arg;
+
+ /* All active linked conns should get their read events activated,
+ * so that libevent knows to run their callbacks. */
+ SMARTLIST_FOREACH(active_linked_connection_lst, connection_t *, conn,
+ event_active(conn->read_event, EV_READ, 1));
+}
+
+/** Event that invokes schedule_active_linked_connections_cb. */
+static mainloop_event_t *schedule_active_linked_connections_event = NULL;
+
+/** Initialize the global connection list, closeable connection list,
+ * and active connection list. */
+STATIC void
+init_connection_lists(void)
+{
+ if (!connection_array)
+ connection_array = smartlist_new();
+ if (!closeable_connection_lst)
+ closeable_connection_lst = smartlist_new();
+ if (!active_linked_connection_lst)
+ active_linked_connection_lst = smartlist_new();
+}
+
+/** Schedule <b>conn</b> to be closed. **/
+void
+add_connection_to_closeable_list(connection_t *conn)
+{
+ tor_assert(!smartlist_contains(closeable_connection_lst, conn));
+ tor_assert(conn->marked_for_close);
+ assert_connection_ok(conn, time(NULL));
+ smartlist_add(closeable_connection_lst, conn);
+ mainloop_schedule_postloop_cleanup();
+}
+
+/** Return 1 if conn is on the closeable list, else return 0. */
+int
+connection_is_on_closeable_list(connection_t *conn)
+{
+ return smartlist_contains(closeable_connection_lst, conn);
+}
+
+/** Return true iff conn is in the current poll array. */
+int
+connection_in_array(connection_t *conn)
+{
+ return smartlist_contains(connection_array, conn);
+}
+
+/** Return the global list of all connections. The returned list must not
+ * be modified.
+ */
+MOCK_IMPL(smartlist_t *,
+get_connection_array, (void))
+{
+ if (!connection_array)
+ connection_array = smartlist_new();
+ return connection_array;
+}
+
+/**
+ * Return the amount of network traffic read, in bytes, over the life of this
+ * process.
+ */
+MOCK_IMPL(uint64_t,
+get_bytes_read,(void))
+{
+ return stats_n_bytes_read;
+}
+
+/**
+ * Return the amount of network traffic written, in bytes, over the life of
+ * this process.
+ */
+MOCK_IMPL(uint64_t,
+get_bytes_written,(void))
+{
+ return stats_n_bytes_written;
+}
+
+/**
+ * Increment the amount of network traffic read and written, over the life of
+ * this process.
+ */
+void
+stats_increment_bytes_read_and_written(uint64_t r, uint64_t w)
+{
+ stats_n_bytes_read += r;
+ stats_n_bytes_written += w;
+}
+
+/** Set the event mask on <b>conn</b> to <b>events</b>. (The event
+ * mask is a bitmask whose bits are READ_EVENT and WRITE_EVENT)
+ */
+void
+connection_watch_events(connection_t *conn, watchable_events_t events)
+{
+ if (events & READ_EVENT)
+ connection_start_reading(conn);
+ else
+ connection_stop_reading(conn);
+
+ if (events & WRITE_EVENT)
+ connection_start_writing(conn);
+ else
+ connection_stop_writing(conn);
+}
+
+/** Return true iff <b>conn</b> is listening for read events. */
+int
+connection_is_reading(connection_t *conn)
+{
+ tor_assert(conn);
+
+ return conn->reading_from_linked_conn ||
+ (conn->read_event && event_pending(conn->read_event, EV_READ, NULL));
+}
+
+/** Reset our main loop counters. */
+void
+reset_main_loop_counters(void)
+{
+ stats_n_main_loop_successes = 0;
+ stats_n_main_loop_errors = 0;
+ stats_n_main_loop_idle = 0;
+}
+
+/** Increment the main loop success counter. */
+static void
+increment_main_loop_success_count(void)
+{
+ ++stats_n_main_loop_successes;
+}
+
+/** Get the main loop success counter. */
+uint64_t
+get_main_loop_success_count(void)
+{
+ return stats_n_main_loop_successes;
+}
+
+/** Increment the main loop error counter. */
+static void
+increment_main_loop_error_count(void)
+{
+ ++stats_n_main_loop_errors;
+}
+
+/** Get the main loop error counter. */
+uint64_t
+get_main_loop_error_count(void)
+{
+ return stats_n_main_loop_errors;
+}
+
+/** Increment the main loop idle counter. */
+static void
+increment_main_loop_idle_count(void)
+{
+ ++stats_n_main_loop_idle;
+}
+
+/** Get the main loop idle counter. */
+uint64_t
+get_main_loop_idle_count(void)
+{
+ return stats_n_main_loop_idle;
+}
+
+/** Check whether <b>conn</b> is correct in having (or not having) a
+ * read/write event (passed in <b>ev</b>). On success, return 0. On failure,
+ * log a warning and return -1. */
+static int
+connection_check_event(connection_t *conn, struct event *ev)
+{
+ int bad;
+
+ if (conn->type == CONN_TYPE_AP && TO_EDGE_CONN(conn)->is_dns_request) {
+ /* DNS requests which we launch through the dnsserv.c module do not have
+ * any underlying socket or any underlying linked connection, so they
+ * shouldn't have any attached events either.
+ */
+ bad = ev != NULL;
+ } else {
+ /* Everything else should have an underlying socket, or a linked
+ * connection (which is also tracked with a read_event/write_event pair).
+ */
+ bad = ev == NULL;
+ }
+
+ if (bad) {
+ log_warn(LD_BUG, "Event missing on connection %p [%s;%s]. "
+ "socket=%d. linked=%d. "
+ "is_dns_request=%d. Marked_for_close=%s:%d",
+ conn,
+ conn_type_to_string(conn->type),
+ conn_state_to_string(conn->type, conn->state),
+ (int)conn->s, (int)conn->linked,
+ (conn->type == CONN_TYPE_AP &&
+ TO_EDGE_CONN(conn)->is_dns_request),
+ conn->marked_for_close_file ? conn->marked_for_close_file : "-",
+ conn->marked_for_close
+ );
+ log_backtrace(LOG_WARN, LD_BUG, "Backtrace attached.");
+ return -1;
+ }
+ return 0;
+}
+
+/** Tell the main loop to stop notifying <b>conn</b> of any read events. */
+MOCK_IMPL(void,
+connection_stop_reading,(connection_t *conn))
+{
+ tor_assert(conn);
+
+ if (connection_check_event(conn, conn->read_event) < 0) {
+ return;
+ }
+
+ if (conn->linked) {
+ conn->reading_from_linked_conn = 0;
+ connection_stop_reading_from_linked_conn(conn);
+ } else {
+ if (event_del(conn->read_event))
+ log_warn(LD_NET, "Error from libevent setting read event state for %d "
+ "to unwatched: %s",
+ (int)conn->s,
+ tor_socket_strerror(tor_socket_errno(conn->s)));
+ }
+}
+
+/** Tell the main loop to start notifying <b>conn</b> of any read events. */
+MOCK_IMPL(void,
+connection_start_reading,(connection_t *conn))
+{
+ tor_assert(conn);
+
+ if (connection_check_event(conn, conn->read_event) < 0) {
+ return;
+ }
+
+ if (conn->linked) {
+ conn->reading_from_linked_conn = 1;
+ if (connection_should_read_from_linked_conn(conn))
+ connection_start_reading_from_linked_conn(conn);
+ } else {
+ if (event_add(conn->read_event, NULL))
+ log_warn(LD_NET, "Error from libevent setting read event state for %d "
+ "to watched: %s",
+ (int)conn->s,
+ tor_socket_strerror(tor_socket_errno(conn->s)));
+ }
+}
+
+/** Return true iff <b>conn</b> is listening for write events. */
+int
+connection_is_writing(connection_t *conn)
+{
+ tor_assert(conn);
+
+ return conn->writing_to_linked_conn ||
+ (conn->write_event && event_pending(conn->write_event, EV_WRITE, NULL));
+}
+
+/** Tell the main loop to stop notifying <b>conn</b> of any write events. */
+MOCK_IMPL(void,
+connection_stop_writing,(connection_t *conn))
+{
+ tor_assert(conn);
+
+ if (connection_check_event(conn, conn->write_event) < 0) {
+ return;
+ }
+
+ if (conn->linked) {
+ conn->writing_to_linked_conn = 0;
+ if (conn->linked_conn)
+ connection_stop_reading_from_linked_conn(conn->linked_conn);
+ } else {
+ if (event_del(conn->write_event))
+ log_warn(LD_NET, "Error from libevent setting write event state for %d "
+ "to unwatched: %s",
+ (int)conn->s,
+ tor_socket_strerror(tor_socket_errno(conn->s)));
+ }
+}
+
+/** Tell the main loop to start notifying <b>conn</b> of any write events. */
+MOCK_IMPL(void,
+connection_start_writing,(connection_t *conn))
+{
+ tor_assert(conn);
+
+ if (connection_check_event(conn, conn->write_event) < 0) {
+ return;
+ }
+
+ if (conn->linked) {
+ conn->writing_to_linked_conn = 1;
+ if (conn->linked_conn &&
+ connection_should_read_from_linked_conn(conn->linked_conn))
+ connection_start_reading_from_linked_conn(conn->linked_conn);
+ } else {
+ if (event_add(conn->write_event, NULL))
+ log_warn(LD_NET, "Error from libevent setting write event state for %d "
+ "to watched: %s",
+ (int)conn->s,
+ tor_socket_strerror(tor_socket_errno(conn->s)));
+ }
+}
+
+/** Return true iff <b>conn</b> is a linked conn, and reading from the conn
+ * linked to it would be good and feasible. (Reading is "feasible" if the
+ * other conn exists and has data in its outbuf, and is "good" if we have our
+ * reading_from_linked_conn flag set and the other conn has its
+ * writing_to_linked_conn flag set.) */
+static int
+connection_should_read_from_linked_conn(connection_t *conn)
+{
+ if (conn->linked && conn->reading_from_linked_conn) {
+ if (! conn->linked_conn ||
+ (conn->linked_conn->writing_to_linked_conn &&
+ buf_datalen(conn->linked_conn->outbuf)))
+ return 1;
+ }
+ return 0;
+}
+
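+/* Illustrative summary (a sketch): for a linked pair (a, b),
+ * connection_should_read_from_linked_conn(a) is true only when all of the
+ * following hold:
+ *
+ *     a->linked                      // a is a linked conn at all
+ *     a->reading_from_linked_conn    // a wants the data ("good")
+ *     b->writing_to_linked_conn      // b is offering data ("good")
+ *     buf_datalen(b->outbuf) > 0     // b has data queued ("feasible")
+ *
+ * with one exception: a missing peer (a->linked_conn == NULL) also counts
+ * as readable, so that a can notice its peer is gone. */
+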
+/** Event to run 'shutdown did not work callback'. */
+static struct event *shutdown_did_not_work_event = NULL;
+
+/** Failsafe measure that should never actually be necessary: If
+ * tor_shutdown_event_loop_and_exit() somehow doesn't successfully exit the
+ * event loop, then this callback will kill Tor with an assertion failure
+ * ten seconds later.
+ */
+static void
+shutdown_did_not_work_callback(evutil_socket_t fd, short event, void *arg)
+{
+ // LCOV_EXCL_START
+ (void) fd;
+ (void) event;
+ (void) arg;
+ tor_assert_unreached();
+ // LCOV_EXCL_STOP
+}
+
+#ifdef ENABLE_RESTART_DEBUGGING
+static struct event *tor_shutdown_event_loop_for_restart_event = NULL;
+static void
+tor_shutdown_event_loop_for_restart_cb(
+ evutil_socket_t fd, short event, void *arg)
+{
+ (void)fd;
+ (void)event;
+ (void)arg;
+ tor_event_free(tor_shutdown_event_loop_for_restart_event);
+ tor_shutdown_event_loop_and_exit(0);
+}
+#endif
+
+/**
+ * After finishing the current callback (if any), shut down the main loop,
+ * clean up the process, and exit with <b>exitcode</b>.
+ */
+void
+tor_shutdown_event_loop_and_exit(int exitcode)
+{
+ if (main_loop_should_exit)
+ return; /* Ignore multiple calls to this function. */
+
+ main_loop_should_exit = 1;
+ main_loop_exit_value = exitcode;
+
+ /* Die with an assertion failure in ten seconds, if for some reason we don't
+ * exit normally. */
+  /* XXXX We should consider removing this code if it's never used. */
+ struct timeval ten_seconds = { 10, 0 };
+ shutdown_did_not_work_event = tor_evtimer_new(
+ tor_libevent_get_base(),
+ shutdown_did_not_work_callback, NULL);
+ event_add(shutdown_did_not_work_event, &ten_seconds);
+
+ /* Unlike exit_loop_after_delay(), exit_loop_after_callback
+ * prevents other callbacks from running. */
+ tor_libevent_exit_loop_after_callback(tor_libevent_get_base());
+}
+
+/** Return true iff tor_shutdown_event_loop_and_exit() has been called. */
+int
+tor_event_loop_shutdown_is_pending(void)
+{
+ return main_loop_should_exit;
+}
+
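+/* Illustrative usage (a sketch; disk_is_corrupt is a made-up stand-in for
+ * any fatal condition): callers request a clean exit instead of calling
+ * exit() directly, and long-running code can poll for a pending shutdown:
+ *
+ *     if (disk_is_corrupt)
+ *       tor_shutdown_event_loop_and_exit(1);
+ *     ...
+ *     if (tor_event_loop_shutdown_is_pending())
+ *       return;  // the loop is about to stop; skip further work
+ */
+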
+/** Helper: Tell the main loop to begin reading bytes into <b>conn</b> from
+ * its linked connection, if it is not doing so already. Called by
+ * connection_start_reading and connection_start_writing as appropriate. */
+static void
+connection_start_reading_from_linked_conn(connection_t *conn)
+{
+ tor_assert(conn);
+ tor_assert(conn->linked == 1);
+
+ if (!conn->active_on_link) {
+ conn->active_on_link = 1;
+ smartlist_add(active_linked_connection_lst, conn);
+ mainloop_event_activate(schedule_active_linked_connections_event);
+ } else {
+ tor_assert(smartlist_contains(active_linked_connection_lst, conn));
+ }
+}
+
+/** Tell the main loop to stop reading bytes into <b>conn</b> from its linked
+ * connection, if it is currently doing so. Called by connection_stop_reading,
+ * connection_stop_writing, and connection_read. */
+void
+connection_stop_reading_from_linked_conn(connection_t *conn)
+{
+ tor_assert(conn);
+ tor_assert(conn->linked == 1);
+
+ if (conn->active_on_link) {
+ conn->active_on_link = 0;
+ /* FFFF We could keep an index here so we can smartlist_del
+ * cleanly. On the other hand, this doesn't show up on profiles,
+ * so let's leave it alone for now. */
+ smartlist_remove(active_linked_connection_lst, conn);
+ } else {
+ tor_assert(!smartlist_contains(active_linked_connection_lst, conn));
+ }
+}
+
+/** Close all connections that have been scheduled to get closed. */
+STATIC void
+close_closeable_connections(void)
+{
+ int i;
+ for (i = 0; i < smartlist_len(closeable_connection_lst); ) {
+ connection_t *conn = smartlist_get(closeable_connection_lst, i);
+ if (conn->conn_array_index < 0) {
+ connection_unlink(conn); /* blow it away right now */
+ } else {
+ if (!conn_close_if_marked(conn->conn_array_index))
+ ++i;
+ }
+ }
+}
+
+/** Count moribund connections for the OOS handler */
+MOCK_IMPL(int,
+connection_count_moribund, (void))
+{
+ int moribund = 0;
+
+ /*
+ * Count things we'll try to kill when close_closeable_connections()
+ * runs next.
+ */
+ SMARTLIST_FOREACH_BEGIN(closeable_connection_lst, connection_t *, conn) {
+ if (SOCKET_OK(conn->s) && connection_is_moribund(conn)) ++moribund;
+ } SMARTLIST_FOREACH_END(conn);
+
+ return moribund;
+}
+
+/** Libevent callback: this gets invoked when (connection_t*)<b>conn</b> has
+ * some data to read. */
+static void
+conn_read_callback(evutil_socket_t fd, short event, void *_conn)
+{
+ connection_t *conn = _conn;
+ (void)fd;
+ (void)event;
+
+ log_debug(LD_NET,"socket %d wants to read.",(int)conn->s);
+
+ /* assert_connection_ok(conn, time(NULL)); */
+
+ if (connection_handle_read(conn) < 0) {
+ if (!conn->marked_for_close) {
+#ifndef _WIN32
+ log_warn(LD_BUG,"Unhandled error on read for %s connection "
+ "(fd %d); removing",
+ conn_type_to_string(conn->type), (int)conn->s);
+ tor_fragile_assert();
+#endif /* !defined(_WIN32) */
+ if (CONN_IS_EDGE(conn))
+ connection_edge_end_errno(TO_EDGE_CONN(conn));
+ connection_mark_for_close(conn);
+ }
+ }
+ assert_connection_ok(conn, time(NULL));
+
+ if (smartlist_len(closeable_connection_lst))
+ close_closeable_connections();
+}
+
+/** Libevent callback: this gets invoked when (connection_t*)<b>conn</b> has
+ * some data to write. */
+static void
+conn_write_callback(evutil_socket_t fd, short events, void *_conn)
+{
+ connection_t *conn = _conn;
+ (void)fd;
+ (void)events;
+
+ LOG_FN_CONN(conn, (LOG_DEBUG, LD_NET, "socket %d wants to write.",
+ (int)conn->s));
+
+ /* assert_connection_ok(conn, time(NULL)); */
+
+ if (connection_handle_write(conn, 0) < 0) {
+ if (!conn->marked_for_close) {
+ /* this connection is broken. remove it. */
+ log_fn(LOG_WARN,LD_BUG,
+ "unhandled error on write for %s connection (fd %d); removing",
+ conn_type_to_string(conn->type), (int)conn->s);
+ tor_fragile_assert();
+ if (CONN_IS_EDGE(conn)) {
+ /* otherwise we cry wolf about duplicate close */
+ edge_connection_t *edge_conn = TO_EDGE_CONN(conn);
+ if (!edge_conn->end_reason)
+ edge_conn->end_reason = END_STREAM_REASON_INTERNAL;
+ edge_conn->edge_has_sent_end = 1;
+ }
+ connection_close_immediate(conn); /* So we don't try to flush. */
+ connection_mark_for_close(conn);
+ }
+ }
+ assert_connection_ok(conn, time(NULL));
+
+ if (smartlist_len(closeable_connection_lst))
+ close_closeable_connections();
+}
+
+/** If the connection at connection_array[i] is marked for close, then:
+ * - If it has data that it wants to flush, try to flush it.
+ * - If it _still_ has data to flush, and conn->hold_open_until_flushed is
+ * true, then leave the connection open and return.
+ * - Otherwise, remove the connection from connection_array and from
+ * all other lists, close it, and free it.
+ * Returns 1 if the connection was closed, 0 otherwise.
+ */
+static int
+conn_close_if_marked(int i)
+{
+ connection_t *conn;
+ int retval;
+ time_t now;
+
+ conn = smartlist_get(connection_array, i);
+ if (!conn->marked_for_close)
+ return 0; /* nothing to see here, move along */
+ now = time(NULL);
+ assert_connection_ok(conn, now);
+ /* assert_all_pending_dns_resolves_ok(); */
+
+ log_debug(LD_NET,"Cleaning up connection (fd "TOR_SOCKET_T_FORMAT").",
+ conn->s);
+
+ /* If the connection we are about to close was trying to connect to
+ a proxy server and failed, the client won't be able to use that
+ proxy. We should warn the user about this. */
+ if (conn->proxy_state == PROXY_INFANT)
+ log_failed_proxy_connection(conn);
+
+ if ((SOCKET_OK(conn->s) || conn->linked_conn) &&
+ connection_wants_to_flush(conn)) {
+ /* s == -1 means it's an incomplete edge connection, or that the socket
+ * has already been closed as unflushable. */
+ ssize_t sz = connection_bucket_write_limit(conn, now);
+ if (!conn->hold_open_until_flushed)
+ log_info(LD_NET,
+ "Conn (addr %s, fd %d, type %s, state %d) marked, but wants "
+ "to flush %d bytes. (Marked at %s:%d)",
+ escaped_safe_str_client(conn->address),
+ (int)conn->s, conn_type_to_string(conn->type), conn->state,
+ (int)conn->outbuf_flushlen,
+ conn->marked_for_close_file, conn->marked_for_close);
+ if (conn->linked_conn) {
+ retval = buf_move_to_buf(conn->linked_conn->inbuf, conn->outbuf,
+ &conn->outbuf_flushlen);
+ if (retval >= 0) {
+ /* The linked conn will notice that it has data when it notices that
+ * we're gone. */
+ connection_start_reading_from_linked_conn(conn->linked_conn);
+ }
+ log_debug(LD_GENERAL, "Flushed last %d bytes from a linked conn; "
+ "%d left; flushlen %d; wants-to-flush==%d", retval,
+ (int)connection_get_outbuf_len(conn),
+ (int)conn->outbuf_flushlen,
+ connection_wants_to_flush(conn));
+ } else if (connection_speaks_cells(conn)) {
+ if (conn->state == OR_CONN_STATE_OPEN) {
+ retval = buf_flush_to_tls(conn->outbuf, TO_OR_CONN(conn)->tls, sz,
+ &conn->outbuf_flushlen);
+ } else
+ retval = -1; /* never flush non-open broken tls connections */
+ } else {
+ retval = buf_flush_to_socket(conn->outbuf, conn->s, sz,
+ &conn->outbuf_flushlen);
+ }
+ if (retval >= 0 && /* Technically, we could survive things like
+ TLS_WANT_WRITE here. But don't bother for now. */
+ conn->hold_open_until_flushed && connection_wants_to_flush(conn)) {
+ if (retval > 0) {
+ LOG_FN_CONN(conn, (LOG_INFO,LD_NET,
+ "Holding conn (fd %d) open for more flushing.",
+ (int)conn->s));
+ conn->timestamp_last_write_allowed = now; /* reset so we can flush
+ * more */
+ } else if (sz == 0) {
+ /* Also, retval==0. If we get here, we didn't want to write anything
+ * (because of rate-limiting) and we didn't. */
+
+ /* Connection must flush before closing, but it's being rate-limited.
+ * Let's remove from Libevent, and mark it as blocked on bandwidth
+ * so it will be re-added on next token bucket refill. Prevents
+ * busy Libevent loops where we keep ending up here and returning
+ * 0 until we are no longer blocked on bandwidth.
+ */
+ connection_consider_empty_read_buckets(conn);
+ connection_consider_empty_write_buckets(conn);
+
+ /* Make sure that consider_empty_buckets really disabled the
+ * connection: */
+ if (BUG(connection_is_writing(conn))) {
+ connection_write_bw_exhausted(conn, true);
+ }
+ if (BUG(connection_is_reading(conn))) {
+          /* XXXX+ We should make this code unreachable; if a connection is
+           * marked for close and flushing, there is no point in reading on
+           * it at all. Further, checking at this point is a bit of a hack:
+           * it would make much more sense to react in
+           * connection_handle_read_impl, or to just stop reading in
+           * mark_and_flush */
+ connection_read_bw_exhausted(conn, true/* kludge. */);
+ }
+ }
+ return 0;
+ }
+ if (connection_wants_to_flush(conn)) {
+ log_fn(LOG_INFO, LD_NET, "We stalled too much while trying to write %d "
+ "bytes to address %s. If this happens a lot, either "
+ "something is wrong with your network connection, or "
+ "something is wrong with theirs. "
+ "(fd %d, type %s, state %d, marked at %s:%d).",
+ (int)connection_get_outbuf_len(conn),
+ escaped_safe_str_client(conn->address),
+ (int)conn->s, conn_type_to_string(conn->type), conn->state,
+ conn->marked_for_close_file,
+ conn->marked_for_close);
+ }
+ }
+
+ connection_unlink(conn); /* unlink, remove, free */
+ return 1;
+}
+
+/** Implementation for directory_all_unreachable. This is done in a callback,
+ * since otherwise it would complicate Tor's control-flow graph beyond all
+ * reason.
+ */
+static void
+directory_all_unreachable_cb(mainloop_event_t *event, void *arg)
+{
+ (void)event;
+ (void)arg;
+
+ connection_t *conn;
+
+ while ((conn = connection_get_by_type_state(CONN_TYPE_AP,
+ AP_CONN_STATE_CIRCUIT_WAIT))) {
+ entry_connection_t *entry_conn = TO_ENTRY_CONN(conn);
+ log_notice(LD_NET,
+ "Is your network connection down? "
+ "Failing connection to '%s:%d'.",
+ safe_str_client(entry_conn->socks_request->address),
+ entry_conn->socks_request->port);
+ connection_mark_unattached_ap(entry_conn,
+ END_STREAM_REASON_NET_UNREACHABLE);
+ }
+ control_event_general_error("DIR_ALL_UNREACHABLE");
+}
+
+static mainloop_event_t *directory_all_unreachable_cb_event = NULL;
+
+/** We've just tried every dirserver we know about, and none of
+ * them were reachable. Assume the network is down. Change state
+ * so next time an application connection arrives we'll delay it
+ * and try another directory fetch. Kill off all the circuit_wait
+ * streams that are waiting now, since they will all timeout anyway.
+ */
+void
+directory_all_unreachable(time_t now)
+{
+ (void)now;
+
+ reset_uptime(); /* reset it */
+
+ if (!directory_all_unreachable_cb_event) {
+ directory_all_unreachable_cb_event =
+ mainloop_event_new(directory_all_unreachable_cb, NULL);
+ tor_assert(directory_all_unreachable_cb_event);
+ }
+
+ mainloop_event_activate(directory_all_unreachable_cb_event);
+}
+
+/** This function is called whenever we successfully pull down some new
+ * network statuses or server descriptors. */
+void
+directory_info_has_arrived(time_t now, int from_cache, int suppress_logs)
+{
+ const or_options_t *options = get_options();
+
+ /* if we have enough dir info, then update our guard status with
+ * whatever we just learned. */
+ int invalidate_circs = guards_update_all();
+
+ if (invalidate_circs) {
+ circuit_mark_all_unused_circs();
+ circuit_mark_all_dirty_circs_as_unusable();
+ }
+
+ if (!router_have_minimum_dir_info()) {
+ int quiet = suppress_logs || from_cache ||
+ directory_too_idle_to_fetch_descriptors(options, now);
+ tor_log(quiet ? LOG_INFO : LOG_NOTICE, LD_DIR,
+ "I learned some more directory information, but not enough to "
+ "build a circuit: %s", get_dir_info_status_string());
+ update_all_descriptor_downloads(now);
+ return;
+ } else {
+ if (directory_fetches_from_authorities(options)) {
+ update_all_descriptor_downloads(now);
+ }
+
+ /* Don't even bother trying to get extrainfo until the rest of our
+ * directory info is up-to-date */
+ if (options->DownloadExtraInfo)
+ update_extrainfo_downloads(now);
+ }
+
+ if (server_mode(options) && !net_is_disabled() && !from_cache &&
+ (have_completed_a_circuit() || !any_predicted_circuits(now)))
+ router_do_reachability_checks(1, 1);
+}
+
+/** Perform regular maintenance tasks for a single connection. This
+ * function gets run once per second per connection by run_scheduled_events.
+ */
+static void
+run_connection_housekeeping(int i, time_t now)
+{
+ cell_t cell;
+ connection_t *conn = smartlist_get(connection_array, i);
+ const or_options_t *options = get_options();
+ or_connection_t *or_conn;
+ channel_t *chan = NULL;
+ int have_any_circuits;
+ int past_keepalive =
+ now >= conn->timestamp_last_write_allowed + options->KeepalivePeriod;
+
+ if (conn->outbuf && !connection_get_outbuf_len(conn) &&
+ conn->type == CONN_TYPE_OR)
+ TO_OR_CONN(conn)->timestamp_lastempty = now;
+
+ if (conn->marked_for_close) {
+ /* nothing to do here */
+ return;
+ }
+
+  /* Expire any directory connections that haven't been active (sent data
+   * if a server, or received data if a client) for 5 minutes. */
+ if (conn->type == CONN_TYPE_DIR &&
+ ((DIR_CONN_IS_SERVER(conn) &&
+ conn->timestamp_last_write_allowed
+ + options->TestingDirConnectionMaxStall < now) ||
+ (!DIR_CONN_IS_SERVER(conn) &&
+ conn->timestamp_last_read_allowed
+ + options->TestingDirConnectionMaxStall < now))) {
+ log_info(LD_DIR,"Expiring wedged directory conn (fd %d, purpose %d)",
+ (int)conn->s, conn->purpose);
+ /* This check is temporary; it's to let us know whether we should consider
+ * parsing partial serverdesc responses. */
+ if (conn->purpose == DIR_PURPOSE_FETCH_SERVERDESC &&
+ connection_get_inbuf_len(conn) >= 1024) {
+ log_info(LD_DIR,"Trying to extract information from wedged server desc "
+ "download.");
+ connection_dir_reached_eof(TO_DIR_CONN(conn));
+ } else {
+ connection_mark_for_close(conn);
+ }
+ return;
+ }
+
+ if (!connection_speaks_cells(conn))
+ return; /* we're all done here, the rest is just for OR conns */
+
+  /* If we haven't flushed to an OR connection for a while, then either nuke
+   * the connection or send a keepalive, depending on its state. */
+
+ or_conn = TO_OR_CONN(conn);
+ tor_assert(conn->outbuf);
+
+ chan = TLS_CHAN_TO_BASE(or_conn->chan);
+ tor_assert(chan);
+
+ if (channel_num_circuits(chan) != 0) {
+ have_any_circuits = 1;
+ chan->timestamp_last_had_circuits = now;
+ } else {
+ have_any_circuits = 0;
+ }
+
+ if (channel_is_bad_for_new_circs(TLS_CHAN_TO_BASE(or_conn->chan)) &&
+ ! have_any_circuits) {
+ /* It's bad for new circuits, and has no unmarked circuits on it:
+ * mark it now. */
+ log_info(LD_OR,
+ "Expiring non-used OR connection to fd %d (%s:%d) [Too old].",
+ (int)conn->s, conn->address, conn->port);
+ if (conn->state == OR_CONN_STATE_CONNECTING)
+ connection_or_connect_failed(TO_OR_CONN(conn),
+ END_OR_CONN_REASON_TIMEOUT,
+ "Tor gave up on the connection");
+ connection_or_close_normally(TO_OR_CONN(conn), 1);
+ } else if (!connection_state_is_open(conn)) {
+ if (past_keepalive) {
+ /* We never managed to actually get this connection open and happy. */
+ log_info(LD_OR,"Expiring non-open OR connection to fd %d (%s:%d).",
+ (int)conn->s,conn->address, conn->port);
+ connection_or_close_normally(TO_OR_CONN(conn), 0);
+ }
+ } else if (we_are_hibernating() &&
+ ! have_any_circuits &&
+ !connection_get_outbuf_len(conn)) {
+    /* We're hibernating or shutting down, there are no circuits, and
+     * nothing to flush. */
+ log_info(LD_OR,"Expiring non-used OR connection to fd %d (%s:%d) "
+ "[Hibernating or exiting].",
+ (int)conn->s,conn->address, conn->port);
+ connection_or_close_normally(TO_OR_CONN(conn), 1);
+ } else if (!have_any_circuits &&
+ now - or_conn->idle_timeout >=
+ chan->timestamp_last_had_circuits) {
+ log_info(LD_OR,"Expiring non-used OR connection %"PRIu64" to fd %d "
+ "(%s:%d) [no circuits for %d; timeout %d; %scanonical].",
+ (chan->global_identifier),
+ (int)conn->s, conn->address, conn->port,
+ (int)(now - chan->timestamp_last_had_circuits),
+ or_conn->idle_timeout,
+ or_conn->is_canonical ? "" : "non");
+ connection_or_close_normally(TO_OR_CONN(conn), 0);
+ } else if (
+ now >= or_conn->timestamp_lastempty + options->KeepalivePeriod*10 &&
+ now >=
+ conn->timestamp_last_write_allowed + options->KeepalivePeriod*10) {
+ log_fn(LOG_PROTOCOL_WARN,LD_PROTOCOL,
+ "Expiring stuck OR connection to fd %d (%s:%d). (%d bytes to "
+ "flush; %d seconds since last write)",
+ (int)conn->s, conn->address, conn->port,
+ (int)connection_get_outbuf_len(conn),
+ (int)(now-conn->timestamp_last_write_allowed));
+ connection_or_close_normally(TO_OR_CONN(conn), 0);
+ } else if (past_keepalive && !connection_get_outbuf_len(conn)) {
+ /* send a padding cell */
+ log_fn(LOG_DEBUG,LD_OR,"Sending keepalive to (%s:%d)",
+ conn->address, conn->port);
+ memset(&cell,0,sizeof(cell_t));
+ cell.command = CELL_PADDING;
+ connection_or_write_cell_to_buf(&cell, or_conn);
+ } else {
+ channelpadding_decide_to_pad_channel(chan);
+ }
+}
+
+/** Honor a NEWNYM request: make future requests unlinkable to past
+ * requests. */
+static void
+signewnym_impl(time_t now)
+{
+ const or_options_t *options = get_options();
+ if (!proxy_mode(options)) {
+ log_info(LD_CONTROL, "Ignoring SIGNAL NEWNYM because client functionality "
+ "is disabled.");
+ return;
+ }
+
+ circuit_mark_all_dirty_circs_as_unusable();
+ addressmap_clear_transient();
+ hs_client_purge_state();
+ time_of_last_signewnym = now;
+ signewnym_is_pending = 0;
+
+ ++newnym_epoch;
+
+ control_event_signal(SIGNEWNYM);
+}
+
+/** Callback: run a deferred signewnym. */
+static void
+handle_deferred_signewnym_cb(mainloop_event_t *event, void *arg)
+{
+ (void)event;
+ (void)arg;
+ log_info(LD_CONTROL, "Honoring delayed NEWNYM request");
+ signewnym_impl(time(NULL));
+}
+
+/** Return the number of times that signewnym has been called. */
+unsigned
+get_signewnym_epoch(void)
+{
+ return newnym_epoch;
+}
+
+/** True iff we have initialized all the members of <b>periodic_events</b>.
+ * Used to prevent double-initialization. */
+static int periodic_events_initialized = 0;
+
+/* Declare all the timer callback functions... */
+#undef CALLBACK
+#define CALLBACK(name) \
+ static int name ## _callback(time_t, const or_options_t *)
+CALLBACK(add_entropy);
+CALLBACK(check_authority_cert);
+CALLBACK(check_canonical_channels);
+CALLBACK(check_descriptor);
+CALLBACK(check_dns_honesty);
+CALLBACK(check_ed_keys);
+CALLBACK(check_expired_networkstatus);
+CALLBACK(check_for_reachability_bw);
+CALLBACK(check_onion_keys_expiry_time);
+CALLBACK(clean_caches);
+CALLBACK(clean_consdiffmgr);
+CALLBACK(dirvote);
+CALLBACK(downrate_stability);
+CALLBACK(expire_old_circuits_serverside);
+CALLBACK(fetch_networkstatus);
+CALLBACK(heartbeat);
+CALLBACK(hs_service);
+CALLBACK(launch_descriptor_fetches);
+CALLBACK(launch_reachability_tests);
+CALLBACK(reachability_warnings);
+CALLBACK(record_bridge_stats);
+CALLBACK(rend_cache_failure_clean);
+CALLBACK(reset_padding_counts);
+CALLBACK(retry_dns);
+CALLBACK(retry_listeners);
+CALLBACK(rotate_onion_key);
+CALLBACK(rotate_x509_certificate);
+CALLBACK(save_stability);
+CALLBACK(save_state);
+CALLBACK(write_bridge_ns);
+CALLBACK(write_stats_file);
+
+#undef CALLBACK
+
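+/* Illustrative expansion: given the CALLBACK macro above, a line such as
+ *
+ *     CALLBACK(heartbeat);
+ *
+ * declares
+ *
+ *     static int heartbeat_callback(time_t, const or_options_t *);
+ */
+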
+/* Now we declare an array of periodic_event_item_t for each periodic event */
+#define CALLBACK(name, r, f) PERIODIC_EVENT(name, r, f)
+
+STATIC periodic_event_item_t periodic_events[] = {
+  /* Everyone needs to run these. */
+ CALLBACK(add_entropy, PERIODIC_EVENT_ROLE_ALL, 0),
+ CALLBACK(check_expired_networkstatus, PERIODIC_EVENT_ROLE_ALL, 0),
+ CALLBACK(clean_caches, PERIODIC_EVENT_ROLE_ALL, 0),
+ CALLBACK(fetch_networkstatus, PERIODIC_EVENT_ROLE_ALL,
+ PERIODIC_EVENT_FLAG_NEED_NET),
+ CALLBACK(heartbeat, PERIODIC_EVENT_ROLE_ALL, 0),
+ CALLBACK(launch_descriptor_fetches, PERIODIC_EVENT_ROLE_ALL,
+ PERIODIC_EVENT_FLAG_NEED_NET),
+ CALLBACK(reset_padding_counts, PERIODIC_EVENT_ROLE_ALL, 0),
+ CALLBACK(retry_listeners, PERIODIC_EVENT_ROLE_ALL,
+ PERIODIC_EVENT_FLAG_NEED_NET),
+ CALLBACK(save_state, PERIODIC_EVENT_ROLE_ALL, 0),
+ CALLBACK(rotate_x509_certificate, PERIODIC_EVENT_ROLE_ALL, 0),
+ CALLBACK(write_stats_file, PERIODIC_EVENT_ROLE_ALL, 0),
+
+ /* Routers (bridge and relay) only. */
+ CALLBACK(check_descriptor, PERIODIC_EVENT_ROLE_ROUTER,
+ PERIODIC_EVENT_FLAG_NEED_NET),
+ CALLBACK(check_ed_keys, PERIODIC_EVENT_ROLE_ROUTER, 0),
+ CALLBACK(check_for_reachability_bw, PERIODIC_EVENT_ROLE_ROUTER,
+ PERIODIC_EVENT_FLAG_NEED_NET),
+ CALLBACK(check_onion_keys_expiry_time, PERIODIC_EVENT_ROLE_ROUTER, 0),
+  CALLBACK(expire_old_circuits_serverside, PERIODIC_EVENT_ROLE_ROUTER,
+ PERIODIC_EVENT_FLAG_NEED_NET),
+ CALLBACK(reachability_warnings, PERIODIC_EVENT_ROLE_ROUTER,
+ PERIODIC_EVENT_FLAG_NEED_NET),
+ CALLBACK(retry_dns, PERIODIC_EVENT_ROLE_ROUTER, 0),
+ CALLBACK(rotate_onion_key, PERIODIC_EVENT_ROLE_ROUTER, 0),
+
+ /* Authorities (bridge and directory) only. */
+ CALLBACK(downrate_stability, PERIODIC_EVENT_ROLE_AUTHORITIES, 0),
+ CALLBACK(launch_reachability_tests, PERIODIC_EVENT_ROLE_AUTHORITIES,
+ PERIODIC_EVENT_FLAG_NEED_NET),
+ CALLBACK(save_stability, PERIODIC_EVENT_ROLE_AUTHORITIES, 0),
+
+ /* Directory authority only. */
+ CALLBACK(check_authority_cert, PERIODIC_EVENT_ROLE_DIRAUTH, 0),
+ CALLBACK(dirvote, PERIODIC_EVENT_ROLE_DIRAUTH, PERIODIC_EVENT_FLAG_NEED_NET),
+
+ /* Relay only. */
+ CALLBACK(check_canonical_channels, PERIODIC_EVENT_ROLE_RELAY,
+ PERIODIC_EVENT_FLAG_NEED_NET),
+ CALLBACK(check_dns_honesty, PERIODIC_EVENT_ROLE_RELAY,
+ PERIODIC_EVENT_FLAG_NEED_NET),
+
+ /* Hidden Service service only. */
+ CALLBACK(hs_service, PERIODIC_EVENT_ROLE_HS_SERVICE,
+ PERIODIC_EVENT_FLAG_NEED_NET),
+
+ /* Bridge only. */
+ CALLBACK(record_bridge_stats, PERIODIC_EVENT_ROLE_BRIDGE, 0),
+
+ /* Client only. */
+ CALLBACK(rend_cache_failure_clean, PERIODIC_EVENT_ROLE_CLIENT, 0),
+
+ /* Bridge Authority only. */
+ CALLBACK(write_bridge_ns, PERIODIC_EVENT_ROLE_BRIDGEAUTH, 0),
+
+ /* Directory server only. */
+ CALLBACK(clean_consdiffmgr, PERIODIC_EVENT_ROLE_DIRSERVER, 0),
+
+ END_OF_PERIODIC_EVENTS
+};
+#undef CALLBACK
+
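+/* Note on the table above: CALLBACK(name, r, f) wraps
+ * PERIODIC_EVENT(name, r, f), which is defined in periodic.h, so the exact
+ * field layout is assumed here; but each entry ties name_callback() to the
+ * string #name plus a role mask and flags. That pairing is what lets
+ * find_periodic_event() below look events up by string, and lets
+ * rescan_periodic_events() filter them by role. For example,
+ * CALLBACK(heartbeat, PERIODIC_EVENT_ROLE_ALL, 0) registers
+ * heartbeat_callback() under the name "heartbeat" for every role. */
+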
+/* These are pointers to members of periodic_events[] that are used to
+ * implement particular callbacks. We keep them separate here so that we
+ * can access them by name. We also keep them inside periodic_events[]
+ * so that we can implement "reset all timers" in a reasonable way. */
+static periodic_event_item_t *check_descriptor_event=NULL;
+static periodic_event_item_t *dirvote_event=NULL;
+static periodic_event_item_t *fetch_networkstatus_event=NULL;
+static periodic_event_item_t *launch_descriptor_fetches_event=NULL;
+static periodic_event_item_t *check_dns_honesty_event=NULL;
+static periodic_event_item_t *save_state_event=NULL;
+
+/** Reset all the periodic events so we'll do all our actions again as if we
+ * just started up.
+ * Useful if our clock just moved back a long time from the future,
+ * so we don't wait until that future arrives again before acting.
+ */
+void
+reset_all_main_loop_timers(void)
+{
+ int i;
+ for (i = 0; periodic_events[i].name; ++i) {
+ periodic_event_reschedule(&periodic_events[i]);
+ }
+}
+
+/** Return the member of periodic_events[] whose name is <b>name</b>.
+ * Return NULL if no such event is found.
+ */
+static periodic_event_item_t *
+find_periodic_event(const char *name)
+{
+ int i;
+ for (i = 0; periodic_events[i].name; ++i) {
+ if (strcmp(name, periodic_events[i].name) == 0)
+ return &periodic_events[i];
+ }
+ return NULL;
+}
+
+/** Return a bitmask of the roles this tor instance is configured for,
+ * according to the given options. */
+STATIC int
+get_my_roles(const or_options_t *options)
+{
+ tor_assert(options);
+
+ int roles = 0;
+ int is_bridge = options->BridgeRelay;
+ int is_client = options_any_client_port_set(options);
+ int is_relay = server_mode(options);
+ int is_dirauth = authdir_mode_v3(options);
+ int is_bridgeauth = authdir_mode_bridge(options);
+ int is_hidden_service = !!hs_service_get_num_services() ||
+ !!rend_num_services();
+ int is_dirserver = dir_server_mode(options);
+
+ if (is_bridge) roles |= PERIODIC_EVENT_ROLE_BRIDGE;
+ if (is_client) roles |= PERIODIC_EVENT_ROLE_CLIENT;
+ if (is_relay) roles |= PERIODIC_EVENT_ROLE_RELAY;
+ if (is_dirauth) roles |= PERIODIC_EVENT_ROLE_DIRAUTH;
+ if (is_bridgeauth) roles |= PERIODIC_EVENT_ROLE_BRIDGEAUTH;
+ if (is_hidden_service) roles |= PERIODIC_EVENT_ROLE_HS_SERVICE;
+ if (is_dirserver) roles |= PERIODIC_EVENT_ROLE_DIRSERVER;
+
+ return roles;
+}
+
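+/* Illustrative usage (a sketch; it assumes PERIODIC_EVENT_ROLE_ROUTER is
+ * the union of the relay and bridge bits, as the event table above
+ * suggests): roles combine as a bitmask, so one instance can hold several
+ * at once:
+ *
+ *     int roles = get_my_roles(get_options());
+ *     if (roles & PERIODIC_EVENT_ROLE_ROUTER) {
+ *       // we are a relay, a bridge, or both
+ *     }
+ */
+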
+/** Event to run initialize_periodic_events_cb */
+static struct event *initialize_periodic_events_event = NULL;
+
+/** Helper, run one second after setup:
+ * Initializes all members of periodic_events and starts them running.
+ *
+ * (We do this one second after setup for backward-compatibility reasons;
+ * it might not actually be necessary.) */
+static void
+initialize_periodic_events_cb(evutil_socket_t fd, short events, void *data)
+{
+ (void) fd;
+ (void) events;
+ (void) data;
+
+ tor_event_free(initialize_periodic_events_event);
+
+ rescan_periodic_events(get_options());
+}
+
+/** Set up all the members of periodic_events[], and configure them all to be
+ * launched from a callback. */
+STATIC void
+initialize_periodic_events(void)
+{
+ tor_assert(periodic_events_initialized == 0);
+ periodic_events_initialized = 1;
+
+ /* Set up all periodic events. We'll launch them by roles. */
+ int i;
+ for (i = 0; periodic_events[i].name; ++i) {
+ periodic_event_setup(&periodic_events[i]);
+ }
+
+#define NAMED_CALLBACK(name) \
+ STMT_BEGIN name ## _event = find_periodic_event( #name ); STMT_END
+
+ NAMED_CALLBACK(check_descriptor);
+ NAMED_CALLBACK(dirvote);
+ NAMED_CALLBACK(fetch_networkstatus);
+ NAMED_CALLBACK(launch_descriptor_fetches);
+ NAMED_CALLBACK(check_dns_honesty);
+ NAMED_CALLBACK(save_state);
+
+ struct timeval one_second = { 1, 0 };
+ initialize_periodic_events_event = tor_evtimer_new(
+ tor_libevent_get_base(),
+ initialize_periodic_events_cb, NULL);
+ event_add(initialize_periodic_events_event, &one_second);
+}
+
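+/* Illustrative expansion: inside initialize_periodic_events() above,
+ *
+ *     NAMED_CALLBACK(dirvote);
+ *
+ * expands to
+ *
+ *     STMT_BEGIN dirvote_event = find_periodic_event("dirvote"); STMT_END
+ *
+ * so each named pointer ends up aimed at its entry in periodic_events[]. */
+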
+STATIC void
+teardown_periodic_events(void)
+{
+ int i;
+ for (i = 0; periodic_events[i].name; ++i) {
+ periodic_event_destroy(&periodic_events[i]);
+ }
+ periodic_events_initialized = 0;
+}
+
+/** Do a pass at all our periodic events, disable those we don't need anymore
+ * and enable those we need now using the given options. */
+void
+rescan_periodic_events(const or_options_t *options)
+{
+ tor_assert(options);
+
+ /* Avoid scanning the event list if we haven't initialized it yet. This is
+ * particularly useful for unit tests in order to avoid initializing main
+   * loop events every time. */
+ if (!periodic_events_initialized) {
+ return;
+ }
+
+ int roles = get_my_roles(options);
+
+ for (int i = 0; periodic_events[i].name; ++i) {
+ periodic_event_item_t *item = &periodic_events[i];
+
+ /* Handle the event flags. */
+ if (net_is_disabled() &&
+ (item->flags & PERIODIC_EVENT_FLAG_NEED_NET)) {
+ continue;
+ }
+
+ /* Enable the event if needed. It is safe to enable an event that was
+ * already enabled. Same goes for disabling it. */
+ if (item->roles & roles) {
+ log_debug(LD_GENERAL, "Launching periodic event %s", item->name);
+ periodic_event_enable(item);
+ } else {
+ log_debug(LD_GENERAL, "Disabling periodic event %s", item->name);
+ periodic_event_disable(item);
+ }
+ }
+}
+
+/* We just got new options set globally; see if we need to enable or disable
+ * periodic events. */
+void
+periodic_events_on_new_options(const or_options_t *options)
+{
+ /* Only if we've already initialized the events, rescan the list which will
+ * enable or disable events depending on our roles. This will be called at
+ * bootup and we don't want this function to initialize the events because
+ * they aren't set up at this stage. */
+ if (periodic_events_initialized) {
+ rescan_periodic_events(options);
+ }
+}
+
+/**
+ * Update our schedule so that we'll check whether we need to update our
+ * descriptor immediately, rather than after up to CHECK_DESCRIPTOR_INTERVAL
+ * seconds.
+ */
+void
+reschedule_descriptor_update_check(void)
+{
+ if (check_descriptor_event) {
+ periodic_event_reschedule(check_descriptor_event);
+ }
+}
+
+/**
+ * Update our schedule so that we'll check whether we need to fetch directory
+ * info immediately.
+ */
+void
+reschedule_directory_downloads(void)
+{
+ tor_assert(fetch_networkstatus_event);
+ tor_assert(launch_descriptor_fetches_event);
+
+ periodic_event_reschedule(fetch_networkstatus_event);
+ periodic_event_reschedule(launch_descriptor_fetches_event);
+}
+
+/** Mainloop callback: clean up circuits, channels, and connections
+ * that are pending close. */
+static void
+postloop_cleanup_cb(mainloop_event_t *ev, void *arg)
+{
+ (void)ev;
+ (void)arg;
+ circuit_close_all_marked();
+ close_closeable_connections();
+ channel_run_cleanup();
+ channel_listener_run_cleanup();
+}
+
+/** Event to run postloop_cleanup_cb */
+static mainloop_event_t *postloop_cleanup_ev=NULL;
+
+/** Schedule a post-loop event to clean up marked channels, connections, and
+ * circuits. */
+void
+mainloop_schedule_postloop_cleanup(void)
+{
+ if (PREDICT_UNLIKELY(postloop_cleanup_ev == NULL)) {
+ // (It's possible that we can get here if we decide to close a connection
+ // in the earliest stages of our configuration, before we create events.)
+ return;
+ }
+ mainloop_event_activate(postloop_cleanup_ev);
+}
+
+#define LONGEST_TIMER_PERIOD (30 * 86400)
+/** Helper: Return the number of seconds between <b>now</b> and <b>next</b>,
+ * clipped to the range [1 second, LONGEST_TIMER_PERIOD]. */
+static inline int
+safe_timer_diff(time_t now, time_t next)
+{
+ if (next > now) {
+ /* There were no computers at signed TIME_MIN (1902 on 32-bit systems),
+ * and nothing that could run Tor. It's a bug if 'next' is around then.
+ * On 64-bit systems with signed TIME_MIN, TIME_MIN is before the Big
+ * Bang. We cannot extrapolate past a singularity, but there was probably
+ * nothing that could run Tor then, either.
+ **/
+ tor_assert(next > TIME_MIN + LONGEST_TIMER_PERIOD);
+
+ if (next - LONGEST_TIMER_PERIOD > now)
+ return LONGEST_TIMER_PERIOD;
+ return (int)(next - now);
+ } else {
+ return 1;
+ }
+}
+
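+/* Worked example for safe_timer_diff() (a sketch): with
+ * LONGEST_TIMER_PERIOD = 30*86400 = 2592000 seconds,
+ *
+ *     safe_timer_diff(1000, 4000)    == 3000     // plain difference
+ *     safe_timer_diff(1000, 999)     == 1        // in the past: min 1s
+ *     safe_timer_diff(0, 10000000)   == 2592000  // clipped to the max
+ */
+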
+/** Perform regular maintenance tasks. This function gets run once per
+ * second by second_elapsed_callback().
+ */
+static void
+run_scheduled_events(time_t now)
+{
+ const or_options_t *options = get_options();
+
+ /* 0. See if we've been asked to shut down and our timeout has
+ * expired; or if our bandwidth limits are exhausted and we
+ * should hibernate; or if it's time to wake up from hibernation.
+ */
+ consider_hibernation(now);
+
+ /* Maybe enough time elapsed for us to reconsider a circuit. */
+ circuit_upgrade_circuits_from_guard_wait();
+
+ if (options->UseBridges && !net_is_disabled()) {
+ /* Note: this check uses net_is_disabled(), not should_delay_dir_fetches()
+ * -- the latter is only for fetching consensus-derived directory info. */
+ fetch_bridge_descriptors(options, now);
+ }
+
+ if (accounting_is_enabled(options)) {
+ accounting_run_housekeeping(now);
+ }
+
+ /* 3a. Every second, we examine pending circuits and prune the
+ * ones which have been pending for more than a few seconds.
+ * We do this before step 4, so it can try building more if
+ * it's not comfortable with the number of available circuits.
+ */
+ /* (If our circuit build timeout can ever become lower than a second (which
+ * it can't, currently), we should do this more often.) */
+ circuit_expire_building();
+ circuit_expire_waiting_for_better_guard();
+
+ /* 3b. Also look at pending streams and prune the ones that 'began'
+ * a long time ago but haven't gotten a 'connected' yet.
+ * Do this before step 4, so we can put them back into pending
+ * state to be picked up by the new circuit.
+ */
+ connection_ap_expire_beginning();
+
+ /* 3c. And expire connections that we've held open for too long.
+ */
+ connection_expire_held_open();
+
+ /* 4. Every second, we try a new circuit if there are no valid
+ * circuits. Every NewCircuitPeriod seconds, we expire circuits
+ * that became dirty more than MaxCircuitDirtiness seconds ago,
+ * and we make a new circ if there are no clean circuits.
+ */
+ const int have_dir_info = router_have_minimum_dir_info();
+ if (have_dir_info && !net_is_disabled()) {
+ circuit_build_needed_circs(now);
+ } else {
+ circuit_expire_old_circs_as_needed(now);
+ }
+
+ /* 5. We do housekeeping for each connection... */
+ channel_update_bad_for_new_circs(NULL, 0);
+ int i;
+ for (i=0;i<smartlist_len(connection_array);i++) {
+ run_connection_housekeeping(i, now);
+ }
+
+ /* 11b. check pending unconfigured managed proxies */
+ if (!net_is_disabled() && pt_proxies_configuration_pending())
+ pt_configure_remaining_proxies();
+}
+
+/* Periodic callback: rotate the onion keys after the period defined by the
+ * "onion-key-rotation-days" consensus parameter, shut down and restart all
+ * cpuworkers, and update our descriptor if necessary.
+ */
+static int
+rotate_onion_key_callback(time_t now, const or_options_t *options)
+{
+ if (server_mode(options)) {
+ int onion_key_lifetime = get_onion_key_lifetime();
+ time_t rotation_time = get_onion_key_set_at()+onion_key_lifetime;
+ if (rotation_time > now) {
+ return ONION_KEY_CONSENSUS_CHECK_INTERVAL;
+ }
+
+ log_info(LD_GENERAL,"Rotating onion key.");
+ rotate_onion_key();
+ cpuworkers_rotate_keyinfo();
+ if (router_rebuild_descriptor(1)<0) {
+ log_info(LD_CONFIG, "Couldn't rebuild router descriptor");
+ }
+ if (advertised_server_mode() && !net_is_disabled())
+ router_upload_dir_desc_to_dirservers(0);
+ return ONION_KEY_CONSENSUS_CHECK_INTERVAL;
+ }
+ return PERIODIC_EVENT_NO_UPDATE;
+}
+
+/* Periodic callback: Check if our old onion keys are still valid after the
+ * period of time defined by the consensus parameter
+ * "onion-key-grace-period-days", otherwise expire them by setting them to
+ * NULL.
+ */
+static int
+check_onion_keys_expiry_time_callback(time_t now, const or_options_t *options)
+{
+ if (server_mode(options)) {
+ int onion_key_grace_period = get_onion_key_grace_period();
+ time_t expiry_time = get_onion_key_set_at()+onion_key_grace_period;
+ if (expiry_time > now) {
+ return ONION_KEY_CONSENSUS_CHECK_INTERVAL;
+ }
+
+ log_info(LD_GENERAL, "Expiring old onion keys.");
+ expire_old_onion_keys();
+ cpuworkers_rotate_keyinfo();
+ return ONION_KEY_CONSENSUS_CHECK_INTERVAL;
+ }
+
+ return PERIODIC_EVENT_NO_UPDATE;
+}
+
+/* Periodic callback: Every 30 seconds, check whether it's time to make new
+ * Ed25519 subkeys.
+ */
+static int
+check_ed_keys_callback(time_t now, const or_options_t *options)
+{
+ if (server_mode(options)) {
+ if (should_make_new_ed_keys(options, now)) {
+ int new_signing_key = load_ed_keys(options, now);
+ if (new_signing_key < 0 ||
+ generate_ed_link_cert(options, now, new_signing_key > 0)) {
+ log_err(LD_OR, "Unable to update Ed25519 keys! Exiting.");
+ tor_shutdown_event_loop_and_exit(1);
+ }
+ }
+ return 30;
+ }
+ return PERIODIC_EVENT_NO_UPDATE;
+}
+
+/**
+ * Periodic callback: Every {LAZY,GREEDY}_DESCRIPTOR_RETRY_INTERVAL,
+ * see about fetching descriptors, microdescriptors, and extrainfo
+ * documents.
+ */
+static int
+launch_descriptor_fetches_callback(time_t now, const or_options_t *options)
+{
+ if (should_delay_dir_fetches(options, NULL))
+ return PERIODIC_EVENT_NO_UPDATE;
+
+ update_all_descriptor_downloads(now);
+ update_extrainfo_downloads(now);
+ if (router_have_minimum_dir_info())
+ return LAZY_DESCRIPTOR_RETRY_INTERVAL;
+ else
+ return GREEDY_DESCRIPTOR_RETRY_INTERVAL;
+}
+
+/**
+ * Periodic event: Rotate our X.509 certificates and TLS keys once every
+ * MAX_SSL_KEY_LIFETIME_INTERNAL.
+ */
+static int
+rotate_x509_certificate_callback(time_t now, const or_options_t *options)
+{
+ static int first = 1;
+ (void)now;
+ (void)options;
+ if (first) {
+ first = 0;
+ return MAX_SSL_KEY_LIFETIME_INTERNAL;
+ }
+
+ /* 1b. Every MAX_SSL_KEY_LIFETIME_INTERNAL seconds, we change our
+ * TLS context. */
+ log_info(LD_GENERAL,"Rotating tls context.");
+ if (router_initialize_tls_context() < 0) {
+ log_err(LD_BUG, "Error reinitializing TLS context");
+ tor_assert_unreached();
+ }
+ if (generate_ed_link_cert(options, now, 1)) {
+ log_err(LD_OR, "Unable to update Ed25519->TLS link certificate for "
+ "new TLS context.");
+ tor_assert_unreached();
+ }
+
+ /* We also make sure to rotate the TLS connections themselves if they've
+ * been up for too long -- but that's done via is_bad_for_new_circs in
+ * run_connection_housekeeping() above. */
+ return MAX_SSL_KEY_LIFETIME_INTERNAL;
+}
+
+/**
+ * Periodic callback: once an hour, grab some more entropy from the
+ * kernel and feed it to our CSPRNG.
+ **/
+static int
+add_entropy_callback(time_t now, const or_options_t *options)
+{
+ (void)now;
+ (void)options;
+ /* We already seeded once, so don't die on failure. */
+ if (crypto_seed_rng() < 0) {
+ log_warn(LD_GENERAL, "Tried to re-seed RNG, but failed. We already "
+ "seeded once, though, so we won't exit here.");
+ }
+
+ /** How often do we add more entropy to OpenSSL's RNG pool? */
+#define ENTROPY_INTERVAL (60*60)
+ return ENTROPY_INTERVAL;
+}
+
+/**
+ * Periodic callback: if we're an authority, make sure we test
+ * the routers on the network for reachability.
+ */
+static int
+launch_reachability_tests_callback(time_t now, const or_options_t *options)
+{
+ if (authdir_mode_tests_reachability(options) &&
+ !net_is_disabled()) {
+ /* try to determine reachability of the other Tor relays */
+ dirserv_test_reachability(now);
+ }
+ return REACHABILITY_TEST_INTERVAL;
+}
+
+/**
+ * Periodic callback: if we're an authority, discount older stability
+ * information (and other rephist information) so that newer data counts
+ * for more.
+ */
+static int
+downrate_stability_callback(time_t now, const or_options_t *options)
+{
+ (void)options;
+ /* 1d. Periodically, we discount older stability information so that new
+ * stability info counts more, and save the stability information to disk as
+ * appropriate. */
+ time_t next = rep_hist_downrate_old_runs(now);
+ return safe_timer_diff(now, next);
+}
+
+/**
+ * Periodic callback: if we're an authority, record our measured stability
+ * information from rephist in an mtbf file.
+ */
+static int
+save_stability_callback(time_t now, const or_options_t *options)
+{
+ if (authdir_mode_tests_reachability(options)) {
+ if (rep_hist_record_mtbf_data(now, 1)<0) {
+ log_warn(LD_GENERAL, "Couldn't store mtbf data.");
+ }
+ }
+#define SAVE_STABILITY_INTERVAL (30*60)
+ return SAVE_STABILITY_INTERVAL;
+}
+
+/**
+ * Periodic callback: if we're an authority, check on our authority
+ * certificate (the one that authenticates our authority signing key).
+ */
+static int
+check_authority_cert_callback(time_t now, const or_options_t *options)
+{
+ (void)now;
+ (void)options;
+ /* 1e. Periodically, if we're a v3 authority, we check whether our cert is
+ * close to expiring and warn the admin if it is. */
+ v3_authority_check_key_expiry();
+#define CHECK_V3_CERTIFICATE_INTERVAL (5*60)
+ return CHECK_V3_CERTIFICATE_INTERVAL;
+}
+
+/**
+ * Scheduled callback: Run directory-authority voting functionality.
+ *
+ * The schedule is a bit complicated here, so dirvote_act() manages the
+ * schedule itself.
+ **/
+static int
+dirvote_callback(time_t now, const or_options_t *options)
+{
+ if (!authdir_mode_v3(options)) {
+ tor_assert_nonfatal_unreached();
+ return 3600;
+ }
+
+ time_t next = dirvote_act(options, now);
+ if (BUG(next == TIME_MAX)) {
+ /* This shouldn't be returned unless we called dirvote_act() without
+ * being an authority. If it happens, maybe our configuration will
+ * fix itself in an hour or so? */
+ return 3600;
+ }
+ return safe_timer_diff(now, next);
+}
+
+/** Reschedule the directory-authority voting event. Run this whenever the
+ * schedule has changed. */
+void
+reschedule_dirvote(const or_options_t *options)
+{
+ if (periodic_events_initialized && authdir_mode_v3(options)) {
+ periodic_event_reschedule(dirvote_event);
+ }
+}
+
+/**
+ * Periodic callback: If our consensus is too old, recalculate whether
+ * we can actually use it.
+ */
+static int
+check_expired_networkstatus_callback(time_t now, const or_options_t *options)
+{
+ (void)options;
+ /* Check whether our networkstatus has expired. */
+ networkstatus_t *ns = networkstatus_get_latest_consensus();
+ /*XXXX RD: This value needs to be the same as REASONABLY_LIVE_TIME in
+ * networkstatus_get_reasonably_live_consensus(), but that value is way
+ * way too high. Arma: is the bridge issue there resolved yet? -NM */
+#define NS_EXPIRY_SLOP (24*60*60)
+ if (ns && ns->valid_until < (now - NS_EXPIRY_SLOP) &&
+ router_have_minimum_dir_info()) {
+ router_dir_info_changed();
+ }
+#define CHECK_EXPIRED_NS_INTERVAL (2*60)
+ return CHECK_EXPIRED_NS_INTERVAL;
+}
+
+/**
+ * Scheduled callback: Save the state file to disk if appropriate.
+ */
+static int
+save_state_callback(time_t now, const or_options_t *options)
+{
+ (void) options;
+ (void) or_state_save(now); // only saves if appropriate
+ const time_t next_write = get_or_state()->next_write;
+ if (next_write == TIME_MAX) {
+ return 86400;
+ }
+ return safe_timer_diff(now, next_write);
+}
+
+/** Reschedule the event for saving the state file.
+ *
+ * Run this when the state becomes dirty. */
+void
+reschedule_or_state_save(void)
+{
+ if (save_state_event == NULL) {
+ /* This can happen early on during startup. */
+ return;
+ }
+ periodic_event_reschedule(save_state_event);
+}
+
+/**
+ * Periodic callback: Write statistics to disk if appropriate.
+ */
+static int
+write_stats_file_callback(time_t now, const or_options_t *options)
+{
+ /* 1g. Check whether we should write statistics to disk.
+ */
+#define CHECK_WRITE_STATS_INTERVAL (60*60)
+ time_t next_time_to_write_stats_files = now + CHECK_WRITE_STATS_INTERVAL;
+ if (options->CellStatistics) {
+ time_t next_write =
+ rep_hist_buffer_stats_write(now);
+ if (next_write && next_write < next_time_to_write_stats_files)
+ next_time_to_write_stats_files = next_write;
+ }
+ if (options->DirReqStatistics) {
+ time_t next_write = geoip_dirreq_stats_write(now);
+ if (next_write && next_write < next_time_to_write_stats_files)
+ next_time_to_write_stats_files = next_write;
+ }
+ if (options->EntryStatistics) {
+ time_t next_write = geoip_entry_stats_write(now);
+ if (next_write && next_write < next_time_to_write_stats_files)
+ next_time_to_write_stats_files = next_write;
+ }
+ if (options->HiddenServiceStatistics) {
+ time_t next_write = rep_hist_hs_stats_write(now);
+ if (next_write && next_write < next_time_to_write_stats_files)
+ next_time_to_write_stats_files = next_write;
+ }
+ if (options->ExitPortStatistics) {
+ time_t next_write = rep_hist_exit_stats_write(now);
+ if (next_write && next_write < next_time_to_write_stats_files)
+ next_time_to_write_stats_files = next_write;
+ }
+ if (options->ConnDirectionStatistics) {
+ time_t next_write = rep_hist_conn_stats_write(now);
+ if (next_write && next_write < next_time_to_write_stats_files)
+ next_time_to_write_stats_files = next_write;
+ }
+ if (options->BridgeAuthoritativeDir) {
+ time_t next_write = rep_hist_desc_stats_write(now);
+ if (next_write && next_write < next_time_to_write_stats_files)
+ next_time_to_write_stats_files = next_write;
+ }
+
+ return safe_timer_diff(now, next_time_to_write_stats_files);
+}
+
+#define CHANNEL_CHECK_INTERVAL (60*60)
+static int
+check_canonical_channels_callback(time_t now, const or_options_t *options)
+{
+ (void)now;
+ if (public_server_mode(options))
+ channel_check_for_duplicates();
+
+ return CHANNEL_CHECK_INTERVAL;
+}
+
+static int
+reset_padding_counts_callback(time_t now, const or_options_t *options)
+{
+ if (options->PaddingStatistics) {
+ rep_hist_prep_published_padding_counts(now);
+ }
+
+ rep_hist_reset_padding_counts();
+ return REPHIST_CELL_PADDING_COUNTS_INTERVAL;
+}
+
+static int should_init_bridge_stats = 1;
+
+/**
+ * Periodic callback: Write bridge statistics to disk if appropriate.
+ */
+static int
+record_bridge_stats_callback(time_t now, const or_options_t *options)
+{
+ /* 1h. Check whether we should write bridge statistics to disk.
+ */
+ if (should_record_bridge_info(options)) {
+ if (should_init_bridge_stats) {
+ /* (Re-)initialize bridge statistics. */
+ geoip_bridge_stats_init(now);
+ should_init_bridge_stats = 0;
+ return WRITE_STATS_INTERVAL;
+ } else {
+ /* Possibly write bridge statistics to disk and ask when to write
+ * them next time. */
+ time_t next = geoip_bridge_stats_write(now);
+ return safe_timer_diff(now, next);
+ }
+ } else if (!should_init_bridge_stats) {
+ /* Bridge mode was turned off. Ensure that stats are re-initialized
+ * next time bridge mode is turned on. */
+ should_init_bridge_stats = 1;
+ }
+ return PERIODIC_EVENT_NO_UPDATE;
+}
+
+/**
+ * Periodic callback: Clean in-memory caches every once in a while
+ */
+static int
+clean_caches_callback(time_t now, const or_options_t *options)
+{
+ /* Remove old information from rephist and the rend cache. */
+ rep_history_clean(now - options->RephistTrackTime);
+ rend_cache_clean(now, REND_CACHE_TYPE_SERVICE);
+ hs_cache_clean_as_client(now);
+ hs_cache_clean_as_dir(now);
+ microdesc_cache_rebuild(NULL, 0);
+#define CLEAN_CACHES_INTERVAL (30*60)
+ return CLEAN_CACHES_INTERVAL;
+}
+
+/**
+ * Periodic callback: Clean the cache of failed hidden service lookups
+ * frequently.
+ */
+static int
+rend_cache_failure_clean_callback(time_t now, const or_options_t *options)
+{
+ (void)options;
+  /* We don't keep entries that are more than five minutes old, so we try to
+   * clean the cache as often as we can, since we want the client to wait as
+   * little as possible for reachability reasons. */
+ rend_cache_failure_clean(now);
+ hs_cache_client_intro_state_clean(now);
+ return 30;
+}
+
+/**
+ * Periodic callback: If we're a server and initializing dns failed, retry.
+ */
+static int
+retry_dns_callback(time_t now, const or_options_t *options)
+{
+ (void)now;
+#define RETRY_DNS_INTERVAL (10*60)
+ if (server_mode(options) && has_dns_init_failed())
+ dns_init();
+ return RETRY_DNS_INTERVAL;
+}
+
+/** Periodic callback: consider rebuilding and re-uploading our descriptor
+ * (if we've passed our internal checks). */
+static int
+check_descriptor_callback(time_t now, const or_options_t *options)
+{
+/** How often do we check whether part of our router info has changed in a
+ * way that would require an upload? That includes checking whether our IP
+ * address has changed. */
+#define CHECK_DESCRIPTOR_INTERVAL (60)
+
+ (void)options;
+
+ /* 2b. Once per minute, regenerate and upload the descriptor if the old
+ * one is inaccurate. */
+ if (!net_is_disabled()) {
+ check_descriptor_bandwidth_changed(now);
+ check_descriptor_ipaddress_changed(now);
+ mark_my_descriptor_dirty_if_too_old(now);
+ consider_publishable_server(0);
+ /* If any networkstatus documents are no longer recent, we need to
+ * update all the descriptors' running status. */
+ /* Remove dead routers. */
+ /* XXXX This doesn't belong here, but it was here in the pre-
+ * XXXX refactoring code. */
+ routerlist_remove_old_routers();
+ }
+
+ return CHECK_DESCRIPTOR_INTERVAL;
+}
+
+/**
+ * Periodic callback: check whether we're reachable (as a relay), and
+ * whether our bandwidth has changed enough that we need to
+ * publish a new descriptor.
+ */
+static int
+check_for_reachability_bw_callback(time_t now, const or_options_t *options)
+{
+ /* XXXX This whole thing was stuck in the middle of what is now
+ * XXXX check_descriptor_callback. I'm not sure it's right. */
+
+ static int dirport_reachability_count = 0;
+ /* also, check religiously for reachability, if it's within the first
+ * 20 minutes of our uptime. */
+ if (server_mode(options) &&
+ (have_completed_a_circuit() || !any_predicted_circuits(now)) &&
+ !net_is_disabled()) {
+ if (get_uptime() < TIMEOUT_UNTIL_UNREACHABILITY_COMPLAINT) {
+ router_do_reachability_checks(1, dirport_reachability_count==0);
+ if (++dirport_reachability_count > 5)
+ dirport_reachability_count = 0;
+ return 1;
+ } else {
+ /* If we haven't checked for 12 hours and our bandwidth estimate is
+ * low, do another bandwidth test. This is especially important for
+ * bridges, since they might go long periods without much use. */
+ const routerinfo_t *me = router_get_my_routerinfo();
+ static int first_time = 1;
+ if (!first_time && me &&
+ me->bandwidthcapacity < me->bandwidthrate &&
+ me->bandwidthcapacity < 51200) {
+ reset_bandwidth_test();
+ }
+ first_time = 0;
+#define BANDWIDTH_RECHECK_INTERVAL (12*60*60)
+ return BANDWIDTH_RECHECK_INTERVAL;
+ }
+ }
+ return CHECK_DESCRIPTOR_INTERVAL;
+}
+
+/**
+ * Periodic event: once a minute, (or every second if TestingTorNetwork, or
+ * during client bootstrap), check whether we want to download any
+ * networkstatus documents. */
+static int
+fetch_networkstatus_callback(time_t now, const or_options_t *options)
+{
+ /* How often do we check whether we should download network status
+ * documents? */
+ const int we_are_bootstrapping = networkstatus_consensus_is_bootstrapping(
+ now);
+ const int prefer_mirrors = !directory_fetches_from_authorities(
+ get_options());
+ int networkstatus_dl_check_interval = 60;
+ /* check more often when testing, or when bootstrapping from mirrors
+ * (connection limits prevent too many connections being made) */
+ if (options->TestingTorNetwork
+ || (we_are_bootstrapping && prefer_mirrors)) {
+ networkstatus_dl_check_interval = 1;
+ }
+
+ if (should_delay_dir_fetches(options, NULL))
+ return PERIODIC_EVENT_NO_UPDATE;
+
+ update_networkstatus_downloads(now);
+ return networkstatus_dl_check_interval;
+}
+
+/**
+ * Periodic callback: Every 60 seconds, we relaunch listeners if any died. */
+static int
+retry_listeners_callback(time_t now, const or_options_t *options)
+{
+ (void)now;
+ (void)options;
+ if (!net_is_disabled()) {
+ retry_all_listeners(NULL, NULL, 0);
+ return 60;
+ }
+ return PERIODIC_EVENT_NO_UPDATE;
+}
+
+/**
+ * Periodic callback: as a server, see if we have any old unused circuits
+ * that should be expired */
+static int
+expire_old_circuits_serverside_callback(time_t now,
+                                        const or_options_t *options)
+{
+ (void)options;
+ /* every 11 seconds, so not usually the same second as other such events */
+ circuit_expire_old_circuits_serverside(now);
+ return 11;
+}
+
+/**
+ * Callback: Send warnings if Tor doesn't find its ports reachable.
+ */
+static int
+reachability_warnings_callback(time_t now, const or_options_t *options)
+{
+ (void) now;
+
+ if (get_uptime() < TIMEOUT_UNTIL_UNREACHABILITY_COMPLAINT) {
+ return (int)(TIMEOUT_UNTIL_UNREACHABILITY_COMPLAINT - get_uptime());
+ }
+
+ if (server_mode(options) &&
+ !net_is_disabled() &&
+ have_completed_a_circuit()) {
+ /* every 20 minutes, check and complain if necessary */
+ const routerinfo_t *me = router_get_my_routerinfo();
+ if (me && !check_whether_orport_reachable(options)) {
+ char *address = tor_dup_ip(me->addr);
+ log_warn(LD_CONFIG,"Your server (%s:%d) has not managed to confirm that "
+ "its ORPort is reachable. Relays do not publish descriptors "
+ "until their ORPort and DirPort are reachable. Please check "
+ "your firewalls, ports, address, /etc/hosts file, etc.",
+ address, me->or_port);
+ control_event_server_status(LOG_WARN,
+ "REACHABILITY_FAILED ORADDRESS=%s:%d",
+ address, me->or_port);
+ tor_free(address);
+ }
+
+ if (me && !check_whether_dirport_reachable(options)) {
+ char *address = tor_dup_ip(me->addr);
+ log_warn(LD_CONFIG,
+ "Your server (%s:%d) has not managed to confirm that its "
+ "DirPort is reachable. Relays do not publish descriptors "
+ "until their ORPort and DirPort are reachable. Please check "
+ "your firewalls, ports, address, /etc/hosts file, etc.",
+ address, me->dir_port);
+ control_event_server_status(LOG_WARN,
+ "REACHABILITY_FAILED DIRADDRESS=%s:%d",
+ address, me->dir_port);
+ tor_free(address);
+ }
+ }
+
+ return TIMEOUT_UNTIL_UNREACHABILITY_COMPLAINT;
+}
+
+static int dns_honesty_first_time = 1;
+
+/**
+ * Periodic event: if we're an exit, see if our DNS server is telling us
+ * obvious lies.
+ */
+static int
+check_dns_honesty_callback(time_t now, const or_options_t *options)
+{
+ (void)now;
+ /* 9. and if we're an exit node, check whether our DNS is telling stories
+ * to us. */
+ if (net_is_disabled() ||
+ ! public_server_mode(options) ||
+ router_my_exit_policy_is_reject_star())
+ return PERIODIC_EVENT_NO_UPDATE;
+
+ if (dns_honesty_first_time) {
+ /* Don't launch right when we start */
+ dns_honesty_first_time = 0;
+ return crypto_rand_int_range(60, 180);
+ }
+
+ dns_launch_correctness_checks();
+ return 12*3600 + crypto_rand_int(12*3600);
+}
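+
+/* Worked example of the interval above: crypto_rand_int(12*3600) returns a
+ * uniform value in [0, 43199], so the next check lands somewhere between
+ * 12 hours and just under 24 hours from now. */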
+
+/**
+ * Periodic callback: if we're the bridge authority, write a networkstatus
+ * file to disk.
+ */
+static int
+write_bridge_ns_callback(time_t now, const or_options_t *options)
+{
+ /* 10. write bridge networkstatus file to disk */
+ if (options->BridgeAuthoritativeDir) {
+ networkstatus_dump_bridge_status_to_file(now);
+#define BRIDGE_STATUSFILE_INTERVAL (30*60)
+ return BRIDGE_STATUSFILE_INTERVAL;
+ }
+ return PERIODIC_EVENT_NO_UPDATE;
+}
+
+static int heartbeat_callback_first_time = 1;
+
+/**
+ * Periodic callback: write the heartbeat message in the logs.
+ *
+ * If writing the heartbeat message to the logs fails for some reason, try
+ * again after <b>MIN_HEARTBEAT_PERIOD</b> seconds.
+ */
+static int
+heartbeat_callback(time_t now, const or_options_t *options)
+{
+ /* Check if heartbeat is disabled */
+ if (!options->HeartbeatPeriod) {
+ return PERIODIC_EVENT_NO_UPDATE;
+ }
+
+ /* Skip the first one. */
+ if (heartbeat_callback_first_time) {
+ heartbeat_callback_first_time = 0;
+ return options->HeartbeatPeriod;
+ }
+
+ /* Write the heartbeat message */
+ if (log_heartbeat(now) == 0) {
+ return options->HeartbeatPeriod;
+ } else {
+ /* If we couldn't write the heartbeat log message, try again in the minimum
+ * interval of time. */
+ return MIN_HEARTBEAT_PERIOD;
+ }
+}
+
+#define CDM_CLEAN_CALLBACK_INTERVAL 600
+static int
+clean_consdiffmgr_callback(time_t now, const or_options_t *options)
+{
+ (void)now;
+ if (dir_server_mode(options)) {
+ consdiffmgr_cleanup();
+ }
+ return CDM_CLEAN_CALLBACK_INTERVAL;
+}
+
+/**
+ * Periodic callback: Run scheduled events for HS service. This is called
+ * every second.
+ */
+static int
+hs_service_callback(time_t now, const or_options_t *options)
+{
+ (void) options;
+
+  /* We need to at least be able to build circuits, and we need to actually
+   * have a working network. */
+ if (!have_completed_a_circuit() || net_is_disabled() ||
+ networkstatus_get_live_consensus(now) == NULL) {
+ goto end;
+ }
+
+ hs_service_run_scheduled_events(now);
+
+ end:
+ /* Every 1 second. */
+ return 1;
+}
+
+/** Timer: used to invoke second_elapsed_callback() once per second. */
+static periodic_timer_t *second_timer = NULL;
+
+/**
+ * Enable or disable the per-second timer as appropriate, creating it if
+ * necessary.
+ */
+void
+reschedule_per_second_timer(void)
+{
+ struct timeval one_second;
+ one_second.tv_sec = 1;
+ one_second.tv_usec = 0;
+
+ if (! second_timer) {
+ second_timer = periodic_timer_new(tor_libevent_get_base(),
+ &one_second,
+ second_elapsed_callback,
+ NULL);
+ tor_assert(second_timer);
+ }
+
+ const bool run_per_second_events =
+ control_any_per_second_event_enabled() || ! net_is_completely_disabled();
+
+ if (run_per_second_events) {
+ periodic_timer_launch(second_timer, &one_second);
+ } else {
+ periodic_timer_disable(second_timer);
+ }
+}
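+
+/* Illustrative sketch (assuming only the periodic_timer_* calls used above
+ * and the periodic_timer_free() call in tor_free_all() below): the timer
+ * lifecycle is create-once, launch or disable as conditions change, and
+ * free at shutdown. Excluded from compilation on purpose. */
+#if 0
+static periodic_timer_t *example_timer = NULL;
+static void
+example_timer_set_enabled(bool enable)
+{
+  struct timeval five_seconds = { 5, 0 };
+  if (!example_timer) {
+    example_timer = periodic_timer_new(tor_libevent_get_base(),
+                                       &five_seconds,
+                                       second_elapsed_callback, NULL);
+    tor_assert(example_timer);
+  }
+  if (enable)
+    periodic_timer_launch(example_timer, &five_seconds);
+  else
+    periodic_timer_disable(example_timer);
+  /* at shutdown: periodic_timer_free(example_timer); */
+}
+#endif /* 0 */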
+
+/** Last time that update_current_time was called. */
+static time_t current_second = 0;
+/** Last time that update_current_time updated current_second. */
+static monotime_coarse_t current_second_last_changed;
+
+/**
+ * Set the current time to "now", which should be the value returned by
+ * time(). Check for clock jumps and track the total number of seconds we
+ * have been running.
+ */
+void
+update_current_time(time_t now)
+{
+ if (PREDICT_LIKELY(now == current_second)) {
+ /* We call this function a lot. Most frequently, the current second
+ * will not have changed, so we just return. */
+ return;
+ }
+
+ const time_t seconds_elapsed = current_second ? (now - current_second) : 0;
+
+ /* Check the wall clock against the monotonic clock, so we can
+ * better tell idleness from clock jumps and/or other shenanigans. */
+ monotime_coarse_t last_updated;
+ memcpy(&last_updated, &current_second_last_changed, sizeof(last_updated));
+ monotime_coarse_get(&current_second_last_changed);
+
+ /** How much clock jumping do we tolerate? */
+#define NUM_JUMPED_SECONDS_BEFORE_WARN 100
+
+ /** How much idleness do we tolerate? */
+#define NUM_IDLE_SECONDS_BEFORE_WARN 3600
+
+ if (seconds_elapsed < -NUM_JUMPED_SECONDS_BEFORE_WARN) {
+ // moving back in time is always a bad sign.
+ circuit_note_clock_jumped(seconds_elapsed, false);
+ } else if (seconds_elapsed >= NUM_JUMPED_SECONDS_BEFORE_WARN) {
+ /* Compare the monotonic clock to the result of time(). */
+ const int32_t monotime_msec_passed =
+ monotime_coarse_diff_msec32(&last_updated,
+ &current_second_last_changed);
+ const int monotime_sec_passed = monotime_msec_passed / 1000;
+ const int discrepancy = monotime_sec_passed - (int)seconds_elapsed;
+ /* If the monotonic clock deviates from time(NULL), we have a couple of
+ * possibilities. On some systems, this means we have been suspended or
+ * sleeping. Everywhere, it can mean that the wall-clock time has
+ * been changed -- for example, with settimeofday().
+ *
+     * On the other hand, if the monotonic time matches the wall-clock
+     * time, we've probably just been idle for a while, with no events
+     * firing. We tolerate much more of that.
+ */
+ const bool clock_jumped = abs(discrepancy) > 2;
+
+ if (clock_jumped || seconds_elapsed >= NUM_IDLE_SECONDS_BEFORE_WARN) {
+ circuit_note_clock_jumped(seconds_elapsed, ! clock_jumped);
+ }
+ } else if (seconds_elapsed > 0) {
+ stats_n_seconds_working += seconds_elapsed;
+ }
+
+ update_approx_time(now);
+ current_second = now;
+}
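+
+/* Worked example of the check above: if time() claims 150 seconds elapsed
+ * but the coarse monotonic clock reports only 3000 msec, then
+ * discrepancy = 3 - 150 = -147; since |-147| > 2, we treat it as a clock
+ * jump and call circuit_note_clock_jumped(). If instead both clocks agree,
+ * discrepancy stays near zero, and we complain (as idleness) only once the
+ * gap reaches NUM_IDLE_SECONDS_BEFORE_WARN. */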
+
+/** Libevent callback: invoked once every second. */
+static void
+second_elapsed_callback(periodic_timer_t *timer, void *arg)
+{
+ /* XXXX This could be sensibly refactored into multiple callbacks, and we
+ * could use Libevent's timers for this rather than checking the current
+ * time against a bunch of timeouts every second. */
+ time_t now;
+ (void)timer;
+ (void)arg;
+
+ now = time(NULL);
+
+ /* We don't need to do this once-per-second any more: time-updating is
+ * only in this callback _because it is a callback_. It should be fine
+ * to disable this callback, and the time will still get updated.
+ */
+ update_current_time(now);
+
+ /* Maybe some controller events are ready to fire */
+ control_per_second_events();
+
+ run_scheduled_events(now);
+}
+
+#ifdef HAVE_SYSTEMD_209
+static periodic_timer_t *systemd_watchdog_timer = NULL;
+
+/** Libevent callback: invoked to reset systemd watchdog. */
+static void
+systemd_watchdog_callback(periodic_timer_t *timer, void *arg)
+{
+ (void)timer;
+ (void)arg;
+ sd_notify(0, "WATCHDOG=1");
+}
+#endif /* defined(HAVE_SYSTEMD_209) */
+
+#define UPTIME_CUTOFF_FOR_NEW_BANDWIDTH_TEST (6*60*60)
+
+/** Called when our IP address seems to have changed. <b>at_interface</b>
+ * should be true if we detected a change in our interface, and false if we
+ * detected a change in our published address. */
+void
+ip_address_changed(int at_interface)
+{
+ const or_options_t *options = get_options();
+ int server = server_mode(options);
+ int exit_reject_interfaces = (server && options->ExitRelay
+ && options->ExitPolicyRejectLocalInterfaces);
+
+ if (at_interface) {
+ if (! server) {
+ /* Okay, change our keys. */
+ if (init_keys_client() < 0)
+ log_warn(LD_GENERAL, "Unable to rotate keys after IP change!");
+ }
+ } else {
+ if (server) {
+ if (get_uptime() > UPTIME_CUTOFF_FOR_NEW_BANDWIDTH_TEST)
+ reset_bandwidth_test();
+ reset_uptime();
+ router_reset_reachability();
+ }
+ }
+
+ /* Exit relays incorporate interface addresses in their exit policies when
+ * ExitPolicyRejectLocalInterfaces is set */
+ if (exit_reject_interfaces || (server && !at_interface)) {
+ mark_my_descriptor_dirty("IP address changed");
+ }
+
+ dns_servers_relaunch_checks();
+}
+
+/** Forget what we've learned about the correctness of our DNS servers, and
+ * start learning again. */
+void
+dns_servers_relaunch_checks(void)
+{
+ if (server_mode(get_options())) {
+ dns_reset_correctness_checks();
+ if (periodic_events_initialized) {
+ tor_assert(check_dns_honesty_event);
+ periodic_event_reschedule(check_dns_honesty_event);
+ }
+ }
+}
+
+/** Called when we get a SIGHUP: reload configuration files and keys,
+ * retry all connections, and so on. */
+static int
+do_hup(void)
+{
+ const or_options_t *options = get_options();
+
+ log_notice(LD_GENERAL,"Received reload signal (hup). Reloading config and "
+ "resetting internal state.");
+ if (accounting_is_enabled(options))
+ accounting_record_bandwidth_usage(time(NULL), get_or_state());
+
+ router_reset_warnings();
+ routerlist_reset_warnings();
+ /* first, reload config variables, in case they've changed */
+ if (options->ReloadTorrcOnSIGHUP) {
+ /* no need to provide argc/v, they've been cached in init_from_config */
+ int init_rv = options_init_from_torrc(0, NULL);
+ if (init_rv < 0) {
+ log_err(LD_CONFIG,"Reading config failed--see warnings above. "
+ "For usage, try -h.");
+ return -1;
+ } else if (BUG(init_rv > 0)) {
+ // LCOV_EXCL_START
+ /* This should be impossible: the only "return 1" cases in
+ * options_init_from_torrc are ones caused by command-line arguments;
+ * but they can't change while Tor is running. */
+ return -1;
+ // LCOV_EXCL_STOP
+ }
+ options = get_options(); /* they have changed now */
+ /* Logs are only truncated the first time they are opened, but were
+ probably intended to be cleaned up on signal. */
+ if (options->TruncateLogFile)
+ truncate_logs();
+ } else {
+ char *msg = NULL;
+ log_notice(LD_GENERAL, "Not reloading config file: the controller told "
+ "us not to.");
+ /* Make stuff get rescanned, reloaded, etc. */
+ if (set_options((or_options_t*)options, &msg) < 0) {
+ if (!msg)
+ msg = tor_strdup("Unknown error");
+ log_warn(LD_GENERAL, "Unable to re-set previous options: %s", msg);
+ tor_free(msg);
+ }
+ }
+ if (authdir_mode(options)) {
+ /* reload the approved-routers file */
+ if (dirserv_load_fingerprint_file() < 0) {
+ /* warnings are logged from dirserv_load_fingerprint_file() directly */
+ log_info(LD_GENERAL, "Error reloading fingerprints. "
+ "Continuing with old list.");
+ }
+ }
+
+ /* Rotate away from the old dirty circuits. This has to be done
+ * after we've read the new options, but before we start using
+ * circuits for directory fetches. */
+ circuit_mark_all_dirty_circs_as_unusable();
+
+ /* retry appropriate downloads */
+ router_reset_status_download_failures();
+ router_reset_descriptor_download_failures();
+ if (!net_is_disabled())
+ update_networkstatus_downloads(time(NULL));
+
+ /* We'll retry routerstatus downloads in about 10 seconds; no need to
+ * force a retry there. */
+
+ if (server_mode(options)) {
+ /* Maybe we've been given a new ed25519 key or certificate?
+ */
+ time_t now = approx_time();
+ int new_signing_key = load_ed_keys(options, now);
+ if (new_signing_key < 0 ||
+ generate_ed_link_cert(options, now, new_signing_key > 0)) {
+ log_warn(LD_OR, "Problem reloading Ed25519 keys; still using old keys.");
+ }
+
+ /* Update cpuworker and dnsworker processes, so they get up-to-date
+ * configuration options. */
+ cpuworkers_rotate_keyinfo();
+ dns_reset();
+ }
+ return 0;
+}
+
+/** Initialize some mainloop_event_t objects that we require. */
+STATIC void
+initialize_mainloop_events(void)
+{
+ if (!schedule_active_linked_connections_event) {
+ schedule_active_linked_connections_event =
+ mainloop_event_postloop_new(schedule_active_linked_connections_cb, NULL);
+ }
+ if (!postloop_cleanup_ev) {
+ postloop_cleanup_ev =
+ mainloop_event_postloop_new(postloop_cleanup_cb, NULL);
+ }
+}
+
+/** Tor main loop. */
+int
+do_main_loop(void)
+{
+ time_t now;
+
+ /* initialize the periodic events first, so that code that depends on the
+ * events being present does not assert.
+ */
+ if (! periodic_events_initialized) {
+ initialize_periodic_events();
+ }
+
+ initialize_mainloop_events();
+
+ /* initialize dns resolve map, spawn workers if needed */
+ if (dns_init() < 0) {
+ if (get_options()->ServerDNSAllowBrokenConfig)
+ log_warn(LD_GENERAL, "Couldn't set up any working nameservers. "
+ "Network not up yet? Will try again soon.");
+ else {
+      log_err(LD_GENERAL,"Error initializing dns subsystem; exiting. To "
+             "retry instead, set the ServerDNSAllowBrokenConfig option.");
+ }
+ }
+
+ handle_signals();
+ monotime_init();
+ timers_initialize();
+
+ /* load the private keys, if we're supposed to have them, and set up the
+ * TLS context. */
+ if (! client_identity_key_is_set()) {
+ if (init_keys() < 0) {
+ log_err(LD_OR, "Error initializing keys; exiting");
+ return -1;
+ }
+ }
+
+ /* Set up our buckets */
+ connection_bucket_init();
+
+ /* initialize the bootstrap status events to know we're starting up */
+ control_event_bootstrap(BOOTSTRAP_STATUS_STARTING, 0);
+
+ /* Initialize the keypinning log. */
+ if (authdir_mode_v3(get_options())) {
+ char *fname = get_datadir_fname("key-pinning-journal");
+ int r = 0;
+ if (keypin_load_journal(fname)<0) {
+ log_err(LD_DIR, "Error loading key-pinning journal: %s",strerror(errno));
+ r = -1;
+ }
+ if (keypin_open_journal(fname)<0) {
+ log_err(LD_DIR, "Error opening key-pinning journal: %s",strerror(errno));
+ r = -1;
+ }
+ tor_free(fname);
+ if (r)
+ return r;
+ }
+ {
+ /* This is the old name for key-pinning-journal. These got corrupted
+ * in a couple of cases by #16530, so we started over. See #16580 for
+ * the rationale and for other options we didn't take. We can remove
+ * this code once all the authorities that ran 0.2.7.1-alpha-dev are
+ * upgraded.
+ */
+ char *fname = get_datadir_fname("key-pinning-entries");
+ unlink(fname);
+ tor_free(fname);
+ }
+
+ if (trusted_dirs_reload_certs()) {
+ log_warn(LD_DIR,
+ "Couldn't load all cached v3 certificates. Starting anyway.");
+ }
+ if (router_reload_consensus_networkstatus()) {
+ return -1;
+ }
+ /* load the routers file, or assign the defaults. */
+ if (router_reload_router_list()) {
+ return -1;
+ }
+ /* load the networkstatuses. (This launches a download for new routers as
+ * appropriate.)
+ */
+ now = time(NULL);
+ directory_info_has_arrived(now, 1, 0);
+
+ if (server_mode(get_options()) || dir_server_mode(get_options())) {
+ /* launch cpuworkers. Need to do this *after* we've read the onion key. */
+ cpu_init();
+ }
+ consdiffmgr_enable_background_compression();
+
+ /* Setup shared random protocol subsystem. */
+ if (authdir_mode_v3(get_options())) {
+ if (sr_init(1) < 0) {
+ return -1;
+ }
+ }
+
+ /* set up once-a-second callback. */
+ reschedule_per_second_timer();
+
+#ifdef HAVE_SYSTEMD_209
+ uint64_t watchdog_delay;
+ /* set up systemd watchdog notification. */
+ if (sd_watchdog_enabled(1, &watchdog_delay) > 0) {
+ if (! systemd_watchdog_timer) {
+ struct timeval watchdog;
+ /* The manager will "act on" us if we don't send them a notification
+ * every 'watchdog_delay' microseconds. So, send notifications twice
+ * that often. */
+ watchdog_delay /= 2;
+ watchdog.tv_sec = watchdog_delay / 1000000;
+ watchdog.tv_usec = watchdog_delay % 1000000;
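+      /* Worked example: with WatchdogSec=30 in the service file,
+       * sd_watchdog_enabled() reports 30000000 usec; halved, we notify
+       * systemd every 15 seconds. */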
+
+ systemd_watchdog_timer = periodic_timer_new(tor_libevent_get_base(),
+ &watchdog,
+ systemd_watchdog_callback,
+ NULL);
+ tor_assert(systemd_watchdog_timer);
+ }
+ }
+#endif /* defined(HAVE_SYSTEMD_209) */
+
+#ifdef HAVE_SYSTEMD
+ {
+ const int r = sd_notify(0, "READY=1");
+ if (r < 0) {
+ log_warn(LD_GENERAL, "Unable to send readiness to systemd: %s",
+ strerror(r));
+ } else if (r > 0) {
+ log_notice(LD_GENERAL, "Signaled readiness to systemd");
+ } else {
+ log_info(LD_GENERAL, "Systemd NOTIFY_SOCKET not present.");
+ }
+ }
+#endif /* defined(HAVE_SYSTEMD) */
+
+ main_loop_should_exit = 0;
+ main_loop_exit_value = 0;
+
+#ifdef ENABLE_RESTART_DEBUGGING
+ {
+ static int first_time = 1;
+
+ if (first_time && getenv("TOR_DEBUG_RESTART")) {
+ first_time = 0;
+ const char *sec_str = getenv("TOR_DEBUG_RESTART_AFTER_SECONDS");
+ long sec;
+ int sec_ok=0;
+ if (sec_str &&
+ (sec = tor_parse_long(sec_str, 10, 0, INT_MAX, &sec_ok, NULL)) &&
+ sec_ok) {
+ /* Okay, we parsed the seconds. */
+ } else {
+ sec = 5;
+ }
+ struct timeval restart_after = { (time_t) sec, 0 };
+ tor_shutdown_event_loop_for_restart_event =
+ tor_evtimer_new(tor_libevent_get_base(),
+ tor_shutdown_event_loop_for_restart_cb, NULL);
+ event_add(tor_shutdown_event_loop_for_restart_event, &restart_after);
+ }
+ }
+#endif
+
+ return run_main_loop_until_done();
+}
+
+#ifndef _WIN32
+/** Rate-limiter for EINVAL-type libevent warnings. */
+static ratelim_t libevent_error_ratelim = RATELIM_INIT(10);
+#endif
+
+/**
+ * Run the main loop a single time. Return 0 for "exit"; -1 for "exit with
+ * error", and 1 for "run this again."
+ */
+static int
+run_main_loop_once(void)
+{
+ int loop_result;
+
+ if (nt_service_is_stopping())
+ return 0;
+
+ if (main_loop_should_exit)
+ return 0;
+
+#ifndef _WIN32
+ /* Make it easier to tell whether libevent failure is our fault or not. */
+ errno = 0;
+#endif
+
+ if (get_options()->MainloopStats) {
+ /* We always enforce that EVLOOP_ONCE is passed to event_base_loop() if we
+ * are collecting main loop statistics. */
+ called_loop_once = 1;
+ } else {
+ called_loop_once = 0;
+ }
+
+ /* Make sure we know (about) what time it is. */
+ update_approx_time(time(NULL));
+
+ /* Here it is: the main loop. Here we tell Libevent to poll until we have
+ * an event, or the second ends, or until we have some active linked
+ * connections to trigger events for. Libevent will wait till one
+ * of these happens, then run all the appropriate callbacks. */
+ loop_result = tor_libevent_run_event_loop(tor_libevent_get_base(),
+ called_loop_once);
+
+ if (get_options()->MainloopStats) {
+ /* Update our main loop counters. */
+ if (loop_result == 0) {
+ // The call was successful.
+ increment_main_loop_success_count();
+ } else if (loop_result == -1) {
+ // The call was erroneous.
+ increment_main_loop_error_count();
+ } else if (loop_result == 1) {
+ // The call didn't have any active or pending events
+ // to handle.
+ increment_main_loop_idle_count();
+ }
+ }
+
+ /* Oh, the loop failed. That might be an error that we need to
+ * catch, but more likely, it's just an interrupted poll() call or something,
+ * and we should try again. */
+ if (loop_result < 0) {
+ int e = tor_socket_errno(-1);
+ /* let the program survive things like ^z */
+ if (e != EINTR && !ERRNO_IS_EINPROGRESS(e)) {
+ log_err(LD_NET,"libevent call with %s failed: %s [%d]",
+ tor_libevent_get_method(), tor_socket_strerror(e), e);
+ return -1;
+#ifndef _WIN32
+ } else if (e == EINVAL) {
+ log_fn_ratelim(&libevent_error_ratelim, LOG_WARN, LD_NET,
+ "EINVAL from libevent: should you upgrade libevent?");
+ if (libevent_error_ratelim.n_calls_since_last_time > 8) {
+ log_err(LD_NET, "Too many libevent errors, too fast: dying");
+ return -1;
+ }
+#endif /* !defined(_WIN32) */
+ } else {
+ tor_assert_nonfatal_once(! ERRNO_IS_EINPROGRESS(e));
+ log_debug(LD_NET,"libevent call interrupted.");
+ /* You can't trust the results of this poll(). Go back to the
+ * top of the big for loop. */
+ return 1;
+ }
+ }
+
+ if (main_loop_should_exit)
+ return 0;
+
+ return 1;
+}
+
+/** Run the run_main_loop_once() function until it declares itself done,
+ * and return its final return value.
+ *
+ * Shadow won't invoke this function, so don't fill it up with things.
+ */
+static int
+run_main_loop_until_done(void)
+{
+ int loop_result = 1;
+ do {
+ loop_result = run_main_loop_once();
+ } while (loop_result == 1);
+
+ if (main_loop_should_exit)
+ return main_loop_exit_value;
+ else
+ return loop_result;
+}
+
+/** Libevent callback: invoked when we get a signal.
+ */
+static void
+signal_callback(evutil_socket_t fd, short events, void *arg)
+{
+ const int *sigptr = arg;
+ const int sig = *sigptr;
+ (void)fd;
+ (void)events;
+
+ update_current_time(time(NULL));
+ process_signal(sig);
+}
+
+/** Do the work of acting on a signal received in <b>sig</b> */
+static void
+process_signal(int sig)
+{
+ switch (sig)
+ {
+ case SIGTERM:
+ log_notice(LD_GENERAL,"Catching signal TERM, exiting cleanly.");
+ tor_shutdown_event_loop_and_exit(0);
+ break;
+ case SIGINT:
+ if (!server_mode(get_options())) { /* do it now */
+ log_notice(LD_GENERAL,"Interrupt: exiting cleanly.");
+ tor_shutdown_event_loop_and_exit(0);
+ return;
+ }
+#ifdef HAVE_SYSTEMD
+ sd_notify(0, "STOPPING=1");
+#endif
+ hibernate_begin_shutdown();
+ break;
+#ifdef SIGPIPE
+ case SIGPIPE:
+ log_debug(LD_GENERAL,"Caught SIGPIPE. Ignoring.");
+ break;
+#endif
+ case SIGUSR1:
+ /* prefer to log it at INFO, but make sure we always see it */
+ dumpstats(get_min_log_level()<LOG_INFO ? get_min_log_level() : LOG_INFO);
+ control_event_signal(sig);
+ break;
+ case SIGUSR2:
+ switch_logs_debug();
+ log_debug(LD_GENERAL,"Caught USR2, going to loglevel debug. "
+ "Send HUP to change back.");
+ control_event_signal(sig);
+ break;
+ case SIGHUP:
+#ifdef HAVE_SYSTEMD
+ sd_notify(0, "RELOADING=1");
+#endif
+ if (do_hup() < 0) {
+ log_warn(LD_CONFIG,"Restart failed (config error?). Exiting.");
+ tor_shutdown_event_loop_and_exit(1);
+ return;
+ }
+#ifdef HAVE_SYSTEMD
+ sd_notify(0, "READY=1");
+#endif
+ control_event_signal(sig);
+ break;
+#ifdef SIGCHLD
+ case SIGCHLD:
+ notify_pending_waitpid_callbacks();
+ break;
+#endif
+ case SIGNEWNYM: {
+ time_t now = time(NULL);
+ if (time_of_last_signewnym + MAX_SIGNEWNYM_RATE > now) {
+ const time_t delay_sec =
+ time_of_last_signewnym + MAX_SIGNEWNYM_RATE - now;
+ if (! signewnym_is_pending) {
+ signewnym_is_pending = 1;
+ if (!handle_deferred_signewnym_ev) {
+ handle_deferred_signewnym_ev =
+ mainloop_event_postloop_new(handle_deferred_signewnym_cb, NULL);
+ }
+ const struct timeval delay_tv = { delay_sec, 0 };
+ mainloop_event_schedule(handle_deferred_signewnym_ev, &delay_tv);
+ }
+ log_notice(LD_CONTROL,
+ "Rate limiting NEWNYM request: delaying by %d second(s)",
+ (int)(delay_sec));
+ } else {
+ signewnym_impl(now);
+ }
+ break;
+ }
+ case SIGCLEARDNSCACHE:
+ addressmap_clear_transient();
+ control_event_signal(sig);
+ break;
+ case SIGHEARTBEAT:
+ log_heartbeat(time(NULL));
+ control_event_signal(sig);
+ break;
+ }
+}
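+
+/* Worked example of the NEWNYM rate limit above (assuming
+ * MAX_SIGNEWNYM_RATE is 10 seconds, as defined earlier in this file): if
+ * the last NEWNYM ran at t=100 and another request arrives at t=104, then
+ * delay_sec = 100 + 10 - 104 = 6, and the deferred event fires at t=110. */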
+
+/** Returns Tor's uptime. */
+MOCK_IMPL(long,
+get_uptime,(void))
+{
+ return stats_n_seconds_working;
+}
+
+/** Reset Tor's uptime. */
+MOCK_IMPL(void,
+reset_uptime,(void))
+{
+ stats_n_seconds_working = 0;
+}
+
+/**
+ * Write current memory usage information to the log.
+ */
+static void
+dumpmemusage(int severity)
+{
+ connection_dump_buffer_mem_stats(severity);
+ tor_log(severity, LD_GENERAL, "In rephist: %"PRIu64" used by %d Tors.",
+ (rephist_total_alloc), rephist_total_num);
+ dump_routerlist_mem_usage(severity);
+ dump_cell_pool_usage(severity);
+ dump_dns_mem_usage(severity);
+ tor_log_mallinfo(severity);
+}
+
+/** Write all statistics to the log, with log level <b>severity</b>. Called
+ * in response to a SIGUSR1. */
+static void
+dumpstats(int severity)
+{
+ time_t now = time(NULL);
+ time_t elapsed;
+ size_t rbuf_cap, wbuf_cap, rbuf_len, wbuf_len;
+
+ tor_log(severity, LD_GENERAL, "Dumping stats:");
+
+ SMARTLIST_FOREACH_BEGIN(connection_array, connection_t *, conn) {
+ int i = conn_sl_idx;
+ tor_log(severity, LD_GENERAL,
+ "Conn %d (socket %d) type %d (%s), state %d (%s), created %d secs ago",
+ i, (int)conn->s, conn->type, conn_type_to_string(conn->type),
+ conn->state, conn_state_to_string(conn->type, conn->state),
+ (int)(now - conn->timestamp_created));
+ if (!connection_is_listener(conn)) {
+ tor_log(severity,LD_GENERAL,
+ "Conn %d is to %s:%d.", i,
+ safe_str_client(conn->address),
+ conn->port);
+ tor_log(severity,LD_GENERAL,
+ "Conn %d: %d bytes waiting on inbuf (len %d, last read %d secs ago)",
+ i,
+ (int)connection_get_inbuf_len(conn),
+ (int)buf_allocation(conn->inbuf),
+ (int)(now - conn->timestamp_last_read_allowed));
+ tor_log(severity,LD_GENERAL,
+ "Conn %d: %d bytes waiting on outbuf "
+ "(len %d, last written %d secs ago)",i,
+ (int)connection_get_outbuf_len(conn),
+ (int)buf_allocation(conn->outbuf),
+ (int)(now - conn->timestamp_last_write_allowed));
+ if (conn->type == CONN_TYPE_OR) {
+ or_connection_t *or_conn = TO_OR_CONN(conn);
+ if (or_conn->tls) {
+ if (tor_tls_get_buffer_sizes(or_conn->tls, &rbuf_cap, &rbuf_len,
+ &wbuf_cap, &wbuf_len) == 0) {
+ tor_log(severity, LD_GENERAL,
+ "Conn %d: %d/%d bytes used on OpenSSL read buffer; "
+ "%d/%d bytes used on write buffer.",
+ i, (int)rbuf_len, (int)rbuf_cap, (int)wbuf_len, (int)wbuf_cap);
+ }
+ }
+ }
+ }
+ circuit_dump_by_conn(conn, severity); /* dump info about all the circuits
+ * using this conn */
+ } SMARTLIST_FOREACH_END(conn);
+
+ channel_dumpstats(severity);
+ channel_listener_dumpstats(severity);
+
+ tor_log(severity, LD_NET,
+ "Cells processed: %"PRIu64" padding\n"
+ " %"PRIu64" create\n"
+ " %"PRIu64" created\n"
+ " %"PRIu64" relay\n"
+ " (%"PRIu64" relayed)\n"
+ " (%"PRIu64" delivered)\n"
+ " %"PRIu64" destroy",
+ (stats_n_padding_cells_processed),
+ (stats_n_create_cells_processed),
+ (stats_n_created_cells_processed),
+ (stats_n_relay_cells_processed),
+ (stats_n_relay_cells_relayed),
+ (stats_n_relay_cells_delivered),
+ (stats_n_destroy_cells_processed));
+ if (stats_n_data_cells_packaged)
+ tor_log(severity,LD_NET,"Average packaged cell fullness: %2.3f%%",
+ 100*(((double)stats_n_data_bytes_packaged) /
+ ((double)stats_n_data_cells_packaged*RELAY_PAYLOAD_SIZE)) );
+ if (stats_n_data_cells_received)
+ tor_log(severity,LD_NET,"Average delivered cell fullness: %2.3f%%",
+ 100*(((double)stats_n_data_bytes_received) /
+ ((double)stats_n_data_cells_received*RELAY_PAYLOAD_SIZE)) );
+
+ cpuworker_log_onionskin_overhead(severity, ONION_HANDSHAKE_TYPE_TAP, "TAP");
+ cpuworker_log_onionskin_overhead(severity, ONION_HANDSHAKE_TYPE_NTOR,"ntor");
+
+ if (now - time_of_process_start >= 0)
+ elapsed = now - time_of_process_start;
+ else
+ elapsed = 0;
+
+ if (elapsed) {
+ tor_log(severity, LD_NET,
+ "Average bandwidth: %"PRIu64"/%d = %d bytes/sec reading",
+ (stats_n_bytes_read),
+ (int)elapsed,
+ (int) (stats_n_bytes_read/elapsed));
+ tor_log(severity, LD_NET,
+ "Average bandwidth: %"PRIu64"/%d = %d bytes/sec writing",
+ (stats_n_bytes_written),
+ (int)elapsed,
+ (int) (stats_n_bytes_written/elapsed));
+ }
+
+ tor_log(severity, LD_NET, "--------------- Dumping memory information:");
+ dumpmemusage(severity);
+
+ rep_hist_dump_stats(now,severity);
+ rend_service_dump_stats(severity);
+ dump_distinct_digest_count(severity);
+}
+
+/** Called by exit() as we shut down the process.
+ */
+static void
+exit_function(void)
+{
+ /* NOTE: If we ever daemonize, this gets called immediately. That's
+ * okay for now, because we only use this on Windows. */
+#ifdef _WIN32
+ WSACleanup();
+#endif
+}
+
+#ifdef _WIN32
+#define UNIX_ONLY 0
+#else
+#define UNIX_ONLY 1
+#endif
+
+static struct {
+ /** A numeric code for this signal. Must match the signal value if
+ * try_to_register is true. */
+ int signal_value;
+ /** True if we should try to register this signal with libevent and catch
+ * corresponding posix signals. False otherwise. */
+ int try_to_register;
+ /** Pointer to hold the event object constructed for this signal. */
+ struct event *signal_event;
+} signal_handlers[] = {
+#ifdef SIGINT
+ { SIGINT, UNIX_ONLY, NULL }, /* do a controlled slow shutdown */
+#endif
+#ifdef SIGTERM
+ { SIGTERM, UNIX_ONLY, NULL }, /* to terminate now */
+#endif
+#ifdef SIGPIPE
+ { SIGPIPE, UNIX_ONLY, NULL }, /* otherwise SIGPIPE kills us */
+#endif
+#ifdef SIGUSR1
+ { SIGUSR1, UNIX_ONLY, NULL }, /* dump stats */
+#endif
+#ifdef SIGUSR2
+ { SIGUSR2, UNIX_ONLY, NULL }, /* go to loglevel debug */
+#endif
+#ifdef SIGHUP
+ { SIGHUP, UNIX_ONLY, NULL }, /* to reload config, retry conns, etc */
+#endif
+#ifdef SIGXFSZ
+ { SIGXFSZ, UNIX_ONLY, NULL }, /* handle file-too-big resource exhaustion */
+#endif
+#ifdef SIGCHLD
+ { SIGCHLD, UNIX_ONLY, NULL }, /* handle dns/cpu workers that exit */
+#endif
+ /* These are controller-only */
+ { SIGNEWNYM, 0, NULL },
+ { SIGCLEARDNSCACHE, 0, NULL },
+ { SIGHEARTBEAT, 0, NULL },
+ { -1, -1, NULL }
+};
+
+/** Set up the signal handler events for this process, and register them
+ * with libevent if appropriate. */
+void
+handle_signals(void)
+{
+ int i;
+ const int enabled = !get_options()->DisableSignalHandlers;
+
+ for (i = 0; signal_handlers[i].signal_value >= 0; ++i) {
+ /* Signal handlers are only registered with libevent if they need to catch
+ * real POSIX signals. We construct these signal handler events in either
+ * case, though, so that controllers can activate them with the SIGNAL
+ * command.
+ */
+ if (enabled && signal_handlers[i].try_to_register) {
+ signal_handlers[i].signal_event =
+ tor_evsignal_new(tor_libevent_get_base(),
+ signal_handlers[i].signal_value,
+ signal_callback,
+ &signal_handlers[i].signal_value);
+ if (event_add(signal_handlers[i].signal_event, NULL))
+ log_warn(LD_BUG, "Error from libevent when adding "
+ "event for signal %d",
+ signal_handlers[i].signal_value);
+ } else {
+ signal_handlers[i].signal_event =
+ tor_event_new(tor_libevent_get_base(), -1,
+ EV_SIGNAL, signal_callback,
+ &signal_handlers[i].signal_value);
+ }
+ }
+}
+
+/** Cause the signal handler for <b>signal_num</b> to be called in the event
+ * loop. */
+void
+activate_signal(int signal_num)
+{
+ int i;
+ for (i = 0; signal_handlers[i].signal_value >= 0; ++i) {
+ if (signal_handlers[i].signal_value == signal_num) {
+ event_active(signal_handlers[i].signal_event, EV_SIGNAL, 1);
+ return;
+ }
+ }
+}
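+
+/* Usage sketch (an assumption about the caller, not something this patch
+ * adds): the control-port SIGNAL command can service "SIGNAL NEWNYM"
+ * without any POSIX signal by calling activate_signal(SIGNEWNYM), which
+ * makes the event loop run signal_callback() with that value. */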
+
+/** Main entry point for the Tor command-line client. Return 0 on "success",
+ * negative on "failure", and positive on "success and exit".
+ */
+int
+tor_init(int argc, char *argv[])
+{
+ char progname[256];
+ int quiet = 0;
+
+ time_of_process_start = time(NULL);
+ init_connection_lists();
+ /* Have the log set up with our application name. */
+ tor_snprintf(progname, sizeof(progname), "Tor %s", get_version());
+ log_set_application_name(progname);
+
+ /* Set up the crypto nice and early */
+ if (crypto_early_init() < 0) {
+ log_err(LD_GENERAL, "Unable to initialize the crypto subsystem!");
+ return -1;
+ }
+
+ /* Initialize the history structures. */
+ rep_hist_init();
+ /* Initialize the service cache. */
+ rend_cache_init();
+ addressmap_init(); /* Init the client dns cache. Do it always, since it's
+ * cheap. */
+ /* Initialize the HS subsystem. */
+ hs_init();
+
+ {
+ /* We search for the "quiet" option first, since it decides whether we
+ * will log anything at all to the command line. */
+ config_line_t *opts = NULL, *cmdline_opts = NULL;
+ const config_line_t *cl;
+ (void) config_parse_commandline(argc, argv, 1, &opts, &cmdline_opts);
+ for (cl = cmdline_opts; cl; cl = cl->next) {
+ if (!strcmp(cl->key, "--hush"))
+ quiet = 1;
+ if (!strcmp(cl->key, "--quiet") ||
+ !strcmp(cl->key, "--dump-config"))
+ quiet = 2;
+ /* The following options imply --hush */
+ if (!strcmp(cl->key, "--version") || !strcmp(cl->key, "--digests") ||
+ !strcmp(cl->key, "--list-torrc-options") ||
+ !strcmp(cl->key, "--library-versions") ||
+ !strcmp(cl->key, "--hash-password") ||
+ !strcmp(cl->key, "-h") || !strcmp(cl->key, "--help")) {
+ if (quiet < 1)
+ quiet = 1;
+ }
+ }
+ config_free_lines(opts);
+ config_free_lines(cmdline_opts);
+ }
+
+ /* give it somewhere to log to initially */
+ switch (quiet) {
+ case 2:
+ /* no initial logging */
+ break;
+ case 1:
+ add_temp_log(LOG_WARN);
+ break;
+ default:
+ add_temp_log(LOG_NOTICE);
+ }
+ quiet_level = quiet;
+
+ {
+ const char *version = get_version();
+
+ log_notice(LD_GENERAL, "Tor %s running on %s with Libevent %s, "
+ "OpenSSL %s, Zlib %s, Liblzma %s, and Libzstd %s.", version,
+ get_uname(),
+ tor_libevent_get_version_str(),
+ crypto_openssl_get_version_str(),
+ tor_compress_supports_method(ZLIB_METHOD) ?
+ tor_compress_version_str(ZLIB_METHOD) : "N/A",
+ tor_compress_supports_method(LZMA_METHOD) ?
+ tor_compress_version_str(LZMA_METHOD) : "N/A",
+ tor_compress_supports_method(ZSTD_METHOD) ?
+ tor_compress_version_str(ZSTD_METHOD) : "N/A");
+
+ log_notice(LD_GENERAL, "Tor can't help you if you use it wrong! "
+ "Learn how to be safe at "
+ "https://www.torproject.org/download/download#warning");
+
+ if (strstr(version, "alpha") || strstr(version, "beta"))
+ log_notice(LD_GENERAL, "This version is not a stable Tor release. "
+ "Expect more bugs than usual.");
+
+ tor_compress_log_init_warnings();
+ }
+
+#ifdef HAVE_RUST
+ rust_log_welcome_string();
+#endif /* defined(HAVE_RUST) */
+
+ if (network_init()<0) {
+ log_err(LD_BUG,"Error initializing network; exiting.");
+ return -1;
+ }
+ atexit(exit_function);
+
+ int init_rv = options_init_from_torrc(argc,argv);
+ if (init_rv < 0) {
+ log_err(LD_CONFIG,"Reading config failed--see warnings above.");
+ return -1;
+ } else if (init_rv > 0) {
+ // We succeeded, and should exit anyway -- probably the user just said
+ // "--version" or something like that.
+ return 1;
+ }
+
+ /* The options are now initialised */
+ const or_options_t *options = get_options();
+
+ /* Initialize channelpadding parameters to defaults until we get
+ * a consensus */
+ channelpadding_new_consensus_params(NULL);
+
+ /* Initialize predicted ports list after loading options */
+ predicted_ports_init();
+
+#ifndef _WIN32
+ if (geteuid()==0)
+ log_warn(LD_GENERAL,"You are running Tor as root. You don't need to, "
+ "and you probably shouldn't.");
+#endif
+
+ if (crypto_global_init(options->HardwareAccel,
+ options->AccelName,
+ options->AccelDir)) {
+ log_err(LD_BUG, "Unable to initialize OpenSSL. Exiting.");
+ return -1;
+ }
+ stream_choice_seed_weak_rng();
+ if (tor_init_libevent_rng() < 0) {
+ log_warn(LD_NET, "Problem initializing libevent RNG.");
+ }
+
+ /* Scan/clean unparseable descriptors; after reading config */
+ routerparse_init();
+
+ return 0;
+}
+
+/** A lockfile structure, used to prevent two Tors from messing with the
+ * data directory at once. If this variable is non-NULL, we're holding
+ * the lockfile. */
+static tor_lockfile_t *lockfile = NULL;
+
+/** Try to grab the lock file described in <b>options</b>, if we do not
+ * already have it. If <b>err_if_locked</b> is true, warn if somebody else is
+ * holding the lock, and exit if we can't get it after waiting. Otherwise,
+ * return -1 if we can't get the lockfile. Return 0 on success.
+ */
+int
+try_locking(const or_options_t *options, int err_if_locked)
+{
+ if (lockfile)
+ return 0;
+ else {
+ char *fname = options_get_datadir_fname(options, "lock");
+ int already_locked = 0;
+ tor_lockfile_t *lf = tor_lockfile_lock(fname, 0, &already_locked);
+ tor_free(fname);
+ if (!lf) {
+ if (err_if_locked && already_locked) {
+ int r;
+ log_warn(LD_GENERAL, "It looks like another Tor process is running "
+ "with the same data directory. Waiting 5 seconds to see "
+ "if it goes away.");
+#ifndef _WIN32
+ sleep(5);
+#else
+ Sleep(5000);
+#endif
+ r = try_locking(options, 0);
+ if (r<0) {
+ log_err(LD_GENERAL, "No, it's still there. Exiting.");
+ return -1;
+ }
+ return r;
+ }
+ return -1;
+ }
+ lockfile = lf;
+ return 0;
+ }
+}
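+
+/* Usage sketch (hypothetical caller, assuming the usual startup flow):
+ *
+ *   if (try_locking(get_options(), 1) < 0)
+ *     return -1;        // another Tor holds the DataDirectory lock
+ *   ...
+ *   release_lockfile(); // also reached via tor_free_all() at shutdown
+ */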
+
+/** Return true iff we've successfully acquired the lock file. */
+int
+have_lockfile(void)
+{
+ return lockfile != NULL;
+}
+
+/** If we have successfully acquired the lock file, release it. */
+void
+release_lockfile(void)
+{
+ if (lockfile) {
+ tor_lockfile_unlock(lockfile);
+ lockfile = NULL;
+ }
+}
+
+/** Free all memory that we might have allocated somewhere.
+ * If <b>postfork</b>, we are a worker process and we want to free
+ * only the parts of memory that we won't touch. If !<b>postfork</b>,
+ * Tor is shutting down and we should free everything.
+ *
+ * Helps us find the real leaks with sanitizers and the like. Also valgrind
+ * should then report 0 reachable in its leak report (in an ideal world --
+ * in practice libevent, SSL, libc, etc. never quite free everything). */
+void
+tor_free_all(int postfork)
+{
+ if (!postfork) {
+ evdns_shutdown(1);
+ }
+ geoip_free_all();
+ dirvote_free_all();
+ routerlist_free_all();
+ networkstatus_free_all();
+ addressmap_free_all();
+ dirserv_free_all();
+ rend_cache_free_all();
+ rend_service_authorization_free_all();
+ rep_hist_free_all();
+ dns_free_all();
+ clear_pending_onions();
+ circuit_free_all();
+ entry_guards_free_all();
+ pt_free_all();
+ channel_tls_free_all();
+ channel_free_all();
+ connection_free_all();
+ connection_edge_free_all();
+ scheduler_free_all();
+ nodelist_free_all();
+ microdesc_free_all();
+ routerparse_free_all();
+ ext_orport_free_all();
+ control_free_all();
+ sandbox_free_getaddrinfo_cache();
+ protover_free_all();
+ bridges_free_all();
+ consdiffmgr_free_all();
+ hs_free_all();
+ dos_free_all();
+ circuitmux_ewma_free_all();
+ accounting_free_all();
+
+ if (!postfork) {
+ config_free_all();
+ or_state_free_all();
+ router_free_all();
+ routerkeys_free_all();
+ policies_free_all();
+ }
+ if (!postfork) {
+ tor_tls_free_all();
+#ifndef _WIN32
+ tor_getpwnam(NULL);
+#endif
+ }
+ /* stuff in main.c */
+
+ smartlist_free(connection_array);
+ smartlist_free(closeable_connection_lst);
+ smartlist_free(active_linked_connection_lst);
+ periodic_timer_free(second_timer);
+ teardown_periodic_events();
+ tor_event_free(shutdown_did_not_work_event);
+ tor_event_free(initialize_periodic_events_event);
+ mainloop_event_free(directory_all_unreachable_cb_event);
+ mainloop_event_free(schedule_active_linked_connections_event);
+ mainloop_event_free(postloop_cleanup_ev);
+ mainloop_event_free(handle_deferred_signewnym_ev);
+
+#ifdef HAVE_SYSTEMD_209
+ periodic_timer_free(systemd_watchdog_timer);
+#endif
+
+ memset(&global_bucket, 0, sizeof(global_bucket));
+ memset(&global_relayed_bucket, 0, sizeof(global_relayed_bucket));
+ stats_n_bytes_read = stats_n_bytes_written = 0;
+ time_of_process_start = 0;
+ time_of_last_signewnym = 0;
+ signewnym_is_pending = 0;
+ newnym_epoch = 0;
+ called_loop_once = 0;
+ main_loop_should_exit = 0;
+ main_loop_exit_value = 0;
+ can_complete_circuits = 0;
+ quiet_level = 0;
+ should_init_bridge_stats = 1;
+ dns_honesty_first_time = 1;
+ heartbeat_callback_first_time = 1;
+ current_second = 0;
+ memset(&current_second_last_changed, 0,
+ sizeof(current_second_last_changed));
+
+ if (!postfork) {
+ release_lockfile();
+ }
+ tor_libevent_free_all();
+ /* Stuff in util.c and address.c*/
+ if (!postfork) {
+ escaped(NULL);
+ esc_router_info(NULL);
+ clean_up_backtrace_handler();
+ logs_free_all(); /* free log strings. do this last so logs keep working. */
+ }
+}
+
+/**
+ * Remove the specified file, and log a warning if the operation fails for
+ * any reason other than the file not existing. Ignores NULL filenames.
+ */
+void
+tor_remove_file(const char *filename)
+{
+ if (filename && tor_unlink(filename) != 0 && errno != ENOENT) {
+ log_warn(LD_FS, "Couldn't unlink %s: %s",
+ filename, strerror(errno));
+ }
+}
+
+/** Do whatever cleanup is necessary before shutting Tor down. */
+void
+tor_cleanup(void)
+{
+ const or_options_t *options = get_options();
+ if (options->command == CMD_RUN_TOR) {
+ time_t now = time(NULL);
+ /* Remove our pid file. We don't care if there was an error when we
+ * unlink, nothing we could do about it anyways. */
+ tor_remove_file(options->PidFile);
+ /* Remove control port file */
+ tor_remove_file(options->ControlPortWriteToFile);
+ /* Remove cookie authentication file */
+ {
+ char *cookie_fname = get_controller_cookie_file_name();
+ tor_remove_file(cookie_fname);
+ tor_free(cookie_fname);
+ }
+ /* Remove Extended ORPort cookie authentication file */
+ {
+ char *cookie_fname = get_ext_or_auth_cookie_file_name();
+ tor_remove_file(cookie_fname);
+ tor_free(cookie_fname);
+ }
+ if (accounting_is_enabled(options))
+ accounting_record_bandwidth_usage(now, get_or_state());
+ or_state_mark_dirty(get_or_state(), 0); /* force an immediate save. */
+ or_state_save(now);
+ if (authdir_mode(options)) {
+ sr_save_and_cleanup();
+ }
+ if (authdir_mode_tests_reachability(options))
+ rep_hist_record_mtbf_data(now, 0);
+ keypin_close_journal();
+ }
+
+ timers_shutdown();
+
+ tor_free_all(0); /* We could move tor_free_all back into the ifdef below
+ later, if it makes shutdown unacceptably slow. But for
+ now, leave it here: it's helped us catch bugs in the
+ past. */
+ crypto_global_cleanup();
+}
+
+/** Read/create keys as needed, and echo our fingerprint to stdout. */
+static int
+do_list_fingerprint(void)
+{
+ char buf[FINGERPRINT_LEN+1];
+ crypto_pk_t *k;
+ const char *nickname = get_options()->Nickname;
+ sandbox_disable_getaddrinfo_cache();
+ if (!server_mode(get_options())) {
+ log_err(LD_GENERAL,
+ "Clients don't have long-term identity keys. Exiting.");
+ return -1;
+ }
+ tor_assert(nickname);
+ if (init_keys() < 0) {
+ log_err(LD_GENERAL,"Error initializing keys; exiting.");
+ return -1;
+ }
+ if (!(k = get_server_identity_key())) {
+ log_err(LD_GENERAL,"Error: missing identity key.");
+ return -1;
+ }
+ if (crypto_pk_get_fingerprint(k, buf, 1)<0) {
+ log_err(LD_BUG, "Error computing fingerprint");
+ return -1;
+ }
+ printf("%s %s\n", nickname, buf);
+ return 0;
+}
+
+/** Entry point for password hashing: take the desired password from
+ * the command line, and print its salted hash to stdout. **/
+static void
+do_hash_password(void)
+{
+
+ char output[256];
+ char key[S2K_RFC2440_SPECIFIER_LEN+DIGEST_LEN];
+
+ crypto_rand(key, S2K_RFC2440_SPECIFIER_LEN-1);
+ key[S2K_RFC2440_SPECIFIER_LEN-1] = (uint8_t)96; /* Hash 64 K of data. */
+ secret_to_key_rfc2440(key+S2K_RFC2440_SPECIFIER_LEN, DIGEST_LEN,
+ get_options()->command_arg, strlen(get_options()->command_arg),
+ key);
+ base16_encode(output, sizeof(output), key, sizeof(key));
+ printf("16:%s\n",output);
+}
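+
+/* Layout of the value printed above (assuming S2K_RFC2440_SPECIFIER_LEN is
+ * 9 and DIGEST_LEN is 20): key[] holds an 8-byte random salt, one count
+ * byte (96 encodes 65536 bytes of input to hash), and the 20-byte S2K
+ * digest, so the line is "16:" followed by 58 hex digits. */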
+
+/** Entry point for configuration dumping: write the configuration to
+ * stdout. */
+static int
+do_dump_config(void)
+{
+ const or_options_t *options = get_options();
+ const char *arg = options->command_arg;
+ int how;
+ char *opts;
+
+ if (!strcmp(arg, "short")) {
+ how = OPTIONS_DUMP_MINIMAL;
+ } else if (!strcmp(arg, "non-builtin")) {
+ how = OPTIONS_DUMP_DEFAULTS;
+ } else if (!strcmp(arg, "full")) {
+ how = OPTIONS_DUMP_ALL;
+ } else {
+ fprintf(stderr, "No valid argument to --dump-config found!\n");
+ fprintf(stderr, "Please select 'short', 'non-builtin', or 'full'.\n");
+
+ return -1;
+ }
+
+ opts = options_dump(options, how);
+ printf("%s", opts);
+ tor_free(opts);
+
+ return 0;
+}
+
+static void
+init_addrinfo(void)
+{
+ if (! server_mode(get_options()) ||
+ (get_options()->Address && strlen(get_options()->Address) > 0)) {
+ /* We don't need to seed our own hostname, because we won't be calling
+ * resolve_my_address on it.
+ */
+ return;
+ }
+ char hname[256];
+
+  // Add our own hostname to the sandbox's getaddrinfo cache.
+ gethostname(hname, sizeof(hname));
+ sandbox_add_addrinfo(hname);
+}
+
+static sandbox_cfg_t*
+sandbox_init_filter(void)
+{
+ const or_options_t *options = get_options();
+ sandbox_cfg_t *cfg = sandbox_cfg_new();
+ int i;
+
+ sandbox_cfg_allow_openat_filename(&cfg,
+ get_cachedir_fname("cached-status"));
+
+#define OPEN(name) \
+ sandbox_cfg_allow_open_filename(&cfg, tor_strdup(name))
+
+#define OPEN_DATADIR(name) \
+ sandbox_cfg_allow_open_filename(&cfg, get_datadir_fname(name))
+
+#define OPEN_DATADIR2(name, name2) \
+ sandbox_cfg_allow_open_filename(&cfg, get_datadir_fname2((name), (name2)))
+
+#define OPEN_DATADIR_SUFFIX(name, suffix) do { \
+ OPEN_DATADIR(name); \
+ OPEN_DATADIR(name suffix); \
+ } while (0)
+
+#define OPEN_DATADIR2_SUFFIX(name, name2, suffix) do { \
+ OPEN_DATADIR2(name, name2); \
+ OPEN_DATADIR2(name, name2 suffix); \
+ } while (0)
+
+#define OPEN_KEY_DIRECTORY() \
+ sandbox_cfg_allow_open_filename(&cfg, tor_strdup(options->KeyDirectory))
+#define OPEN_CACHEDIR(name) \
+ sandbox_cfg_allow_open_filename(&cfg, get_cachedir_fname(name))
+#define OPEN_CACHEDIR_SUFFIX(name, suffix) do { \
+ OPEN_CACHEDIR(name); \
+ OPEN_CACHEDIR(name suffix); \
+ } while (0)
+#define OPEN_KEYDIR(name) \
+ sandbox_cfg_allow_open_filename(&cfg, get_keydir_fname(name))
+#define OPEN_KEYDIR_SUFFIX(name, suffix) do { \
+ OPEN_KEYDIR(name); \
+ OPEN_KEYDIR(name suffix); \
+ } while (0)
+
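+/* For example, OPEN_DATADIR_SUFFIX("state", ".tmp") below expands into two
+ * rules, permitting open() of both <DataDirectory>/state and
+ * <DataDirectory>/state.tmp. */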
+ OPEN(options->DataDirectory);
+ OPEN_KEY_DIRECTORY();
+
+ OPEN_CACHEDIR_SUFFIX("cached-certs", ".tmp");
+ OPEN_CACHEDIR_SUFFIX("cached-consensus", ".tmp");
+ OPEN_CACHEDIR_SUFFIX("unverified-consensus", ".tmp");
+ OPEN_CACHEDIR_SUFFIX("unverified-microdesc-consensus", ".tmp");
+ OPEN_CACHEDIR_SUFFIX("cached-microdesc-consensus", ".tmp");
+ OPEN_CACHEDIR_SUFFIX("cached-microdescs", ".tmp");
+ OPEN_CACHEDIR_SUFFIX("cached-microdescs.new", ".tmp");
+ OPEN_CACHEDIR_SUFFIX("cached-descriptors", ".tmp");
+ OPEN_CACHEDIR_SUFFIX("cached-descriptors.new", ".tmp");
+ OPEN_CACHEDIR("cached-descriptors.tmp.tmp");
+ OPEN_CACHEDIR_SUFFIX("cached-extrainfo", ".tmp");
+ OPEN_CACHEDIR_SUFFIX("cached-extrainfo.new", ".tmp");
+ OPEN_CACHEDIR("cached-extrainfo.tmp.tmp");
+
+ OPEN_DATADIR_SUFFIX("state", ".tmp");
+ OPEN_DATADIR_SUFFIX("sr-state", ".tmp");
+ OPEN_DATADIR_SUFFIX("unparseable-desc", ".tmp");
+ OPEN_DATADIR_SUFFIX("v3-status-votes", ".tmp");
+ OPEN_DATADIR("key-pinning-journal");
+ OPEN("/dev/srandom");
+ OPEN("/dev/urandom");
+ OPEN("/dev/random");
+ OPEN("/etc/hosts");
+ OPEN("/proc/meminfo");
+
+ if (options->BridgeAuthoritativeDir)
+ OPEN_DATADIR_SUFFIX("networkstatus-bridges", ".tmp");
+
+ if (authdir_mode(options))
+ OPEN_DATADIR("approved-routers");
+
+ if (options->ServerDNSResolvConfFile)
+ sandbox_cfg_allow_open_filename(&cfg,
+ tor_strdup(options->ServerDNSResolvConfFile));
+ else
+ sandbox_cfg_allow_open_filename(&cfg, tor_strdup("/etc/resolv.conf"));
+
+ for (i = 0; i < 2; ++i) {
+ if (get_torrc_fname(i)) {
+ sandbox_cfg_allow_open_filename(&cfg, tor_strdup(get_torrc_fname(i)));
+ }
+ }
+
+ SMARTLIST_FOREACH(options->FilesOpenedByIncludes, char *, f, {
+ OPEN(f);
+ });
+
+#define RENAME_SUFFIX(name, suffix) \
+ sandbox_cfg_allow_rename(&cfg, \
+ get_datadir_fname(name suffix), \
+ get_datadir_fname(name))
+
+#define RENAME_SUFFIX2(prefix, name, suffix) \
+ sandbox_cfg_allow_rename(&cfg, \
+ get_datadir_fname2(prefix, name suffix), \
+ get_datadir_fname2(prefix, name))
+
+#define RENAME_CACHEDIR_SUFFIX(name, suffix) \
+ sandbox_cfg_allow_rename(&cfg, \
+ get_cachedir_fname(name suffix), \
+ get_cachedir_fname(name))
+
+#define RENAME_KEYDIR_SUFFIX(name, suffix) \
+ sandbox_cfg_allow_rename(&cfg, \
+ get_keydir_fname(name suffix), \
+ get_keydir_fname(name))
+
+ RENAME_CACHEDIR_SUFFIX("cached-certs", ".tmp");
+ RENAME_CACHEDIR_SUFFIX("cached-consensus", ".tmp");
+ RENAME_CACHEDIR_SUFFIX("unverified-consensus", ".tmp");
+ RENAME_CACHEDIR_SUFFIX("unverified-microdesc-consensus", ".tmp");
+ RENAME_CACHEDIR_SUFFIX("cached-microdesc-consensus", ".tmp");
+ RENAME_CACHEDIR_SUFFIX("cached-microdescs", ".tmp");
+ RENAME_CACHEDIR_SUFFIX("cached-microdescs", ".new");
+ RENAME_CACHEDIR_SUFFIX("cached-microdescs.new", ".tmp");
+ RENAME_CACHEDIR_SUFFIX("cached-descriptors", ".tmp");
+ RENAME_CACHEDIR_SUFFIX("cached-descriptors", ".new");
+ RENAME_CACHEDIR_SUFFIX("cached-descriptors.new", ".tmp");
+ RENAME_CACHEDIR_SUFFIX("cached-extrainfo", ".tmp");
+ RENAME_CACHEDIR_SUFFIX("cached-extrainfo", ".new");
+ RENAME_CACHEDIR_SUFFIX("cached-extrainfo.new", ".tmp");
+
+ RENAME_SUFFIX("state", ".tmp");
+ RENAME_SUFFIX("sr-state", ".tmp");
+ RENAME_SUFFIX("unparseable-desc", ".tmp");
+ RENAME_SUFFIX("v3-status-votes", ".tmp");
+
+ if (options->BridgeAuthoritativeDir)
+ RENAME_SUFFIX("networkstatus-bridges", ".tmp");
+
+#define STAT_DATADIR(name) \
+ sandbox_cfg_allow_stat_filename(&cfg, get_datadir_fname(name))
+
+#define STAT_CACHEDIR(name) \
+ sandbox_cfg_allow_stat_filename(&cfg, get_cachedir_fname(name))
+
+#define STAT_DATADIR2(name, name2) \
+ sandbox_cfg_allow_stat_filename(&cfg, get_datadir_fname2((name), (name2)))
+
+#define STAT_KEY_DIRECTORY() \
+ sandbox_cfg_allow_stat_filename(&cfg, tor_strdup(options->KeyDirectory))
+
+ STAT_DATADIR(NULL);
+ STAT_DATADIR("lock");
+ STAT_DATADIR("state");
+ STAT_DATADIR("router-stability");
+
+ STAT_CACHEDIR("cached-extrainfo.new");
+
+ {
+ smartlist_t *files = smartlist_new();
+ tor_log_get_logfile_names(files);
+ SMARTLIST_FOREACH(files, char *, file_name, {
+ /* steals reference */
+ sandbox_cfg_allow_open_filename(&cfg, file_name);
+ });
+ smartlist_free(files);
+ }
+
+ {
+ smartlist_t *files = smartlist_new();
+ smartlist_t *dirs = smartlist_new();
+ hs_service_lists_fnames_for_sandbox(files, dirs);
+ SMARTLIST_FOREACH(files, char *, file_name, {
+ char *tmp_name = NULL;
+ tor_asprintf(&tmp_name, "%s.tmp", file_name);
+ sandbox_cfg_allow_rename(&cfg,
+ tor_strdup(tmp_name), tor_strdup(file_name));
+ /* steals references */
+ sandbox_cfg_allow_open_filename(&cfg, file_name);
+ sandbox_cfg_allow_open_filename(&cfg, tmp_name);
+ });
+ SMARTLIST_FOREACH(dirs, char *, dir, {
+ /* steals reference */
+ sandbox_cfg_allow_stat_filename(&cfg, dir);
+ });
+ smartlist_free(files);
+ smartlist_free(dirs);
+ }
+
+ {
+ char *fname;
+ if ((fname = get_controller_cookie_file_name())) {
+ sandbox_cfg_allow_open_filename(&cfg, fname);
+ }
+ if ((fname = get_ext_or_auth_cookie_file_name())) {
+ sandbox_cfg_allow_open_filename(&cfg, fname);
+ }
+ }
+
+ SMARTLIST_FOREACH_BEGIN(get_configured_ports(), port_cfg_t *, port) {
+ if (!port->is_unix_addr)
+ continue;
+ /* When we open an AF_UNIX address, we want permission to open the
+ * directory that holds it. */
+ char *dirname = tor_strdup(port->unix_addr);
+ if (get_parent_directory(dirname) == 0) {
+ OPEN(dirname);
+ }
+ tor_free(dirname);
+ sandbox_cfg_allow_chmod_filename(&cfg, tor_strdup(port->unix_addr));
+ sandbox_cfg_allow_chown_filename(&cfg, tor_strdup(port->unix_addr));
+ } SMARTLIST_FOREACH_END(port);
+
+ if (options->DirPortFrontPage) {
+ sandbox_cfg_allow_open_filename(&cfg,
+ tor_strdup(options->DirPortFrontPage));
+ }
+
+  // Files needed only when we're running as a relay (ORPort set).
+ if (server_mode(get_options())) {
+
+ OPEN_KEYDIR_SUFFIX("secret_id_key", ".tmp");
+ OPEN_KEYDIR_SUFFIX("secret_onion_key", ".tmp");
+ OPEN_KEYDIR_SUFFIX("secret_onion_key_ntor", ".tmp");
+ OPEN_KEYDIR("secret_id_key.old");
+ OPEN_KEYDIR("secret_onion_key.old");
+ OPEN_KEYDIR("secret_onion_key_ntor.old");
+
+ OPEN_KEYDIR_SUFFIX("ed25519_master_id_secret_key", ".tmp");
+ OPEN_KEYDIR_SUFFIX("ed25519_master_id_secret_key_encrypted", ".tmp");
+ OPEN_KEYDIR_SUFFIX("ed25519_master_id_public_key", ".tmp");
+ OPEN_KEYDIR_SUFFIX("ed25519_signing_secret_key", ".tmp");
+ OPEN_KEYDIR_SUFFIX("ed25519_signing_secret_key_encrypted", ".tmp");
+ OPEN_KEYDIR_SUFFIX("ed25519_signing_public_key", ".tmp");
+ OPEN_KEYDIR_SUFFIX("ed25519_signing_cert", ".tmp");
+
+ OPEN_DATADIR2_SUFFIX("stats", "bridge-stats", ".tmp");
+ OPEN_DATADIR2_SUFFIX("stats", "dirreq-stats", ".tmp");
+
+ OPEN_DATADIR2_SUFFIX("stats", "entry-stats", ".tmp");
+ OPEN_DATADIR2_SUFFIX("stats", "exit-stats", ".tmp");
+ OPEN_DATADIR2_SUFFIX("stats", "buffer-stats", ".tmp");
+ OPEN_DATADIR2_SUFFIX("stats", "conn-stats", ".tmp");
+ OPEN_DATADIR2_SUFFIX("stats", "hidserv-stats", ".tmp");
+
+ OPEN_DATADIR("approved-routers");
+ OPEN_DATADIR_SUFFIX("fingerprint", ".tmp");
+ OPEN_DATADIR_SUFFIX("hashed-fingerprint", ".tmp");
+ OPEN_DATADIR_SUFFIX("router-stability", ".tmp");
+
+ OPEN("/etc/resolv.conf");
+
+ RENAME_SUFFIX("fingerprint", ".tmp");
+ RENAME_KEYDIR_SUFFIX("secret_onion_key_ntor", ".tmp");
+
+ RENAME_KEYDIR_SUFFIX("secret_id_key", ".tmp");
+ RENAME_KEYDIR_SUFFIX("secret_id_key.old", ".tmp");
+ RENAME_KEYDIR_SUFFIX("secret_onion_key", ".tmp");
+ RENAME_KEYDIR_SUFFIX("secret_onion_key.old", ".tmp");
+
+ RENAME_SUFFIX2("stats", "bridge-stats", ".tmp");
+ RENAME_SUFFIX2("stats", "dirreq-stats", ".tmp");
+ RENAME_SUFFIX2("stats", "entry-stats", ".tmp");
+ RENAME_SUFFIX2("stats", "exit-stats", ".tmp");
+ RENAME_SUFFIX2("stats", "buffer-stats", ".tmp");
+ RENAME_SUFFIX2("stats", "conn-stats", ".tmp");
+ RENAME_SUFFIX2("stats", "hidserv-stats", ".tmp");
+ RENAME_SUFFIX("hashed-fingerprint", ".tmp");
+ RENAME_SUFFIX("router-stability", ".tmp");
+
+ RENAME_KEYDIR_SUFFIX("ed25519_master_id_secret_key", ".tmp");
+ RENAME_KEYDIR_SUFFIX("ed25519_master_id_secret_key_encrypted", ".tmp");
+ RENAME_KEYDIR_SUFFIX("ed25519_master_id_public_key", ".tmp");
+ RENAME_KEYDIR_SUFFIX("ed25519_signing_secret_key", ".tmp");
+ RENAME_KEYDIR_SUFFIX("ed25519_signing_cert", ".tmp");
+
+ sandbox_cfg_allow_rename(&cfg,
+ get_keydir_fname("secret_onion_key"),
+ get_keydir_fname("secret_onion_key.old"));
+ sandbox_cfg_allow_rename(&cfg,
+ get_keydir_fname("secret_onion_key_ntor"),
+ get_keydir_fname("secret_onion_key_ntor.old"));
+
+ STAT_KEY_DIRECTORY();
+ OPEN_DATADIR("stats");
+ STAT_DATADIR("stats");
+ STAT_DATADIR2("stats", "dirreq-stats");
+
+ consdiffmgr_register_with_sandbox(&cfg);
+ }
+
+ init_addrinfo();
+
+ return cfg;
+}
+
+/** Main entry point for the Tor process. Called from tor_main(), and by
+ * anybody embedding Tor. */
+int
+tor_run_main(const tor_main_configuration_t *tor_cfg)
+{
+ int result = 0;
+
+ int argc = tor_cfg->argc;
+ char **argv = tor_cfg->argv;
+
+#ifdef _WIN32
+#ifndef HeapEnableTerminationOnCorruption
+#define HeapEnableTerminationOnCorruption 1
+#endif
+ /* On heap corruption, just give up; don't try to play along. */
+ HeapSetInformation(NULL, HeapEnableTerminationOnCorruption, NULL, 0);
+ /* Call SetProcessDEPPolicy to permanently enable DEP.
+ The function will not resolve on earlier versions of Windows,
+ and failure is not dangerous. */
+ HMODULE hMod = GetModuleHandleA("Kernel32.dll");
+ if (hMod) {
+ typedef BOOL (WINAPI *PSETDEP)(DWORD);
+ PSETDEP setdeppolicy = (PSETDEP)GetProcAddress(hMod,
+ "SetProcessDEPPolicy");
+ if (setdeppolicy) {
+ /* PROCESS_DEP_ENABLE | PROCESS_DEP_DISABLE_ATL_THUNK_EMULATION */
+ setdeppolicy(3);
+ }
+ }
+#endif /* defined(_WIN32) */
+
+ {
+ int bt_err = configure_backtrace_handler(get_version());
+ if (bt_err < 0) {
+ log_warn(LD_BUG, "Unable to install backtrace handler: %s",
+ strerror(-bt_err));
+ }
+ }
+ init_protocol_warning_severity_level();
+
+ update_approx_time(time(NULL));
+ tor_threads_init();
+ tor_compress_init();
+ init_logging(0);
+ monotime_init();
+#ifdef NT_SERVICE
+ {
+ int done = 0;
+ result = nt_service_parse_options(argc, argv, &done);
+ if (done) return result;
+ }
+#endif /* defined(NT_SERVICE) */
+ {
+ int init_rv = tor_init(argc, argv);
+ if (init_rv < 0)
+ return -1;
+ else if (init_rv > 0)
+ return 0;
+ }
+
+ if (get_options()->Sandbox && get_options()->command == CMD_RUN_TOR) {
+ sandbox_cfg_t* cfg = sandbox_init_filter();
+
+ if (sandbox_init(cfg)) {
+ log_err(LD_BUG,"Failed to create syscall sandbox filter");
+ return -1;
+ }
+
+    // Point libevent's secure RNG at the sandbox-interned /dev/urandom.
+#ifdef HAVE_EVUTIL_SECURE_RNG_SET_URANDOM_DEVICE_FILE
+ evutil_secure_rng_set_urandom_device_file(
+ (char*) sandbox_intern_string("/dev/urandom"));
+#endif
+ }
+
+ switch (get_options()->command) {
+ case CMD_RUN_TOR:
+#ifdef NT_SERVICE
+ nt_service_set_state(SERVICE_RUNNING);
+#endif
+ result = do_main_loop();
+ break;
+ case CMD_KEYGEN:
+ result = load_ed_keys(get_options(), time(NULL)) < 0;
+ break;
+ case CMD_KEY_EXPIRATION:
+ init_keys();
+ result = log_cert_expiration();
+ break;
+ case CMD_LIST_FINGERPRINT:
+ result = do_list_fingerprint();
+ break;
+ case CMD_HASH_PASSWORD:
+ do_hash_password();
+ result = 0;
+ break;
+ case CMD_VERIFY_CONFIG:
+ if (quiet_level == 0)
+ printf("Configuration was valid\n");
+ result = 0;
+ break;
+ case CMD_DUMP_CONFIG:
+ result = do_dump_config();
+ break;
+ case CMD_RUN_UNITTESTS: /* only set by test.c */
+ default:
+ log_warn(LD_BUG,"Illegal command number %d: internal error.",
+ get_options()->command);
+ result = -1;
+ }
+ tor_cleanup();
+ return result;
+}
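+
+/* Embedding sketch (assuming the tor_main_configuration_* helpers from
+ * tor_api.h; treat the exact calls as assumptions, not a spec): an
+ * embedder builds a configuration, hands it argv-style arguments, and
+ * invokes tor_run_main(). Excluded from compilation on purpose. */
+#if 0
+static int
+run_embedded_tor(void)
+{
+  tor_main_configuration_t *cfg = tor_main_configuration_new();
+  char *fake_argv[] = { (char *)"tor", (char *)"--quiet", NULL };
+  tor_main_configuration_set_command_line(cfg, 2, fake_argv);
+  int rv = tor_run_main(cfg);
+  tor_main_configuration_free(cfg);
+  return rv;
+}
+#endif /* 0 */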
diff --git a/src/core/mainloop/main.h b/src/core/mainloop/main.h
new file mode 100644
index 0000000000..90146f4bd4
--- /dev/null
+++ b/src/core/mainloop/main.h
@@ -0,0 +1,122 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file main.h
+ * \brief Header file for main.c.
+ **/
+
+#ifndef TOR_MAIN_H
+#define TOR_MAIN_H
+
+int have_completed_a_circuit(void);
+void note_that_we_completed_a_circuit(void);
+void note_that_we_maybe_cant_complete_circuits(void);
+
+int connection_add_impl(connection_t *conn, int is_connecting);
+#define connection_add(conn) connection_add_impl((conn), 0)
+#define connection_add_connecting(conn) connection_add_impl((conn), 1)
+int connection_remove(connection_t *conn);
+void connection_unregister_events(connection_t *conn);
+int connection_in_array(connection_t *conn);
+void add_connection_to_closeable_list(connection_t *conn);
+int connection_is_on_closeable_list(connection_t *conn);
+
+MOCK_DECL(smartlist_t *, get_connection_array, (void));
+MOCK_DECL(uint64_t,get_bytes_read,(void));
+MOCK_DECL(uint64_t,get_bytes_written,(void));
+void stats_increment_bytes_read_and_written(uint64_t r, uint64_t w);
+
+/** Bitmask for events that we can turn on and off with
+ * connection_watch_events. */
+typedef enum watchable_events {
+ /* Yes, it is intentional that these match Libevent's EV_READ and EV_WRITE */
+ READ_EVENT=0x02, /**< We want to know when a connection is readable */
+ WRITE_EVENT=0x04 /**< We want to know when a connection is writable */
+} watchable_events_t;
+void connection_watch_events(connection_t *conn, watchable_events_t events);
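+/* Example for connection_watch_events() above: the flag values are bits
+ * and can be combined, so asking to be notified when <b>conn</b> is either
+ * readable or writable looks like:
+ *
+ *   connection_watch_events(conn, READ_EVENT | WRITE_EVENT);
+ */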
+int connection_is_reading(connection_t *conn);
+MOCK_DECL(void,connection_stop_reading,(connection_t *conn));
+MOCK_DECL(void,connection_start_reading,(connection_t *conn));
+
+int connection_is_writing(connection_t *conn);
+MOCK_DECL(void,connection_stop_writing,(connection_t *conn));
+MOCK_DECL(void,connection_start_writing,(connection_t *conn));
+
+void tor_shutdown_event_loop_and_exit(int exitcode);
+int tor_event_loop_shutdown_is_pending(void);
+
+void connection_stop_reading_from_linked_conn(connection_t *conn);
+
+MOCK_DECL(int, connection_count_moribund, (void));
+
+void directory_all_unreachable(time_t now);
+void directory_info_has_arrived(time_t now, int from_cache, int suppress_logs);
+
+void ip_address_changed(int at_interface);
+void dns_servers_relaunch_checks(void);
+void reset_all_main_loop_timers(void);
+void reschedule_descriptor_update_check(void);
+void reschedule_directory_downloads(void);
+void reschedule_or_state_save(void);
+void reschedule_dirvote(const or_options_t *options);
+void mainloop_schedule_postloop_cleanup(void);
+void rescan_periodic_events(const or_options_t *options);
+
+void update_current_time(time_t now);
+
+MOCK_DECL(long,get_uptime,(void));
+MOCK_DECL(void,reset_uptime,(void));
+
+unsigned get_signewnym_epoch(void);
+
+void handle_signals(void);
+void activate_signal(int signal_num);
+
+int try_locking(const or_options_t *options, int err_if_locked);
+int have_lockfile(void);
+void release_lockfile(void);
+
+void tor_remove_file(const char *filename);
+
+void tor_cleanup(void);
+void tor_free_all(int postfork);
+
+int do_main_loop(void);
+int tor_init(int argc, char **argv);
+
+void reset_main_loop_counters(void);
+uint64_t get_main_loop_success_count(void);
+uint64_t get_main_loop_error_count(void);
+uint64_t get_main_loop_idle_count(void);
+
+void periodic_events_on_new_options(const or_options_t *options);
+void reschedule_per_second_timer(void);
+
+struct token_bucket_rw_t;
+
+extern time_t time_of_process_start;
+extern int quiet_level;
+extern struct token_bucket_rw_t global_bucket;
+extern struct token_bucket_rw_t global_relayed_bucket;
+
+#ifdef MAIN_PRIVATE
+STATIC void init_connection_lists(void);
+STATIC void initialize_mainloop_events(void);
+STATIC void close_closeable_connections(void);
+STATIC void initialize_periodic_events(void);
+STATIC void teardown_periodic_events(void);
+STATIC int get_my_roles(const or_options_t *options);
+#ifdef TOR_UNIT_TESTS
+extern smartlist_t *connection_array;
+
+/* We need the periodic_event_item_t definition. */
+#include "or/periodic.h"
+extern periodic_event_item_t periodic_events[];
+#endif
+#endif /* defined(MAIN_PRIVATE) */
+
+#endif /* !defined(TOR_MAIN_H) */
diff --git a/src/core/mainloop/periodic.c b/src/core/mainloop/periodic.c
new file mode 100644
index 0000000000..041b2d287b
--- /dev/null
+++ b/src/core/mainloop/periodic.c
@@ -0,0 +1,172 @@
+/* Copyright (c) 2015-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file periodic.c
+ *
+ * \brief Generic backend for handling periodic events.
+ *
+ * The events in this module are used by main.c to track items that need
+ * to fire once every N seconds, possibly picking a new interval each time
+ * that they fire. See periodic_events[] in main.c for examples.
+ */
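+
+/* A minimal lifecycle sketch using the API implemented below ("item" stands
+ * in for an entry of a table such as periodic_events[] in main.c):
+ *
+ *   periodic_event_setup(&item);    // allocate the mainloop event
+ *   periodic_event_launch(&item);   // enable it and dispatch it once
+ *   ...
+ *   periodic_event_destroy(&item);  // release the mainloop event
+ */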
+
+#include "or/or.h"
+#include "lib/evloop/compat_libevent.h"
+#include "or/config.h"
+#include "or/main.h"
+#include "or/periodic.h"
+#include "lib/evloop/compat_libevent.h"
+
+/** We disable any interval greater than this number of seconds, on the
+ * grounds that it is probably an absolute time mistakenly passed in as a
+ * relative time.
+ */
+static const int MAX_INTERVAL = 10 * 365 * 86400;
+
+/** Set the event <b>event</b> to run in <b>next_interval</b> seconds from
+ * now. */
+static void
+periodic_event_set_interval(periodic_event_item_t *event,
+ time_t next_interval)
+{
+ tor_assert(next_interval < MAX_INTERVAL);
+ struct timeval tv;
+ tv.tv_sec = next_interval;
+ tv.tv_usec = 0;
+ mainloop_event_schedule(event->ev, &tv);
+}
+
+/** Dispatch wrapper for periodic events; <b>data</b> is a pointer to the
+ * periodic_event_item_t whose callback needs to be run. */
+static void
+periodic_event_dispatch(mainloop_event_t *ev, void *data)
+{
+ periodic_event_item_t *event = data;
+ tor_assert(ev == event->ev);
+
+ if (BUG(!periodic_event_is_enabled(event))) {
+ return;
+ }
+
+ time_t now = time(NULL);
+ update_current_time(now);
+ const or_options_t *options = get_options();
+// log_debug(LD_GENERAL, "Dispatching %s", event->name);
+ int r = event->fn(now, options);
+ int next_interval = 0;
+
+ if (!periodic_event_is_enabled(event)) {
+ /* The event got disabled from inside its callback; no need to
+ * reschedule. */
+ return;
+ }
+
+ /* update the last run time if action was taken */
+ if (r==0) {
+ log_err(LD_BUG, "Invalid return value for periodic event from %s.",
+ event->name);
+ tor_assert(r != 0);
+ } else if (r > 0) {
+ event->last_action_time = now;
+ /* If the event is meant to happen after ten years, that's likely
+ * a bug, and somebody gave an absolute time rather than an interval.
+ */
+ tor_assert(r < MAX_INTERVAL);
+ next_interval = r;
+ } else {
+ /* No action was taken: most likely a precondition failed. Reschedule
+ * for the next second, in case the precondition passes then. */
+ next_interval = 1;
+ }
+
+// log_debug(LD_GENERAL, "Scheduling %s for %d seconds", event->name,
+// next_interval);
+ struct timeval tv = { next_interval, 0 };
+ mainloop_event_schedule(ev, &tv);
+}
+
+/** Schedules <b>event</b> to run as soon as possible from now. */
+void
+periodic_event_reschedule(periodic_event_item_t *event)
+{
+ /* Don't reschedule a disabled event. */
+ if (periodic_event_is_enabled(event)) {
+ periodic_event_set_interval(event, 1);
+ }
+}
+
+/** Initializes the libevent backend for a periodic event. */
+void
+periodic_event_setup(periodic_event_item_t *event)
+{
+ if (event->ev) { /* Already set up? This is a bug */
+ log_err(LD_BUG, "Initial dispatch should only be done once.");
+ tor_assert(0);
+ }
+
+ event->ev = mainloop_event_new(periodic_event_dispatch,
+ event);
+ tor_assert(event->ev);
+}
+
+/** Handles initial dispatch for periodic events. It should happen 1 second
+ * after the events are created, to mimic behaviour before #3199's refactor. */
+void
+periodic_event_launch(periodic_event_item_t *event)
+{
+ if (! event->ev) { /* Not set up? This is a bug */
+ log_err(LD_BUG, "periodic_event_launch without periodic_event_setup");
+ tor_assert(0);
+ }
+ /* Event already enabled? This is a bug */
+ if (periodic_event_is_enabled(event)) {
+ log_err(LD_BUG, "periodic_event_launch on an already enabled event");
+ tor_assert(0);
+ }
+
+ // Initial dispatch
+ event->enabled = 1;
+ periodic_event_dispatch(event->ev, event);
+}
+
+/** Release all storage associated with <b>event</b> */
+void
+periodic_event_destroy(periodic_event_item_t *event)
+{
+ if (!event)
+ return;
+ mainloop_event_free(event->ev);
+ event->last_action_time = 0;
+}
+
+/** Enable the given event: the event is launched and its enabled flag is
+ * set. This can safely be called on an event that is already enabled. */
+void
+periodic_event_enable(periodic_event_item_t *event)
+{
+ tor_assert(event);
+ /* Safely and silently ignore if this event is already enabled. */
+ if (periodic_event_is_enabled(event)) {
+ return;
+ }
+
+ periodic_event_launch(event);
+}
+
+/** Disable the given event: its mainloop event is cancelled and its enabled
+ * flag is unset. This can safely be called on an event that is already
+ * disabled. */
+void
+periodic_event_disable(periodic_event_item_t *event)
+{
+ tor_assert(event);
+ /* Safely and silently ignore if this event is already disabled. */
+ if (!periodic_event_is_enabled(event)) {
+ return;
+ }
+ mainloop_event_cancel(event->ev);
+ event->enabled = 0;
+}
diff --git a/src/core/mainloop/periodic.h b/src/core/mainloop/periodic.h
new file mode 100644
index 0000000000..4c8c3c96cc
--- /dev/null
+++ b/src/core/mainloop/periodic.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2015-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef TOR_PERIODIC_H
+#define TOR_PERIODIC_H
+
+#define PERIODIC_EVENT_NO_UPDATE (-1)
+
+/* The Tor roles that a periodic event item applies to. An event can apply to
+ * multiple roles; the role bits can be combined. */
+#define PERIODIC_EVENT_ROLE_CLIENT (1U << 0)
+#define PERIODIC_EVENT_ROLE_RELAY (1U << 1)
+#define PERIODIC_EVENT_ROLE_BRIDGE (1U << 2)
+#define PERIODIC_EVENT_ROLE_DIRAUTH (1U << 3)
+#define PERIODIC_EVENT_ROLE_BRIDGEAUTH (1U << 4)
+#define PERIODIC_EVENT_ROLE_HS_SERVICE (1U << 5)
+#define PERIODIC_EVENT_ROLE_DIRSERVER (1U << 6)
+
+/* Helper macros to make it a bit less annoying to define groups of roles
+ * that are often used together. */
+
+/* Router that is a Bridge or Relay. */
+#define PERIODIC_EVENT_ROLE_ROUTER \
+ (PERIODIC_EVENT_ROLE_BRIDGE | PERIODIC_EVENT_ROLE_RELAY)
+/* Authorities: both the bridge and directory authorities. */
+#define PERIODIC_EVENT_ROLE_AUTHORITIES \
+ (PERIODIC_EVENT_ROLE_BRIDGEAUTH | PERIODIC_EVENT_ROLE_DIRAUTH)
+/* All roles. */
+#define PERIODIC_EVENT_ROLE_ALL \
+ (PERIODIC_EVENT_ROLE_AUTHORITIES | PERIODIC_EVENT_ROLE_CLIENT | \
+ PERIODIC_EVENT_ROLE_HS_SERVICE | PERIODIC_EVENT_ROLE_ROUTER)
+
+/*
+ * Event flags which can change the behavior of an event.
+ */
+
+/* Indicates that the event needs the network: if we are in DisableNetwork
+ * or hibernation mode, the event won't be enabled. This obeys the
+ * net_is_disabled() check. */
+#define PERIODIC_EVENT_FLAG_NEED_NET (1U << 0)
+
+/** Callback function for a periodic event to take action. The return value
+ * influences when the function next gets called. Return
+ * PERIODIC_EVENT_NO_UPDATE to leave <b>last_action_time</b> unchanged and be
+ * polled again in the next second. A positive return value is used as the
+ * new interval, in seconds. */
+typedef int (*periodic_event_helper_t)(time_t now,
+ const or_options_t *options);
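+
+/* A minimal example callback for the contract above (the names
+ * "example_callback", "example_work" and "SomeOption" are hypothetical):
+ *
+ *   static int
+ *   example_callback(time_t now, const or_options_t *options)
+ *   {
+ *     (void) now;
+ *     if (!options->SomeOption)          // precondition failed:
+ *       return PERIODIC_EVENT_NO_UPDATE; // poll again next second
+ *     example_work();
+ *     return 60;                         // run again in 60 seconds
+ *   }
+ */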
+
+struct mainloop_event_t;
+
+/** A single item for the periodic-events-function table. */
+typedef struct periodic_event_item_t {
+ periodic_event_helper_t fn; /**< The function to run the event */
+ time_t last_action_time; /**< The last time the function did something */
+ struct mainloop_event_t *ev; /**< Libevent callback we're using to implement
+ * this */
+ const char *name; /**< Name of the function -- for debug */
+
+ /* Bitmask of the roles, defined above, to which this event applies. */
+ uint32_t roles;
+ /* Bitmask of flags which can change the behavior of the event. */
+ uint32_t flags;
+ /* Indicates that this event has been enabled, that is, scheduled. */
+ unsigned int enabled : 1;
+} periodic_event_item_t;
+
+/** Events get their interval from their first execution. */
+#define PERIODIC_EVENT(fn, r, f) { fn##_callback, 0, NULL, #fn, r, f, 0 }
+#define END_OF_PERIODIC_EVENTS { NULL, 0, NULL, NULL, 0, 0, 0 }
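+
+/* Example (the event name "rotate_keys" is hypothetical): the table entry
+ *
+ *   PERIODIC_EVENT(rotate_keys, PERIODIC_EVENT_ROLE_RELAY, 0)
+ *
+ * expands to
+ *
+ *   { rotate_keys_callback, 0, NULL, "rotate_keys",
+ *     PERIODIC_EVENT_ROLE_RELAY, 0, 0 }
+ *
+ * naming the callback, zeroing last_action_time, leaving the mainloop event
+ * unallocated, and recording the name for debugging. */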
+
+/* Return true iff the given event has been set up, and is thus enabled to
+ * be scheduled. */
+static inline int
+periodic_event_is_enabled(const periodic_event_item_t *item)
+{
+ return item->enabled;
+}
+
+void periodic_event_launch(periodic_event_item_t *event);
+void periodic_event_setup(periodic_event_item_t *event);
+void periodic_event_destroy(periodic_event_item_t *event);
+void periodic_event_reschedule(periodic_event_item_t *event);
+void periodic_event_enable(periodic_event_item_t *event);
+void periodic_event_disable(periodic_event_item_t *event);
+
+#endif /* !defined(TOR_PERIODIC_H) */
+
diff --git a/src/core/or/addr_policy_st.h b/src/core/or/addr_policy_st.h
new file mode 100644
index 0000000000..be3e9d8f01
--- /dev/null
+++ b/src/core/or/addr_policy_st.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef TOR_ADDR_POLICY_ST_H
+#define TOR_ADDR_POLICY_ST_H
+
+#include "lib/cc/torint.h"
+#include "lib/net/address.h"
+
+/** What action type does an address policy indicate: accept or reject? */
+typedef enum {
+ ADDR_POLICY_ACCEPT=1,
+ ADDR_POLICY_REJECT=2,
+} addr_policy_action_t;
+#define addr_policy_action_bitfield_t ENUM_BF(addr_policy_action_t)
+
+/** A reference-counted address policy rule. */
+typedef struct addr_policy_t {
+ int refcnt; /**< Reference count */
+ /** What to do when the policy matches.*/
+ addr_policy_action_bitfield_t policy_type:2;
+ unsigned int is_private:1; /**< True iff this is the pseudo-address,
+ * "private". */
+ unsigned int is_canonical:1; /**< True iff this policy is the canonical
+ * copy (stored in a hash table to avoid
+ * duplication of common policies) */
+ maskbits_t maskbits; /**< Accept/reject all addresses <b>a</b> such that the
+ * first <b>maskbits</b> bits of <b>a</b> match
+ * <b>addr</b>. */
+ /** Base address to accept or reject.
+ *
+ * Note that wildcards are treated
+ * differently depending on address family. An AF_UNSPEC address means
+ * "All addresses, IPv4 or IPv6." An AF_INET address with maskbits==0 means
+ * "All IPv4 addresses" and an AF_INET6 address with maskbits == 0 means
+ * "All IPv6 addresses".
+ **/
+ tor_addr_t addr;
+ uint16_t prt_min; /**< Lowest port number to accept/reject. */
+ uint16_t prt_max; /**< Highest port number to accept/reject. */
+} addr_policy_t;
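+
+/* Example: the policy "accept 192.168.0.0/16:80-443" would be represented
+ * with policy_type == ADDR_POLICY_ACCEPT, addr holding 192.168.0.0,
+ * maskbits == 16, prt_min == 80, and prt_max == 443. */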
+
+#endif
diff --git a/src/core/or/address_set.c b/src/core/or/address_set.c
new file mode 100644
index 0000000000..927a5597c0
--- /dev/null
+++ b/src/core/or/address_set.c
@@ -0,0 +1,71 @@
+/* Copyright (c) 2018-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file address_set.c
+ * \brief Implementation for a set of addresses.
+ *
+ * This module was first written on a semi-emergency basis to improve the
+ * robustness of the anti-DoS module. As such, it's written in a pretty
+ * conservative way, and should be susceptible to improvement later on.
+ **/
+
+#include "orconfig.h"
+#include "or/address_set.h"
+#include "lib/net/address.h"
+#include "lib/container/bloomfilt.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "siphash.h"
+
+/* Wrap our hash function to have the signature that the bloom filter
+ * needs. */
+static uint64_t
+bloomfilt_addr_hash(const struct sipkey *key,
+ const void *item)
+{
+ return tor_addr_keyed_hash(key, item);
+}
+
+/**
+ * Allocate and return an address_set, suitable for holding up to
+ * <b>max_addresses_guess</b> distinct values.
+ */
+address_set_t *
+address_set_new(int max_addresses_guess)
+{
+ uint8_t k[BLOOMFILT_KEY_LEN];
+ crypto_rand((void*)k, sizeof(k));
+ return bloomfilt_new(max_addresses_guess, bloomfilt_addr_hash, k);
+}
+
+/**
+ * Add <b>addr</b> to <b>set</b>.
+ *
+ * All future queries for <b>addr</b> in set will return true. Removing
+ * items is not possible.
+ */
+void
+address_set_add(address_set_t *set, const struct tor_addr_t *addr)
+{
+ bloomfilt_add(set, addr);
+}
+
+/** As address_set_add(), but takes an IPv4 address in host order. */
+void
+address_set_add_ipv4h(address_set_t *set, uint32_t addr)
+{
+ tor_addr_t a;
+ tor_addr_from_ipv4h(&a, addr);
+ address_set_add(set, &a);
+}
+
+/**
+ * Return true if <b>addr</b> is a member of <b>set</b>. If <b>addr</b> is
+ * not a member, probably return false: false positives are possible.
+ */
+int
+address_set_probably_contains(const address_set_t *set,
+ const struct tor_addr_t *addr)
+{
+ return bloomfilt_probably_contains(set, addr);
+}
diff --git a/src/core/or/address_set.h b/src/core/or/address_set.h
new file mode 100644
index 0000000000..2efa1cb03b
--- /dev/null
+++ b/src/core/or/address_set.h
@@ -0,0 +1,31 @@
+/* Copyright (c) 2018-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file address_set.h
+ * \brief Types to handle sets of addresses.
+ **/
+
+#ifndef TOR_ADDRESS_SET_H
+#define TOR_ADDRESS_SET_H
+
+#include "orconfig.h"
+#include "lib/cc/torint.h"
+#include "lib/container/bloomfilt.h"
+
+/**
+ * An address_set_t represents a set of tor_addr_t values. The implementation
+ * is probabilistic: false negatives cannot occur but false positives are
+ * possible.
+ */
+typedef struct bloomfilt_t address_set_t;
+struct tor_addr_t;
+
+address_set_t *address_set_new(int max_addresses_guess);
+#define address_set_free(set) bloomfilt_free(set)
+void address_set_add(address_set_t *set, const struct tor_addr_t *addr);
+void address_set_add_ipv4h(address_set_t *set, uint32_t addr);
+int address_set_probably_contains(const address_set_t *set,
+ const struct tor_addr_t *addr);
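+
+/* A minimal usage sketch: members can only be added, never removed, and
+ * queries may rarely return false positives ("addr" is a tor_addr_t *):
+ *
+ *   address_set_t *set = address_set_new(1024);
+ *   address_set_add(set, addr);
+ *   if (address_set_probably_contains(set, addr))
+ *     ...;
+ *   address_set_free(set);
+ */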
+
+#endif
diff --git a/src/core/or/cell_queue_st.h b/src/core/or/cell_queue_st.h
new file mode 100644
index 0000000000..40110019bc
--- /dev/null
+++ b/src/core/or/cell_queue_st.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef PACKED_CELL_ST_H
+#define PACKED_CELL_ST_H
+
+#include "tor_queue.h"
+
+/** A cell as packed for writing to the network. */
+struct packed_cell_t {
+ /** Next cell queued on this circuit. */
+ TOR_SIMPLEQ_ENTRY(packed_cell_t) next;
+ char body[CELL_MAX_NETWORK_SIZE]; /**< Cell as packed for network. */
+ uint32_t inserted_timestamp; /**< Time (in timestamp units) when this cell
+ * was inserted */
+};
+
+/** A queue of cells on a circuit, waiting to be added to the
+ * or_connection_t's outbuf. */
+struct cell_queue_t {
+ /** Linked list of packed_cell_t. */
+ TOR_SIMPLEQ_HEAD(cell_simpleq, packed_cell_t) head;
+ int n; /**< The number of cells in the queue. */
+};
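+
+/* A minimal enqueue sketch, assuming the TOR_SIMPLEQ_* macros from
+ * tor_queue.h behave like the BSD <sys/queue.h> SIMPLEQ macros:
+ *
+ *   TOR_SIMPLEQ_INSERT_TAIL(&queue->head, cell, next);
+ *   ++queue->n;
+ */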
+
+#endif
diff --git a/src/core/or/cell_st.h b/src/core/or/cell_st.h
new file mode 100644
index 0000000000..6728e783b9
--- /dev/null
+++ b/src/core/or/cell_st.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef CELL_ST_H
+#define CELL_ST_H
+
+/** Parsed onion routing cell. All communication between nodes
+ * is via cells. */
+struct cell_t {
+ circid_t circ_id; /**< Circuit which received the cell. */
+ uint8_t command; /**< Type of the cell: one of CELL_PADDING, CELL_CREATE,
+ * CELL_DESTROY, etc */
+ uint8_t payload[CELL_PAYLOAD_SIZE]; /**< Cell body. */
+};
+
+#endif
+
diff --git a/src/core/or/channel.c b/src/core/or/channel.c
new file mode 100644
index 0000000000..e6d717f111
--- /dev/null
+++ b/src/core/or/channel.c
@@ -0,0 +1,3480 @@
+/* Copyright (c) 2012-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file channel.c
+ *
+ * \brief OR/OP-to-OR channel abstraction layer. A channel's job is to
+ * transfer cells from Tor instance to Tor instance. Currently, there is only
+ * one implementation of the channel abstraction: in channeltls.c.
+ *
+ * Channels are a higher-level abstraction than or_connection_t: In general,
+ * any means that two Tor relays use to exchange cells, or any means that a
+ * relay and a client use to exchange cells, is a channel.
+ *
+ * Channels differ from pluggable transports in that they do not wrap an
+ * underlying protocol over which cells are transmitted: they <em>are</em> the
+ * underlying protocol.
+ *
+ * This module defines the generic parts of the channel_t interface, and
+ * provides the machinery necessary for specialized implementations to be
+ * created. At present, there is one specialized implementation in
+ * channeltls.c, which uses connection_or.c to send cells over a TLS
+ * connection.
+ *
+ * Every channel implementation is responsible for being able to transmit
+ * cells that are passed to it.
+ *
+ * For *inbound* cells, the entry point is: channel_process_cell(). It takes a
+ * cell and will pass it to the cell handler set by
+ * channel_set_cell_handlers(). Currently, this is passed back to the command
+ * subsystem which is command_process_cell().
+ *
+ * NOTE: For now, the separation between channels and specialized channels
+ * (like channeltls) is not that well defined. So the channeltls layer calls
+ * channel_process_cell(), which originally comes from the connection
+ * subsystem. This should hopefully be fixed with #23993.
+ *
+ * For *outbound* cells, the entry point is: channel_write_packed_cell().
+ * Only packed cells are dequeued from the circuit queue by the scheduler
+ * which uses channel_flush_from_first_active_circuit() to decide which cells
+ * to flush from which circuit on the channel. They are then passed down to
+ * the channel subsystem. This calls the low layer with the function pointer
+ * .write_packed_cell().
+ *
+ * Each specialized channel (currently only channeltls_t) MUST implement a
+ * series of functions found in channel_t. See channel.h for more
+ * documentation.
+ **/
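+
+/* A minimal sketch of wiring up inbound cell handling as described above,
+ * assuming "cell_handler" and "var_cell_handler" are functions matching the
+ * channel_cell_handler_fn_ptr and channel_var_cell_handler_fn_ptr types
+ * (the command subsystem does this with command_process_cell()):
+ *
+ *   channel_set_cell_handlers(chan, cell_handler, var_cell_handler);
+ */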
+
+/*
+ * Define this so channel.h gives us things only channel_t subclasses
+ * should touch.
+ */
+#define TOR_CHANNEL_INTERNAL_
+
+/* This one's for stuff only channel.c and the test suite should see */
+#define CHANNEL_PRIVATE_
+
+#include "or/or.h"
+#include "or/channel.h"
+#include "or/channeltls.h"
+#include "or/channelpadding.h"
+#include "or/circuitbuild.h"
+#include "or/circuitlist.h"
+#include "or/circuitstats.h"
+#include "or/config.h"
+#include "or/connection_or.h" /* For var_cell_free() */
+#include "or/circuitmux.h"
+#include "or/entrynodes.h"
+#include "or/geoip.h"
+#include "or/main.h"
+#include "or/nodelist.h"
+#include "or/relay.h"
+#include "or/rephist.h"
+#include "or/router.h"
+#include "or/routerlist.h"
+#include "or/scheduler.h"
+#include "lib/time/compat_time.h"
+#include "or/networkstatus.h"
+#include "or/rendservice.h"
+#include "lib/evloop/timers.h"
+
+#include "or/cell_queue_st.h"
+
+/* Global lists of channels */
+
+/* All channel_t instances */
+static smartlist_t *all_channels = NULL;
+
+/* All channel_t instances not in ERROR or CLOSED states */
+static smartlist_t *active_channels = NULL;
+
+/* All channel_t instances in ERROR or CLOSED states */
+static smartlist_t *finished_channels = NULL;
+
+/* All channel_listener_t instances */
+static smartlist_t *all_listeners = NULL;
+
+/* All channel_listener_t instances in LISTENING state */
+static smartlist_t *active_listeners = NULL;
+
+/* All channel_listener_t instances in CLOSED or ERROR states */
+static smartlist_t *finished_listeners = NULL;
+
+/** Map from channel->global_identifier to channel. Contains the same
+ * elements as all_channels. */
+static HT_HEAD(channel_gid_map, channel_s) channel_gid_map = HT_INITIALIZER();
+
+static unsigned
+channel_id_hash(const channel_t *chan)
+{
+ return (unsigned) chan->global_identifier;
+}
+static int
+channel_id_eq(const channel_t *a, const channel_t *b)
+{
+ return a->global_identifier == b->global_identifier;
+}
+HT_PROTOTYPE(channel_gid_map, channel_s, gidmap_node,
+ channel_id_hash, channel_id_eq)
+HT_GENERATE2(channel_gid_map, channel_s, gidmap_node,
+ channel_id_hash, channel_id_eq,
+ 0.6, tor_reallocarray_, tor_free_)
+
+HANDLE_IMPL(channel, channel_s,)
+
+/* Counter for ID numbers */
+static uint64_t n_channels_allocated = 0;
+
+/* Digest->channel map
+ *
+ * Similar to the one used in connection_or.c, this maps from the identity
+ * digest of a remote endpoint to a channel_t to that endpoint. Channels
+ * should be placed here when registered and removed when they close or error.
+ * If more than one channel exists, follow the next_with_same_id pointer
+ * as a linked list.
+ */
+static HT_HEAD(channel_idmap, channel_idmap_entry_s) channel_identity_map =
+ HT_INITIALIZER();
+
+typedef struct channel_idmap_entry_s {
+ HT_ENTRY(channel_idmap_entry_s) node;
+ uint8_t digest[DIGEST_LEN];
+ TOR_LIST_HEAD(channel_list_s, channel_s) channel_list;
+} channel_idmap_entry_t;
+
+static inline unsigned
+channel_idmap_hash(const channel_idmap_entry_t *ent)
+{
+ return (unsigned) siphash24g(ent->digest, DIGEST_LEN);
+}
+
+static inline int
+channel_idmap_eq(const channel_idmap_entry_t *a,
+ const channel_idmap_entry_t *b)
+{
+ return tor_memeq(a->digest, b->digest, DIGEST_LEN);
+}
+
+HT_PROTOTYPE(channel_idmap, channel_idmap_entry_s, node, channel_idmap_hash,
+ channel_idmap_eq)
+HT_GENERATE2(channel_idmap, channel_idmap_entry_s, node, channel_idmap_hash,
+ channel_idmap_eq, 0.5, tor_reallocarray_, tor_free_)
+
+/* Functions to maintain the digest map */
+static void channel_remove_from_digest_map(channel_t *chan);
+
+static void channel_force_xfree(channel_t *chan);
+static void channel_free_list(smartlist_t *channels,
+ int mark_for_close);
+static void channel_listener_free_list(smartlist_t *channels,
+ int mark_for_close);
+static void channel_listener_force_xfree(channel_listener_t *chan_l);
+
+/***********************************
+ * Channel state utility functions *
+ **********************************/
+
+/**
+ * Indicate whether a given channel state is valid.
+ */
+int
+channel_state_is_valid(channel_state_t state)
+{
+ int is_valid;
+
+ switch (state) {
+ case CHANNEL_STATE_CLOSED:
+ case CHANNEL_STATE_CLOSING:
+ case CHANNEL_STATE_ERROR:
+ case CHANNEL_STATE_MAINT:
+ case CHANNEL_STATE_OPENING:
+ case CHANNEL_STATE_OPEN:
+ is_valid = 1;
+ break;
+ case CHANNEL_STATE_LAST:
+ default:
+ is_valid = 0;
+ }
+
+ return is_valid;
+}
+
+/**
+ * Indicate whether a given channel listener state is valid.
+ */
+int
+channel_listener_state_is_valid(channel_listener_state_t state)
+{
+ int is_valid;
+
+ switch (state) {
+ case CHANNEL_LISTENER_STATE_CLOSED:
+ case CHANNEL_LISTENER_STATE_LISTENING:
+ case CHANNEL_LISTENER_STATE_CLOSING:
+ case CHANNEL_LISTENER_STATE_ERROR:
+ is_valid = 1;
+ break;
+ case CHANNEL_LISTENER_STATE_LAST:
+ default:
+ is_valid = 0;
+ }
+
+ return is_valid;
+}
+
+/**
+ * Indicate whether a channel state transition is valid.
+ *
+ * This function takes two channel states and indicates whether a
+ * transition between them is permitted (see the state definitions and
+ * transition table in or.h at the channel_state_t typedef).
+ */
+int
+channel_state_can_transition(channel_state_t from, channel_state_t to)
+{
+ int is_valid;
+
+ switch (from) {
+ case CHANNEL_STATE_CLOSED:
+ is_valid = (to == CHANNEL_STATE_OPENING);
+ break;
+ case CHANNEL_STATE_CLOSING:
+ is_valid = (to == CHANNEL_STATE_CLOSED ||
+ to == CHANNEL_STATE_ERROR);
+ break;
+ case CHANNEL_STATE_ERROR:
+ is_valid = 0;
+ break;
+ case CHANNEL_STATE_MAINT:
+ is_valid = (to == CHANNEL_STATE_CLOSING ||
+ to == CHANNEL_STATE_ERROR ||
+ to == CHANNEL_STATE_OPEN);
+ break;
+ case CHANNEL_STATE_OPENING:
+ is_valid = (to == CHANNEL_STATE_CLOSING ||
+ to == CHANNEL_STATE_ERROR ||
+ to == CHANNEL_STATE_OPEN);
+ break;
+ case CHANNEL_STATE_OPEN:
+ is_valid = (to == CHANNEL_STATE_CLOSING ||
+ to == CHANNEL_STATE_ERROR ||
+ to == CHANNEL_STATE_MAINT);
+ break;
+ case CHANNEL_STATE_LAST:
+ default:
+ is_valid = 0;
+ }
+
+ return is_valid;
+}
+
+/**
+ * Indicate whether a channel listener state transition is valid.
+ *
+ * This function takes two channel listener states and indicates whether a
+ * transition between them is permitted (see the state definitions and
+ * transition table in or.h at the channel_listener_state_t typedef).
+ */
+int
+channel_listener_state_can_transition(channel_listener_state_t from,
+ channel_listener_state_t to)
+{
+ int is_valid;
+
+ switch (from) {
+ case CHANNEL_LISTENER_STATE_CLOSED:
+ is_valid = (to == CHANNEL_LISTENER_STATE_LISTENING);
+ break;
+ case CHANNEL_LISTENER_STATE_CLOSING:
+ is_valid = (to == CHANNEL_LISTENER_STATE_CLOSED ||
+ to == CHANNEL_LISTENER_STATE_ERROR);
+ break;
+ case CHANNEL_LISTENER_STATE_ERROR:
+ is_valid = 0;
+ break;
+ case CHANNEL_LISTENER_STATE_LISTENING:
+ is_valid = (to == CHANNEL_LISTENER_STATE_CLOSING ||
+ to == CHANNEL_LISTENER_STATE_ERROR);
+ break;
+ case CHANNEL_LISTENER_STATE_LAST:
+ default:
+ is_valid = 0;
+ }
+
+ return is_valid;
+}
+
+/**
+ * Return a human-readable description for a channel state.
+ */
+const char *
+channel_state_to_string(channel_state_t state)
+{
+ const char *descr;
+
+ switch (state) {
+ case CHANNEL_STATE_CLOSED:
+ descr = "closed";
+ break;
+ case CHANNEL_STATE_CLOSING:
+ descr = "closing";
+ break;
+ case CHANNEL_STATE_ERROR:
+ descr = "channel error";
+ break;
+ case CHANNEL_STATE_MAINT:
+ descr = "temporarily suspended for maintenance";
+ break;
+ case CHANNEL_STATE_OPENING:
+ descr = "opening";
+ break;
+ case CHANNEL_STATE_OPEN:
+ descr = "open";
+ break;
+ case CHANNEL_STATE_LAST:
+ default:
+ descr = "unknown or invalid channel state";
+ }
+
+ return descr;
+}
+
+/**
+ * Return a human-readable description for a channel listener state.
+ */
+const char *
+channel_listener_state_to_string(channel_listener_state_t state)
+{
+ const char *descr;
+
+ switch (state) {
+ case CHANNEL_LISTENER_STATE_CLOSED:
+ descr = "closed";
+ break;
+ case CHANNEL_LISTENER_STATE_CLOSING:
+ descr = "closing";
+ break;
+ case CHANNEL_LISTENER_STATE_ERROR:
+ descr = "channel listener error";
+ break;
+ case CHANNEL_LISTENER_STATE_LISTENING:
+ descr = "listening";
+ break;
+ case CHANNEL_LISTENER_STATE_LAST:
+ default:
+ descr = "unknown or invalid channel listener state";
+ }
+
+ return descr;
+}
+
+/***************************************
+ * Channel registration/unregistration *
+ ***************************************/
+
+/**
+ * Register a channel.
+ *
+ * This function registers a newly created channel in the global lists/maps
+ * of active channels.
+ */
+void
+channel_register(channel_t *chan)
+{
+ tor_assert(chan);
+ tor_assert(chan->global_identifier);
+
+ /* No-op if already registered */
+ if (chan->registered) return;
+
+ log_debug(LD_CHANNEL,
+ "Registering channel %p (ID %"PRIu64 ") "
+ "in state %s (%d) with digest %s",
+ chan, (chan->global_identifier),
+ channel_state_to_string(chan->state), chan->state,
+ hex_str(chan->identity_digest, DIGEST_LEN));
+
+ /* Make sure we have all_channels, then add it */
+ if (!all_channels) all_channels = smartlist_new();
+ smartlist_add(all_channels, chan);
+ channel_t *oldval = HT_REPLACE(channel_gid_map, &channel_gid_map, chan);
+ tor_assert(! oldval);
+
+ /* Is it finished? */
+ if (CHANNEL_FINISHED(chan)) {
+ /* Put it in the finished list, creating it if necessary */
+ if (!finished_channels) finished_channels = smartlist_new();
+ smartlist_add(finished_channels, chan);
+ mainloop_schedule_postloop_cleanup();
+ } else {
+ /* Put it in the active list, creating it if necessary */
+ if (!active_channels) active_channels = smartlist_new();
+ smartlist_add(active_channels, chan);
+
+ if (!CHANNEL_IS_CLOSING(chan)) {
+ /* It should have a digest set */
+ if (!tor_digest_is_zero(chan->identity_digest)) {
+ /* Yeah, we're good, add it to the map */
+ channel_add_to_digest_map(chan);
+ } else {
+ log_info(LD_CHANNEL,
+ "Channel %p (global ID %"PRIu64 ") "
+ "in state %s (%d) registered with no identity digest",
+ chan, (chan->global_identifier),
+ channel_state_to_string(chan->state), chan->state);
+ }
+ }
+ }
+
+ /* Mark it as registered */
+ chan->registered = 1;
+}
+
+/**
+ * Unregister a channel.
+ *
+ * This function removes a channel from the global lists and maps and is used
+ * when freeing a closed/errored channel.
+ */
+void
+channel_unregister(channel_t *chan)
+{
+ tor_assert(chan);
+
+ /* No-op if not registered */
+ if (!(chan->registered)) return;
+
+ /* Is it finished? */
+ if (CHANNEL_FINISHED(chan)) {
+ /* Get it out of the finished list */
+ if (finished_channels) smartlist_remove(finished_channels, chan);
+ } else {
+ /* Get it out of the active list */
+ if (active_channels) smartlist_remove(active_channels, chan);
+ }
+
+ /* Get it out of all_channels */
+ if (all_channels) smartlist_remove(all_channels, chan);
+ channel_t *oldval = HT_REMOVE(channel_gid_map, &channel_gid_map, chan);
+ tor_assert(oldval == NULL || oldval == chan);
+
+ /* Mark it as unregistered */
+ chan->registered = 0;
+
+ /* Should it be in the digest map? */
+ if (!tor_digest_is_zero(chan->identity_digest) &&
+ !(CHANNEL_CONDEMNED(chan))) {
+ /* Remove it */
+ channel_remove_from_digest_map(chan);
+ }
+}
+
+/**
+ * Register a channel listener.
+ *
+ * This function registers a newly created channel listener in the global
+ * lists/maps of active channel listeners.
+ */
+void
+channel_listener_register(channel_listener_t *chan_l)
+{
+ tor_assert(chan_l);
+
+ /* No-op if already registered */
+ if (chan_l->registered) return;
+
+ log_debug(LD_CHANNEL,
+ "Registering channel listener %p (ID %"PRIu64 ") "
+ "in state %s (%d)",
+ chan_l, (chan_l->global_identifier),
+ channel_listener_state_to_string(chan_l->state),
+ chan_l->state);
+
+ /* Make sure we have all_listeners, then add it */
+ if (!all_listeners) all_listeners = smartlist_new();
+ smartlist_add(all_listeners, chan_l);
+
+ /* Is it finished? */
+ if (chan_l->state == CHANNEL_LISTENER_STATE_CLOSED ||
+ chan_l->state == CHANNEL_LISTENER_STATE_ERROR) {
+ /* Put it in the finished list, creating it if necessary */
+ if (!finished_listeners) finished_listeners = smartlist_new();
+ smartlist_add(finished_listeners, chan_l);
+ } else {
+ /* Put it in the active list, creating it if necessary */
+ if (!active_listeners) active_listeners = smartlist_new();
+ smartlist_add(active_listeners, chan_l);
+ }
+
+ /* Mark it as registered */
+ chan_l->registered = 1;
+}
+
+/**
+ * Unregister a channel listener.
+ *
+ * This function removes a channel listener from the global lists and maps
+ * and is used when freeing a closed/errored channel listener.
+ */
+void
+channel_listener_unregister(channel_listener_t *chan_l)
+{
+ tor_assert(chan_l);
+
+ /* No-op if not registered */
+ if (!(chan_l->registered)) return;
+
+ /* Is it finished? */
+ if (chan_l->state == CHANNEL_LISTENER_STATE_CLOSED ||
+ chan_l->state == CHANNEL_LISTENER_STATE_ERROR) {
+ /* Get it out of the finished list */
+ if (finished_listeners) smartlist_remove(finished_listeners, chan_l);
+ } else {
+ /* Get it out of the active list */
+ if (active_listeners) smartlist_remove(active_listeners, chan_l);
+ }
+
+ /* Get it out of all_listeners */
+ if (all_listeners) smartlist_remove(all_listeners, chan_l);
+
+ /* Mark it as unregistered */
+ chan_l->registered = 0;
+}
+
+/*********************************
+ * Channel digest map maintenance
+ *********************************/
+
+/**
+ * Add a channel to the digest map.
+ *
+ * This function adds a channel to the digest map and inserts it into the
+ * correct linked list if channels with that remote endpoint identity digest
+ * already exist.
+ */
+STATIC void
+channel_add_to_digest_map(channel_t *chan)
+{
+ channel_idmap_entry_t *ent, search;
+
+ tor_assert(chan);
+
+ /* Assert that the state makes sense */
+ tor_assert(!CHANNEL_CONDEMNED(chan));
+
+ /* Assert that there is a digest */
+ tor_assert(!tor_digest_is_zero(chan->identity_digest));
+
+ memcpy(search.digest, chan->identity_digest, DIGEST_LEN);
+ ent = HT_FIND(channel_idmap, &channel_identity_map, &search);
+ if (! ent) {
+ ent = tor_malloc(sizeof(channel_idmap_entry_t));
+ memcpy(ent->digest, chan->identity_digest, DIGEST_LEN);
+ TOR_LIST_INIT(&ent->channel_list);
+ HT_INSERT(channel_idmap, &channel_identity_map, ent);
+ }
+ TOR_LIST_INSERT_HEAD(&ent->channel_list, chan, next_with_same_id);
+
+ log_debug(LD_CHANNEL,
+ "Added channel %p (global ID %"PRIu64 ") "
+ "to identity map in state %s (%d) with digest %s",
+ chan, (chan->global_identifier),
+ channel_state_to_string(chan->state), chan->state,
+ hex_str(chan->identity_digest, DIGEST_LEN));
+}
+
+/**
+ * Remove a channel from the digest map.
+ *
+ * This function removes a channel from the digest map and the linked list of
+ * channels for that digest if more than one exists.
+ */
+static void
+channel_remove_from_digest_map(channel_t *chan)
+{
+ channel_idmap_entry_t *ent, search;
+
+ tor_assert(chan);
+
+ /* Assert that there is a digest */
+ tor_assert(!tor_digest_is_zero(chan->identity_digest));
+
+ /* Pull it out of its list, wherever that list is */
+ TOR_LIST_REMOVE(chan, next_with_same_id);
+
+ memcpy(search.digest, chan->identity_digest, DIGEST_LEN);
+ ent = HT_FIND(channel_idmap, &channel_identity_map, &search);
+
+ /* Look for it in the map */
+ if (ent) {
+ /* Okay, it's here */
+
+ if (TOR_LIST_EMPTY(&ent->channel_list)) {
+ HT_REMOVE(channel_idmap, &channel_identity_map, ent);
+ tor_free(ent);
+ }
+
+ log_debug(LD_CHANNEL,
+ "Removed channel %p (global ID %"PRIu64 ") from "
+ "identity map in state %s (%d) with digest %s",
+ chan, (chan->global_identifier),
+ channel_state_to_string(chan->state), chan->state,
+ hex_str(chan->identity_digest, DIGEST_LEN));
+ } else {
+ /* Shouldn't happen */
+ log_warn(LD_BUG,
+ "Trying to remove channel %p (global ID %"PRIu64 ") with "
+ "digest %s from identity map, but couldn't find any with "
+ "that digest",
+ chan, (chan->global_identifier),
+ hex_str(chan->identity_digest, DIGEST_LEN));
+ }
+}
+
+/****************************
+ * Channel lookup functions *
+ ***************************/
+
+/**
+ * Find channel by global ID.
+ *
+ * This function searches for a channel by the global_identifier assigned
+ * at initialization time. This identifier is unique for the lifetime of the
+ * Tor process.
+ */
+channel_t *
+channel_find_by_global_id(uint64_t global_identifier)
+{
+ channel_t lookup;
+ channel_t *rv = NULL;
+
+ lookup.global_identifier = global_identifier;
+ rv = HT_FIND(channel_gid_map, &channel_gid_map, &lookup);
+ if (rv) {
+ tor_assert(rv->global_identifier == global_identifier);
+ }
+
+ return rv;
+}
+
+/** Return true iff <b>chan</b> has <b>rsa_id_digest</b> and <b>ed_id</b>
+ * as its identity keys. If either is NULL, do not check for a match. */
+static int
+channel_remote_identity_matches(const channel_t *chan,
+ const char *rsa_id_digest,
+ const ed25519_public_key_t *ed_id)
+{
+ if (BUG(!chan))
+ return 0;
+ if (rsa_id_digest) {
+ if (tor_memneq(rsa_id_digest, chan->identity_digest, DIGEST_LEN))
+ return 0;
+ }
+ if (ed_id) {
+ if (tor_memneq(ed_id->pubkey, chan->ed25519_identity.pubkey,
+ ED25519_PUBKEY_LEN))
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Find a channel by the RSA/Ed25519 identity of the remote endpoint.
+ *
+ * This function looks up a channel by the digest of its remote endpoint's RSA
+ * identity key. If <b>ed_id</b> is provided and nonzero, only a channel
+ * matching the <b>ed_id</b> will be returned.
+ *
+ * It's possible that more than one channel to a given endpoint exists. Use
+ * channel_next_with_rsa_identity() to walk the list of channels; make sure
+ * to test for an Ed25519 identity match too (as appropriate).
+ */
+channel_t *
+channel_find_by_remote_identity(const char *rsa_id_digest,
+ const ed25519_public_key_t *ed_id)
+{
+ channel_t *rv = NULL;
+ channel_idmap_entry_t *ent, search;
+
+ tor_assert(rsa_id_digest); /* For now, we require that every channel have
+ * an RSA identity, and that every lookup
+ * contain an RSA identity */
+ if (ed_id && ed25519_public_key_is_zero(ed_id)) {
+ /* Treat zero as meaning "We don't care about the presence or absence of
+ * an Ed key", not "There must be no Ed key". */
+ ed_id = NULL;
+ }
+
+ memcpy(search.digest, rsa_id_digest, DIGEST_LEN);
+ ent = HT_FIND(channel_idmap, &channel_identity_map, &search);
+ if (ent) {
+ rv = TOR_LIST_FIRST(&ent->channel_list);
+ }
+ while (rv && ! channel_remote_identity_matches(rv, rsa_id_digest, ed_id)) {
+ rv = channel_next_with_rsa_identity(rv);
+ }
+
+ return rv;
+}
+
+/**
+ * Get next channel with digest.
+ *
+ * This function takes a channel and finds the next channel in the list
+ * with the same digest.
+ */
+channel_t *
+channel_next_with_rsa_identity(channel_t *chan)
+{
+ tor_assert(chan);
+
+ return TOR_LIST_NEXT(chan, next_with_same_id);
+}
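+
+/* A minimal iteration sketch over every channel sharing an RSA identity
+ * ("rsa_digest" is a DIGEST_LEN-byte digest; any Ed25519 check is left to
+ * the caller, since channels after the first are not filtered):
+ *
+ *   channel_t *chan = channel_find_by_remote_identity(rsa_digest, NULL);
+ *   while (chan) {
+ *     ...;
+ *     chan = channel_next_with_rsa_identity(chan);
+ *   }
+ */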
+
+/**
+ * Relays run this once an hour to look over our list of channels to other
+ * relays. It prints out some statistics if there are multiple connections
+ * to many relays.
+ *
+ * This function is similar to connection_or_set_bad_connections(),
+ * and probably could be adapted to replace it, if it was modified to actually
+ * take action on any of these connections.
+ */
+void
+channel_check_for_duplicates(void)
+{
+ channel_idmap_entry_t **iter;
+ channel_t *chan;
+ int total_relay_connections = 0, total_relays = 0, total_canonical = 0;
+ int total_half_canonical = 0;
+ int total_gt_one_connection = 0, total_gt_two_connections = 0;
+ int total_gt_four_connections = 0;
+
+ HT_FOREACH(iter, channel_idmap, &channel_identity_map) {
+ int connections_to_relay = 0;
+
+ /* Only consider relay connections */
+ if (!connection_or_digest_is_known_relay((char*)(*iter)->digest))
+ continue;
+
+ total_relays++;
+
+ for (chan = TOR_LIST_FIRST(&(*iter)->channel_list); chan;
+ chan = channel_next_with_rsa_identity(chan)) {
+
+ if (CHANNEL_CONDEMNED(chan) || !CHANNEL_IS_OPEN(chan))
+ continue;
+
+ connections_to_relay++;
+ total_relay_connections++;
+
+ if (chan->is_canonical(chan, 0)) total_canonical++;
+
+ if (!chan->is_canonical_to_peer && chan->is_canonical(chan, 0)
+ && chan->is_canonical(chan, 1)) {
+ total_half_canonical++;
+ }
+ }
+
+ if (connections_to_relay > 1) total_gt_one_connection++;
+ if (connections_to_relay > 2) total_gt_two_connections++;
+ if (connections_to_relay > 4) total_gt_four_connections++;
+ }
+
+#define MIN_RELAY_CONNECTIONS_TO_WARN 5
+
+ /* If we average 1.5 or more connections per relay, something is wrong */
+ if (total_relays > MIN_RELAY_CONNECTIONS_TO_WARN &&
+ total_relay_connections >= 1.5*total_relays) {
+ log_notice(LD_OR,
+ "Your relay has a very large number of connections to other relays. "
+ "Is your outbound address the same as your relay address? "
+ "Found %d connections to %d relays. Found %d current canonical "
+ "connections, in %d of which we were a non-canonical peer. "
+ "%d relays had more than 1 connection, %d had more than 2, and "
+ "%d had more than 4 connections.",
+ total_relay_connections, total_relays, total_canonical,
+ total_half_canonical, total_gt_one_connection,
+ total_gt_two_connections, total_gt_four_connections);
+ } else {
+ log_info(LD_OR, "Performed connection pruning. "
+ "Found %d connections to %d relays. Found %d current canonical "
+ "connections, in %d of which we were a non-canonical peer. "
+ "%d relays had more than 1 connection, %d had more than 2, and "
+ "%d had more than 4 connections.",
+ total_relay_connections, total_relays, total_canonical,
+ total_half_canonical, total_gt_one_connection,
+ total_gt_two_connections, total_gt_four_connections);
+ }
+}
+
+/**
+ * Initialize a channel.
+ *
+ * This function should be called by subclasses to set up some per-channel
+ * variables. I.e., this is the superclass constructor. Before this, the
+ * channel should be allocated with tor_malloc_zero().
+ */
+void
+channel_init(channel_t *chan)
+{
+ tor_assert(chan);
+
+ /* Assign an ID and bump the counter */
+ chan->global_identifier = ++n_channels_allocated;
+
+ /* Init timestamp */
+ chan->timestamp_last_had_circuits = time(NULL);
+
+ /* Warn about exhausted circuit IDs no more than hourly. */
+ chan->last_warned_circ_ids_exhausted.rate = 3600;
+
+ /* Initialize list entries. */
+ memset(&chan->next_with_same_id, 0, sizeof(chan->next_with_same_id));
+
+ /* Timestamp it */
+ channel_timestamp_created(chan);
+
+ /* It hasn't been open yet. */
+ chan->has_been_open = 0;
+
+ /* Scheduler state is idle */
+ chan->scheduler_state = SCHED_CHAN_IDLE;
+
+ /* Channel is not in the scheduler heap. */
+ chan->sched_heap_idx = -1;
+}
+
+/**
+ * Initialize a channel listener.
+ *
+ * This function should be called by subclasses to set up some per-channel
+ * variables. I.e., this is the superclass constructor. Before this, the
+ * channel listener should be allocated with tor_malloc_zero().
+ */
+void
+channel_init_listener(channel_listener_t *chan_l)
+{
+ tor_assert(chan_l);
+
+ /* Assign an ID and bump the counter */
+ chan_l->global_identifier = ++n_channels_allocated;
+
+ /* Timestamp it */
+ channel_listener_timestamp_created(chan_l);
+}
+
+/**
+ * Free a channel; nothing outside of channel.c and subclasses should call
+ * this - it frees channels after they have closed and been unregistered.
+ */
+void
+channel_free_(channel_t *chan)
+{
+ if (!chan) return;
+
+ /* It must be closed or errored */
+ tor_assert(CHANNEL_FINISHED(chan));
+
+ /* It must be deregistered */
+ tor_assert(!(chan->registered));
+
+ log_debug(LD_CHANNEL,
+ "Freeing channel %"PRIu64 " at %p",
+ (chan->global_identifier), chan);
+
+ /* Get this one out of the scheduler */
+ scheduler_release_channel(chan);
+
+ /*
+ * Get rid of cmux policy before we do anything, so cmux policies don't
+ * see channels in weird half-freed states.
+ */
+ if (chan->cmux) {
+ circuitmux_set_policy(chan->cmux, NULL);
+ }
+
+ /* Remove all timers and associated handle entries now */
+ timer_free(chan->padding_timer);
+ channel_handle_free(chan->timer_handle);
+ channel_handles_clear(chan);
+
+ /* Call a free method if there is one */
+ if (chan->free_fn) chan->free_fn(chan);
+
+ channel_clear_remote_end(chan);
+
+ /* Get rid of cmux */
+ if (chan->cmux) {
+ circuitmux_detach_all_circuits(chan->cmux, NULL);
+ circuitmux_mark_destroyed_circids_usable(chan->cmux, chan);
+ circuitmux_free(chan->cmux);
+ chan->cmux = NULL;
+ }
+
+ tor_free(chan);
+}
+
+/**
+ * Free a channel listener; nothing outside of channel.c and subclasses
+ * should call this - it frees channel listeners after they have closed and
+ * been unregistered.
+ */
+void
+channel_listener_free_(channel_listener_t *chan_l)
+{
+ if (!chan_l) return;
+
+ log_debug(LD_CHANNEL,
+ "Freeing channel_listener_t %"PRIu64 " at %p",
+ (chan_l->global_identifier),
+ chan_l);
+
+ /* It must be closed or errored */
+ tor_assert(chan_l->state == CHANNEL_LISTENER_STATE_CLOSED ||
+ chan_l->state == CHANNEL_LISTENER_STATE_ERROR);
+ /* It must be deregistered */
+ tor_assert(!(chan_l->registered));
+
+ /* Call a free method if there is one */
+ if (chan_l->free_fn) chan_l->free_fn(chan_l);
+
+ tor_free(chan_l);
+}
+
+/**
+ * Free a channel and skip the state/registration asserts; this internal-
+ * use-only function should be called only from channel_free_all() when
+ * shutting down the Tor process.
+ */
+static void
+channel_force_xfree(channel_t *chan)
+{
+ tor_assert(chan);
+
+ log_debug(LD_CHANNEL,
+ "Force-freeing channel %"PRIu64 " at %p",
+ (chan->global_identifier), chan);
+
+ /* Get this one out of the scheduler */
+ scheduler_release_channel(chan);
+
+ /*
+ * Get rid of cmux policy before we do anything, so cmux policies don't
+ * see channels in weird half-freed states.
+ */
+ if (chan->cmux) {
+ circuitmux_set_policy(chan->cmux, NULL);
+ }
+
+ /* Remove all timers and associated handle entries now */
+ timer_free(chan->padding_timer);
+ channel_handle_free(chan->timer_handle);
+ channel_handles_clear(chan);
+
+ /* Call a free method if there is one */
+ if (chan->free_fn) chan->free_fn(chan);
+
+ channel_clear_remote_end(chan);
+
+ /* Get rid of cmux */
+ if (chan->cmux) {
+ circuitmux_free(chan->cmux);
+ chan->cmux = NULL;
+ }
+
+ tor_free(chan);
+}
+
+/**
+ * Free a channel listener and skip the state/registration asserts; this
+ * internal-use-only function should be called only from channel_free_all()
+ * when shutting down the Tor process.
+ */
+static void
+channel_listener_force_xfree(channel_listener_t *chan_l)
+{
+ tor_assert(chan_l);
+
+ log_debug(LD_CHANNEL,
+ "Force-freeing channel_listener_t %"PRIu64 " at %p",
+ (chan_l->global_identifier),
+ chan_l);
+
+ /* Call a free method if there is one */
+ if (chan_l->free_fn) chan_l->free_fn(chan_l);
+
+ /*
+ * The incoming list just gets emptied and freed; we request close on
+ * any channels we find there, but since we got called while shutting
+ * down they will get deregistered and freed elsewhere anyway.
+ */
+ if (chan_l->incoming_list) {
+ SMARTLIST_FOREACH_BEGIN(chan_l->incoming_list,
+ channel_t *, qchan) {
+ channel_mark_for_close(qchan);
+ } SMARTLIST_FOREACH_END(qchan);
+
+ smartlist_free(chan_l->incoming_list);
+ chan_l->incoming_list = NULL;
+ }
+
+ tor_free(chan_l);
+}
+
+/**
+ * Set the listener for a channel listener.
+ *
+ * This function sets the handler for new incoming channels on a channel
+ * listener.
+ */
+void
+channel_listener_set_listener_fn(channel_listener_t *chan_l,
+ channel_listener_fn_ptr listener)
+{
+ tor_assert(chan_l);
+ tor_assert(chan_l->state == CHANNEL_LISTENER_STATE_LISTENING);
+
+ log_debug(LD_CHANNEL,
+ "Setting listener callback for channel listener %p "
+ "(global ID %"PRIu64 ") to %p",
+ chan_l, (chan_l->global_identifier),
+ listener);
+
+ chan_l->listener = listener;
+ if (chan_l->listener) channel_listener_process_incoming(chan_l);
+}
+
+/**
+ * Return the fixed-length cell handler for a channel.
+ *
+ * This function gets the handler for incoming fixed-length cells installed
+ * on a channel.
+ */
+channel_cell_handler_fn_ptr
+channel_get_cell_handler(channel_t *chan)
+{
+ tor_assert(chan);
+
+ if (CHANNEL_CAN_HANDLE_CELLS(chan))
+ return chan->cell_handler;
+
+ return NULL;
+}
+
+/**
+ * Return the variable-length cell handler for a channel.
+ *
+ * This function gets the handler for incoming variable-length cells
+ * installed on a channel.
+ */
+channel_var_cell_handler_fn_ptr
+channel_get_var_cell_handler(channel_t *chan)
+{
+ tor_assert(chan);
+
+ if (CHANNEL_CAN_HANDLE_CELLS(chan))
+ return chan->var_cell_handler;
+
+ return NULL;
+}
+
+/**
+ * Set both cell handlers for a channel.
+ *
+ * This function sets both the fixed-length and variable length cell handlers
+ * for a channel.
+ */
+void
+channel_set_cell_handlers(channel_t *chan,
+ channel_cell_handler_fn_ptr cell_handler,
+ channel_var_cell_handler_fn_ptr
+ var_cell_handler)
+{
+ tor_assert(chan);
+ tor_assert(CHANNEL_CAN_HANDLE_CELLS(chan));
+
+ log_debug(LD_CHANNEL,
+ "Setting cell_handler callback for channel %p to %p",
+ chan, cell_handler);
+ log_debug(LD_CHANNEL,
+ "Setting var_cell_handler callback for channel %p to %p",
+ chan, var_cell_handler);
+
+ /* Change them */
+ chan->cell_handler = cell_handler;
+ chan->var_cell_handler = var_cell_handler;
+}
+
+/*
+ * On closing channels
+ *
+ * There are three functions that close channels, for use in
+ * different circumstances:
+ *
+ * - Use channel_mark_for_close() for most cases
+ * - Use channel_close_from_lower_layer() if you are connection_or.c
+ * and the other end closes the underlying connection.
+ * - Use channel_close_for_error() if you are connection_or.c and
+ * some sort of error has occurred.
+ */
+
+/**
+ * Mark a channel for closure.
+ *
+ * This function tries to close a channel_t; it will go into the CLOSING
+ * state, and eventually the lower layer should put it into the CLOSED or
+ * ERROR state. Then, channel_run_cleanup() will eventually free it.
+ */
+void
+channel_mark_for_close(channel_t *chan)
+{
+ tor_assert(chan != NULL);
+ tor_assert(chan->close != NULL);
+
+ /* If it's already in CLOSING, CLOSED or ERROR, this is a no-op */
+ if (CHANNEL_CONDEMNED(chan))
+ return;
+
+ log_debug(LD_CHANNEL,
+ "Closing channel %p (global ID %"PRIu64 ") "
+ "by request",
+ chan, (chan->global_identifier));
+
+ /* Note closing by request from above */
+ chan->reason_for_closing = CHANNEL_CLOSE_REQUESTED;
+
+ /* Change state to CLOSING */
+ channel_change_state(chan, CHANNEL_STATE_CLOSING);
+
+ /* Tell the lower layer */
+ chan->close(chan);
+
+ /*
+ * It's up to the lower layer to change state to CLOSED or ERROR when we're
+ * ready; we'll try to free channels that are in the finished list from
+ * channel_run_cleanup(). The lower layer should do this by calling
+ * channel_closed().
+ */
+}
+
+/**
+ * Mark a channel listener for closure.
+ *
+ * This function tries to close a channel_listener_t; it will go into the
+ * CLOSING state, and eventually the lower layer should put it into the CLOSED
+ * or ERROR state. Then, channel_run_cleanup() will eventually free it.
+ */
+void
+channel_listener_mark_for_close(channel_listener_t *chan_l)
+{
+ tor_assert(chan_l != NULL);
+ tor_assert(chan_l->close != NULL);
+
+ /* If it's already in CLOSING, CLOSED or ERROR, this is a no-op */
+ if (chan_l->state == CHANNEL_LISTENER_STATE_CLOSING ||
+ chan_l->state == CHANNEL_LISTENER_STATE_CLOSED ||
+ chan_l->state == CHANNEL_LISTENER_STATE_ERROR) return;
+
+ log_debug(LD_CHANNEL,
+ "Closing channel listener %p (global ID %"PRIu64 ") "
+ "by request",
+ chan_l, (chan_l->global_identifier));
+
+ /* Note closing by request from above */
+ chan_l->reason_for_closing = CHANNEL_LISTENER_CLOSE_REQUESTED;
+
+ /* Change state to CLOSING */
+ channel_listener_change_state(chan_l, CHANNEL_LISTENER_STATE_CLOSING);
+
+ /* Tell the lower layer */
+ chan_l->close(chan_l);
+
+ /*
+ * It's up to the lower layer to change state to CLOSED or ERROR when we're
+ * ready; we'll try to free channels that are in the finished list from
+ * channel_run_cleanup(). The lower layer should do this by calling
+ * channel_listener_closed().
+ */
+}
+
+/**
+ * Close a channel from the lower layer.
+ *
+ * Notify the channel code that the channel is being closed due to a non-error
+ * condition in the lower layer. This does not call the close() method, since
+ * the lower layer already knows.
+ */
+void
+channel_close_from_lower_layer(channel_t *chan)
+{
+ tor_assert(chan != NULL);
+
+ /* If it's already in CLOSING, CLOSED or ERROR, this is a no-op */
+ if (CHANNEL_CONDEMNED(chan))
+ return;
+
+ log_debug(LD_CHANNEL,
+ "Closing channel %p (global ID %"PRIu64 ") "
+ "due to lower-layer event",
+ chan, (chan->global_identifier));
+
+ /* Note closing by event from below */
+ chan->reason_for_closing = CHANNEL_CLOSE_FROM_BELOW;
+
+ /* Change state to CLOSING */
+ channel_change_state(chan, CHANNEL_STATE_CLOSING);
+}
+
+/**
+ * Notify that the channel is being closed due to an error condition.
+ *
+ * This function is called by the lower layer implementing the transport
+ * when a channel must be closed due to an error condition. This does not
+ * call the channel's close method, since the lower layer already knows.
+ */
+void
+channel_close_for_error(channel_t *chan)
+{
+ tor_assert(chan != NULL);
+
+ /* If it's already in CLOSING, CLOSED or ERROR, this is a no-op */
+ if (CHANNEL_CONDEMNED(chan))
+ return;
+
+ log_debug(LD_CHANNEL,
+ "Closing channel %p due to lower-layer error",
+ chan);
+
+ /* Note closing by event from below */
+ chan->reason_for_closing = CHANNEL_CLOSE_FOR_ERROR;
+
+ /* Change state to CLOSING */
+ channel_change_state(chan, CHANNEL_STATE_CLOSING);
+}
+
+/**
+ * Notify that the lower layer is finished closing the channel.
+ *
+ * This function should be called by the lower layer when a channel
+ * is finished closing and it should be regarded as inactive and
+ * freed by the channel code.
+ */
+void
+channel_closed(channel_t *chan)
+{
+ tor_assert(chan);
+ tor_assert(CHANNEL_CONDEMNED(chan));
+
+ /* No-op if already inactive */
+ if (CHANNEL_FINISHED(chan))
+ return;
+
+ /* Inform any pending (not attached) circs that they should
+ * give up. */
+ if (! chan->has_been_open)
+ circuit_n_chan_done(chan, 0, 0);
+
+ /* Now close all the attached circuits on it. */
+ circuit_unlink_all_from_channel(chan, END_CIRC_REASON_CHANNEL_CLOSED);
+
+ if (chan->reason_for_closing != CHANNEL_CLOSE_FOR_ERROR) {
+ channel_change_state(chan, CHANNEL_STATE_CLOSED);
+ } else {
+ channel_change_state(chan, CHANNEL_STATE_ERROR);
+ }
+}
+
+/**
+ * Clear the identity_digest of a channel.
+ *
+ * This function clears the identity digest of the remote endpoint for a
+ * channel; this is intended for use by the lower layer.
+ */
+void
+channel_clear_identity_digest(channel_t *chan)
+{
+ int state_not_in_map;
+
+ tor_assert(chan);
+
+ log_debug(LD_CHANNEL,
+ "Clearing remote endpoint digest on channel %p with "
+ "global ID %"PRIu64,
+ chan, (chan->global_identifier));
+
+ state_not_in_map = CHANNEL_CONDEMNED(chan);
+
+ if (!state_not_in_map && chan->registered &&
+ !tor_digest_is_zero(chan->identity_digest))
+ /* if it's registered get it out of the digest map */
+ channel_remove_from_digest_map(chan);
+
+ memset(chan->identity_digest, 0,
+ sizeof(chan->identity_digest));
+}
+
+/**
+ * Set the identity_digest of a channel.
+ *
+ * This function sets the identity digest of the remote endpoint for a
+ * channel; this is intended for use by the lower layer.
+ */
+void
+channel_set_identity_digest(channel_t *chan,
+ const char *identity_digest,
+ const ed25519_public_key_t *ed_identity)
+{
+ int was_in_digest_map, should_be_in_digest_map, state_not_in_map;
+
+ tor_assert(chan);
+
+ log_debug(LD_CHANNEL,
+ "Setting remote endpoint digest on channel %p with "
+ "global ID %"PRIu64 " to digest %s",
+ chan, (chan->global_identifier),
+ identity_digest ?
+ hex_str(identity_digest, DIGEST_LEN) : "(null)");
+
+ state_not_in_map = CHANNEL_CONDEMNED(chan);
+
+ was_in_digest_map =
+ !state_not_in_map &&
+ chan->registered &&
+ !tor_digest_is_zero(chan->identity_digest);
+ should_be_in_digest_map =
+ !state_not_in_map &&
+ chan->registered &&
+ (identity_digest &&
+ !tor_digest_is_zero(identity_digest));
+
+ if (was_in_digest_map)
+ /* We should always remove it; we'll add it back if we're writing
+ * in a new digest.
+ */
+ channel_remove_from_digest_map(chan);
+
+ if (identity_digest) {
+ memcpy(chan->identity_digest,
+ identity_digest,
+ sizeof(chan->identity_digest));
+ } else {
+ memset(chan->identity_digest, 0,
+ sizeof(chan->identity_digest));
+ }
+ if (ed_identity) {
+ memcpy(&chan->ed25519_identity, ed_identity, sizeof(*ed_identity));
+ } else {
+ memset(&chan->ed25519_identity, 0, sizeof(*ed_identity));
+ }
+
+ /* Put it in the digest map if we should */
+ if (should_be_in_digest_map)
+ channel_add_to_digest_map(chan);
+}
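+
+/*
+ * Illustrative sketch (editor's note): once a handshake has verified the
+ * peer, a lower layer would typically record both identities in one call:
+ *
+ *   channel_set_identity_digest(chan, verified_rsa_digest, &verified_ed_id);
+ *
+ * where verified_rsa_digest and verified_ed_id are hypothetical names for
+ * whatever the handshake produced. Passing NULL for either argument clears
+ * that identity instead of setting it.
+ */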
+
+/**
+ * Clear the remote end metadata (identity_digest) of a channel.
+ *
+ * This function clears all the remote end info from a channel; this is
+ * intended for use by the lower layer.
+ */
+void
+channel_clear_remote_end(channel_t *chan)
+{
+ int state_not_in_map;
+
+ tor_assert(chan);
+
+ log_debug(LD_CHANNEL,
+ "Clearing remote endpoint identity on channel %p with "
+ "global ID %"PRIu64,
+ chan, (chan->global_identifier));
+
+ state_not_in_map = CHANNEL_CONDEMNED(chan);
+
+ if (!state_not_in_map && chan->registered &&
+ !tor_digest_is_zero(chan->identity_digest))
+ /* if it's registered get it out of the digest map */
+ channel_remove_from_digest_map(chan);
+
+ memset(chan->identity_digest, 0,
+ sizeof(chan->identity_digest));
+}
+
+/**
+ * Write to a channel the given packed cell.
+ *
+ * Two errors are possible: either the channel is not open, or the lower
+ * layer (the specialized channel) failed to write the cell. In both cases,
+ * it is the caller's responsibility to free the cell.
+ */
+static int
+write_packed_cell(channel_t *chan, packed_cell_t *cell)
+{
+ int ret = -1;
+ size_t cell_bytes;
+
+ tor_assert(chan);
+ tor_assert(cell);
+
+ /* Assert that the state makes sense for a cell write */
+ tor_assert(CHANNEL_CAN_HANDLE_CELLS(chan));
+
+ {
+ circid_t circ_id;
+ if (packed_cell_is_destroy(chan, cell, &circ_id)) {
+ channel_note_destroy_not_pending(chan, circ_id);
+ }
+ }
+
+ /* For statistical purposes, figure out how big this cell is */
+ cell_bytes = get_cell_network_size(chan->wide_circ_ids);
+
+ /* Can we send it right out? If so, try */
+ if (!CHANNEL_IS_OPEN(chan)) {
+ goto done;
+ }
+
+ /* Write the cell on the connection's outbuf. */
+ if (chan->write_packed_cell(chan, cell) < 0) {
+ goto done;
+ }
+ /* Timestamp for transmission */
+ channel_timestamp_xmit(chan);
+ /* Update the counter */
+ ++(chan->n_cells_xmitted);
+ chan->n_bytes_xmitted += cell_bytes;
+ /* Successfully sent the cell. */
+ ret = 0;
+
+ done:
+ return ret;
+}
+
+/**
+ * Write a packed cell to a channel.
+ *
+ * Write a packed cell to a channel using the write_cell() method. This is
+ * called by the transport-independent code to deliver a packed cell to a
+ * channel for transmission.
+ *
+ * Return 0 on success, or a negative value on failure. In either case, the
+ * caller must not access the cell afterwards: it is freed on both success
+ * and error.
+ */
+int
+channel_write_packed_cell(channel_t *chan, packed_cell_t *cell)
+{
+ int ret = -1;
+
+ tor_assert(chan);
+ tor_assert(cell);
+
+ if (CHANNEL_IS_CLOSING(chan)) {
+ log_debug(LD_CHANNEL, "Discarding %p on closing channel %p with "
+ "global ID %"PRIu64, cell, chan,
+ (chan->global_identifier));
+ goto end;
+ }
+ log_debug(LD_CHANNEL,
+ "Writing %p to channel %p with global ID "
+ "%"PRIu64, cell, chan, (chan->global_identifier));
+
+ ret = write_packed_cell(chan, cell);
+
+ end:
+  /* Whatever happens, we free the cell: either an error occurred or the
+   * cell was put on the connection outbuf. In both cases we own the cell,
+   * so we free it. */
+ packed_cell_free(cell);
+ return ret;
+}
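+
+/*
+ * Illustrative sketch (editor's note): because the cell is consumed on both
+ * success and failure, callers hand it off and must not touch it afterwards:
+ *
+ *   packed_cell_t *cell = ...;  // a freshly packed cell we own
+ *   if (channel_write_packed_cell(chan, cell) < 0) {
+ *     // The write failed, but the cell has already been freed;
+ *     // just handle the failure.
+ *   }
+ *   // Do not use cell here in either case.
+ */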
+
+/**
+ * Change channel state.
+ *
+ * This function, for internal and subclass use only, changes channel
+ * state, performing all transition validity checks and whatever actions
+ * are appropriate to the state transition in question.
+ */
+static void
+channel_change_state_(channel_t *chan, channel_state_t to_state)
+{
+ channel_state_t from_state;
+ unsigned char was_active, is_active;
+ unsigned char was_in_id_map, is_in_id_map;
+
+ tor_assert(chan);
+ from_state = chan->state;
+
+ tor_assert(channel_state_is_valid(from_state));
+ tor_assert(channel_state_is_valid(to_state));
+ tor_assert(channel_state_can_transition(chan->state, to_state));
+
+ /* Check for no-op transitions */
+ if (from_state == to_state) {
+    log_debug(LD_CHANNEL,
+              "Got no-op transition from \"%s\" to itself on channel %p "
+              "(global ID %"PRIu64 ")",
+ channel_state_to_string(to_state),
+ chan, (chan->global_identifier));
+ return;
+ }
+
+ /* If we're going to a closing or closed state, we must have a reason set */
+ if (to_state == CHANNEL_STATE_CLOSING ||
+ to_state == CHANNEL_STATE_CLOSED ||
+ to_state == CHANNEL_STATE_ERROR) {
+ tor_assert(chan->reason_for_closing != CHANNEL_NOT_CLOSING);
+ }
+
+ log_debug(LD_CHANNEL,
+ "Changing state of channel %p (global ID %"PRIu64
+ ") from \"%s\" to \"%s\"",
+ chan,
+ (chan->global_identifier),
+ channel_state_to_string(chan->state),
+ channel_state_to_string(to_state));
+
+ chan->state = to_state;
+
+ /* Need to add to the right lists if the channel is registered */
+ if (chan->registered) {
+ was_active = !(from_state == CHANNEL_STATE_CLOSED ||
+ from_state == CHANNEL_STATE_ERROR);
+ is_active = !(to_state == CHANNEL_STATE_CLOSED ||
+ to_state == CHANNEL_STATE_ERROR);
+
+ /* Need to take off active list and put on finished list? */
+ if (was_active && !is_active) {
+ if (active_channels) smartlist_remove(active_channels, chan);
+ if (!finished_channels) finished_channels = smartlist_new();
+ smartlist_add(finished_channels, chan);
+ mainloop_schedule_postloop_cleanup();
+ }
+ /* Need to put on active list? */
+ else if (!was_active && is_active) {
+ if (finished_channels) smartlist_remove(finished_channels, chan);
+ if (!active_channels) active_channels = smartlist_new();
+ smartlist_add(active_channels, chan);
+ }
+
+ if (!tor_digest_is_zero(chan->identity_digest)) {
+ /* Now we need to handle the identity map */
+ was_in_id_map = !(from_state == CHANNEL_STATE_CLOSING ||
+ from_state == CHANNEL_STATE_CLOSED ||
+ from_state == CHANNEL_STATE_ERROR);
+ is_in_id_map = !(to_state == CHANNEL_STATE_CLOSING ||
+ to_state == CHANNEL_STATE_CLOSED ||
+ to_state == CHANNEL_STATE_ERROR);
+
+ if (!was_in_id_map && is_in_id_map) channel_add_to_digest_map(chan);
+ else if (was_in_id_map && !is_in_id_map)
+ channel_remove_from_digest_map(chan);
+ }
+ }
+
+ /*
+ * If we're going to a closed/closing state, we don't need scheduling any
+ * more; in CHANNEL_STATE_MAINT we can't accept writes.
+ */
+ if (to_state == CHANNEL_STATE_CLOSING ||
+ to_state == CHANNEL_STATE_CLOSED ||
+ to_state == CHANNEL_STATE_ERROR) {
+ scheduler_release_channel(chan);
+ } else if (to_state == CHANNEL_STATE_MAINT) {
+ scheduler_channel_doesnt_want_writes(chan);
+ }
+}
+
+/**
+ * As channel_change_state_, but change the state to any state but open.
+ */
+void
+channel_change_state(channel_t *chan, channel_state_t to_state)
+{
+ tor_assert(to_state != CHANNEL_STATE_OPEN);
+ channel_change_state_(chan, to_state);
+}
+
+/**
+ * As channel_change_state, but change the state to open.
+ */
+void
+channel_change_state_open(channel_t *chan)
+{
+ channel_change_state_(chan, CHANNEL_STATE_OPEN);
+
+ /* Tell circuits if we opened and stuff */
+ channel_do_open_actions(chan);
+ chan->has_been_open = 1;
+}
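+
+/*
+ * Illustrative sketch (editor's note): a lower layer that has finished its
+ * handshake must use this entry point; the generic one asserts against
+ * CHANNEL_STATE_OPEN:
+ *
+ *   // channel_change_state(chan, CHANNEL_STATE_OPEN);  // would assert
+ *   channel_change_state_open(chan);  // transitions and runs open actions
+ */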
+
+/**
+ * Change channel listener state.
+ *
+ * This function, for internal and subclass use only, changes channel
+ * listener state, performing all transition validity checks and whatever
+ * actions are appropriate to the state transition in question.
+ */
+void
+channel_listener_change_state(channel_listener_t *chan_l,
+ channel_listener_state_t to_state)
+{
+ channel_listener_state_t from_state;
+ unsigned char was_active, is_active;
+
+ tor_assert(chan_l);
+ from_state = chan_l->state;
+
+ tor_assert(channel_listener_state_is_valid(from_state));
+ tor_assert(channel_listener_state_is_valid(to_state));
+ tor_assert(channel_listener_state_can_transition(chan_l->state, to_state));
+
+ /* Check for no-op transitions */
+ if (from_state == to_state) {
+ log_debug(LD_CHANNEL,
+ "Got no-op transition from \"%s\" to itself on channel "
+ "listener %p (global ID %"PRIu64 ")",
+ channel_listener_state_to_string(to_state),
+ chan_l, (chan_l->global_identifier));
+ return;
+ }
+
+ /* If we're going to a closing or closed state, we must have a reason set */
+ if (to_state == CHANNEL_LISTENER_STATE_CLOSING ||
+ to_state == CHANNEL_LISTENER_STATE_CLOSED ||
+ to_state == CHANNEL_LISTENER_STATE_ERROR) {
+ tor_assert(chan_l->reason_for_closing != CHANNEL_LISTENER_NOT_CLOSING);
+ }
+
+  log_debug(LD_CHANNEL,
+            "Changing state of channel listener %p (global ID %"PRIu64
+            ") from \"%s\" to \"%s\"",
+            chan_l, (chan_l->global_identifier),
+            channel_listener_state_to_string(chan_l->state),
+            channel_listener_state_to_string(to_state));
+
+ chan_l->state = to_state;
+
+ /* Need to add to the right lists if the channel listener is registered */
+ if (chan_l->registered) {
+ was_active = !(from_state == CHANNEL_LISTENER_STATE_CLOSED ||
+ from_state == CHANNEL_LISTENER_STATE_ERROR);
+ is_active = !(to_state == CHANNEL_LISTENER_STATE_CLOSED ||
+ to_state == CHANNEL_LISTENER_STATE_ERROR);
+
+ /* Need to take off active list and put on finished list? */
+ if (was_active && !is_active) {
+ if (active_listeners) smartlist_remove(active_listeners, chan_l);
+ if (!finished_listeners) finished_listeners = smartlist_new();
+ smartlist_add(finished_listeners, chan_l);
+ mainloop_schedule_postloop_cleanup();
+ }
+ /* Need to put on active list? */
+ else if (!was_active && is_active) {
+ if (finished_listeners) smartlist_remove(finished_listeners, chan_l);
+ if (!active_listeners) active_listeners = smartlist_new();
+ smartlist_add(active_listeners, chan_l);
+ }
+ }
+
+ if (to_state == CHANNEL_LISTENER_STATE_CLOSED ||
+ to_state == CHANNEL_LISTENER_STATE_ERROR) {
+ tor_assert(!(chan_l->incoming_list) ||
+ smartlist_len(chan_l->incoming_list) == 0);
+ }
+}
+
+/* Maximum number of cells that is allowed to flush at once within
+ * channel_flush_some_cells(). */
+#define MAX_CELLS_TO_GET_FROM_CIRCUITS_FOR_UNLIMITED 256
+
+/**
+ * Try to flush cells of the given channel chan up to a maximum of num_cells.
+ *
+ * This is called by the scheduler when it wants to flush cells from the
+ * channel's circuit queue(s) to the connection outbuf (not yet on the wire).
+ *
+ * If the channel is not in state CHANNEL_STATE_OPEN, this does nothing and
+ * will return 0 meaning no cells were flushed.
+ *
+ * If num_cells is -1, we'll try to flush up to the maximum cells allowed
+ * defined in MAX_CELLS_TO_GET_FROM_CIRCUITS_FOR_UNLIMITED.
+ *
+ * On success, the number of flushed cells is returned; it can never exceed
+ * num_cells. A return value of 0 means no cells were flushed, either because
+ * the channel was not open or because it had no cells pending. A negative
+ * value is never returned.
+ *
+ * This function is part of the fast path. */
+MOCK_IMPL(ssize_t,
+channel_flush_some_cells, (channel_t *chan, ssize_t num_cells))
+{
+ unsigned int unlimited = 0;
+ ssize_t flushed = 0;
+ int clamped_num_cells;
+
+ tor_assert(chan);
+
+ if (num_cells < 0) unlimited = 1;
+ if (!unlimited && num_cells <= flushed) goto done;
+
+ /* If we aren't in CHANNEL_STATE_OPEN, nothing goes through */
+ if (CHANNEL_IS_OPEN(chan)) {
+ if (circuitmux_num_cells(chan->cmux) > 0) {
+ /* Calculate number of cells, including clamp */
+ if (unlimited) {
+ clamped_num_cells = MAX_CELLS_TO_GET_FROM_CIRCUITS_FOR_UNLIMITED;
+ } else {
+ if (num_cells - flushed >
+ MAX_CELLS_TO_GET_FROM_CIRCUITS_FOR_UNLIMITED) {
+ clamped_num_cells = MAX_CELLS_TO_GET_FROM_CIRCUITS_FOR_UNLIMITED;
+ } else {
+ clamped_num_cells = (int)(num_cells - flushed);
+ }
+ }
+
+ /* Try to get more cells from any active circuits */
+ flushed = channel_flush_from_first_active_circuit(
+ chan, clamped_num_cells);
+ }
+ }
+
+ done:
+ return flushed;
+}
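+
+/*
+ * Illustrative sketch (editor's note): a caller such as the scheduler would
+ * typically size the request from the lower layer's own estimate:
+ *
+ *   ssize_t n = channel_num_cells_writeable(chan);
+ *   if (n > 0)
+ *     channel_flush_some_cells(chan, n);
+ *   // or pass -1 to flush up to the 256-cell clamp defined above
+ */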
+
+/**
+ * Check if any cells are available.
+ *
+ * This is used by the scheduler to know if the channel has more to flush
+ * after a scheduling round.
+ */
+MOCK_IMPL(int,
+channel_more_to_flush, (channel_t *chan))
+{
+ tor_assert(chan);
+
+ if (circuitmux_num_cells(chan->cmux) > 0) return 1;
+
+ /* Else no */
+ return 0;
+}
+
+/**
+ * Notify the channel we're done flushing the output in the lower layer.
+ *
+ * Connection.c will call this when we've flushed the output; there's some
+ * dirreq-related maintenance to do.
+ */
+void
+channel_notify_flushed(channel_t *chan)
+{
+ tor_assert(chan);
+
+ if (chan->dirreq_id != 0)
+ geoip_change_dirreq_state(chan->dirreq_id,
+ DIRREQ_TUNNELED,
+ DIRREQ_CHANNEL_BUFFER_FLUSHED);
+}
+
+/**
+ * Process the queue of incoming channels on a listener.
+ *
+ * Use a listener's registered callback to process as many entries in the
+ * queue of incoming channels as possible.
+ */
+void
+channel_listener_process_incoming(channel_listener_t *listener)
+{
+ tor_assert(listener);
+
+ /*
+ * CHANNEL_LISTENER_STATE_CLOSING permitted because we drain the queue
+ * while closing a listener.
+ */
+ tor_assert(listener->state == CHANNEL_LISTENER_STATE_LISTENING ||
+ listener->state == CHANNEL_LISTENER_STATE_CLOSING);
+ tor_assert(listener->listener);
+
+ log_debug(LD_CHANNEL,
+ "Processing queue of incoming connections for channel "
+ "listener %p (global ID %"PRIu64 ")",
+ listener, (listener->global_identifier));
+
+ if (!(listener->incoming_list)) return;
+
+ SMARTLIST_FOREACH_BEGIN(listener->incoming_list,
+ channel_t *, chan) {
+ tor_assert(chan);
+
+ log_debug(LD_CHANNEL,
+ "Handling incoming channel %p (%"PRIu64 ") "
+ "for listener %p (%"PRIu64 ")",
+ chan,
+ (chan->global_identifier),
+ listener,
+ (listener->global_identifier));
+ /* Make sure this is set correctly */
+ channel_mark_incoming(chan);
+ listener->listener(listener, chan);
+ } SMARTLIST_FOREACH_END(chan);
+
+ smartlist_free(listener->incoming_list);
+ listener->incoming_list = NULL;
+}
+
+/**
+ * Take actions required when a channel becomes open.
+ *
+ * Handle actions we should do when we know a channel is open; a lot of
+ * this comes from the old connection_or_set_state_open() of connection_or.c.
+ *
+ * Because of this mechanism, future channel_t subclasses should take care
+ * not to change a channel from CHANNEL_STATE_OPENING to CHANNEL_STATE_OPEN
+ * until there is positive confirmation that the network is operational.
+ * In particular, anything UDP-based should not make this transition until a
+ * packet is received from the other side.
+ */
+void
+channel_do_open_actions(channel_t *chan)
+{
+ tor_addr_t remote_addr;
+ int started_here;
+ time_t now = time(NULL);
+ int close_origin_circuits = 0;
+
+ tor_assert(chan);
+
+ started_here = channel_is_outgoing(chan);
+
+ if (started_here) {
+ circuit_build_times_network_is_live(get_circuit_build_times_mutable());
+ router_set_status(chan->identity_digest, 1);
+ } else {
+ /* only report it to the geoip module if it's a client */
+ if (channel_is_client(chan)) {
+ if (channel_get_addr_if_possible(chan, &remote_addr)) {
+ char *transport_name = NULL;
+ channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
+ if (chan->get_transport_name(chan, &transport_name) < 0)
+ transport_name = NULL;
+
+ geoip_note_client_seen(GEOIP_CLIENT_CONNECT,
+ &remote_addr, transport_name,
+ now);
+ tor_free(transport_name);
+ /* Notify the DoS subsystem of a new client. */
+ if (tlschan && tlschan->conn) {
+ dos_new_client_conn(tlschan->conn);
+ }
+ }
+ /* Otherwise the underlying transport can't tell us this, so skip it */
+ }
+ }
+
+ /* Disable or reduce padding according to user prefs. */
+ if (chan->padding_enabled || get_options()->ConnectionPadding == 1) {
+ if (!get_options()->ConnectionPadding) {
+ /* Disable if torrc disabled */
+ channelpadding_disable_padding_on_channel(chan);
+ } else if (get_options()->Tor2webMode &&
+ !networkstatus_get_param(NULL,
+ CHANNELPADDING_TOR2WEB_PARAM,
+ CHANNELPADDING_TOR2WEB_DEFAULT, 0, 1)) {
+ /* Disable if we're using tor2web and the consensus disabled padding
+ * for tor2web */
+ channelpadding_disable_padding_on_channel(chan);
+ } else if (rend_service_allow_non_anonymous_connection(get_options()) &&
+ !networkstatus_get_param(NULL,
+ CHANNELPADDING_SOS_PARAM,
+ CHANNELPADDING_SOS_DEFAULT, 0, 1)) {
+ /* Disable if we're using RSOS and the consensus disabled padding
+ * for RSOS */
+ channelpadding_disable_padding_on_channel(chan);
+ } else if (get_options()->ReducedConnectionPadding) {
+      /* Padding can be forced and/or reduced by clients, regardless of
+       * whether the channel supports it */
+ channelpadding_reduce_padding_on_channel(chan);
+ }
+ }
+
+ circuit_n_chan_done(chan, 1, close_origin_circuits);
+}
+
+/**
+ * Queue an incoming channel on a listener.
+ *
+ * Internal and subclass use only function to queue an incoming channel from
+ * a listener. A subclass of channel_listener_t should call this when a new
+ * incoming channel is created.
+ */
+void
+channel_listener_queue_incoming(channel_listener_t *listener,
+ channel_t *incoming)
+{
+ int need_to_queue = 0;
+
+ tor_assert(listener);
+ tor_assert(listener->state == CHANNEL_LISTENER_STATE_LISTENING);
+ tor_assert(incoming);
+
+ log_debug(LD_CHANNEL,
+ "Queueing incoming channel %p (global ID %"PRIu64 ") on "
+ "channel listener %p (global ID %"PRIu64 ")",
+ incoming, (incoming->global_identifier),
+ listener, (listener->global_identifier));
+
+ /* Do we need to queue it, or can we just call the listener right away? */
+ if (!(listener->listener)) need_to_queue = 1;
+ if (listener->incoming_list &&
+ (smartlist_len(listener->incoming_list) > 0))
+ need_to_queue = 1;
+
+ /* If we need to queue and have no queue, create one */
+ if (need_to_queue && !(listener->incoming_list)) {
+ listener->incoming_list = smartlist_new();
+ }
+
+ /* Bump the counter and timestamp it */
+ channel_listener_timestamp_active(listener);
+ channel_listener_timestamp_accepted(listener);
+ ++(listener->n_accepted);
+
+ /* If we don't need to queue, process it right away */
+ if (!need_to_queue) {
+ tor_assert(listener->listener);
+ listener->listener(listener, incoming);
+ }
+ /*
+ * Otherwise, we need to queue; queue and then process the queue if
+ * we can.
+ */
+ else {
+ tor_assert(listener->incoming_list);
+ smartlist_add(listener->incoming_list, incoming);
+ if (listener->listener) channel_listener_process_incoming(listener);
+ }
+}
+
+/**
+ * Process a cell from the given channel.
+ */
+void
+channel_process_cell(channel_t *chan, cell_t *cell)
+{
+ tor_assert(chan);
+ tor_assert(CHANNEL_IS_CLOSING(chan) || CHANNEL_IS_MAINT(chan) ||
+ CHANNEL_IS_OPEN(chan));
+ tor_assert(cell);
+
+ /* Nothing we can do if we have no registered cell handlers */
+ if (!chan->cell_handler)
+ return;
+
+ /* Timestamp for receiving */
+ channel_timestamp_recv(chan);
+ /* Update received counter. */
+ ++(chan->n_cells_recved);
+ chan->n_bytes_recved += get_cell_network_size(chan->wide_circ_ids);
+
+ log_debug(LD_CHANNEL,
+ "Processing incoming cell_t %p for channel %p (global ID "
+ "%"PRIu64 ")", cell, chan,
+ (chan->global_identifier));
+ chan->cell_handler(chan, cell);
+}
+
+/** If <b>packed_cell</b> on <b>chan</b> is a destroy cell, then set
+ * *<b>circid_out</b> to its circuit ID, and return true. Otherwise, return
+ * false. */
+/* XXXX Move this function. */
+int
+packed_cell_is_destroy(channel_t *chan,
+ const packed_cell_t *packed_cell,
+ circid_t *circid_out)
+{
+ if (chan->wide_circ_ids) {
+ if (packed_cell->body[4] == CELL_DESTROY) {
+ *circid_out = ntohl(get_uint32(packed_cell->body));
+ return 1;
+ }
+ } else {
+ if (packed_cell->body[2] == CELL_DESTROY) {
+ *circid_out = ntohs(get_uint16(packed_cell->body));
+ return 1;
+ }
+ }
+ return 0;
+}
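+
+/*
+ * Layout note (editor's addition): the check above relies only on where the
+ * one-byte command sits in a packed cell:
+ *
+ *   wide circ IDs:   body[0..3] = circuit ID (network order), body[4] = cmd
+ *   legacy circ IDs: body[0..1] = circuit ID (network order), body[2] = cmd
+ */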
+
+/**
+ * Send destroy cell on a channel.
+ *
+ * Write a destroy cell with circ ID <b>circ_id</b> and reason <b>reason</b>
+ * onto channel <b>chan</b>. Don't perform range-checking on reason:
+ * we may want to propagate reasons from other cells.
+ */
+int
+channel_send_destroy(circid_t circ_id, channel_t *chan, int reason)
+{
+ tor_assert(chan);
+ if (circ_id == 0) {
+ log_warn(LD_BUG, "Attempted to send a destroy cell for circID 0 "
+ "on a channel %"PRIu64 " at %p in state %s (%d)",
+ (chan->global_identifier),
+ chan, channel_state_to_string(chan->state),
+ chan->state);
+ return 0;
+ }
+
+ /* Check to make sure we can send on this channel first */
+ if (!CHANNEL_CONDEMNED(chan) && chan->cmux) {
+ channel_note_destroy_pending(chan, circ_id);
+ circuitmux_append_destroy_cell(chan, chan->cmux, circ_id, reason);
+ log_debug(LD_OR,
+ "Sending destroy (circID %u) on channel %p "
+ "(global ID %"PRIu64 ")",
+ (unsigned)circ_id, chan,
+ (chan->global_identifier));
+ } else {
+ log_warn(LD_BUG,
+ "Someone called channel_send_destroy() for circID %u "
+ "on a channel %"PRIu64 " at %p in state %s (%d)",
+ (unsigned)circ_id, (chan->global_identifier),
+ chan, channel_state_to_string(chan->state),
+ chan->state);
+ }
+
+ return 0;
+}
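+
+/*
+ * Illustrative sketch (editor's note): a typical caller tearing down a
+ * circuit sends the destroy on whichever channel is still attached:
+ *
+ *   if (circ->n_chan)
+ *     channel_send_destroy(circ->n_circ_id, circ->n_chan, reason);
+ *
+ * Note that the cell is queued through the cmux rather than written out
+ * immediately.
+ */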
+
+/**
+ * Dump channel statistics to the log.
+ *
+ * This is called from dumpstats() in main.c and spams the log with
+ * statistics on channels.
+ */
+void
+channel_dumpstats(int severity)
+{
+ if (all_channels && smartlist_len(all_channels) > 0) {
+ tor_log(severity, LD_GENERAL,
+ "Dumping statistics about %d channels:",
+ smartlist_len(all_channels));
+ tor_log(severity, LD_GENERAL,
+ "%d are active, and %d are done and waiting for cleanup",
+ (active_channels != NULL) ?
+ smartlist_len(active_channels) : 0,
+ (finished_channels != NULL) ?
+ smartlist_len(finished_channels) : 0);
+
+ SMARTLIST_FOREACH(all_channels, channel_t *, chan,
+ channel_dump_statistics(chan, severity));
+
+ tor_log(severity, LD_GENERAL,
+ "Done spamming about channels now");
+ } else {
+ tor_log(severity, LD_GENERAL,
+ "No channels to dump");
+ }
+}
+
+/**
+ * Dump channel listener statistics to the log.
+ *
+ * This is called from dumpstats() in main.c and spams the log with
+ * statistics on channel listeners.
+ */
+void
+channel_listener_dumpstats(int severity)
+{
+ if (all_listeners && smartlist_len(all_listeners) > 0) {
+ tor_log(severity, LD_GENERAL,
+ "Dumping statistics about %d channel listeners:",
+ smartlist_len(all_listeners));
+ tor_log(severity, LD_GENERAL,
+ "%d are active and %d are done and waiting for cleanup",
+ (active_listeners != NULL) ?
+ smartlist_len(active_listeners) : 0,
+ (finished_listeners != NULL) ?
+ smartlist_len(finished_listeners) : 0);
+
+ SMARTLIST_FOREACH(all_listeners, channel_listener_t *, chan_l,
+ channel_listener_dump_statistics(chan_l, severity));
+
+ tor_log(severity, LD_GENERAL,
+ "Done spamming about channel listeners now");
+ } else {
+ tor_log(severity, LD_GENERAL,
+ "No channel listeners to dump");
+ }
+}
+
+/**
+ * Clean up channels.
+ *
+ * This gets called periodically from run_scheduled_events() in main.c;
+ * it cleans up after closed channels.
+ */
+void
+channel_run_cleanup(void)
+{
+ channel_t *tmp = NULL;
+
+ /* Check if we need to do anything */
+ if (!finished_channels || smartlist_len(finished_channels) == 0) return;
+
+ /* Iterate through finished_channels and get rid of them */
+ SMARTLIST_FOREACH_BEGIN(finished_channels, channel_t *, curr) {
+ tmp = curr;
+ /* Remove it from the list */
+ SMARTLIST_DEL_CURRENT(finished_channels, curr);
+ /* Also unregister it */
+ channel_unregister(tmp);
+ /* ... and free it */
+ channel_free(tmp);
+ } SMARTLIST_FOREACH_END(curr);
+}
+
+/**
+ * Clean up channel listeners.
+ *
+ * This gets called periodically from run_scheduled_events() in main.c;
+ * it cleans up after closed channel listeners.
+ */
+void
+channel_listener_run_cleanup(void)
+{
+ channel_listener_t *tmp = NULL;
+
+ /* Check if we need to do anything */
+ if (!finished_listeners || smartlist_len(finished_listeners) == 0) return;
+
+  /* Iterate through finished_listeners and get rid of them */
+ SMARTLIST_FOREACH_BEGIN(finished_listeners, channel_listener_t *, curr) {
+ tmp = curr;
+ /* Remove it from the list */
+ SMARTLIST_DEL_CURRENT(finished_listeners, curr);
+ /* Also unregister it */
+ channel_listener_unregister(tmp);
+ /* ... and free it */
+ channel_listener_free(tmp);
+ } SMARTLIST_FOREACH_END(curr);
+}
+
+/**
+ * Free a list of channels for channel_free_all().
+ */
+static void
+channel_free_list(smartlist_t *channels, int mark_for_close)
+{
+ if (!channels) return;
+
+ SMARTLIST_FOREACH_BEGIN(channels, channel_t *, curr) {
+ /* Deregister and free it */
+ tor_assert(curr);
+ log_debug(LD_CHANNEL,
+ "Cleaning up channel %p (global ID %"PRIu64 ") "
+ "in state %s (%d)",
+ curr, (curr->global_identifier),
+ channel_state_to_string(curr->state), curr->state);
+ /* Detach circuits early so they can find the channel */
+ if (curr->cmux) {
+ circuitmux_detach_all_circuits(curr->cmux, NULL);
+ }
+ SMARTLIST_DEL_CURRENT(channels, curr);
+ channel_unregister(curr);
+ if (mark_for_close) {
+ if (!CHANNEL_CONDEMNED(curr)) {
+ channel_mark_for_close(curr);
+ }
+ channel_force_xfree(curr);
+ } else channel_free(curr);
+ } SMARTLIST_FOREACH_END(curr);
+}
+
+/**
+ * Free a list of channel listeners for channel_free_all().
+ */
+static void
+channel_listener_free_list(smartlist_t *listeners, int mark_for_close)
+{
+ if (!listeners) return;
+
+ SMARTLIST_FOREACH_BEGIN(listeners, channel_listener_t *, curr) {
+ /* Deregister and free it */
+ tor_assert(curr);
+ log_debug(LD_CHANNEL,
+ "Cleaning up channel listener %p (global ID %"PRIu64 ") "
+ "in state %s (%d)",
+ curr, (curr->global_identifier),
+ channel_listener_state_to_string(curr->state), curr->state);
+ channel_listener_unregister(curr);
+ if (mark_for_close) {
+ if (!(curr->state == CHANNEL_LISTENER_STATE_CLOSING ||
+ curr->state == CHANNEL_LISTENER_STATE_CLOSED ||
+ curr->state == CHANNEL_LISTENER_STATE_ERROR)) {
+ channel_listener_mark_for_close(curr);
+ }
+ channel_listener_force_xfree(curr);
+ } else channel_listener_free(curr);
+ } SMARTLIST_FOREACH_END(curr);
+}
+
+/**
+ * Close all channels and free everything.
+ *
+ * This gets called from tor_free_all() in main.c to clean up on exit.
+ * It will close all registered channels and free associated storage,
+ * then free the all_channels, active_channels, and finished_channels
+ * lists (and their listener counterparts), and also channel_identity_map.
+ */
+void
+channel_free_all(void)
+{
+ log_debug(LD_CHANNEL,
+ "Shutting down channels...");
+
+ /* First, let's go for finished channels */
+ if (finished_channels) {
+ channel_free_list(finished_channels, 0);
+ smartlist_free(finished_channels);
+ finished_channels = NULL;
+ }
+
+ /* Now the finished listeners */
+ if (finished_listeners) {
+ channel_listener_free_list(finished_listeners, 0);
+ smartlist_free(finished_listeners);
+ finished_listeners = NULL;
+ }
+
+ /* Now all active channels */
+ if (active_channels) {
+ channel_free_list(active_channels, 1);
+ smartlist_free(active_channels);
+ active_channels = NULL;
+ }
+
+ /* Now all active listeners */
+ if (active_listeners) {
+ channel_listener_free_list(active_listeners, 1);
+ smartlist_free(active_listeners);
+ active_listeners = NULL;
+ }
+
+ /* Now all channels, in case any are left over */
+ if (all_channels) {
+ channel_free_list(all_channels, 1);
+ smartlist_free(all_channels);
+ all_channels = NULL;
+ }
+
+ /* Now all listeners, in case any are left over */
+ if (all_listeners) {
+ channel_listener_free_list(all_listeners, 1);
+ smartlist_free(all_listeners);
+ all_listeners = NULL;
+ }
+
+ /* Now free channel_identity_map */
+ log_debug(LD_CHANNEL,
+ "Freeing channel_identity_map");
+ /* Geez, anything still left over just won't die ... let it leak then */
+ HT_CLEAR(channel_idmap, &channel_identity_map);
+
+ /* Same with channel_gid_map */
+ log_debug(LD_CHANNEL,
+ "Freeing channel_gid_map");
+ HT_CLEAR(channel_gid_map, &channel_gid_map);
+
+ log_debug(LD_CHANNEL,
+ "Done cleaning up after channels");
+}
+
+/**
+ * Connect to a given addr/port/digest.
+ *
+ * This sets up a new outgoing channel; in the future if multiple
+ * channel_t subclasses are available, this is where the selection policy
+ * should go. It may also be desirable to fold port into tor_addr_t
+ * or make a new type including a tor_addr_t and port, so we have a
+ * single abstract object encapsulating all the protocol details of
+ * how to contact an OR.
+ */
+channel_t *
+channel_connect(const tor_addr_t *addr, uint16_t port,
+ const char *id_digest,
+ const ed25519_public_key_t *ed_id)
+{
+ return channel_tls_connect(addr, port, id_digest, ed_id);
+}
+
+/**
+ * Decide which of two channels to prefer for extending a circuit.
+ *
+ * This function is called while extending a circuit and returns true iff
+ * a is 'better' than b. The most important criterion here is that a
+ * canonical channel is always better than a non-canonical one, but the
+ * number of circuits and the age are used as tie-breakers.
+ *
+ * This is based on the former connection_or_is_better() of connection_or.c
+ */
+int
+channel_is_better(channel_t *a, channel_t *b)
+{
+ int a_is_canonical, b_is_canonical;
+
+ tor_assert(a);
+ tor_assert(b);
+
+ /* If one channel is bad for new circuits, and the other isn't,
+ * use the one that is still good. */
+ if (!channel_is_bad_for_new_circs(a) && channel_is_bad_for_new_circs(b))
+ return 1;
+ if (channel_is_bad_for_new_circs(a) && !channel_is_bad_for_new_circs(b))
+ return 0;
+
+ /* Check if one is canonical and the other isn't first */
+ a_is_canonical = channel_is_canonical(a);
+ b_is_canonical = channel_is_canonical(b);
+
+ if (a_is_canonical && !b_is_canonical) return 1;
+ if (!a_is_canonical && b_is_canonical) return 0;
+
+ /* Check if we suspect that one of the channels will be preferred
+ * by the peer */
+ if (a->is_canonical_to_peer && !b->is_canonical_to_peer) return 1;
+ if (!a->is_canonical_to_peer && b->is_canonical_to_peer) return 0;
+
+ /*
+   * Okay, if we're here they tied on canonicity, so prefer the older
+ * connection, so that the adversary can't create a new connection
+ * and try to switch us over to it (which will leak information
+ * about long-lived circuits). Additionally, switching connections
+ * too often makes us more vulnerable to attacks like Torscan and
+ * passive netflow-based equivalents.
+ *
+ * Connections will still only live for at most a week, due to
+ * the check in connection_or_group_set_badness() against
+ * TIME_BEFORE_OR_CONN_IS_TOO_OLD, which marks old connections as
+ * unusable for new circuits after 1 week. That check sets
+ * is_bad_for_new_circs, which is checked in channel_get_for_extend().
+ *
+ * We check channel_is_bad_for_new_circs() above here anyway, for safety.
+ */
+ if (channel_when_created(a) < channel_when_created(b)) return 1;
+ else if (channel_when_created(a) > channel_when_created(b)) return 0;
+
+ if (channel_num_circuits(a) > channel_num_circuits(b)) return 1;
+ else return 0;
+}
+
+/**
+ * Get a channel to extend a circuit.
+ *
+ * Pick a suitable channel to extend a circuit, given the desired digest and
+ * the address we believe is correct for that digest; this tries to see if
+ * we already have one for the requested endpoint. If there is no good
+ * channel, set *msg_out to a message describing the channel's state and our
+ * next action, and set *launch_out to a boolean indicating whether the
+ * caller should try to launch a new channel with channel_connect().
+ */
+channel_t *
+channel_get_for_extend(const char *rsa_id_digest,
+ const ed25519_public_key_t *ed_id,
+ const tor_addr_t *target_addr,
+ const char **msg_out,
+ int *launch_out)
+{
+ channel_t *chan, *best = NULL;
+ int n_inprogress_goodaddr = 0, n_old = 0;
+ int n_noncanonical = 0;
+
+ tor_assert(msg_out);
+ tor_assert(launch_out);
+
+ chan = channel_find_by_remote_identity(rsa_id_digest, ed_id);
+
+  /* Walk the list of channels that share this RSA identity. */
+ for (; chan; chan = channel_next_with_rsa_identity(chan)) {
+ tor_assert(tor_memeq(chan->identity_digest,
+ rsa_id_digest, DIGEST_LEN));
+
+ if (CHANNEL_CONDEMNED(chan))
+ continue;
+
+ /* Never return a channel on which the other end appears to be
+ * a client. */
+ if (channel_is_client(chan)) {
+ continue;
+ }
+
+ /* The Ed25519 key has to match too */
+ if (!channel_remote_identity_matches(chan, rsa_id_digest, ed_id)) {
+ continue;
+ }
+
+ /* Never return a non-open connection. */
+ if (!CHANNEL_IS_OPEN(chan)) {
+ /* If the address matches, don't launch a new connection for this
+ * circuit. */
+ if (channel_matches_target_addr_for_extend(chan, target_addr))
+ ++n_inprogress_goodaddr;
+ continue;
+ }
+
+ /* Never return a connection that shouldn't be used for circs. */
+ if (channel_is_bad_for_new_circs(chan)) {
+ ++n_old;
+ continue;
+ }
+
+ /* Never return a non-canonical connection using a recent link protocol
+ * if the address is not what we wanted.
+ *
+ * The channel_is_canonical_is_reliable() function asks the lower layer
+ * if we should trust channel_is_canonical(). The below is from the
+ * comments of the old circuit_or_get_for_extend() and applies when
+ * the lower-layer transport is channel_tls_t.
+ *
+ * (For old link protocols, we can't rely on is_canonical getting
+ * set properly if we're talking to the right address, since we might
+ * have an out-of-date descriptor, and we will get no NETINFO cell to
+ * tell us about the right address.)
+ */
+ if (!channel_is_canonical(chan) &&
+ channel_is_canonical_is_reliable(chan) &&
+ !channel_matches_target_addr_for_extend(chan, target_addr)) {
+ ++n_noncanonical;
+ continue;
+ }
+
+ if (!best) {
+ best = chan; /* If we have no 'best' so far, this one is good enough. */
+ continue;
+ }
+
+ if (channel_is_better(chan, best))
+ best = chan;
+ }
+
+ if (best) {
+ *msg_out = "Connection is fine; using it.";
+ *launch_out = 0;
+ return best;
+ } else if (n_inprogress_goodaddr) {
+ *msg_out = "Connection in progress; waiting.";
+ *launch_out = 0;
+ return NULL;
+ } else if (n_old || n_noncanonical) {
+ *msg_out = "Connections all too old, or too non-canonical. "
+ " Launching a new one.";
+ *launch_out = 1;
+ return NULL;
+ } else {
+ *msg_out = "Not connected. Connecting.";
+ *launch_out = 1;
+ return NULL;
+ }
+}
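+
+/*
+ * Illustrative sketch (editor's note): the expected calling pattern from
+ * circuit-extension code is roughly:
+ *
+ *   const char *msg = NULL;
+ *   int launch = 0;
+ *   channel_t *chan = channel_get_for_extend(rsa_id, ed_id, &addr,
+ *                                            &msg, &launch);
+ *   if (!chan && launch)
+ *     chan = channel_connect(&addr, port, rsa_id, ed_id);
+ *   // otherwise either use chan, or wait; msg explains the decision
+ */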
+
+/**
+ * Describe the transport subclass for a channel.
+ *
+ * Invoke a method to get a string description of the lower-layer
+ * transport for this channel.
+ */
+const char *
+channel_describe_transport(channel_t *chan)
+{
+ tor_assert(chan);
+ tor_assert(chan->describe_transport);
+
+ return chan->describe_transport(chan);
+}
+
+/**
+ * Describe the transport subclass for a channel listener.
+ *
+ * Invoke a method to get a string description of the lower-layer
+ * transport for this channel listener.
+ */
+const char *
+channel_listener_describe_transport(channel_listener_t *chan_l)
+{
+ tor_assert(chan_l);
+ tor_assert(chan_l->describe_transport);
+
+ return chan_l->describe_transport(chan_l);
+}
+
+/**
+ * Dump channel statistics.
+ *
+ * Dump statistics for one channel to the log.
+ */
+MOCK_IMPL(void,
+channel_dump_statistics, (channel_t *chan, int severity))
+{
+ double avg, interval, age;
+ time_t now = time(NULL);
+ tor_addr_t remote_addr;
+ int have_remote_addr;
+ char *remote_addr_str;
+
+ tor_assert(chan);
+
+ age = (double)(now - chan->timestamp_created);
+
+ tor_log(severity, LD_GENERAL,
+ "Channel %"PRIu64 " (at %p) with transport %s is in state "
+ "%s (%d)",
+ (chan->global_identifier), chan,
+ channel_describe_transport(chan),
+ channel_state_to_string(chan->state), chan->state);
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " was created at %"PRIu64
+ " (%"PRIu64 " seconds ago) "
+ "and last active at %"PRIu64 " (%"PRIu64 " seconds ago)",
+ (chan->global_identifier),
+ (uint64_t)(chan->timestamp_created),
+ (uint64_t)(now - chan->timestamp_created),
+ (uint64_t)(chan->timestamp_active),
+ (uint64_t)(now - chan->timestamp_active));
+
+ /* Handle digest. */
+ if (!tor_digest_is_zero(chan->identity_digest)) {
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " says it is connected "
+ "to an OR with digest %s",
+ (chan->global_identifier),
+ hex_str(chan->identity_digest, DIGEST_LEN));
+ } else {
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " does not know the digest"
+ " of the OR it is connected to",
+ (chan->global_identifier));
+ }
+
+ /* Handle remote address and descriptions */
+ have_remote_addr = channel_get_addr_if_possible(chan, &remote_addr);
+ if (have_remote_addr) {
+ char *actual = tor_strdup(channel_get_actual_remote_descr(chan));
+ remote_addr_str = tor_addr_to_str_dup(&remote_addr);
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " says its remote address"
+ " is %s, and gives a canonical description of \"%s\" and an "
+ "actual description of \"%s\"",
+ (chan->global_identifier),
+ safe_str(remote_addr_str),
+ safe_str(channel_get_canonical_remote_descr(chan)),
+ safe_str(actual));
+ tor_free(remote_addr_str);
+ tor_free(actual);
+ } else {
+ char *actual = tor_strdup(channel_get_actual_remote_descr(chan));
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " does not know its remote "
+ "address, but gives a canonical description of \"%s\" and an "
+ "actual description of \"%s\"",
+ (chan->global_identifier),
+ channel_get_canonical_remote_descr(chan),
+ actual);
+ tor_free(actual);
+ }
+
+ /* Handle marks */
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " has these marks: %s %s %s "
+ "%s %s %s",
+ (chan->global_identifier),
+ channel_is_bad_for_new_circs(chan) ?
+ "bad_for_new_circs" : "!bad_for_new_circs",
+ channel_is_canonical(chan) ?
+ "canonical" : "!canonical",
+ channel_is_canonical_is_reliable(chan) ?
+ "is_canonical_is_reliable" :
+ "!is_canonical_is_reliable",
+ channel_is_client(chan) ?
+ "client" : "!client",
+ channel_is_local(chan) ?
+ "local" : "!local",
+ channel_is_incoming(chan) ?
+ "incoming" : "outgoing");
+
+ /* Describe circuits */
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " has %d active circuits out of"
+ " %d in total",
+ (chan->global_identifier),
+ (chan->cmux != NULL) ?
+ circuitmux_num_active_circuits(chan->cmux) : 0,
+ (chan->cmux != NULL) ?
+ circuitmux_num_circuits(chan->cmux) : 0);
+
+ /* Describe timestamps */
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " was last used by a "
+ "client at %"PRIu64 " (%"PRIu64 " seconds ago)",
+ (chan->global_identifier),
+ (uint64_t)(chan->timestamp_client),
+ (uint64_t)(now - chan->timestamp_client));
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " last received a cell "
+ "at %"PRIu64 " (%"PRIu64 " seconds ago)",
+ (chan->global_identifier),
+ (uint64_t)(chan->timestamp_recv),
+ (uint64_t)(now - chan->timestamp_recv));
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " last transmitted a cell "
+ "at %"PRIu64 " (%"PRIu64 " seconds ago)",
+ (chan->global_identifier),
+ (uint64_t)(chan->timestamp_xmit),
+ (uint64_t)(now - chan->timestamp_xmit));
+
+ /* Describe counters and rates */
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " has received "
+ "%"PRIu64 " bytes in %"PRIu64 " cells and transmitted "
+ "%"PRIu64 " bytes in %"PRIu64 " cells",
+ (chan->global_identifier),
+ (chan->n_bytes_recved),
+ (chan->n_cells_recved),
+ (chan->n_bytes_xmitted),
+ (chan->n_cells_xmitted));
+ if (now > chan->timestamp_created &&
+ chan->timestamp_created > 0) {
+ if (chan->n_bytes_recved > 0) {
+ avg = (double)(chan->n_bytes_recved) / age;
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " has averaged %f "
+ "bytes received per second",
+ (chan->global_identifier), avg);
+ }
+ if (chan->n_cells_recved > 0) {
+ avg = (double)(chan->n_cells_recved) / age;
+ if (avg >= 1.0) {
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " has averaged %f "
+ "cells received per second",
+ (chan->global_identifier), avg);
+ } else if (avg >= 0.0) {
+ interval = 1.0 / avg;
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " has averaged %f "
+ "seconds between received cells",
+ (chan->global_identifier), interval);
+ }
+ }
+ if (chan->n_bytes_xmitted > 0) {
+ avg = (double)(chan->n_bytes_xmitted) / age;
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " has averaged %f "
+ "bytes transmitted per second",
+ (chan->global_identifier), avg);
+ }
+ if (chan->n_cells_xmitted > 0) {
+ avg = (double)(chan->n_cells_xmitted) / age;
+ if (avg >= 1.0) {
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " has averaged %f "
+ "cells transmitted per second",
+ (chan->global_identifier), avg);
+ } else if (avg >= 0.0) {
+ interval = 1.0 / avg;
+ tor_log(severity, LD_GENERAL,
+ " * Channel %"PRIu64 " has averaged %f "
+ "seconds between transmitted cells",
+ (chan->global_identifier), interval);
+ }
+ }
+ }
+
+ /* Dump anything the lower layer has to say */
+ channel_dump_transport_statistics(chan, severity);
+}
+
+/**
+ * Dump channel listener statistics.
+ *
+ * Dump statistics for one channel listener to the log.
+ */
+void
+channel_listener_dump_statistics(channel_listener_t *chan_l, int severity)
+{
+ double avg, interval, age;
+ time_t now = time(NULL);
+
+ tor_assert(chan_l);
+
+ age = (double)(now - chan_l->timestamp_created);
+
+ tor_log(severity, LD_GENERAL,
+ "Channel listener %"PRIu64 " (at %p) with transport %s is in "
+ "state %s (%d)",
+ (chan_l->global_identifier), chan_l,
+ channel_listener_describe_transport(chan_l),
+ channel_listener_state_to_string(chan_l->state), chan_l->state);
+ tor_log(severity, LD_GENERAL,
+ " * Channel listener %"PRIu64 " was created at %"PRIu64
+ " (%"PRIu64 " seconds ago) "
+ "and last active at %"PRIu64 " (%"PRIu64 " seconds ago)",
+ (chan_l->global_identifier),
+ (uint64_t)(chan_l->timestamp_created),
+ (uint64_t)(now - chan_l->timestamp_created),
+ (uint64_t)(chan_l->timestamp_active),
+ (uint64_t)(now - chan_l->timestamp_active));
+
+ tor_log(severity, LD_GENERAL,
+ " * Channel listener %"PRIu64 " last accepted an incoming "
+ "channel at %"PRIu64 " (%"PRIu64 " seconds ago) "
+ "and has accepted %"PRIu64 " channels in total",
+ (chan_l->global_identifier),
+ (uint64_t)(chan_l->timestamp_accepted),
+ (uint64_t)(now - chan_l->timestamp_accepted),
+ (uint64_t)(chan_l->n_accepted));
+
+ /*
+ * If it's sensible to do so, get the rate of incoming channels on this
+ * listener
+ */
+ if (now > chan_l->timestamp_created &&
+ chan_l->timestamp_created > 0 &&
+ chan_l->n_accepted > 0) {
+ avg = (double)(chan_l->n_accepted) / age;
+ if (avg >= 1.0) {
+ tor_log(severity, LD_GENERAL,
+ " * Channel listener %"PRIu64 " has averaged %f incoming "
+ "channels per second",
+ (chan_l->global_identifier), avg);
+ } else if (avg >= 0.0) {
+ interval = 1.0 / avg;
+ tor_log(severity, LD_GENERAL,
+ " * Channel listener %"PRIu64 " has averaged %f seconds "
+ "between incoming channels",
+ (chan_l->global_identifier), interval);
+ }
+ }
+
+ /* Dump anything the lower layer has to say */
+ channel_listener_dump_transport_statistics(chan_l, severity);
+}
+
+/**
+ * Invoke transport-specific stats dump for channel.
+ *
+ * If there is a lower-layer statistics dump method, invoke it.
+ */
+void
+channel_dump_transport_statistics(channel_t *chan, int severity)
+{
+ tor_assert(chan);
+
+ if (chan->dumpstats) chan->dumpstats(chan, severity);
+}
+
+/**
+ * Invoke transport-specific stats dump for channel listener.
+ *
+ * If there is a lower-layer statistics dump method, invoke it.
+ */
+void
+channel_listener_dump_transport_statistics(channel_listener_t *chan_l,
+ int severity)
+{
+ tor_assert(chan_l);
+
+ if (chan_l->dumpstats) chan_l->dumpstats(chan_l, severity);
+}
+
+/**
+ * Return text description of the remote endpoint.
+ *
+ * This function returns a description, provided by the lower layer, of the
+ * remote endpoint for this channel; it should specify the actual address
+ * connected to/from.
+ *
+ * Subsequent calls to channel_get_{actual,canonical}_remote_{address,descr}
+ * may invalidate the return value from this function.
+ */
+const char *
+channel_get_actual_remote_descr(channel_t *chan)
+{
+ tor_assert(chan);
+ tor_assert(chan->get_remote_descr);
+
+ /* Param 1 indicates the actual description */
+ return chan->get_remote_descr(chan, GRD_FLAG_ORIGINAL);
+}
+
+/**
+ * Return the text address of the remote endpoint.
+ *
+ * Subsequent calls to channel_get_{actual,canonical}_remote_{address,descr}
+ * may invalidate the return value from this function.
+ */
+const char *
+channel_get_actual_remote_address(channel_t *chan)
+{
+ /* Param 1 indicates the actual description */
+ return chan->get_remote_descr(chan, GRD_FLAG_ORIGINAL|GRD_FLAG_ADDR_ONLY);
+}
+
+/**
+ * Return text description of the remote endpoint canonical address.
+ *
+ * This function returns a description, provided by the lower layer, of the
+ * remote endpoint for this channel; it should use the known canonical
+ * address for this OR's identity digest if possible.
+ *
+ * Subsequent calls to channel_get_{actual,canonical}_remote_{address,descr}
+ * may invalidate the return value from this function.
+ */
+const char *
+channel_get_canonical_remote_descr(channel_t *chan)
+{
+ tor_assert(chan);
+ tor_assert(chan->get_remote_descr);
+
+ /* Param 0 indicates the canonicalized description */
+ return chan->get_remote_descr(chan, 0);
+}
+
+/**
+ * Get remote address if possible.
+ *
+ * Write the remote address out to a tor_addr_t if the underlying transport
+ * supports this operation, and return 1. Return 0 if the underlying transport
+ * doesn't let us do this.
+ */
+MOCK_IMPL(int,
+channel_get_addr_if_possible,(channel_t *chan, tor_addr_t *addr_out))
+{
+ tor_assert(chan);
+ tor_assert(addr_out);
+
+ if (chan->get_remote_addr)
+ return chan->get_remote_addr(chan, addr_out);
+ /* Else no support, method not implemented */
+ else return 0;
+}
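+
+/*
+ * Illustrative sketch (editor's note):
+ *
+ *   tor_addr_t remote;
+ *   if (channel_get_addr_if_possible(chan, &remote)) {
+ *     // remote is valid; channel_do_open_actions() above uses this
+ *     // pattern to feed the address to the geoip client counter.
+ *   }
+ */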
+
+/**
+ * Return true iff the channel has any cells on the connection outbuf waiting
+ * to be sent onto the network.
+ */
+int
+channel_has_queued_writes(channel_t *chan)
+{
+ tor_assert(chan);
+ tor_assert(chan->has_queued_writes);
+
+ /* Check with the lower layer */
+ return chan->has_queued_writes(chan);
+}
+
+/**
+ * Check the is_bad_for_new_circs flag.
+ *
+ * This function returns the is_bad_for_new_circs flag of the specified
+ * channel.
+ */
+int
+channel_is_bad_for_new_circs(channel_t *chan)
+{
+ tor_assert(chan);
+
+ return chan->is_bad_for_new_circs;
+}
+
+/**
+ * Mark a channel as bad for new circuits.
+ *
+ * Set the is_bad_for_new_circs flag on chan.
+ */
+void
+channel_mark_bad_for_new_circs(channel_t *chan)
+{
+ tor_assert(chan);
+
+ chan->is_bad_for_new_circs = 1;
+}
+
+/**
+ * Get the client flag.
+ *
+ * This returns the client flag of a channel, which will be set if
+ * command_process_create_cell() in command.c thinks this is a connection
+ * from a client.
+ */
+int
+channel_is_client(const channel_t *chan)
+{
+ tor_assert(chan);
+
+ return chan->is_client;
+}
+
+/**
+ * Set the client flag.
+ *
+ * Mark a channel as being from a client.
+ */
+void
+channel_mark_client(channel_t *chan)
+{
+ tor_assert(chan);
+
+ chan->is_client = 1;
+}
+
+/**
+ * Clear the client flag.
+ *
+ * Mark a channel as being _not_ from a client.
+ */
+void
+channel_clear_client(channel_t *chan)
+{
+ tor_assert(chan);
+
+ chan->is_client = 0;
+}
+
+/**
+ * Get the canonical flag for a channel.
+ *
+ * This returns the is_canonical flag for a channel; it is determined by
+ * the lower layer and can't be set in a transport-independent way.
+ */
+int
+channel_is_canonical(channel_t *chan)
+{
+ tor_assert(chan);
+ tor_assert(chan->is_canonical);
+
+ return chan->is_canonical(chan, 0);
+}
+
+/**
+ * Test if the canonical flag is reliable.
+ *
+ * This function asks if the lower layer thinks it's safe to trust the
+ * result of channel_is_canonical().
+ */
+int
+channel_is_canonical_is_reliable(channel_t *chan)
+{
+ tor_assert(chan);
+ tor_assert(chan->is_canonical);
+
+ return chan->is_canonical(chan, 1);
+}
+
+/**
+ * Test incoming flag.
+ *
+ * This function gets the incoming flag; this is set when a listener spawns
+ * a channel. If this returns true the channel was remotely initiated.
+ */
+int
+channel_is_incoming(channel_t *chan)
+{
+ tor_assert(chan);
+
+ return chan->is_incoming;
+}
+
+/**
+ * Set the incoming flag.
+ *
+ * This function is called when a channel arrives on a listening channel
+ * to mark it as incoming.
+ */
+void
+channel_mark_incoming(channel_t *chan)
+{
+ tor_assert(chan);
+
+ chan->is_incoming = 1;
+}
+
+/**
+ * Test local flag.
+ *
+ * This function gets the local flag; the lower layer should set this when
+ * setting up the channel if is_local_addr() is true for all of the
+ * destinations it will communicate with on behalf of this channel. It's
+ * used to decide whether to declare the network reachable when seeing incoming
+ * traffic on the channel.
+ */
+int
+channel_is_local(channel_t *chan)
+{
+ tor_assert(chan);
+
+ return chan->is_local;
+}
+
+/**
+ * Set the local flag.
+ *
+ * This internal-only function should be called by the lower layer if the
+ * channel is to a local address. See channel_is_local() above or the
+ * description of the is_local bit in channel.h.
+ */
+void
+channel_mark_local(channel_t *chan)
+{
+ tor_assert(chan);
+
+ chan->is_local = 1;
+}
+
+/**
+ * Mark a channel as remote.
+ *
+ * This internal-only function should be called by the lower layer if the
+ * channel is not to a local address but has previously been marked local.
+ * See channel_is_local() above or the description of the is_local bit in
+ * channel.h
+ */
+void
+channel_mark_remote(channel_t *chan)
+{
+ tor_assert(chan);
+
+ chan->is_local = 0;
+}
+
+/**
+ * Test outgoing flag.
+ *
+ * This function gets the outgoing flag; this is the inverse of the incoming
+ * bit set when a listener spawns a channel. If this returns true the channel
+ * was locally initiated.
+ */
+int
+channel_is_outgoing(channel_t *chan)
+{
+ tor_assert(chan);
+
+ return !(chan->is_incoming);
+}
+
+/**
+ * Mark a channel as outgoing.
+ *
+ * This function clears the incoming flag and thus marks a channel as
+ * outgoing.
+ */
+void
+channel_mark_outgoing(channel_t *chan)
+{
+ tor_assert(chan);
+
+ chan->is_incoming = 0;
+}
+
+/************************
+ * Flow control queries *
+ ***********************/
+
+/**
+ * Estimate the number of writeable cells.
+ *
+ * Ask the lower layer for an estimate of how many cells it can accept.
+ */
+int
+channel_num_cells_writeable(channel_t *chan)
+{
+ int result;
+
+ tor_assert(chan);
+ tor_assert(chan->num_cells_writeable);
+
+ if (chan->state == CHANNEL_STATE_OPEN) {
+ /* Query lower layer */
+ result = chan->num_cells_writeable(chan);
+ if (result < 0) result = 0;
+ } else {
+ /* No cells are writeable in any other state */
+ result = 0;
+ }
+
+ return result;
+}
+
+/*********************
+ * Timestamp updates *
+ ********************/
+
+/**
+ * Update the created timestamp for a channel.
+ *
+ * This updates the channel's created timestamp and should only be called
+ * from channel_init().
+ */
+void
+channel_timestamp_created(channel_t *chan)
+{
+ time_t now = time(NULL);
+
+ tor_assert(chan);
+
+ chan->timestamp_created = now;
+}
+
+/**
+ * Update the created timestamp for a channel listener.
+ *
+ * This updates the channel listener's created timestamp and should only be
+ * called from channel_init_listener().
+ */
+void
+channel_listener_timestamp_created(channel_listener_t *chan_l)
+{
+ time_t now = time(NULL);
+
+ tor_assert(chan_l);
+
+ chan_l->timestamp_created = now;
+}
+
+/**
+ * Update the last active timestamp for a channel.
+ *
+ * This function updates the channel's last active timestamp; it should be
+ * called by the lower layer whenever there is activity on the channel which
+ * does not lead to a cell being transmitted or received; the active timestamp
+ * is also updated from channel_timestamp_recv() and channel_timestamp_xmit(),
+ * but it should also be updated for things like the v3 handshake and other
+ * events that produce activity only visible to the lower layer.
+ */
+void
+channel_timestamp_active(channel_t *chan)
+{
+ time_t now = time(NULL);
+
+ tor_assert(chan);
+ monotime_coarse_get(&chan->timestamp_xfer);
+
+ chan->timestamp_active = now;
+
+ /* Clear any potential netflow padding timer. We're active */
+ monotime_coarse_zero(&chan->next_padding_time);
+}
+
+/**
+ * Update the last active timestamp for a channel listener.
+ */
+void
+channel_listener_timestamp_active(channel_listener_t *chan_l)
+{
+ time_t now = time(NULL);
+
+ tor_assert(chan_l);
+
+ chan_l->timestamp_active = now;
+}
+
+/**
+ * Update the last accepted timestamp.
+ *
+ * This function updates the channel listener's last accepted timestamp; it
+ * should be called whenever a new incoming channel is accepted on a
+ * listener.
+ */
+void
+channel_listener_timestamp_accepted(channel_listener_t *chan_l)
+{
+ time_t now = time(NULL);
+
+ tor_assert(chan_l);
+
+ chan_l->timestamp_active = now;
+ chan_l->timestamp_accepted = now;
+}
+
+/**
+ * Update client timestamp.
+ *
+ * This function is called by relay.c to timestamp a channel that appears to
+ * be used as a client.
+ */
+void
+channel_timestamp_client(channel_t *chan)
+{
+ time_t now = time(NULL);
+
+ tor_assert(chan);
+
+ chan->timestamp_client = now;
+}
+
+/**
+ * Update the recv timestamp.
+ *
+ * This is called whenever we get an incoming cell from the lower layer.
+ * This also updates the active timestamp.
+ */
+void
+channel_timestamp_recv(channel_t *chan)
+{
+ time_t now = time(NULL);
+ tor_assert(chan);
+ monotime_coarse_get(&chan->timestamp_xfer);
+
+ chan->timestamp_active = now;
+ chan->timestamp_recv = now;
+
+ /* Clear any potential netflow padding timer. We're active */
+ monotime_coarse_zero(&chan->next_padding_time);
+}
+
+/**
+ * Update the xmit timestamp.
+ *
+ * This is called whenever we pass an outgoing cell to the lower layer. This
+ * also updates the active timestamp.
+ */
+void
+channel_timestamp_xmit(channel_t *chan)
+{
+ time_t now = time(NULL);
+ tor_assert(chan);
+
+ monotime_coarse_get(&chan->timestamp_xfer);
+
+ chan->timestamp_active = now;
+ chan->timestamp_xmit = now;
+
+ /* Clear any potential netflow padding timer. We're active */
+ monotime_coarse_zero(&chan->next_padding_time);
+}
+
+/***************************************************************
+ * Timestamp queries - see above for definitions of timestamps *
+ **************************************************************/
+
+/**
+ * Query created timestamp for a channel.
+ */
+time_t
+channel_when_created(channel_t *chan)
+{
+ tor_assert(chan);
+
+ return chan->timestamp_created;
+}
+
+/**
+ * Query client timestamp.
+ */
+time_t
+channel_when_last_client(channel_t *chan)
+{
+ tor_assert(chan);
+
+ return chan->timestamp_client;
+}
+
+/**
+ * Query xmit timestamp.
+ */
+time_t
+channel_when_last_xmit(channel_t *chan)
+{
+ tor_assert(chan);
+
+ return chan->timestamp_xmit;
+}
+
+/**
+ * Check if a channel matches an extend_info_t.
+ *
+ * This function calls the lower layer and asks if this channel matches a
+ * given extend_info_t.
+ */
+int
+channel_matches_extend_info(channel_t *chan, extend_info_t *extend_info)
+{
+ tor_assert(chan);
+ tor_assert(chan->matches_extend_info);
+ tor_assert(extend_info);
+
+ return chan->matches_extend_info(chan, extend_info);
+}
+
+/**
+ * Check if a channel matches a given target address; return true iff we do.
+ *
+ * This function calls into the lower layer and asks if this channel thinks
+ * it matches a given target address for circuit extension purposes.
+ */
+int
+channel_matches_target_addr_for_extend(channel_t *chan,
+ const tor_addr_t *target)
+{
+ tor_assert(chan);
+ tor_assert(chan->matches_target);
+ tor_assert(target);
+
+ return chan->matches_target(chan, target);
+}
+
+/**
+ * Return the total number of circuits used by a channel.
+ *
+ * @param chan Channel to query
+ * @return Number of circuits using this as n_chan or p_chan
+ */
+unsigned int
+channel_num_circuits(channel_t *chan)
+{
+ tor_assert(chan);
+
+ return chan->num_n_circuits +
+ chan->num_p_circuits;
+}
+
+/**
+ * Set up circuit ID generation.
+ *
+ * This is called when setting up a channel and replaces the old
+ * connection_or_set_circid_type().
+ */
+MOCK_IMPL(void,
+channel_set_circid_type,(channel_t *chan,
+ crypto_pk_t *identity_rcvd,
+ int consider_identity))
+{
+ int started_here;
+ crypto_pk_t *our_identity;
+
+ tor_assert(chan);
+
+ started_here = channel_is_outgoing(chan);
+
+ if (! consider_identity) {
+ if (started_here)
+ chan->circ_id_type = CIRC_ID_TYPE_HIGHER;
+ else
+ chan->circ_id_type = CIRC_ID_TYPE_LOWER;
+ return;
+ }
+
+ our_identity = started_here ?
+ get_tlsclient_identity_key() : get_server_identity_key();
+
+ if (identity_rcvd) {
+ if (crypto_pk_cmp_keys(our_identity, identity_rcvd) < 0) {
+ chan->circ_id_type = CIRC_ID_TYPE_LOWER;
+ } else {
+ chan->circ_id_type = CIRC_ID_TYPE_HIGHER;
+ }
+ } else {
+ chan->circ_id_type = CIRC_ID_TYPE_NEITHER;
+ }
+}
+
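+/* Usage sketch (illustrative): a lower-layer transport would call the
+ * function above once it learns the peer's identity key, so that exactly
+ * one endpoint allocates circuit IDs from each half of the ID space.
+ * `example_handshake_done` and `peer_rsa_key` are hypothetical names:
+ *
+ *   static void
+ *   example_handshake_done(channel_t *chan, crypto_pk_t *peer_rsa_key)
+ *   {
+ *     channel_set_circid_type(chan, peer_rsa_key, 1);
+ *   }
+ */
+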
+static int
+channel_sort_by_ed25519_identity(const void **a_, const void **b_)
+{
+ const channel_t *a = *a_,
+ *b = *b_;
+ return fast_memcmp(&a->ed25519_identity.pubkey,
+ &b->ed25519_identity.pubkey,
+ sizeof(a->ed25519_identity.pubkey));
+}
+
+/** Helper for channel_update_bad_for_new_circs(): Perform the
+ * channel_update_bad_for_new_circs operation on all channels in <b>lst</b>,
+ * all of which MUST have the same RSA ID. (They MAY have different
+ * Ed25519 IDs.) */
+static void
+channel_rsa_id_group_set_badness(struct channel_list_s *lst, int force)
+{
+ /*XXXX This function should really be about channels. 15056 */
+ channel_t *chan = TOR_LIST_FIRST(lst);
+
+ if (!chan)
+ return;
+
+ /* if there is only one channel, don't bother looping */
+ if (PREDICT_LIKELY(!TOR_LIST_NEXT(chan, next_with_same_id))) {
+ connection_or_single_set_badness_(
+ time(NULL), BASE_CHAN_TO_TLS(chan)->conn, force);
+ return;
+ }
+
+ smartlist_t *channels = smartlist_new();
+
+ TOR_LIST_FOREACH(chan, lst, next_with_same_id) {
+ if (BASE_CHAN_TO_TLS(chan)->conn) {
+ smartlist_add(channels, chan);
+ }
+ }
+
+ smartlist_sort(channels, channel_sort_by_ed25519_identity);
+
+ const ed25519_public_key_t *common_ed25519_identity = NULL;
+ /* it would be more efficient to do a slice, but this case is rare */
+ smartlist_t *or_conns = smartlist_new();
+ SMARTLIST_FOREACH_BEGIN(channels, channel_t *, channel) {
+ if (!common_ed25519_identity)
+ common_ed25519_identity = &channel->ed25519_identity;
+
+ if (! ed25519_pubkey_eq(&channel->ed25519_identity,
+ common_ed25519_identity)) {
+ connection_or_group_set_badness_(or_conns, force);
+ smartlist_clear(or_conns);
+ common_ed25519_identity = &channel->ed25519_identity;
+ }
+
+ smartlist_add(or_conns, BASE_CHAN_TO_TLS(channel)->conn);
+ } SMARTLIST_FOREACH_END(channel);
+
+ connection_or_group_set_badness_(or_conns, force);
+
+ /* XXXX 15056 we may want to do something special with connections that have
+ * no set Ed25519 identity! */
+
+ smartlist_free(or_conns);
+ smartlist_free(channels);
+}
+
+/** Go through all the channels (or if <b>digest</b> is non-NULL, just
+ * the OR connections with that digest), and set the is_bad_for_new_circs
+ * flag based on the rules in connection_or_group_set_badness_() (or just
+ * always set it if <b>force</b> is true).
+ */
+void
+channel_update_bad_for_new_circs(const char *digest, int force)
+{
+ if (digest) {
+ channel_idmap_entry_t *ent;
+ channel_idmap_entry_t search;
+ memset(&search, 0, sizeof(search));
+ memcpy(search.digest, digest, DIGEST_LEN);
+ ent = HT_FIND(channel_idmap, &channel_identity_map, &search);
+ if (ent) {
+ channel_rsa_id_group_set_badness(&ent->channel_list, force);
+ }
+ return;
+ }
+
+ /* no digest; just look at everything. */
+ channel_idmap_entry_t **iter;
+ HT_FOREACH(iter, channel_idmap, &channel_identity_map) {
+ channel_rsa_id_group_set_badness(&(*iter)->channel_list, force);
+ }
+}
diff --git a/src/core/or/channel.h b/src/core/or/channel.h
new file mode 100644
index 0000000000..7f25056769
--- /dev/null
+++ b/src/core/or/channel.h
@@ -0,0 +1,780 @@
+/* Copyright (c) 2012-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file channel.h
+ * \brief Header file for channel.c
+ **/
+
+#ifndef TOR_CHANNEL_H
+#define TOR_CHANNEL_H
+
+#include "or/or.h"
+#include "or/circuitmux.h"
+#include "lib/container/handles.h"
+#include "lib/crypt_ops/crypto_ed25519.h"
+
+#include "tor_queue.h"
+
+#define tor_timer_t timeout
+struct tor_timer_t;
+
+/* Channel handler function pointer typedefs */
+typedef void (*channel_listener_fn_ptr)(channel_listener_t *, channel_t *);
+typedef void (*channel_cell_handler_fn_ptr)(channel_t *, cell_t *);
+typedef void (*channel_var_cell_handler_fn_ptr)(channel_t *, var_cell_t *);
+
+/**
+ * This enum is used by channelpadding to decide when to pad channels.
+ * Don't add values to it without updating the checks in
+ * channelpadding_decide_to_pad_channel().
+ */
+typedef enum {
+ CHANNEL_USED_NOT_USED_FOR_FULL_CIRCS = 0,
+ CHANNEL_USED_FOR_FULL_CIRCS,
+ CHANNEL_USED_FOR_USER_TRAFFIC,
+} channel_usage_info_t;
+
+/** Possible rules for generating circuit IDs on an OR connection. */
+typedef enum {
+ CIRC_ID_TYPE_LOWER=0, /**< Pick from 0..1<<15-1. */
+ CIRC_ID_TYPE_HIGHER=1, /**< Pick from 1<<15..1<<16-1. */
+ /** The other side of a connection is an OP: never create circuits to it,
+ * and let it use any circuit ID it wants. */
+ CIRC_ID_TYPE_NEITHER=2
+} circ_id_type_t;
+#define circ_id_type_bitfield_t ENUM_BF(circ_id_type_t)
+
+/* channel states for channel_t */
+
+typedef enum {
+ /*
+ * Closed state - channel is inactive
+ *
+ * Permitted transitions from:
+ * - CHANNEL_STATE_CLOSING
+ * Permitted transitions to:
+ * - CHANNEL_STATE_OPENING
+ */
+ CHANNEL_STATE_CLOSED = 0,
+ /*
+ * Opening state - channel is trying to connect
+ *
+ * Permitted transitions from:
+ * - CHANNEL_STATE_CLOSED
+ * Permitted transitions to:
+ * - CHANNEL_STATE_CLOSING
+ * - CHANNEL_STATE_ERROR
+ * - CHANNEL_STATE_OPEN
+ */
+ CHANNEL_STATE_OPENING,
+ /*
+ * Open state - channel is active and ready for use
+ *
+ * Permitted transitions from:
+ * - CHANNEL_STATE_MAINT
+ * - CHANNEL_STATE_OPENING
+ * Permitted transitions to:
+ * - CHANNEL_STATE_CLOSING
+ * - CHANNEL_STATE_ERROR
+ * - CHANNEL_STATE_MAINT
+ */
+ CHANNEL_STATE_OPEN,
+ /*
+ * Maintenance state - channel is temporarily offline for subclass specific
+ * maintenance activities such as TLS renegotiation.
+ *
+ * Permitted transitions from:
+ * - CHANNEL_STATE_OPEN
+ * Permitted transitions to:
+ * - CHANNEL_STATE_CLOSING
+ * - CHANNEL_STATE_ERROR
+ * - CHANNEL_STATE_OPEN
+ */
+ CHANNEL_STATE_MAINT,
+ /*
+ * Closing state - channel is shutting down
+ *
+ * Permitted transitions from:
+ * - CHANNEL_STATE_MAINT
+ * - CHANNEL_STATE_OPEN
+ * Permitted transitions to:
+ * - CHANNEL_STATE_CLOSED,
+ * - CHANNEL_STATE_ERROR
+ */
+ CHANNEL_STATE_CLOSING,
+ /*
+ * Error state - channel has experienced a permanent error
+ *
+ * Permitted transitions from:
+ * - CHANNEL_STATE_CLOSING
+ * - CHANNEL_STATE_MAINT
+ * - CHANNEL_STATE_OPENING
+ * - CHANNEL_STATE_OPEN
+ * Permitted transitions to:
+ * - None
+ */
+ CHANNEL_STATE_ERROR,
+ /*
+ * Placeholder for maximum state value
+ */
+ CHANNEL_STATE_LAST
+} channel_state_t;
+
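+/* Example of the transition rules above (illustrative): a channel that
+ * completes its handshake moves OPENING -> OPEN, and may later pass
+ * through MAINT during maintenance such as TLS renegotiation:
+ *
+ *   tor_assert(channel_state_can_transition(CHANNEL_STATE_OPENING,
+ *                                           CHANNEL_STATE_OPEN));
+ *   tor_assert(channel_state_can_transition(CHANNEL_STATE_OPEN,
+ *                                           CHANNEL_STATE_MAINT));
+ *   // Not permitted: CLOSED is only reachable from CLOSING.
+ *   tor_assert(!channel_state_can_transition(CHANNEL_STATE_OPEN,
+ *                                            CHANNEL_STATE_CLOSED));
+ */
+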
+/* channel listener states for channel_listener_t */
+
+typedef enum {
+ /*
+ * Closed state - channel listener is inactive
+ *
+ * Permitted transitions from:
+ * - CHANNEL_LISTENER_STATE_CLOSING
+ * Permitted transitions to:
+ * - CHANNEL_LISTENER_STATE_LISTENING
+ */
+ CHANNEL_LISTENER_STATE_CLOSED = 0,
+ /*
+ * Listening state - channel listener is listening for incoming
+ * connections
+ *
+ * Permitted transitions from:
+ * - CHANNEL_LISTENER_STATE_CLOSED
+ * Permitted transitions to:
+ * - CHANNEL_LISTENER_STATE_CLOSING
+ * - CHANNEL_LISTENER_STATE_ERROR
+ */
+ CHANNEL_LISTENER_STATE_LISTENING,
+ /*
+ * Closing state - channel listener is shutting down
+ *
+ * Permitted transitions from:
+ * - CHANNEL_LISTENER_STATE_LISTENING
+ * Permitted transitions to:
+ * - CHANNEL_LISTENER_STATE_CLOSED,
+ * - CHANNEL_LISTENER_STATE_ERROR
+ */
+ CHANNEL_LISTENER_STATE_CLOSING,
+ /*
+ * Error state - channel listener has experienced a permanent error
+ *
+ * Permitted transitions from:
+ * - CHANNEL_STATE_CLOSING
+ * - CHANNEL_STATE_LISTENING
+ * Permitted transitions to:
+ * - None
+ */
+ CHANNEL_LISTENER_STATE_ERROR,
+ /*
+ * Placeholder for maximum state value
+ */
+ CHANNEL_LISTENER_STATE_LAST
+} channel_listener_state_t;
+
+/**
+ * Channel struct; see the channel_t typedef in or.h. A channel is an
+ * abstract interface for the OR-to-OR connection, similar to connection_or_t,
+ * but without the strong coupling to the underlying TLS implementation. They
+ * are constructed by calling a protocol-specific function to open a channel
+ * to a particular node, and once constructed support the abstract operations
+ * defined below.
+ */
+struct channel_s {
+ /** Magic number for type-checking cast macros */
+ uint32_t magic;
+
+ /** List entry for hashtable for global-identifier lookup. */
+ HT_ENTRY(channel_s) gidmap_node;
+
+ /** Handle entry for handle-based lookup */
+ HANDLE_ENTRY(channel, channel_s);
+
+ /** Current channel state */
+ channel_state_t state;
+
+ /** Globally unique ID number for a channel over the lifetime of a Tor
+ * process. This may not be 0.
+ */
+ uint64_t global_identifier;
+
+ /** Should we expect to see this channel in the channel lists? */
+ unsigned char registered:1;
+
+ /** has this channel ever been open? */
+ unsigned int has_been_open:1;
+
+ /**
+ * This field indicates if the other side has enabled or disabled
+ * padding via either the link protocol version or
+ * channelpadding_negotiate cells.
+ *
+ * Clients can override this with ConnectionPadding in torrc to
+ * disable or force padding to relays, but relays cannot override the
+ * client's request.
+ */
+ unsigned int padding_enabled:1;
+
+ /** Cached value of our decision to pad (to avoid expensive
+ * checks during critical path statistics counting). */
+ unsigned int currently_padding:1;
+
+ /** Is there a pending netflow padding callback? */
+ unsigned int pending_padding_callback:1;
+
+ /** Is our peer likely to consider this channel canonical? */
+ unsigned int is_canonical_to_peer:1;
+
+ /** Has this channel ever been used for non-directory traffic?
+ * Used to decide what channels to pad, and when. */
+ channel_usage_info_t channel_usage;
+
+ /** When should we send a cell for netflow padding? 0 means no padding is
+ * scheduled. */
+ monotime_coarse_t next_padding_time;
+
+ /** The callback pointer for the padding callbacks */
+ struct tor_timer_t *padding_timer;
+ /** The handle to this channel (to free on canceled timers) */
+ struct channel_handle_t *timer_handle;
+
+ /**
+ * These two fields specify the minimum and maximum negotiated timeout
+ * values for inactivity (send or receive) before we decide to pad a
+ * channel. These fields can be set either via a PADDING_NEGOTIATE cell,
+ * or the torrc option ReducedConnectionPadding. The consensus parameters
+ * nf_ito_low and nf_ito_high are used to ensure that padding can only be
+ * negotiated to be less frequent than what is specified in the consensus.
+ * (This is done to prevent wingnut clients from requesting excessive
+ * padding).
+ *
+ * The actual timeout value is randomly chosen between these two values
+ * as per the table in channelpadding_get_netflow_inactive_timeout_ms(),
+ * after ensuring that these values do not specify lower timeouts than
+ * the consensus parameters.
+ *
+ * If these are 0, we have not negotiated or specified custom padding
+ * times, and instead use consensus defaults. */
+ uint16_t padding_timeout_low_ms;
+ uint16_t padding_timeout_high_ms;
+
+ /** Why did we close?
+ */
+ enum {
+ CHANNEL_NOT_CLOSING = 0,
+ CHANNEL_CLOSE_REQUESTED,
+ CHANNEL_CLOSE_FROM_BELOW,
+ CHANNEL_CLOSE_FOR_ERROR
+ } reason_for_closing;
+
+ /** State variable for use by the scheduler */
+ enum {
+ /*
+ * The channel is not open, or it has a full output buffer but no queued
+ * cells.
+ */
+ SCHED_CHAN_IDLE = 0,
+ /*
+ * The channel has space on its output buffer to write, but no queued
+ * cells.
+ */
+ SCHED_CHAN_WAITING_FOR_CELLS,
+ /*
+ * The scheduler has queued cells but no output buffer space to write.
+ */
+ SCHED_CHAN_WAITING_TO_WRITE,
+ /*
+ * The scheduler has both queued cells and output buffer space, and is
+ * eligible for the scheduler loop.
+ */
+ SCHED_CHAN_PENDING
+ } scheduler_state;
+
+ /** Heap index for use by the scheduler */
+ int sched_heap_idx;
+
+ /** Timestamps for both cell channels and listeners */
+ time_t timestamp_created; /* Channel created */
+ time_t timestamp_active; /* Any activity */
+
+ /**
+ * This is a monotonic timestamp that marks when we
+ * believe the channel has actually sent or received data to/from
+ * the wire. Right now, it is used to determine when we should send
+ * a padding cell for channelpadding.
+ *
+ * XXX: Are we setting timestamp_xfer in the right places to
+ * accurately reflect actual network data transfer? Or might this be
+ * very wrong wrt when bytes actually go on the wire?
+ */
+ monotime_coarse_t timestamp_xfer;
+
+ /* Methods implemented by the lower layer */
+
+ /** Free a channel */
+ void (*free_fn)(channel_t *);
+ /** Close an open channel */
+ void (*close)(channel_t *);
+ /** Describe the transport subclass for this channel */
+ const char * (*describe_transport)(channel_t *);
+ /** Optional method to dump transport-specific statistics on the channel */
+ void (*dumpstats)(channel_t *, int);
+
+ /** Registered handlers for incoming cells */
+ channel_cell_handler_fn_ptr cell_handler;
+ channel_var_cell_handler_fn_ptr var_cell_handler;
+
+ /* Methods implemented by the lower layer */
+
+ /**
+ * Ask the lower layer for an estimate of the average overhead for
+ * transmissions on this channel.
+ */
+ double (*get_overhead_estimate)(channel_t *);
+ /**
+ * Ask the underlying transport what the remote endpoint address is, in
+ * a tor_addr_t. This is optional and subclasses may leave this NULL.
+ * If they implement it, they should write the address out to the
+ * provided tor_addr_t *, and return 1 if successful or 0 if no address
+ * available.
+ */
+ int (*get_remote_addr)(channel_t *, tor_addr_t *);
+ int (*get_transport_name)(channel_t *chan, char **transport_out);
+
+#define GRD_FLAG_ORIGINAL 1
+#define GRD_FLAG_ADDR_ONLY 2
+ /**
+ * Get a text description of the remote endpoint; canonicalized if the flag
+ * GRD_FLAG_ORIGINAL is not set, or the one we originally connected
+ * to/received from if it is. If GRD_FLAG_ADDR_ONLY is set, we return only
+ * the original address.
+ */
+ const char * (*get_remote_descr)(channel_t *, int);
+ /** Check if the lower layer has queued writes */
+ int (*has_queued_writes)(channel_t *);
+ /**
+ * If the second param is zero, ask the lower layer if this is
+ * 'canonical', for a transport-specific definition of canonical; if
+ * it is 1, ask if the answer to the preceding query is safe to rely
+ * on.
+ */
+ int (*is_canonical)(channel_t *, int);
+ /** Check if this channel matches a specified extend_info_t */
+ int (*matches_extend_info)(channel_t *, extend_info_t *);
+ /** Check if this channel matches a target address when extending */
+ int (*matches_target)(channel_t *, const tor_addr_t *);
+ /** Ask the lower layer how many bytes it has queued but not yet sent */
+ size_t (*num_bytes_queued)(channel_t *);
+ /** Ask the lower layer how many cells can be written */
+ int (*num_cells_writeable)(channel_t *);
+ /** Write a cell to an open channel */
+ int (*write_cell)(channel_t *, cell_t *);
+ /** Write a packed cell to an open channel */
+ int (*write_packed_cell)(channel_t *, packed_cell_t *);
+ /** Write a variable-length cell to an open channel */
+ int (*write_var_cell)(channel_t *, var_cell_t *);
+
+ /**
+ * Hash of the public RSA key for the other side's RSA identity key -- or
+ * zeroes if we don't have an RSA identity in mind for the other side, and
+ * it hasn't shown us one.
+ *
+ * Note that this is the RSA identity that we hope the other side has -- not
+ * necessarily its true identity. Don't believe this identity unless
+ * authentication has happened.
+ */
+ char identity_digest[DIGEST_LEN];
+ /**
+ * Ed25519 key for the other side of this channel -- or zeroes if we don't
+ * have an Ed25519 identity in mind for the other side, and it hasn't shown
+ * us one.
+ *
+ * Note that this is the identity that we hope the other side has -- not
+ * necessarily its true identity. Don't believe this identity unless
+ * authentication has happened.
+ */
+ struct ed25519_public_key_t ed25519_identity;
+
+ /**
+ * Linked list of channels with the same RSA identity digest, for use with
+ * the digest->channel map
+ */
+ TOR_LIST_ENTRY(channel_s) next_with_same_id;
+
+ /** Circuit mux for circuits sending on this channel */
+ circuitmux_t *cmux;
+
+ /** Circuit ID generation stuff for use by circuitbuild.c */
+
+ /**
+ * When we send CREATE cells along this connection, which half of the
+ * space should we use?
+ */
+ circ_id_type_bitfield_t circ_id_type:2;
+ /** True iff this channel uses 4-byte ("wide") circuit IDs, i.e. link
+ * protocol version 4 or later. */
+ unsigned wide_circ_ids:1;
+
+ /** For how many circuits are we n_chan? What about p_chan? */
+ unsigned int num_n_circuits, num_p_circuits;
+
+ /**
+ * True iff this channel shouldn't get any new circs attached to it,
+ * because the connection is too old, or because there's a better one.
+ * More generally, this flag is used to note an unhealthy connection;
+ * for example, if a bad connection fails we shouldn't assume that the
+ * router itself has a problem.
+ */
+ unsigned int is_bad_for_new_circs:1;
+
+ /** True iff we have decided that the other end of this connection
+ * is a client or bridge relay. Connections with this flag set should never
+ * be used to satisfy an EXTEND request. */
+ unsigned int is_client:1;
+
+ /** Set if the channel was initiated remotely (came from a listener) */
+ unsigned int is_incoming:1;
+
+ /** Set by lower layer if this is local; i.e., everything it communicates
+ * with for this channel returns true for is_local_addr(). This is used
+ * to decide whether to declare reachability when we receive something on
+ * this channel in circuitbuild.c
+ */
+ unsigned int is_local:1;
+
+ /** Have we logged a warning about circID exhaustion on this channel?
+ * If so, when? */
+ ratelim_t last_warned_circ_ids_exhausted;
+
+ /** Channel timestamps for cell channels */
+ time_t timestamp_client; /* Client used this, according to relay.c */
+ time_t timestamp_recv; /* Cell received from lower layer */
+ time_t timestamp_xmit; /* Cell sent to lower layer */
+
+ /** Timestamp for run_connection_housekeeping(). We update this once a
+ * second when we run housekeeping and find a circuit on this channel, and
+ * whenever we add a circuit to the channel. */
+ time_t timestamp_last_had_circuits;
+
+ /** Unique ID for measuring direct network status requests; tunneled ones
+ * come over a circuit_t, which has a dirreq_id field as well, but is a
+ * distinct namespace. */
+ uint64_t dirreq_id;
+
+ /** Channel counters for cell channels */
+ uint64_t n_cells_recved, n_bytes_recved;
+ uint64_t n_cells_xmitted, n_bytes_xmitted;
+};
+
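+/* Sketch of how a lower-layer transport fills in the method table of
+ * struct channel_s (illustrative; the `example_*` functions are
+ * hypothetical, but channeltls.c follows this same pattern for TLS):
+ *
+ *   static void
+ *   example_channel_init(channel_t *chan)
+ *   {
+ *     channel_init(chan); // base-class setup first
+ *     chan->free_fn = example_free;
+ *     chan->close = example_close;
+ *     chan->describe_transport = example_describe_transport;
+ *     chan->has_queued_writes = example_has_queued_writes;
+ *     chan->is_canonical = example_is_canonical;
+ *     chan->write_cell = example_write_cell;
+ *   }
+ */
+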
+struct channel_listener_s {
+ /* Current channel listener state */
+ channel_listener_state_t state;
+
+ /* Globally unique ID number for a channel listener over the lifetime of
+ * a Tor process.
+ */
+ uint64_t global_identifier;
+
+ /** Should we expect to see this channel in the channel lists? */
+ unsigned char registered:1;
+
+ /** Why did we close?
+ */
+ enum {
+ CHANNEL_LISTENER_NOT_CLOSING = 0,
+ CHANNEL_LISTENER_CLOSE_REQUESTED,
+ CHANNEL_LISTENER_CLOSE_FROM_BELOW,
+ CHANNEL_LISTENER_CLOSE_FOR_ERROR
+ } reason_for_closing;
+
+ /** Timestamps for both cell channels and listeners */
+ time_t timestamp_created; /* Channel created */
+ time_t timestamp_active; /* Any activity */
+
+ /* Methods implemented by the lower layer */
+
+ /** Free a channel */
+ void (*free_fn)(channel_listener_t *);
+ /** Close an open channel */
+ void (*close)(channel_listener_t *);
+ /** Describe the transport subclass for this channel */
+ const char * (*describe_transport)(channel_listener_t *);
+ /** Optional method to dump transport-specific statistics on the channel */
+ void (*dumpstats)(channel_listener_t *, int);
+
+ /** Registered listen handler to call on incoming connection */
+ channel_listener_fn_ptr listener;
+
+ /** List of pending incoming connections */
+ smartlist_t *incoming_list;
+
+ /** Timestamps for listeners */
+ time_t timestamp_accepted;
+
+ /** Counters for listeners */
+ uint64_t n_accepted;
+};
+
+/* Channel state manipulations */
+
+int channel_state_is_valid(channel_state_t state);
+int channel_listener_state_is_valid(channel_listener_state_t state);
+
+int channel_state_can_transition(channel_state_t from, channel_state_t to);
+int channel_listener_state_can_transition(channel_listener_state_t from,
+ channel_listener_state_t to);
+
+const char * channel_state_to_string(channel_state_t state);
+const char *
+channel_listener_state_to_string(channel_listener_state_t state);
+
+/* Abstract channel operations */
+
+void channel_mark_for_close(channel_t *chan);
+int channel_write_packed_cell(channel_t *chan, packed_cell_t *cell);
+
+void channel_listener_mark_for_close(channel_listener_t *chan_l);
+
+/* Channel callback registrations */
+
+/* Listener callback */
+void channel_listener_set_listener_fn(channel_listener_t *chan,
+ channel_listener_fn_ptr listener);
+
+/* Incoming cell callbacks */
+channel_cell_handler_fn_ptr channel_get_cell_handler(channel_t *chan);
+
+channel_var_cell_handler_fn_ptr
+channel_get_var_cell_handler(channel_t *chan);
+
+void channel_set_cell_handlers(channel_t *chan,
+ channel_cell_handler_fn_ptr cell_handler,
+ channel_var_cell_handler_fn_ptr
+ var_cell_handler);
+
+/* Clean up closed channels and channel listeners periodically; these are
+ * called from run_scheduled_events() in main.c.
+ */
+void channel_run_cleanup(void);
+void channel_listener_run_cleanup(void);
+
+/* Close all channels and deallocate everything */
+void channel_free_all(void);
+
+/* Dump some statistics in the log */
+void channel_dumpstats(int severity);
+void channel_listener_dumpstats(int severity);
+
+#ifdef TOR_CHANNEL_INTERNAL_
+
+#ifdef CHANNEL_PRIVATE_
+
+STATIC void channel_add_to_digest_map(channel_t *chan);
+
+#endif /* defined(CHANNEL_PRIVATE_) */
+
+/* Channel operations for subclasses and internal use only */
+
+/* Initialize a newly allocated channel - do this first in subclass
+ * constructors.
+ */
+
+void channel_init(channel_t *chan);
+void channel_init_listener(channel_listener_t *chan);
+
+/* Channel registration/unregistration */
+void channel_register(channel_t *chan);
+void channel_unregister(channel_t *chan);
+
+/* Channel listener registration/unregistration */
+void channel_listener_register(channel_listener_t *chan_l);
+void channel_listener_unregister(channel_listener_t *chan_l);
+
+/* Close from below */
+void channel_close_from_lower_layer(channel_t *chan);
+void channel_close_for_error(channel_t *chan);
+void channel_closed(channel_t *chan);
+
+/* Free a channel */
+void channel_free_(channel_t *chan);
+#define channel_free(chan) FREE_AND_NULL(channel_t, channel_free_, (chan))
+void channel_listener_free_(channel_listener_t *chan_l);
+#define channel_listener_free(chan_l) \
+ FREE_AND_NULL(channel_listener_t, channel_listener_free_, (chan_l))
+
+/* State/metadata setters */
+
+void channel_change_state(channel_t *chan, channel_state_t to_state);
+void channel_change_state_open(channel_t *chan);
+void channel_clear_identity_digest(channel_t *chan);
+void channel_clear_remote_end(channel_t *chan);
+void channel_mark_local(channel_t *chan);
+void channel_mark_incoming(channel_t *chan);
+void channel_mark_outgoing(channel_t *chan);
+void channel_mark_remote(channel_t *chan);
+void channel_set_identity_digest(channel_t *chan,
+ const char *identity_digest,
+ const struct ed25519_public_key_t *ed_identity);
+
+void channel_listener_change_state(channel_listener_t *chan_l,
+ channel_listener_state_t to_state);
+
+/* Timestamp updates */
+void channel_timestamp_created(channel_t *chan);
+void channel_timestamp_active(channel_t *chan);
+void channel_timestamp_recv(channel_t *chan);
+void channel_timestamp_xmit(channel_t *chan);
+
+void channel_listener_timestamp_created(channel_listener_t *chan_l);
+void channel_listener_timestamp_active(channel_listener_t *chan_l);
+void channel_listener_timestamp_accepted(channel_listener_t *chan_l);
+
+/* Incoming channel handling */
+void channel_listener_process_incoming(channel_listener_t *listener);
+void channel_listener_queue_incoming(channel_listener_t *listener,
+ channel_t *incoming);
+
+/* Incoming cell handling */
+void channel_process_cell(channel_t *chan, cell_t *cell);
+
+/* Request from lower layer for more cells if available */
+MOCK_DECL(ssize_t, channel_flush_some_cells,
+ (channel_t *chan, ssize_t num_cells));
+
+/* Query if data available on this channel */
+MOCK_DECL(int, channel_more_to_flush, (channel_t *chan));
+
+/* Notify flushed outgoing for dirreq handling */
+void channel_notify_flushed(channel_t *chan);
+
+/* Handle stuff we need to do on open like notifying circuits */
+void channel_do_open_actions(channel_t *chan);
+
+#endif /* defined(TOR_CHANNEL_INTERNAL_) */
+
+/* Helper functions to perform operations on channels */
+
+int channel_send_destroy(circid_t circ_id, channel_t *chan,
+ int reason);
+
+/*
+ * Outside abstract interfaces that should eventually get turned into
+ * something transport/address format independent.
+ */
+
+channel_t * channel_connect(const tor_addr_t *addr, uint16_t port,
+ const char *rsa_id_digest,
+ const struct ed25519_public_key_t *ed_id);
+
+channel_t * channel_get_for_extend(const char *rsa_id_digest,
+ const struct ed25519_public_key_t *ed_id,
+ const tor_addr_t *target_addr,
+ const char **msg_out,
+ int *launch_out);
+
+/* Ask which of two channels is better for circuit-extension purposes */
+int channel_is_better(channel_t *a, channel_t *b);
+
+/** Channel lookups
+ */
+
+channel_t * channel_find_by_global_id(uint64_t global_identifier);
+channel_t * channel_find_by_remote_identity(const char *rsa_id_digest,
+ const struct ed25519_public_key_t *ed_id);
+
+/** For things returned by channel_find_by_remote_identity(), walk the list.
+ * The RSA key will match for all returned elements; the Ed25519 key might not.
+ */
+channel_t * channel_next_with_rsa_identity(channel_t *chan);
+
+/*
+ * Helper macros to lookup state of given channel.
+ */
+
+#define CHANNEL_IS_CLOSED(chan) (channel_is_in_state((chan), \
+ CHANNEL_STATE_CLOSED))
+#define CHANNEL_IS_OPENING(chan) (channel_is_in_state((chan), \
+ CHANNEL_STATE_OPENING))
+#define CHANNEL_IS_OPEN(chan) (channel_is_in_state((chan), \
+ CHANNEL_STATE_OPEN))
+#define CHANNEL_IS_MAINT(chan) (channel_is_in_state((chan), \
+ CHANNEL_STATE_MAINT))
+#define CHANNEL_IS_CLOSING(chan) (channel_is_in_state((chan), \
+ CHANNEL_STATE_CLOSING))
+#define CHANNEL_IS_ERROR(chan) (channel_is_in_state((chan), \
+ CHANNEL_STATE_ERROR))
+
+#define CHANNEL_FINISHED(chan) (CHANNEL_IS_CLOSED(chan) || \
+ CHANNEL_IS_ERROR(chan))
+
+#define CHANNEL_CONDEMNED(chan) (CHANNEL_IS_CLOSING(chan) || \
+ CHANNEL_FINISHED(chan))
+
+#define CHANNEL_CAN_HANDLE_CELLS(chan) (CHANNEL_IS_OPENING(chan) || \
+ CHANNEL_IS_OPEN(chan) || \
+ CHANNEL_IS_MAINT(chan))
+
+static inline int
+channel_is_in_state(channel_t *chan, channel_state_t state)
+{
+ return chan->state == state;
+}
+
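+/* Example use of the state macros above (illustrative): code about to
+ * queue a cell typically bails out on condemned channels and only writes
+ * to channels that can still handle cells:
+ *
+ *   if (CHANNEL_CONDEMNED(chan))
+ *     return; // closing, closed, or errored
+ *   if (CHANNEL_CAN_HANDLE_CELLS(chan))
+ *     channel_write_packed_cell(chan, cell);
+ */
+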
+/*
+ * Metadata queries/updates
+ */
+
+const char * channel_describe_transport(channel_t *chan);
+MOCK_DECL(void, channel_dump_statistics, (channel_t *chan, int severity));
+void channel_dump_transport_statistics(channel_t *chan, int severity);
+const char * channel_get_actual_remote_descr(channel_t *chan);
+const char * channel_get_actual_remote_address(channel_t *chan);
+MOCK_DECL(int, channel_get_addr_if_possible, (channel_t *chan,
+ tor_addr_t *addr_out));
+const char * channel_get_canonical_remote_descr(channel_t *chan);
+int channel_has_queued_writes(channel_t *chan);
+int channel_is_bad_for_new_circs(channel_t *chan);
+void channel_mark_bad_for_new_circs(channel_t *chan);
+int channel_is_canonical(channel_t *chan);
+int channel_is_canonical_is_reliable(channel_t *chan);
+int channel_is_client(const channel_t *chan);
+int channel_is_local(channel_t *chan);
+int channel_is_incoming(channel_t *chan);
+int channel_is_outgoing(channel_t *chan);
+void channel_mark_client(channel_t *chan);
+void channel_clear_client(channel_t *chan);
+int channel_matches_extend_info(channel_t *chan, extend_info_t *extend_info);
+int channel_matches_target_addr_for_extend(channel_t *chan,
+ const tor_addr_t *target);
+unsigned int channel_num_circuits(channel_t *chan);
+MOCK_DECL(void,channel_set_circid_type,(channel_t *chan,
+ crypto_pk_t *identity_rcvd,
+ int consider_identity));
+void channel_timestamp_client(channel_t *chan);
+
+const char * channel_listener_describe_transport(channel_listener_t *chan_l);
+void channel_listener_dump_statistics(channel_listener_t *chan_l,
+ int severity);
+void channel_listener_dump_transport_statistics(channel_listener_t *chan_l,
+ int severity);
+void channel_check_for_duplicates(void);
+
+void channel_update_bad_for_new_circs(const char *digest, int force);
+
+/* Flow control queries */
+int channel_num_cells_writeable(channel_t *chan);
+
+/* Timestamp queries */
+time_t channel_when_created(channel_t *chan);
+time_t channel_when_last_client(channel_t *chan);
+time_t channel_when_last_xmit(channel_t *chan);
+
+/* Counter queries */
+int packed_cell_is_destroy(channel_t *chan,
+ const packed_cell_t *packed_cell,
+ circid_t *circid_out);
+
+/* Declare the handle helpers */
+HANDLE_DECL(channel, channel_s,)
+#define channel_handle_free(h) \
+ FREE_AND_NULL(channel_handle_t, channel_handle_free_, (h))
+#undef tor_timer_t
+
+#endif /* !defined(TOR_CHANNEL_H) */
diff --git a/src/core/or/channelpadding.c b/src/core/or/channelpadding.c
new file mode 100644
index 0000000000..504f6f8f83
--- /dev/null
+++ b/src/core/or/channelpadding.c
@@ -0,0 +1,800 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/* TOR_CHANNEL_INTERNAL_ define needed for an O(1) implementation of
+ * channelpadding_channel_to_channelinfo() */
+#define TOR_CHANNEL_INTERNAL_
+
+#include "or/or.h"
+#include "or/channel.h"
+#include "or/channelpadding.h"
+#include "or/channeltls.h"
+#include "or/config.h"
+#include "or/networkstatus.h"
+#include "or/connection.h"
+#include "or/connection_or.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "or/main.h"
+#include "or/rephist.h"
+#include "or/router.h"
+#include "lib/time/compat_time.h"
+#include "or/rendservice.h"
+#include "lib/evloop/timers.h"
+
+#include "or/cell_st.h"
+#include "or/or_connection_st.h"
+
+STATIC int32_t channelpadding_get_netflow_inactive_timeout_ms(
+ const channel_t *);
+STATIC int channelpadding_send_disable_command(channel_t *);
+STATIC int64_t channelpadding_compute_time_until_pad_for_netflow(channel_t *);
+
+/** The total number of pending channelpadding timers */
+static uint64_t total_timers_pending;
+
+/** These are cached consensus parameters for netflow */
+/** The timeout lower bound that is allowed before sending padding */
+static int consensus_nf_ito_low;
+/** The timeout upper bound that is allowed before sending padding */
+static int consensus_nf_ito_high;
+/** The timeout lower bound that is allowed before sending reduced padding */
+static int consensus_nf_ito_low_reduced;
+/** The timeout upper bound that is allowed before sending reduced padding */
+static int consensus_nf_ito_high_reduced;
+/** The connection timeout between relays */
+static int consensus_nf_conntimeout_relays;
+/** The connection timeout for client connections */
+static int consensus_nf_conntimeout_clients;
+/** Should we pad before circuits are actually used for client data? */
+static int consensus_nf_pad_before_usage;
+/** Should we pad relay-to-relay connections? */
+static int consensus_nf_pad_relays;
+/** Should we pad tor2web connections? */
+static int consensus_nf_pad_tor2web;
+/** Should we pad single onion service connections? */
+static int consensus_nf_pad_single_onion;
+
+#define TOR_MSEC_PER_SEC 1000
+#define TOR_USEC_PER_MSEC 1000
+
+/**
+ * How often do we get called by the connection housekeeping (ie: once
+ * per second) */
+#define TOR_HOUSEKEEPING_CALLBACK_MSEC 1000
+/**
+ * Additional extra time buffer on the housekeeping callback, since
+ * it can be delayed. This extra slack is used to decide if we should
+ * schedule a timer or wait for the next callback. */
+#define TOR_HOUSEKEEPING_CALLBACK_SLACK_MSEC 100
+
+/**
+ * This macro tells us if either end of the channel is connected to a client.
+ * (If we're not a server, we're definitely a client. If the channel thinks
+ * it's a client, use that. Then finally verify in the consensus).
+ */
+#define CHANNEL_IS_CLIENT(chan, options) \
+ (!public_server_mode((options)) || channel_is_client(chan) || \
+ !connection_or_digest_is_known_relay((chan)->identity_digest))
+
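+/* Usage sketch for the macro above (illustrative):
+ *
+ *   const or_options_t *options = get_options();
+ *   if (CHANNEL_IS_CLIENT(chan, options)) {
+ *     // Pad on the client schedule; relay-to-relay channels are only
+ *     // padded when the consensus sets nf_pad_relays=1.
+ *   }
+ */
+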
+/**
+ * This function is called to update cached consensus parameters every time
+ * there is a consensus update. This allows us to move the consensus param
+ * search off of the critical path, so it does not need to be evaluated
+ * for every single connection, every second.
+ */
+void
+channelpadding_new_consensus_params(networkstatus_t *ns)
+{
+#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW 1500
+#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH 9500
+#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN 0
+#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX 60000
+ consensus_nf_ito_low = networkstatus_get_param(ns, "nf_ito_low",
+ DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW,
+ DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN,
+ DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX);
+ consensus_nf_ito_high = networkstatus_get_param(ns, "nf_ito_high",
+ DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH,
+ consensus_nf_ito_low,
+ DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX);
+
+#define DFLT_NETFLOW_REDUCED_KEEPALIVE_LOW 9000
+#define DFLT_NETFLOW_REDUCED_KEEPALIVE_HIGH 14000
+#define DFLT_NETFLOW_REDUCED_KEEPALIVE_MIN 0
+#define DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX 60000
+ consensus_nf_ito_low_reduced =
+ networkstatus_get_param(ns, "nf_ito_low_reduced",
+ DFLT_NETFLOW_REDUCED_KEEPALIVE_LOW,
+ DFLT_NETFLOW_REDUCED_KEEPALIVE_MIN,
+ DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX);
+
+ consensus_nf_ito_high_reduced =
+ networkstatus_get_param(ns, "nf_ito_high_reduced",
+ DFLT_NETFLOW_REDUCED_KEEPALIVE_HIGH,
+ consensus_nf_ito_low_reduced,
+ DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX);
+
+#define CONNTIMEOUT_RELAYS_DFLT (60*60) // 1 hour
+#define CONNTIMEOUT_RELAYS_MIN 60
+#define CONNTIMEOUT_RELAYS_MAX (7*24*60*60) // 1 week
+ consensus_nf_conntimeout_relays =
+ networkstatus_get_param(ns, "nf_conntimeout_relays",
+ CONNTIMEOUT_RELAYS_DFLT,
+ CONNTIMEOUT_RELAYS_MIN,
+ CONNTIMEOUT_RELAYS_MAX);
+
+#define CIRCTIMEOUT_CLIENTS_DFLT (30*60) // 30 minutes
+#define CIRCTIMEOUT_CLIENTS_MIN 60
+#define CIRCTIMEOUT_CLIENTS_MAX (24*60*60) // 24 hours
+ consensus_nf_conntimeout_clients =
+ networkstatus_get_param(ns, "nf_conntimeout_clients",
+ CIRCTIMEOUT_CLIENTS_DFLT,
+ CIRCTIMEOUT_CLIENTS_MIN,
+ CIRCTIMEOUT_CLIENTS_MAX);
+
+ consensus_nf_pad_before_usage =
+ networkstatus_get_param(ns, "nf_pad_before_usage", 1, 0, 1);
+
+ consensus_nf_pad_relays =
+ networkstatus_get_param(ns, "nf_pad_relays", 0, 0, 1);
+
+ consensus_nf_pad_tor2web =
+ networkstatus_get_param(ns,
+ CHANNELPADDING_TOR2WEB_PARAM,
+ CHANNELPADDING_TOR2WEB_DEFAULT, 0, 1);
+
+ consensus_nf_pad_single_onion =
+ networkstatus_get_param(ns,
+ CHANNELPADDING_SOS_PARAM,
+ CHANNELPADDING_SOS_DEFAULT, 0, 1);
+}
+
+/**
+ * Get a random netflow inactive timeout keepalive period in milliseconds,
+ * the range for which is determined by consensus parameters, negotiation,
+ * configuration, or default values. The consensus parameters enforce the
+ * minimum possible value, to avoid excessively frequent padding.
+ *
+ * The ranges for this value were chosen to be low enough to ensure that
+ * routers do not emit a new netflow record for a connection due to it
+ * being idle.
+ *
+ * Specific timeout values for major routers are listed in Proposal 251.
+ * No major router appeared capable of setting an inactive timeout below 10
+ * seconds, so we set the defaults below that value, since we can always
+ * scale back if it ends up being too much padding.
+ *
+ * Returns the next timeout period (in milliseconds) after which we should
+ * send a padding packet, or 0 if padding is disabled.
+ */
+STATIC int32_t
+channelpadding_get_netflow_inactive_timeout_ms(const channel_t *chan)
+{
+ int low_timeout = consensus_nf_ito_low;
+ int high_timeout = consensus_nf_ito_high;
+ int X1, X2;
+
+ if (low_timeout == 0 && low_timeout == high_timeout)
+ return 0; // No padding
+
+ /* If we have negotiated different timeout values, use those, but
+ * don't allow them to be lower than the consensus ones */
+ if (chan->padding_timeout_low_ms && chan->padding_timeout_high_ms) {
+ low_timeout = MAX(low_timeout, chan->padding_timeout_low_ms);
+ high_timeout = MAX(high_timeout, chan->padding_timeout_high_ms);
+ }
+
+ if (low_timeout == high_timeout)
+ return low_timeout; // No randomization
+
+ /*
+ * This MAX() hack is here because we apply the timeout on both the client
+ * and the server. This creates the situation where the total time before
+ * sending a packet in either direction is actually
+ * min(client_timeout,server_timeout).
+ *
+ * If X is a random variable uniform from 0..R-1 (where R=high-low),
+ * then Y=max(X,X) has Prob(Y == i) = (2.0*i + 1)/(R*R).
+ *
+ * If we create a third random variable Z=min(Y,Y), then it turns out that
+ * Exp[Z] ~= Exp[X]. Here's a table:
+ *
+ * R Exp[X] Exp[Z] Exp[min(X,X)] Exp[max(X,X)]
+ * 2000 999.5 1066 666.2 1332.8
+ * 3000 1499.5 1599.5 999.5 1999.5
+ * 5000 2499.5 2666 1666.2 3332.8
+ * 6000 2999.5 3199.5 1999.5 3999.5
+ * 7000 3499.5 3732.8 2332.8 4666.2
+ * 8000 3999.5 4266.2 2666.2 5332.8
+ * 10000 4999.5 5328 3332.8 6666.2
+ * 15000 7499.5 7995 4999.5 9999.5
+ * 20000 9999.5 10661 6666.2 13332.8
+ *
+ * In other words, this hack makes it so that when both the client and
+ * the guard are sending this padding, then the averages work out closer
+ * to the midpoint of the range, making the overhead easier to tune.
+ * If only one endpoint is padding (for example: if the relay does not
+ * support padding, but the client has set ConnectionPadding 1; or
+ * if the relay does support padding, but the client has set
+ * ReducedConnectionPadding 1), then the defense will still prevent
+ * record splitting, but with less overhead than the midpoint
+ * (as seen by the Exp[max(X,X)] column).
+ *
+ * To calculate average padding packet frequency (and thus overhead),
+ * index into the table by picking a row based on R = high-low. Then,
+ * use the appropriate column (Exp[Z] for two-sided padding, and
+ * Exp[max(X,X)] for one-sided padding). Finally, take this value
+ * and add it to the low timeout value. This value is the average
+ * frequency which padding packets will be sent.
+ */
+
+ X1 = crypto_rand_int(high_timeout - low_timeout);
+ X2 = crypto_rand_int(high_timeout - low_timeout);
+ return low_timeout + MAX(X1, X2);
+}
+
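+/* Worked example for the function above (illustrative): with the
+ * consensus defaults nf_ito_low=1500 and nf_ito_high=9500, R = 8000.
+ * From the table, Exp[max(X,X)] = 5332.8, so a lone padding endpoint
+ * sends a keepalive on average every 1500 + 5332.8 ~= 6.8 seconds of
+ * idleness; when both endpoints pad, the effective average period is
+ * 1500 + Exp[Z] = 1500 + 4266.2 ~= 5.8 seconds.
+ */
+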
+/**
+ * Update this channel's padding settings based on the PADDING_NEGOTIATE
+ * contents.
+ *
+ * Returns -1 on error; 1 on success.
+ */
+int
+channelpadding_update_padding_for_channel(channel_t *chan,
+ const channelpadding_negotiate_t *pad_vars)
+{
+ if (pad_vars->version != 0) {
+ static ratelim_t version_limit = RATELIM_INIT(600);
+
+ log_fn_ratelim(&version_limit,LOG_PROTOCOL_WARN,LD_PROTOCOL,
+ "Got a PADDING_NEGOTIATE cell with an unknown version. Ignoring.");
+ return -1;
+ }
+
+ // We should not allow malicious relays to disable or reduce padding for
+ // us as clients. In fact, we should only accept this cell at all if we're
+ // operating as a relay. Bridges should not accept it from relays, either
+ // (only from their clients).
+ if ((get_options()->BridgeRelay &&
+ connection_or_digest_is_known_relay(chan->identity_digest)) ||
+ !get_options()->ORPort_set) {
+ static ratelim_t relay_limit = RATELIM_INIT(600);
+
+ log_fn_ratelim(&relay_limit,LOG_PROTOCOL_WARN,LD_PROTOCOL,
+ "Got a PADDING_NEGOTIATE from relay at %s (%s). "
+ "This should not happen.",
+ chan->get_remote_descr(chan, 0),
+ hex_str(chan->identity_digest, DIGEST_LEN));
+ return -1;
+ }
+
+ chan->padding_enabled = (pad_vars->command == CHANNELPADDING_COMMAND_START);
+
+ /* Min must not be lower than the current consensus parameter
+ nf_ito_low. */
+ chan->padding_timeout_low_ms = MAX(consensus_nf_ito_low,
+ pad_vars->ito_low_ms);
+
+ /* Max must not be lower than ito_low_ms */
+ chan->padding_timeout_high_ms = MAX(chan->padding_timeout_low_ms,
+ pad_vars->ito_high_ms);
+
+ log_fn(LOG_INFO,LD_OR,
+ "Negotiated padding=%d, lo=%d, hi=%d on %"PRIu64,
+ chan->padding_enabled, chan->padding_timeout_low_ms,
+ chan->padding_timeout_high_ms,
+ (chan->global_identifier));
+
+ return 1;
+}
+
+/**
+ * Sends a CELL_PADDING_NEGOTIATE on the channel to tell the other side not
+ * to send padding.
+ *
+ * Returns -1 on error, 0 on success.
+ */
+STATIC int
+channelpadding_send_disable_command(channel_t *chan)
+{
+ channelpadding_negotiate_t disable;
+ cell_t cell;
+
+ tor_assert(BASE_CHAN_TO_TLS(chan)->conn->link_proto >=
+ MIN_LINK_PROTO_FOR_CHANNEL_PADDING);
+
+ memset(&cell, 0, sizeof(cell_t));
+ memset(&disable, 0, sizeof(channelpadding_negotiate_t));
+ cell.command = CELL_PADDING_NEGOTIATE;
+
+ channelpadding_negotiate_set_command(&disable, CHANNELPADDING_COMMAND_STOP);
+
+ if (channelpadding_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
+ &disable) < 0)
+ return -1;
+
+ if (chan->write_cell(chan, &cell) == 1)
+ return 0;
+ else
+ return -1;
+}
+
+/**
+ * Sends a CELL_PADDING_NEGOTIATE on the channel to tell the other side to
+ * resume sending padding at some rate.
+ *
+ * Returns -1 on error, 0 on success.
+ */
+int
+channelpadding_send_enable_command(channel_t *chan, uint16_t low_timeout,
+ uint16_t high_timeout)
+{
+ channelpadding_negotiate_t enable;
+ cell_t cell;
+
+ tor_assert(BASE_CHAN_TO_TLS(chan)->conn->link_proto >=
+ MIN_LINK_PROTO_FOR_CHANNEL_PADDING);
+
+ memset(&cell, 0, sizeof(cell_t));
+ memset(&enable, 0, sizeof(channelpadding_negotiate_t));
+ cell.command = CELL_PADDING_NEGOTIATE;
+
+ channelpadding_negotiate_set_command(&enable, CHANNELPADDING_COMMAND_START);
+ channelpadding_negotiate_set_ito_low_ms(&enable, low_timeout);
+ channelpadding_negotiate_set_ito_high_ms(&enable, high_timeout);
+
+ if (channelpadding_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
+ &enable) < 0)
+ return -1;
+
+ if (chan->write_cell(chan, &cell) == 1)
+ return 0;
+ else
+ return -1;
+}
+
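+/* Usage sketch (illustrative, assuming the peer's link protocol supports
+ * padding negotiation): ask the peer to pad on the reduced schedule.
+ * Values below the consensus bounds get clamped by the receiver in
+ * channelpadding_update_padding_for_channel():
+ *
+ *   if (channelpadding_send_enable_command(chan, 9000, 14000) < 0)
+ *     log_fn(LOG_INFO, LD_OR, "Couldn't send PADDING_NEGOTIATE.");
+ */
+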
+/**
+ * Sends a CELL_PADDING cell on a channel if it has been idle since
+ * our callback was scheduled.
+ *
+ * This function also clears the pending padding timer and the callback
+ * flags.
+ */
+static void
+channelpadding_send_padding_cell_for_callback(channel_t *chan)
+{
+ cell_t cell;
+
+ /* Check that the channel is still valid and open */
+ if (!chan || chan->state != CHANNEL_STATE_OPEN) {
+ if (chan) chan->pending_padding_callback = 0;
+ log_fn(LOG_INFO,LD_OR,
+ "Scheduled a netflow padding cell, but connection already closed.");
+ return;
+ }
+
+ /* We should have a pending callback flag set. */
+ if (BUG(chan->pending_padding_callback == 0))
+ return;
+
+ chan->pending_padding_callback = 0;
+
+ if (monotime_coarse_is_zero(&chan->next_padding_time) ||
+ chan->has_queued_writes(chan)) {
+ /* We must have been active before the timer fired */
+ monotime_coarse_zero(&chan->next_padding_time);
+ return;
+ }
+
+ {
+ monotime_coarse_t now;
+ monotime_coarse_get(&now);
+
+ log_fn(LOG_INFO,LD_OR,
+ "Sending netflow keepalive on %"PRIu64" to %s (%s) after "
+ "%"PRId64" ms. Delta %"PRId64"ms",
+ (chan->global_identifier),
+ safe_str_client(chan->get_remote_descr(chan, 0)),
+ safe_str_client(hex_str(chan->identity_digest, DIGEST_LEN)),
+ (monotime_coarse_diff_msec(&chan->timestamp_xfer,&now)),
+ (monotime_coarse_diff_msec(&chan->next_padding_time,&now)));
+ }
+
+ /* Clear the timer */
+ monotime_coarse_zero(&chan->next_padding_time);
+
+ /* Send the padding cell. This will cause the channel to get a
+ * fresh timestamp_active */
+ memset(&cell, 0, sizeof(cell));
+ cell.command = CELL_PADDING;
+ chan->write_cell(chan, &cell);
+}
+
+/**
+ * tor_timer callback function for us to send padding on an idle channel.
+ *
+ * This function just obtains the channel from the callback handle, ensures
+ * it is still valid, and then hands it off to
+ * channelpadding_send_padding_cell_for_callback(), which checks if
+ * the channel is still idle before sending padding.
+ */
+static void
+channelpadding_send_padding_callback(tor_timer_t *timer, void *args,
+ const struct monotime_t *when)
+{
+ channel_t *chan = channel_handle_get((struct channel_handle_t*)args);
+ (void)timer; (void)when;
+
+ if (chan && CHANNEL_CAN_HANDLE_CELLS(chan)) {
+ /* Hrmm.. It might be nice to have an equivalent to assert_connection_ok
+ * for channels. Then we could get rid of the channeltls dependency */
+ tor_assert(TO_CONN(BASE_CHAN_TO_TLS(chan)->conn)->magic ==
+ OR_CONNECTION_MAGIC);
+ assert_connection_ok(TO_CONN(BASE_CHAN_TO_TLS(chan)->conn), approx_time());
+
+ channelpadding_send_padding_cell_for_callback(chan);
+ } else {
+ log_fn(LOG_INFO,LD_OR,
+ "Channel closed while waiting for timer.");
+ }
+
+ total_timers_pending--;
+}
+
+/**
+ * Schedules a callback to send padding on a channel in_ms milliseconds from
+ * now.
+ *
+ * Returns CHANNELPADDING_WONTPAD on error, CHANNELPADDING_PADDING_SENT if we
+ * sent the packet immediately without a timer, and
+ * CHANNELPADDING_PADDING_SCHEDULED if we decided to schedule a timer.
+ */
+static channelpadding_decision_t
+channelpadding_schedule_padding(channel_t *chan, int in_ms)
+{
+ struct timeval timeout;
+ tor_assert(!chan->pending_padding_callback);
+
+ if (in_ms <= 0) {
+ chan->pending_padding_callback = 1;
+ channelpadding_send_padding_cell_for_callback(chan);
+ return CHANNELPADDING_PADDING_SENT;
+ }
+
+ timeout.tv_sec = in_ms/TOR_MSEC_PER_SEC;
+ timeout.tv_usec = (in_ms%TOR_MSEC_PER_SEC)*TOR_USEC_PER_MSEC;
+
+ if (!chan->timer_handle) {
+ chan->timer_handle = channel_handle_new(chan);
+ }
+
+ if (chan->padding_timer) {
+ timer_set_cb(chan->padding_timer,
+ channelpadding_send_padding_callback,
+ chan->timer_handle);
+ } else {
+ chan->padding_timer = timer_new(channelpadding_send_padding_callback,
+ chan->timer_handle);
+ }
+ timer_schedule(chan->padding_timer, &timeout);
+
+ rep_hist_padding_count_timers(++total_timers_pending);
+
+ chan->pending_padding_callback = 1;
+ return CHANNELPADDING_PADDING_SCHEDULED;
+}
+
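+/* Worked example of the conversion above (illustrative): for
+ * in_ms = 1234, timeout.tv_sec = 1234/1000 = 1 and
+ * timeout.tv_usec = (1234%1000)*1000 = 234000, i.e. 1.234 seconds.
+ * For in_ms <= 0 the deadline has already passed, so the padding cell is
+ * sent inline and no timer is created.
+ */
+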
+/**
+ * Calculates the number of milliseconds from now to schedule a padding cell.
+ *
+ * Returns the number of milliseconds from now (relative) to schedule the
+ * padding callback. If the padding timer is more than 1.1 seconds in the
+ * future, we return -1, to avoid scheduling excessive callbacks. If padding
+ * is disabled in the consensus, we return -2.
+ *
+ * Side-effects: Updates chan->next_padding_time, storing an (absolute, not
+ * relative) millisecond representation of when we should send padding, unless
+ * other activity happens first. This side-effect allows us to avoid
+ * scheduling a libevent callback until we're within 1.1 seconds of the padding
+ * time.
+ */
+#define CHANNELPADDING_TIME_LATER -1
+#define CHANNELPADDING_TIME_DISABLED -2
+STATIC int64_t
+channelpadding_compute_time_until_pad_for_netflow(channel_t *chan)
+{
+ monotime_coarse_t now;
+ monotime_coarse_get(&now);
+
+ if (monotime_coarse_is_zero(&chan->next_padding_time)) {
+ /* If the below line or crypto_rand_int() shows up on a profile,
+ * we can avoid getting a timeout until we're at least nf_ito_low
+ * from a timeout window. That will prevent us from setting timers
+ * on connections that were active up to 1.5 seconds ago.
+ * Idle connections should only call this once every 5.5s on average
+ * though, so that might be a micro-optimization for little gain. */
+ int32_t padding_timeout =
+ channelpadding_get_netflow_inactive_timeout_ms(chan);
+
+ if (!padding_timeout)
+ return CHANNELPADDING_TIME_DISABLED;
+
+ monotime_coarse_add_msec(&chan->next_padding_time,
+ &chan->timestamp_xfer,
+ padding_timeout);
+ }
+
+ const int64_t ms_till_pad =
+ monotime_coarse_diff_msec(&now, &chan->next_padding_time);
+
+ /* If the next padding time is beyond the maximum possible consensus value,
+ * then this indicates a clock jump, so just send padding now. This is
+ * better than using monotonic time because we want to avoid the situation
+ * where we wait around forever for monotonic time to move forward after
+ * a clock jump far into the past.
+ */
+ if (ms_till_pad > DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX) {
+ tor_fragile_assert();
+ log_warn(LD_BUG,
+ "Channel padding timeout scheduled %"PRId64"ms in the future. "
+ "Did the monotonic clock just jump?",
+ (ms_till_pad));
+ return 0; /* Clock jumped: Send padding now */
+ }
+
+ /* If the timeout will expire before the next time we're called (1000ms
+ from now, plus some slack), then calculate the number of milliseconds
+ from now which we should send padding, so we can schedule a callback
+ then.
+ */
+ if (ms_till_pad < (TOR_HOUSEKEEPING_CALLBACK_MSEC +
+ TOR_HOUSEKEEPING_CALLBACK_SLACK_MSEC)) {
+ /* If the padding time is in the past, that means that libevent delayed
+ * calling the once-per-second callback due to other work taking too long.
+ * See https://bugs.torproject.org/22212 and
+ * https://bugs.torproject.org/16585. This is a systemic problem
+ * with being single-threaded, but let's emit a notice if this
+ * is long enough in the past that we might have missed a netflow window,
+ * and allowed a router to emit a netflow frame, just so we don't forget
+ * about it entirely. */
+#define NETFLOW_MISSED_WINDOW (150000 - DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH)
+ if (ms_till_pad < 0) {
+ int severity = (ms_till_pad < -NETFLOW_MISSED_WINDOW)
+ ? LOG_NOTICE : LOG_INFO;
+ log_fn(severity, LD_OR,
+ "Channel padding timeout scheduled %"PRId64"ms in the past. ",
+ (-ms_till_pad));
+ return 0; /* Clock jumped: Send padding now */
+ }
+
+ return ms_till_pad;
+ }
+ return CHANNELPADDING_TIME_LATER;
+}
+
+/**
+ * Returns a randomized value for channel idle timeout in seconds.
+ * The channel idle timeout governs how quickly we close a channel
+ * after its last circuit has disappeared.
+ *
+ * There are three classes of channels:
+ * 1. Client+non-canonical. These live for 3-4.5 minutes.
+ * 2. Relay-to-relay. These live for 45-75 minutes by default.
+ * 3. Reduced-padding clients. These live for 1.5-2.25 minutes.
+ *
+ * Also allows the default relay-to-relay value to be controlled by the
+ * consensus.
+ */
+unsigned int
+channelpadding_get_channel_idle_timeout(const channel_t *chan,
+ int is_canonical)
+{
+ const or_options_t *options = get_options();
+ unsigned int timeout;
+
+ /* Non-canonical and client channels only last for 3-4.5 min when idle */
+ if (!is_canonical || CHANNEL_IS_CLIENT(chan, options)) {
+#define CONNTIMEOUT_CLIENTS_BASE 180 // 3 to 4.5 min
+ timeout = CONNTIMEOUT_CLIENTS_BASE
+ + crypto_rand_int(CONNTIMEOUT_CLIENTS_BASE/2);
+ } else { // Canonical relay-to-relay channels
+ // 45..75min or consensus +/- 25%
+ timeout = consensus_nf_conntimeout_relays;
+ timeout = 3*timeout/4 + crypto_rand_int(timeout/2);
+ }
+
+ /* If ReducedConnectionPadding is set, we want to halve the duration of
+ * the channel idle timeout, since reducing the additional time that
+ * a channel stays open will reduce the total overhead for making
+ * new channels. This reduction in overhead/channel expense
+ * is important for mobile users. The option cannot be set by relays.
+ *
+ * We also don't reduce any values for timeout that the user explicitly
+ * set.
+ */
+ if (options->ReducedConnectionPadding
+ && !options->CircuitsAvailableTimeout) {
+ timeout /= 2;
+ }
+
+ return timeout;
+}
+
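+/* Worked example (illustrative): with the default
+ * nf_conntimeout_relays = 3600, a canonical relay-to-relay channel gets
+ * timeout = 2700 + crypto_rand_int(1800), i.e. 45..75 minutes. A client
+ * or non-canonical channel gets 180 + crypto_rand_int(90) seconds
+ * (3..4.5 minutes), which ReducedConnectionPadding halves to
+ * 1.5..2.25 minutes.
+ */
+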
+/**
+ * This function controls how long we keep idle circuits open,
+ * and how long we build predicted circuits. This behavior is under
+ * the control of channelpadding because circuit availability is the
+ * dominant factor in channel lifespan, which influences total padding
+ * overhead.
+ *
+ * Returns a randomized number of seconds in a range from
+ * CircuitsAvailableTimeout to 2*CircuitsAvailableTimeout. This value is halved
+ * if ReducedConnectionPadding is set. The default value of
+ * CircuitsAvailableTimeout can be controlled by the consensus.
+ */
+int
+channelpadding_get_circuits_available_timeout(void)
+{
+ const or_options_t *options = get_options();
+ int timeout = options->CircuitsAvailableTimeout;
+
+ if (!timeout) {
+ timeout = consensus_nf_conntimeout_clients;
+
+ /* If ReducedConnectionPadding is set, we want to halve the duration of
+ * the channel idle timeout, since reducing the additional time that
+ * a channel stays open will reduce the total overhead for making
+ * new connections. This reduction in overhead/connection expense
+ * is important for mobile users. The option cannot be set by relays.
+ *
+ * We also don't reduce any values for timeout that the user explicitly
+ * set.
+ */
+ if (options->ReducedConnectionPadding) {
+ // half the value to 15..30min by default
+ timeout /= 2;
+ }
+ }
+
+ // 30..60min by default
+ timeout = timeout + crypto_rand_int(timeout);
+
+ return timeout;
+}
+
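+/* Worked example (illustrative): with the default
+ * nf_conntimeout_clients = 1800 and no CircuitsAvailableTimeout set,
+ * timeout = 1800 + crypto_rand_int(1800), i.e. 30..60 minutes; under
+ * ReducedConnectionPadding the base first halves to 900, giving
+ * 15..30 minutes.
+ */
+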
+/**
+ * Calling this function on a channel causes it to tell the other side
+ * not to send padding, and disables sending padding from this side as well.
+ */
+void
+channelpadding_disable_padding_on_channel(channel_t *chan)
+{
+ chan->padding_enabled = 0;
+
+ // Send cell to disable padding on the other end
+ channelpadding_send_disable_command(chan);
+}
+
+/**
+ * Calling this function on a channel causes it to tell the other side
+ * not to send padding, and reduces the rate that padding is sent from
+ * this side.
+ */
+void
+channelpadding_reduce_padding_on_channel(channel_t *chan)
+{
+ /* Padding can be forced and reduced by clients, regardless of whether
+ * the channel supports it. So we check for support here before
+ * sending any commands. */
+ if (chan->padding_enabled) {
+ channelpadding_send_disable_command(chan);
+ }
+
+ chan->padding_timeout_low_ms = consensus_nf_ito_low_reduced;
+ chan->padding_timeout_high_ms = consensus_nf_ito_high_reduced;
+
+ log_fn(LOG_INFO,LD_OR,
+ "Reduced padding on channel %"PRIu64": lo=%d, hi=%d",
+ (chan->global_identifier),
+ chan->padding_timeout_low_ms, chan->padding_timeout_high_ms);
+}
+
+/**
+ * This function is called once per second by run_connection_housekeeping(),
+ * but only if the channel is still open, valid, and non-wedged.
+ *
+ * It decides if and when we should send a padding cell, and if needed,
+ * schedules a callback to send that cell at the appropriate time.
+ *
+ * Returns an enum that represents the current padding decision state.
+ * Return value is currently used only by unit tests.
+ */
+channelpadding_decision_t
+channelpadding_decide_to_pad_channel(channel_t *chan)
+{
+ const or_options_t *options = get_options();
+
+ /* Only pad open channels */
+ if (chan->state != CHANNEL_STATE_OPEN)
+ return CHANNELPADDING_WONTPAD;
+
+ if (chan->channel_usage == CHANNEL_USED_FOR_FULL_CIRCS) {
+ if (!consensus_nf_pad_before_usage)
+ return CHANNELPADDING_WONTPAD;
+ } else if (chan->channel_usage != CHANNEL_USED_FOR_USER_TRAFFIC) {
+ return CHANNELPADDING_WONTPAD;
+ }
+
+ if (chan->pending_padding_callback)
+ return CHANNELPADDING_PADDING_ALREADY_SCHEDULED;
+
+ /* Don't pad the channel if we didn't negotiate it, but still
+ * allow clients to force padding if options->ConnectionPadding is
+ * explicitly set to 1.
+ */
+ if (!chan->padding_enabled && options->ConnectionPadding != 1) {
+ return CHANNELPADDING_WONTPAD;
+ }
+
+ if (options->Tor2webMode && !consensus_nf_pad_tor2web) {
+ /* If the consensus just changed values, this channel may still
+ * think padding is enabled. Negotiate it off. */
+ if (chan->padding_enabled)
+ channelpadding_disable_padding_on_channel(chan);
+
+ return CHANNELPADDING_WONTPAD;
+ }
+
+ if (rend_service_allow_non_anonymous_connection(options) &&
+ !consensus_nf_pad_single_onion) {
+ /* If the consensus just changed values, this channel may still
+ * think padding is enabled. Negotiate it off. */
+ if (chan->padding_enabled)
+ channelpadding_disable_padding_on_channel(chan);
+
+ return CHANNELPADDING_WONTPAD;
+ }
+
+ if (!chan->has_queued_writes(chan)) {
+ int is_client_channel = 0;
+
+ if (CHANNEL_IS_CLIENT(chan, options)) {
+ is_client_channel = 1;
+ }
+
+ /* If nf_pad_relays=1 is set in the consensus, we pad
+ * on *all* idle connections, relay-relay or relay-client.
+ * Otherwise pad only for client and bridge connections. */
+ if (is_client_channel || consensus_nf_pad_relays) {
+ int64_t pad_time_ms =
+ channelpadding_compute_time_until_pad_for_netflow(chan);
+
+ if (pad_time_ms == CHANNELPADDING_TIME_DISABLED) {
+ return CHANNELPADDING_WONTPAD;
+ } else if (pad_time_ms == CHANNELPADDING_TIME_LATER) {
+ chan->currently_padding = 1;
+ return CHANNELPADDING_PADLATER;
+ } else {
+ if (BUG(pad_time_ms > INT_MAX)) {
+ pad_time_ms = INT_MAX;
+ }
+ /* We have to schedule a callback because we're called exactly once per
+ * second, but we don't want padding packets to go out exactly on an
+ * integer multiple of seconds. This callback will only be scheduled
+ * if we're within 1.1 seconds of the padding time.
+ */
+ chan->currently_padding = 1;
+ return channelpadding_schedule_padding(chan, (int)pad_time_ms);
+ }
+ } else {
+ chan->currently_padding = 0;
+ return CHANNELPADDING_WONTPAD;
+ }
+ } else {
+ return CHANNELPADDING_PADLATER;
+ }
+}
diff --git a/src/core/or/channelpadding.h b/src/core/or/channelpadding.h
new file mode 100644
index 0000000000..7eddbdbe2d
--- /dev/null
+++ b/src/core/or/channelpadding.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file channelpadding.h
+ * \brief Header file for channelpadding.c.
+ **/
+#ifndef TOR_CHANNELPADDING_H
+#define TOR_CHANNELPADDING_H
+
+#include "trunnel/channelpadding_negotiation.h"
+
+#define CHANNELPADDING_TOR2WEB_PARAM "nf_pad_tor2web"
+#define CHANNELPADDING_TOR2WEB_DEFAULT 1
+#define CHANNELPADDING_SOS_PARAM "nf_pad_single_onion"
+#define CHANNELPADDING_SOS_DEFAULT 1
+
+typedef enum {
+ CHANNELPADDING_WONTPAD,
+ CHANNELPADDING_PADLATER,
+ CHANNELPADDING_PADDING_SCHEDULED,
+ CHANNELPADDING_PADDING_ALREADY_SCHEDULED,
+ CHANNELPADDING_PADDING_SENT,
+} channelpadding_decision_t;
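+
+/* A minimal usage sketch (illustrative only): run_connection_housekeeping()
+ * invokes the decision function once per second on each open channel, and a
+ * caller could branch on the result like so, assuming `chan` is an open
+ * channel_t:
+ *
+ *   channelpadding_decision_t d = channelpadding_decide_to_pad_channel(chan);
+ *   if (d == CHANNELPADDING_PADLATER)
+ *     ;  (nothing to do now; we will be asked again next second)
+ *   else if (d == CHANNELPADDING_PADDING_SCHEDULED)
+ *     ;  (a timer callback will emit the padding cell shortly)
+ */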
+
+channelpadding_decision_t channelpadding_decide_to_pad_channel(channel_t
+ *chan);
+int channelpadding_update_padding_for_channel(channel_t *,
+ const channelpadding_negotiate_t
+ *chan);
+
+void channelpadding_disable_padding_on_channel(channel_t *chan);
+void channelpadding_reduce_padding_on_channel(channel_t *chan);
+int channelpadding_send_enable_command(channel_t *chan, uint16_t low_timeout,
+ uint16_t high_timeout);
+
+int channelpadding_get_circuits_available_timeout(void);
+unsigned int channelpadding_get_channel_idle_timeout(const channel_t *, int);
+void channelpadding_new_consensus_params(networkstatus_t *ns);
+
+#endif /* !defined(TOR_CHANNELPADDING_H) */
+
diff --git a/src/core/or/channeltls.c b/src/core/or/channeltls.c
new file mode 100644
index 0000000000..85dfe0c0f1
--- /dev/null
+++ b/src/core/or/channeltls.c
@@ -0,0 +1,2458 @@
+/* Copyright (c) 2012-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file channeltls.c
+ *
+ * \brief A concrete subclass of channel_t using or_connection_t to transfer
+ * cells between Tor instances.
+ *
+ * This module fills in the various function pointers in channel_t, to
+ * implement the channel_tls_t channels as used in Tor today. These channels
+ * are created from channel_tls_connect() and
+ * channel_tls_handle_incoming(). Each corresponds 1:1 to an or_connection_t
+ * object, as implemented in connection_or.c. These channels transmit cells
+ * to the underlying or_connection_t by calling
+ * connection_or_write_*_cell_to_buf(), and receive cells from the underlying
+ * or_connection_t when connection_or_process_cells_from_inbuf() calls
+ * channel_tls_handle_*_cell().
+ *
+ * Here we also implement the server (responder) side of the v3+ Tor link
+ * handshake, which uses CERTS and AUTHENTICATE cells to negotiate versions,
+ * exchange expected and observed IP and time information, and bootstrap a
+ * level of authentication higher than we have gotten on the raw TLS
+ * handshake.
+ *
+ * NOTE: Since there is currently only one type of channel, there are probably
+ * more than a few cases where functionality that is currently in
+ * channeltls.c, connection_or.c, and channel.c ought to be divided up
+ * differently. The right time to do this is probably whenever we introduce
+ * our next channel type.
+ **/
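+
+/* Cell flow at a glance (informal sketch of the relationships described
+ * above):
+ *
+ *   outgoing:  channel layer -> channel_tls_write_*cell*_method()
+ *                -> connection_or_write_*_cell_to_buf()
+ *   incoming:  connection_or_process_cells_from_inbuf()
+ *                -> channel_tls_handle_*_cell()
+ *                -> channel_process_cell() for transport-independent cells
+ */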
+
+/*
+ * Define this so channel.h gives us things only channel_t subclasses
+ * should touch.
+ */
+#define TOR_CHANNEL_INTERNAL_
+
+#define CHANNELTLS_PRIVATE
+
+#include "or/or.h"
+#include "or/channel.h"
+#include "or/channeltls.h"
+#include "or/circuitmux.h"
+#include "or/circuitmux_ewma.h"
+#include "or/command.h"
+#include "or/config.h"
+#include "or/connection.h"
+#include "or/connection_or.h"
+#include "or/control.h"
+#include "or/entrynodes.h"
+#include "trunnel/link_handshake.h"
+#include "or/relay.h"
+#include "or/rephist.h"
+#include "or/router.h"
+#include "or/routerlist.h"
+#include "or/scheduler.h"
+#include "or/torcert.h"
+#include "or/networkstatus.h"
+#include "trunnel/channelpadding_negotiation.h"
+#include "or/channelpadding.h"
+
+#include "or/cell_st.h"
+#include "or/cell_queue_st.h"
+#include "or/extend_info_st.h"
+#include "or/or_connection_st.h"
+#include "or/or_handshake_certs_st.h"
+#include "or/or_handshake_state_st.h"
+#include "or/routerinfo_st.h"
+#include "or/var_cell_st.h"
+
+#include "lib/tls/tortls.h"
+
+/** How many CELL_PADDING cells have we received, ever? */
+uint64_t stats_n_padding_cells_processed = 0;
+/** How many CELL_VERSIONS cells have we received, ever? */
+uint64_t stats_n_versions_cells_processed = 0;
+/** How many CELL_NETINFO cells have we received, ever? */
+uint64_t stats_n_netinfo_cells_processed = 0;
+/** How many CELL_VPADDING cells have we received, ever? */
+uint64_t stats_n_vpadding_cells_processed = 0;
+/** How many CELL_CERTS cells have we received, ever? */
+uint64_t stats_n_certs_cells_processed = 0;
+/** How many CELL_AUTH_CHALLENGE cells have we received, ever? */
+uint64_t stats_n_auth_challenge_cells_processed = 0;
+/** How many CELL_AUTHENTICATE cells have we received, ever? */
+uint64_t stats_n_authenticate_cells_processed = 0;
+/** How many CELL_AUTHORIZE cells have we received, ever? */
+uint64_t stats_n_authorize_cells_processed = 0;
+
+/** Active listener, if any */
+static channel_listener_t *channel_tls_listener = NULL;
+
+/* channel_tls_t method declarations */
+
+static void channel_tls_close_method(channel_t *chan);
+static const char * channel_tls_describe_transport_method(channel_t *chan);
+static void channel_tls_free_method(channel_t *chan);
+static double channel_tls_get_overhead_estimate_method(channel_t *chan);
+static int
+channel_tls_get_remote_addr_method(channel_t *chan, tor_addr_t *addr_out);
+static int
+channel_tls_get_transport_name_method(channel_t *chan, char **transport_out);
+static const char *
+channel_tls_get_remote_descr_method(channel_t *chan, int flags);
+static int channel_tls_has_queued_writes_method(channel_t *chan);
+static int channel_tls_is_canonical_method(channel_t *chan, int req);
+static int
+channel_tls_matches_extend_info_method(channel_t *chan,
+ extend_info_t *extend_info);
+static int channel_tls_matches_target_method(channel_t *chan,
+ const tor_addr_t *target);
+static int channel_tls_num_cells_writeable_method(channel_t *chan);
+static size_t channel_tls_num_bytes_queued_method(channel_t *chan);
+static int channel_tls_write_cell_method(channel_t *chan,
+ cell_t *cell);
+static int channel_tls_write_packed_cell_method(channel_t *chan,
+ packed_cell_t *packed_cell);
+static int channel_tls_write_var_cell_method(channel_t *chan,
+ var_cell_t *var_cell);
+
+/* channel_listener_tls_t method declarations */
+
+static void channel_tls_listener_close_method(channel_listener_t *chan_l);
+static const char *
+channel_tls_listener_describe_transport_method(channel_listener_t *chan_l);
+
+/** Handle incoming cells for the handshake stuff here rather than
+ * passing them on up. */
+
+static void channel_tls_process_versions_cell(var_cell_t *cell,
+ channel_tls_t *tlschan);
+static void channel_tls_process_netinfo_cell(cell_t *cell,
+ channel_tls_t *tlschan);
+static int command_allowed_before_handshake(uint8_t command);
+static int enter_v3_handshake_with_cell(var_cell_t *cell,
+ channel_tls_t *tlschan);
+static void channel_tls_process_padding_negotiate_cell(cell_t *cell,
+ channel_tls_t *chan);
+
+/**
+ * Do parts of channel_tls_t initialization common to channel_tls_connect()
+ * and channel_tls_handle_incoming().
+ */
+STATIC void
+channel_tls_common_init(channel_tls_t *tlschan)
+{
+ channel_t *chan;
+
+ tor_assert(tlschan);
+
+ chan = &(tlschan->base_);
+ channel_init(chan);
+ chan->magic = TLS_CHAN_MAGIC;
+ chan->state = CHANNEL_STATE_OPENING;
+ chan->close = channel_tls_close_method;
+ chan->describe_transport = channel_tls_describe_transport_method;
+ chan->free_fn = channel_tls_free_method;
+ chan->get_overhead_estimate = channel_tls_get_overhead_estimate_method;
+ chan->get_remote_addr = channel_tls_get_remote_addr_method;
+ chan->get_remote_descr = channel_tls_get_remote_descr_method;
+ chan->get_transport_name = channel_tls_get_transport_name_method;
+ chan->has_queued_writes = channel_tls_has_queued_writes_method;
+ chan->is_canonical = channel_tls_is_canonical_method;
+ chan->matches_extend_info = channel_tls_matches_extend_info_method;
+ chan->matches_target = channel_tls_matches_target_method;
+ chan->num_bytes_queued = channel_tls_num_bytes_queued_method;
+ chan->num_cells_writeable = channel_tls_num_cells_writeable_method;
+ chan->write_cell = channel_tls_write_cell_method;
+ chan->write_packed_cell = channel_tls_write_packed_cell_method;
+ chan->write_var_cell = channel_tls_write_var_cell_method;
+
+ chan->cmux = circuitmux_alloc();
+ /* We only have one policy for now so always set it to EWMA. */
+ circuitmux_set_policy(chan->cmux, &ewma_policy);
+}
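+
+/* Note (illustrative): after this initialization, the generic channel layer
+ * reaches this subclass only through the pointers set above; for example, a
+ * call of the form chan->write_cell(chan, cell) dispatches to
+ * channel_tls_write_cell_method() below. */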
+
+/**
+ * Start a new TLS channel.
+ *
+ * Launch a new OR connection to <b>addr</b>:<b>port</b> and expect to
+ * handshake with an OR with identity digest <b>id_digest</b>, and wrap
+ * it in a channel_tls_t.
+ */
+channel_t *
+channel_tls_connect(const tor_addr_t *addr, uint16_t port,
+ const char *id_digest,
+ const ed25519_public_key_t *ed_id)
+{
+ channel_tls_t *tlschan = tor_malloc_zero(sizeof(*tlschan));
+ channel_t *chan = &(tlschan->base_);
+
+ channel_tls_common_init(tlschan);
+
+ log_debug(LD_CHANNEL,
+ "In channel_tls_connect() for channel %p "
+ "(global id %"PRIu64 ")",
+ tlschan,
+ (chan->global_identifier));
+
+ if (is_local_addr(addr)) {
+ log_debug(LD_CHANNEL,
+ "Marking new outgoing channel %"PRIu64 " at %p as local",
+ (chan->global_identifier), chan);
+ channel_mark_local(chan);
+ } else {
+ log_debug(LD_CHANNEL,
+ "Marking new outgoing channel %"PRIu64 " at %p as remote",
+ (chan->global_identifier), chan);
+ channel_mark_remote(chan);
+ }
+
+ channel_mark_outgoing(chan);
+
+ /* Set up or_connection stuff */
+ tlschan->conn = connection_or_connect(addr, port, id_digest, ed_id, tlschan);
+ /* connection_or_connect() will fill in tlschan->conn */
+ if (!(tlschan->conn)) {
+ chan->reason_for_closing = CHANNEL_CLOSE_FOR_ERROR;
+ channel_change_state(chan, CHANNEL_STATE_ERROR);
+ goto err;
+ }
+
+ log_debug(LD_CHANNEL,
+ "Got orconn %p for channel with global id %"PRIu64,
+ tlschan->conn, (chan->global_identifier));
+
+ goto done;
+
+ err:
+ circuitmux_free(chan->cmux);
+ tor_free(tlschan);
+ chan = NULL;
+
+ done:
+ /* If we got one, we should register it */
+ if (chan) channel_register(chan);
+
+ return chan;
+}
+
+/**
+ * Return the current channel_tls_t listener.
+ *
+ * Returns the current channel listener for incoming TLS connections, or
+ * NULL if none has been established
+ */
+channel_listener_t *
+channel_tls_get_listener(void)
+{
+ return channel_tls_listener;
+}
+
+/**
+ * Start a channel_tls_t listener if necessary.
+ *
+ * Return the current channel_tls_t listener, or start one if we haven't yet,
+ * and return that.
+ */
+channel_listener_t *
+channel_tls_start_listener(void)
+{
+ channel_listener_t *listener;
+
+ if (!channel_tls_listener) {
+ listener = tor_malloc_zero(sizeof(*listener));
+ channel_init_listener(listener);
+ listener->state = CHANNEL_LISTENER_STATE_LISTENING;
+ listener->close = channel_tls_listener_close_method;
+ listener->describe_transport =
+ channel_tls_listener_describe_transport_method;
+
+ channel_tls_listener = listener;
+
+ log_debug(LD_CHANNEL,
+ "Starting TLS channel listener %p with global id %"PRIu64,
+ listener, (listener->global_identifier));
+
+ channel_listener_register(listener);
+ } else listener = channel_tls_listener;
+
+ return listener;
+}
+
+/**
+ * Free everything on shutdown.
+ *
+ * Not much to do here, since channel_free_all() takes care of a lot, but let's
+ * get rid of the listener.
+ */
+void
+channel_tls_free_all(void)
+{
+ channel_listener_t *old_listener = NULL;
+
+ log_debug(LD_CHANNEL,
+ "Shutting down TLS channels...");
+
+ if (channel_tls_listener) {
+ /*
+ * When we close it, channel_tls_listener will get nulled out, so save
+ * a pointer so we can free it.
+ */
+ old_listener = channel_tls_listener;
+ log_debug(LD_CHANNEL,
+ "Closing channel_tls_listener with ID %"PRIu64
+ " at %p.",
+ (old_listener->global_identifier),
+ old_listener);
+ channel_listener_unregister(old_listener);
+ channel_listener_mark_for_close(old_listener);
+ channel_listener_free(old_listener);
+ tor_assert(channel_tls_listener == NULL);
+ }
+
+ log_debug(LD_CHANNEL,
+ "Done shutting down TLS channels");
+}
+
+/**
+ * Create a new channel around an incoming or_connection_t.
+ */
+channel_t *
+channel_tls_handle_incoming(or_connection_t *orconn)
+{
+ channel_tls_t *tlschan = tor_malloc_zero(sizeof(*tlschan));
+ channel_t *chan = &(tlschan->base_);
+
+ tor_assert(orconn);
+ tor_assert(!(orconn->chan));
+
+ channel_tls_common_init(tlschan);
+
+ /* Link the channel and orconn to each other */
+ tlschan->conn = orconn;
+ orconn->chan = tlschan;
+
+ if (is_local_addr(&(TO_CONN(orconn)->addr))) {
+ log_debug(LD_CHANNEL,
+ "Marking new incoming channel %"PRIu64 " at %p as local",
+ (chan->global_identifier), chan);
+ channel_mark_local(chan);
+ } else {
+ log_debug(LD_CHANNEL,
+ "Marking new incoming channel %"PRIu64 " at %p as remote",
+ (chan->global_identifier), chan);
+ channel_mark_remote(chan);
+ }
+
+ channel_mark_incoming(chan);
+
+ /* Register it */
+ channel_register(chan);
+
+ return chan;
+}
+
+/*********
+ * Casts *
+ ********/
+
+/**
+ * Cast a channel_tls_t to a channel_t.
+ */
+channel_t *
+channel_tls_to_base(channel_tls_t *tlschan)
+{
+ if (!tlschan) return NULL;
+
+ return &(tlschan->base_);
+}
+
+/**
+ * Cast a channel_t to a channel_tls_t, with appropriate type-checking
+ * asserts.
+ */
+channel_tls_t *
+channel_tls_from_base(channel_t *chan)
+{
+ if (!chan) return NULL;
+
+ tor_assert(chan->magic == TLS_CHAN_MAGIC);
+
+ return (channel_tls_t *)(chan);
+}
+
+/********************************************
+ * Method implementations for channel_tls_t *
+ *******************************************/
+
+/**
+ * Close a channel_tls_t.
+ *
+ * This implements the close method for channel_tls_t.
+ */
+static void
+channel_tls_close_method(channel_t *chan)
+{
+ channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
+
+ tor_assert(tlschan);
+
+ if (tlschan->conn) connection_or_close_normally(tlschan->conn, 1);
+ else {
+ /* Weird - we'll have to change the state ourselves, I guess */
+ log_info(LD_CHANNEL,
+ "Tried to close channel_tls_t %p with NULL conn",
+ tlschan);
+ channel_change_state(chan, CHANNEL_STATE_ERROR);
+ }
+}
+
+/**
+ * Describe the transport for a channel_tls_t.
+ *
+ * This returns the string "TLS channel (connection <id>)" to the upper
+ * layer.
+ */
+static const char *
+channel_tls_describe_transport_method(channel_t *chan)
+{
+ static char *buf = NULL;
+ uint64_t id;
+ channel_tls_t *tlschan;
+ const char *rv = NULL;
+
+ tor_assert(chan);
+
+ tlschan = BASE_CHAN_TO_TLS(chan);
+
+ if (tlschan->conn) {
+ id = TO_CONN(tlschan->conn)->global_identifier;
+
+ if (buf) tor_free(buf);
+ tor_asprintf(&buf,
+ "TLS channel (connection %"PRIu64 ")",
+ (id));
+
+ rv = buf;
+ } else {
+ rv = "TLS channel (no connection)";
+ }
+
+ return rv;
+}
+
+/**
+ * Free a channel_tls_t.
+ *
+ * This is called by the generic channel layer when freeing a channel_tls_t;
+ * this happens either on a channel which has already reached
+ * CHANNEL_STATE_CLOSED or CHANNEL_STATE_ERROR from channel_run_cleanup() or
+ * on shutdown from channel_free_all(). In the latter case we might still
+ * have an orconn active (which connection_free_all() will get to later),
+ * so we should null out its channel pointer now.
+ */
+static void
+channel_tls_free_method(channel_t *chan)
+{
+ channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
+
+ tor_assert(tlschan);
+
+ if (tlschan->conn) {
+ tlschan->conn->chan = NULL;
+ tlschan->conn = NULL;
+ }
+}
+
+/**
+ * Get an estimate of the average TLS overhead for the upper layer.
+ */
+static double
+channel_tls_get_overhead_estimate_method(channel_t *chan)
+{
+ double overhead = 1.0;
+ channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
+
+ tor_assert(tlschan);
+ tor_assert(tlschan->conn);
+
+ /* Just return 1.0 if we don't have sensible data */
+ if (tlschan->conn->bytes_xmitted > 0 &&
+ tlschan->conn->bytes_xmitted_by_tls >=
+ tlschan->conn->bytes_xmitted) {
+ overhead = ((double)(tlschan->conn->bytes_xmitted_by_tls)) /
+ ((double)(tlschan->conn->bytes_xmitted));
+
+ /*
+ * Never estimate more than 2.0; otherwise we get silly large estimates
+ * at the very start of a new TLS connection.
+ */
+ if (overhead > 2.0)
+ overhead = 2.0;
+ }
+
+ log_debug(LD_CHANNEL,
+ "Estimated overhead ratio for TLS chan %"PRIu64 " is %f",
+ (chan->global_identifier), overhead);
+
+ return overhead;
+}
+
+/**
+ * Get the remote address of a channel_tls_t.
+ *
+ * This implements the get_remote_addr method for channel_tls_t; copy the
+ * remote endpoint of the channel to addr_out and return 1 if we have an
+ * underlying connection, or return 0 otherwise.
+ */
+static int
+channel_tls_get_remote_addr_method(channel_t *chan, tor_addr_t *addr_out)
+{
+ int rv = 0;
+ channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
+
+ tor_assert(tlschan);
+ tor_assert(addr_out);
+
+ if (tlschan->conn) {
+ tor_addr_copy(addr_out, &(tlschan->conn->real_addr));
+ rv = 1;
+ } else tor_addr_make_unspec(addr_out);
+
+ return rv;
+}
+
+/**
+ * Get the name of the pluggable transport used by a channel_tls_t.
+ *
+ * This implements the get_transport_name for channel_tls_t. If the
+ * channel uses a pluggable transport, copy its name to
+ * <b>transport_out</b> and return 0. If the channel did not use a
+ * pluggable transport, return -1.
+ */
+static int
+channel_tls_get_transport_name_method(channel_t *chan, char **transport_out)
+{
+ channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
+
+ tor_assert(tlschan);
+ tor_assert(transport_out);
+ tor_assert(tlschan->conn);
+
+ if (!tlschan->conn->ext_or_transport)
+ return -1;
+
+ *transport_out = tor_strdup(tlschan->conn->ext_or_transport);
+ return 0;
+}
+
+/**
+ * Get endpoint description of a channel_tls_t.
+ *
+ * This implements the get_remote_descr method for channel_tls_t; it returns
+ * a text description of the remote endpoint of the channel suitable for use
+ * in log messages. The req parameter is 0 for the canonical address or 1 for
+ * the actual address seen.
+ */
+static const char *
+channel_tls_get_remote_descr_method(channel_t *chan, int flags)
+{
+#define MAX_DESCR_LEN 32
+
+ static char buf[MAX_DESCR_LEN + 1];
+ channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
+ connection_t *conn;
+ const char *answer = NULL;
+ char *addr_str;
+
+ tor_assert(tlschan);
+
+ if (tlschan->conn) {
+ conn = TO_CONN(tlschan->conn);
+ switch (flags) {
+ case 0:
+ /* Canonical address with port */
+ tor_snprintf(buf, MAX_DESCR_LEN + 1,
+ "%s:%u", conn->address, conn->port);
+ answer = buf;
+ break;
+ case GRD_FLAG_ORIGINAL:
+ /* Actual address with port */
+ addr_str = tor_addr_to_str_dup(&(tlschan->conn->real_addr));
+ tor_snprintf(buf, MAX_DESCR_LEN + 1,
+ "%s:%u", addr_str, conn->port);
+ tor_free(addr_str);
+ answer = buf;
+ break;
+ case GRD_FLAG_ADDR_ONLY:
+ /* Canonical address, no port */
+ strlcpy(buf, conn->address, sizeof(buf));
+ answer = buf;
+ break;
+ case GRD_FLAG_ORIGINAL|GRD_FLAG_ADDR_ONLY:
+ /* Actual address, no port */
+ addr_str = tor_addr_to_str_dup(&(tlschan->conn->real_addr));
+ strlcpy(buf, addr_str, sizeof(buf));
+ tor_free(addr_str);
+ answer = buf;
+ break;
+ default:
+ /* Something's broken in channel.c */
+ tor_assert_nonfatal_unreached_once();
+ }
+ } else {
+ strlcpy(buf, "(No connection)", sizeof(buf));
+ answer = buf;
+ }
+
+ return answer;
+}
+
+/**
+ * Tell the upper layer if we have queued writes.
+ *
+ * This implements the has_queued_writes method for channel_tls_t; it returns
+ * 1 iff we have queued writes on the outbuf of the underlying or_connection_t.
+ */
+static int
+channel_tls_has_queued_writes_method(channel_t *chan)
+{
+ size_t outbuf_len;
+ channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
+
+ tor_assert(tlschan);
+ if (!(tlschan->conn)) {
+ log_info(LD_CHANNEL,
+ "something called has_queued_writes on a tlschan "
+ "(%p with ID %"PRIu64 " but no conn",
+ chan, (chan->global_identifier));
+ }
+
+ outbuf_len = (tlschan->conn != NULL) ?
+ connection_get_outbuf_len(TO_CONN(tlschan->conn)) :
+ 0;
+
+ return (outbuf_len > 0);
+}
+
+/**
+ * Tell the upper layer if we're canonical.
+ *
+ * This implements the is_canonical method for channel_tls_t; if req is zero,
+ * it returns whether this is a canonical channel, and if req is one it
+ * returns whether the is_canonical bit can be relied upon.
+ */
+static int
+channel_tls_is_canonical_method(channel_t *chan, int req)
+{
+ int answer = 0;
+ channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
+
+ tor_assert(tlschan);
+
+ if (tlschan->conn) {
+ switch (req) {
+ case 0:
+ answer = tlschan->conn->is_canonical;
+ break;
+ case 1:
+ /*
+ * Is the is_canonical bit reliable? In protocols version 2 and up
+ * we get the canonical address from a NETINFO cell, but in older
+ * versions it might be based on an obsolete descriptor.
+ */
+ answer = (tlschan->conn->link_proto >= 2);
+ break;
+ default:
+ /* This shouldn't happen; channel.c is broken if it does */
+ tor_assert_nonfatal_unreached_once();
+ }
+ }
+ /* else return 0 for tlschan->conn == NULL */
+
+ return answer;
+}
+
+/**
+ * Check if we match an extend_info_t.
+ *
+ * This implements the matches_extend_info method for channel_tls_t; the upper
+ * layer wants to know if this channel matches an extend_info_t.
+ */
+static int
+channel_tls_matches_extend_info_method(channel_t *chan,
+ extend_info_t *extend_info)
+{
+ channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
+
+ tor_assert(tlschan);
+ tor_assert(extend_info);
+
+ /* Never match if we have no conn */
+ if (!(tlschan->conn)) {
+ log_info(LD_CHANNEL,
+ "something called matches_extend_info on a tlschan "
+ "(%p with ID %"PRIu64 " but no conn",
+ chan, (chan->global_identifier));
+ return 0;
+ }
+
+ return (tor_addr_eq(&(extend_info->addr),
+ &(TO_CONN(tlschan->conn)->addr)) &&
+ (extend_info->port == TO_CONN(tlschan->conn)->port));
+}
+
+/**
+ * Check if we match a target address; return true iff we do.
+ *
+ * This implements the matches_target method for channel_tls_t; the upper
+ * layer wants to know if this channel matches a target address when extending
+ * a circuit.
+ */
+static int
+channel_tls_matches_target_method(channel_t *chan,
+ const tor_addr_t *target)
+{
+ channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
+
+ tor_assert(tlschan);
+ tor_assert(target);
+
+ /* Never match if we have no conn */
+ if (!(tlschan->conn)) {
+ log_info(LD_CHANNEL,
+ "something called matches_target on a tlschan "
+ "(%p with ID %"PRIu64 " but no conn",
+ chan, (chan->global_identifier));
+ return 0;
+ }
+
+ /* real_addr is the address this connection came from.
+ * base_.addr is updated by connection_or_init_conn_from_address()
+ * to be the address in the descriptor. It may be tempting to
+ * match either address, but if we did so, it would
+ * enable someone who steals a relay's keys to impersonate/MITM it
+ * from anywhere on the Internet! (Because they could make long-lived
+ * TLS connections from anywhere to all relays, and wait for them to
+ * be used for extends).
+ */
+ return tor_addr_eq(&(tlschan->conn->real_addr), target);
+}
+
+/**
+ * Tell the upper layer how many bytes we have queued and not yet
+ * sent.
+ */
+static size_t
+channel_tls_num_bytes_queued_method(channel_t *chan)
+{
+ channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
+
+ tor_assert(tlschan);
+ tor_assert(tlschan->conn);
+
+ return connection_get_outbuf_len(TO_CONN(tlschan->conn));
+}
+
+/**
+ * Tell the upper layer how many cells we can accept to write.
+ *
+ * This implements the num_cells_writeable method for channel_tls_t; it
+ * returns an estimate of the number of cells we can accept with
+ * channel_tls_write_*_cell().
+ */
+static int
+channel_tls_num_cells_writeable_method(channel_t *chan)
+{
+ size_t outbuf_len;
+ ssize_t n;
+ channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
+ size_t cell_network_size;
+
+ tor_assert(tlschan);
+ tor_assert(tlschan->conn);
+
+ cell_network_size = get_cell_network_size(tlschan->conn->wide_circ_ids);
+ outbuf_len = connection_get_outbuf_len(TO_CONN(tlschan->conn));
+ /* Get the number of cells */
+ n = CEIL_DIV(OR_CONN_HIGHWATER - outbuf_len, cell_network_size);
+ if (n < 0) n = 0;
+#if SIZEOF_SIZE_T > SIZEOF_INT
+ if (n > INT_MAX) n = INT_MAX;
+#endif
+
+ return (int)n;
+}
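+
+/* Worked example (illustrative; assumes OR_CONN_HIGHWATER is 32 KiB and a
+ * wide-circ-id cell network size of 514 bytes): with an empty outbuf,
+ * n = CEIL_DIV(32768, 514) = 64, so we would report room for about 64 more
+ * cells before reaching the high-water mark. */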
+
+/**
+ * Write a cell to a channel_tls_t.
+ *
+ * This implements the write_cell method for channel_tls_t; given a
+ * channel_tls_t and a cell_t, transmit the cell_t.
+ */
+static int
+channel_tls_write_cell_method(channel_t *chan, cell_t *cell)
+{
+ channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
+ int written = 0;
+
+ tor_assert(tlschan);
+ tor_assert(cell);
+
+ if (tlschan->conn) {
+ connection_or_write_cell_to_buf(cell, tlschan->conn);
+ ++written;
+ } else {
+ log_info(LD_CHANNEL,
+ "something called write_cell on a tlschan "
+ "(%p with ID %"PRIu64 " but no conn",
+ chan, (chan->global_identifier));
+ }
+
+ return written;
+}
+
+/**
+ * Write a packed cell to a channel_tls_t.
+ *
+ * This implements the write_packed_cell method for channel_tls_t; given a
+ * channel_tls_t and a packed_cell_t, transmit the packed_cell_t.
+ *
+ * Return 0 on success or negative value on error. The caller must free the
+ * packed cell.
+ */
+static int
+channel_tls_write_packed_cell_method(channel_t *chan,
+ packed_cell_t *packed_cell)
+{
+ tor_assert(chan);
+ channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
+ size_t cell_network_size = get_cell_network_size(chan->wide_circ_ids);
+
+ tor_assert(tlschan);
+ tor_assert(packed_cell);
+
+ if (tlschan->conn) {
+ connection_buf_add(packed_cell->body, cell_network_size,
+ TO_CONN(tlschan->conn));
+ } else {
+ log_info(LD_CHANNEL,
+ "something called write_packed_cell on a tlschan "
+ "(%p with ID %"PRIu64 " but no conn",
+ chan, (chan->global_identifier));
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Write a variable-length cell to a channel_tls_t.
+ *
+ * This implements the write_var_cell method for channel_tls_t; given a
+ * channel_tls_t and a var_cell_t, transmit the var_cell_t.
+ */
+static int
+channel_tls_write_var_cell_method(channel_t *chan, var_cell_t *var_cell)
+{
+ channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
+ int written = 0;
+
+ tor_assert(tlschan);
+ tor_assert(var_cell);
+
+ if (tlschan->conn) {
+ connection_or_write_var_cell_to_buf(var_cell, tlschan->conn);
+ ++written;
+ } else {
+ log_info(LD_CHANNEL,
+ "something called write_var_cell on a tlschan "
+ "(%p with ID %"PRIu64 " but no conn",
+ chan, (chan->global_identifier));
+ }
+
+ return written;
+}
+
+/*************************************************
+ * Method implementations for channel_listener_t *
+ ************************************************/
+
+/**
+ * Close a channel_listener_t.
+ *
+ * This implements the close method for channel_listener_t.
+ */
+static void
+channel_tls_listener_close_method(channel_listener_t *chan_l)
+{
+ tor_assert(chan_l);
+
+ /*
+ * For listeners we just change state through to CLOSED, but make sure to
+ * NULL out channel_tls_listener if the listener being closed is it.
+ */
+ if (chan_l == channel_tls_listener)
+ channel_tls_listener = NULL;
+
+ if (!(chan_l->state == CHANNEL_LISTENER_STATE_CLOSING ||
+ chan_l->state == CHANNEL_LISTENER_STATE_CLOSED ||
+ chan_l->state == CHANNEL_LISTENER_STATE_ERROR)) {
+ channel_listener_change_state(chan_l, CHANNEL_LISTENER_STATE_CLOSING);
+ }
+
+ if (chan_l->incoming_list) {
+ SMARTLIST_FOREACH_BEGIN(chan_l->incoming_list,
+ channel_t *, ichan) {
+ channel_mark_for_close(ichan);
+ } SMARTLIST_FOREACH_END(ichan);
+
+ smartlist_free(chan_l->incoming_list);
+ chan_l->incoming_list = NULL;
+ }
+
+ if (!(chan_l->state == CHANNEL_LISTENER_STATE_CLOSED ||
+ chan_l->state == CHANNEL_LISTENER_STATE_ERROR)) {
+ channel_listener_change_state(chan_l, CHANNEL_LISTENER_STATE_CLOSED);
+ }
+}
+
+/**
+ * Describe the transport for a channel_listener_t.
+ *
+ * This returns the string "TLS channel (listening)" to the upper
+ * layer.
+ */
+static const char *
+channel_tls_listener_describe_transport_method(channel_listener_t *chan_l)
+{
+ tor_assert(chan_l);
+
+ return "TLS channel (listening)";
+}
+
+/*******************************************************
+ * Functions for handling events on an or_connection_t *
+ ******************************************************/
+
+/**
+ * Handle an orconn state change.
+ *
+ * This function will be called by connection_or.c when the or_connection_t
+ * associated with this channel_tls_t changes state.
+ */
+void
+channel_tls_handle_state_change_on_orconn(channel_tls_t *chan,
+ or_connection_t *conn,
+ uint8_t old_state,
+ uint8_t state)
+{
+ channel_t *base_chan;
+
+ tor_assert(chan);
+ tor_assert(conn);
+ tor_assert(conn->chan == chan);
+ tor_assert(chan->conn == conn);
+ /* Shut the compiler up without triggering -Wtautological-compare */
+ (void)old_state;
+
+ base_chan = TLS_CHAN_TO_BASE(chan);
+
+ /* Make sure the base connection state makes sense - shouldn't be error
+ * or closed. */
+
+ tor_assert(CHANNEL_IS_OPENING(base_chan) ||
+ CHANNEL_IS_OPEN(base_chan) ||
+ CHANNEL_IS_MAINT(base_chan) ||
+ CHANNEL_IS_CLOSING(base_chan));
+
+ /* Did we just go to state open? */
+ if (state == OR_CONN_STATE_OPEN) {
+ /*
+ * We can go to CHANNEL_STATE_OPEN from CHANNEL_STATE_OPENING or
+ * CHANNEL_STATE_MAINT on this.
+ */
+ channel_change_state_open(base_chan);
+ /* We might have just become writeable; check and tell the scheduler */
+ if (connection_or_num_cells_writeable(conn) > 0) {
+ scheduler_channel_wants_writes(base_chan);
+ }
+ } else {
+ /*
+ * Not open, so from CHANNEL_STATE_OPEN we go to CHANNEL_STATE_MAINT,
+ * otherwise no change.
+ */
+ if (CHANNEL_IS_OPEN(base_chan)) {
+ channel_change_state(base_chan, CHANNEL_STATE_MAINT);
+ }
+ }
+}
+
+#ifdef KEEP_TIMING_STATS
+
+/**
+ * Timing states wrapper.
+ *
+ * This is a wrapper function around the actual function that processes the
+ * <b>cell</b> that just arrived on <b>chan</b>. Increment <b>*time</b>
+ * by the number of microseconds used by the call to <b>*func(cell, chan)</b>.
+ */
+static void
+channel_tls_time_process_cell(cell_t *cell, channel_tls_t *chan, int *time,
+ void (*func)(cell_t *, channel_tls_t *))
+{
+ struct timeval start, end;
+ long time_passed;
+
+ tor_gettimeofday(&start);
+
+ (*func)(cell, chan);
+
+ tor_gettimeofday(&end);
+ time_passed = tv_udiff(&start, &end);
+
+ if (time_passed > 10000) { /* more than 10ms */
+ log_debug(LD_OR,"That call just took %ld ms.",time_passed/1000);
+ }
+
+ if (time_passed < 0) {
+ log_info(LD_GENERAL,"That call took us back in time!");
+ time_passed = 0;
+ }
+
+ *time += time_passed;
+}
+#endif /* defined(KEEP_TIMING_STATS) */
+
+/**
+ * Handle an incoming cell on a channel_tls_t.
+ *
+ * This is called from connection_or.c to handle an arriving cell; it checks
+ * for cell types specific to the handshake for this transport protocol and
+ * handles them, and queues all other cells to the channel_t layer, which
+ * eventually will hand them off to command.c.
+ *
+ * The channel layer itself decides whether the cell should be queued or
+ * can be handed off immediately to the upper-layer code. It is responsible
+ * for copying in the case that it queues; we merely pass pointers through
+ * which we get from connection_or_process_cells_from_inbuf().
+ */
+void
+channel_tls_handle_cell(cell_t *cell, or_connection_t *conn)
+{
+ channel_tls_t *chan;
+ int handshaking;
+
+#ifdef KEEP_TIMING_STATS
+#define PROCESS_CELL(tp, cl, cn) STMT_BEGIN { \
+ ++num ## tp; \
+ channel_tls_time_process_cell(cl, cn, & tp ## time , \
+ channel_tls_process_ ## tp ## _cell); \
+ } STMT_END
+#else /* !(defined(KEEP_TIMING_STATS)) */
+#define PROCESS_CELL(tp, cl, cn) channel_tls_process_ ## tp ## _cell(cl, cn)
+#endif /* defined(KEEP_TIMING_STATS) */
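+
+ /* For example, without KEEP_TIMING_STATS, PROCESS_CELL(netinfo, cell, chan)
+ * expands to channel_tls_process_netinfo_cell(cell, chan); with it, the
+ * same call is routed through channel_tls_time_process_cell(), which
+ * accumulates the elapsed microseconds into netinfo_time. */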
+
+ tor_assert(cell);
+ tor_assert(conn);
+
+ chan = conn->chan;
+
+ if (!chan) {
+ log_warn(LD_CHANNEL,
+ "Got a cell_t on an OR connection with no channel");
+ return;
+ }
+
+ handshaking = (TO_CONN(conn)->state != OR_CONN_STATE_OPEN);
+
+ if (conn->base_.marked_for_close)
+ return;
+
+ /* Reject all but VERSIONS and NETINFO when handshaking. */
+ /* (VERSIONS should actually be impossible; it's variable-length.) */
+ if (handshaking && cell->command != CELL_VERSIONS &&
+ cell->command != CELL_NETINFO) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Received unexpected cell command %d in chan state %s / "
+ "conn state %s; closing the connection.",
+ (int)cell->command,
+ channel_state_to_string(TLS_CHAN_TO_BASE(chan)->state),
+ conn_state_to_string(CONN_TYPE_OR, TO_CONN(conn)->state));
+ connection_or_close_for_error(conn, 0);
+ return;
+ }
+
+ if (conn->base_.state == OR_CONN_STATE_OR_HANDSHAKING_V3)
+ or_handshake_state_record_cell(conn, conn->handshake_state, cell, 1);
+
+ /* We note that we're on the internet whenever we read a cell. This is
+ * a fast operation. */
+ entry_guards_note_internet_connectivity(get_guard_selection_info());
+ rep_hist_padding_count_read(PADDING_TYPE_TOTAL);
+
+ if (TLS_CHAN_TO_BASE(chan)->currently_padding)
+ rep_hist_padding_count_read(PADDING_TYPE_ENABLED_TOTAL);
+
+ switch (cell->command) {
+ case CELL_PADDING:
+ rep_hist_padding_count_read(PADDING_TYPE_CELL);
+ if (TLS_CHAN_TO_BASE(chan)->currently_padding)
+ rep_hist_padding_count_read(PADDING_TYPE_ENABLED_CELL);
+ ++stats_n_padding_cells_processed;
+ /* do nothing */
+ break;
+ case CELL_VERSIONS:
+ tor_fragile_assert();
+ break;
+ case CELL_NETINFO:
+ ++stats_n_netinfo_cells_processed;
+ PROCESS_CELL(netinfo, cell, chan);
+ break;
+ case CELL_PADDING_NEGOTIATE:
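+ /* Note: there is no dedicated stats counter for PADDING_NEGOTIATE
+ * cells in this file; this reuses the NETINFO counter. */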
+ ++stats_n_netinfo_cells_processed;
+ PROCESS_CELL(padding_negotiate, cell, chan);
+ break;
+ case CELL_CREATE:
+ case CELL_CREATE_FAST:
+ case CELL_CREATED:
+ case CELL_CREATED_FAST:
+ case CELL_RELAY:
+ case CELL_RELAY_EARLY:
+ case CELL_DESTROY:
+ case CELL_CREATE2:
+ case CELL_CREATED2:
+ /*
+ * These are all transport independent and we pass them up through the
+ * channel_t mechanism. They are ultimately handled in command.c.
+ */
+ channel_process_cell(TLS_CHAN_TO_BASE(chan), cell);
+ break;
+ default:
+ log_fn(LOG_INFO, LD_PROTOCOL,
+ "Cell of unknown type (%d) received in channeltls.c. "
+ "Dropping.",
+ cell->command);
+ break;
+ }
+}
+
+/**
+ * Handle an incoming variable-length cell on a channel_tls_t.
+ *
+ * Process a <b>var_cell</b> that was just received on <b>conn</b>. Keep
+ * internal statistics about how many of each cell we've processed so far
+ * this second, and the total number of microseconds it took to
+ * process each type of cell. All the var_cell commands are handshake-
+ * related and live below the channel_t layer, so no variable-length
+ * cells ever get delivered in the current implementation, but I've left
+ * the mechanism in place for future use.
+ *
+ * If we were handing them off to the upper layer, the channel_t queueing
+ * code would be responsible for memory management, and we'd just be passing
+ * pointers through from connection_or_process_cells_from_inbuf(). That
+ * caller always frees them after this function returns, so this function
+ * should never free var_cell.
+ */
+void
+channel_tls_handle_var_cell(var_cell_t *var_cell, or_connection_t *conn)
+{
+ channel_tls_t *chan;
+
+#ifdef KEEP_TIMING_STATS
+ /* how many of each cell have we seen so far this second? needs better
+ * name. */
+ static int num_versions = 0, num_certs = 0;
+ static int num_auth_challenge = 0, num_authenticate = 0;
+ /* per-type microsecond accumulators passed to the PROCESS_CELL macro
+ * via its "tp ## time" argument */
+ static int versions_time = 0, certs_time = 0;
+ static int auth_challenge_time = 0, authenticate_time = 0;
+ static time_t current_second = 0; /* from previous calls to time */
+ time_t now = time(NULL);
+
+ if (current_second == 0) current_second = now;
+ if (now > current_second) { /* the second has rolled over */
+ /* print stats */
+ log_info(LD_OR,
+ "At end of second: %d versions (%d ms), %d certs (%d ms)",
+ num_versions, versions_time / ((now - current_second) * 1000),
+ num_certs, certs_time / ((now - current_second) * 1000));
+
+ num_versions = num_certs = 0;
+ versions_time = certs_time = 0;
+
+ /* remember which second it is, for next time */
+ current_second = now;
+ }
+#endif /* defined(KEEP_TIMING_STATS) */
+
+ tor_assert(var_cell);
+ tor_assert(conn);
+
+ chan = conn->chan;
+
+ if (!chan) {
+ log_warn(LD_CHANNEL,
+ "Got a var_cell_t on an OR connection with no channel");
+ return;
+ }
+
+ if (TO_CONN(conn)->marked_for_close)
+ return;
+
+ switch (TO_CONN(conn)->state) {
+ case OR_CONN_STATE_OR_HANDSHAKING_V2:
+ if (var_cell->command != CELL_VERSIONS) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Received a cell with command %d in unexpected "
+ "orconn state \"%s\" [%d], channel state \"%s\" [%d]; "
+ "closing the connection.",
+ (int)(var_cell->command),
+ conn_state_to_string(CONN_TYPE_OR, TO_CONN(conn)->state),
+ TO_CONN(conn)->state,
+ channel_state_to_string(TLS_CHAN_TO_BASE(chan)->state),
+ (int)(TLS_CHAN_TO_BASE(chan)->state));
+ /*
+ * The code in connection_or.c will tell channel_t to close for
+ * error; it will go to CHANNEL_STATE_CLOSING, and then to
+ * CHANNEL_STATE_ERROR when conn is closed.
+ */
+ connection_or_close_for_error(conn, 0);
+ return;
+ }
+ break;
+ case OR_CONN_STATE_TLS_HANDSHAKING:
+ /* If we're using bufferevents, it's entirely possible for us to
+ * notice "hey, data arrived!" before we notice "hey, the handshake
+ * finished!" And we need to be accepting both at once to handle both
+ * the v2 and v3 handshakes. */
+ /* But that shouldn't be happening any longer, now that we've
+ * disabled bufferevents. */
+ tor_assert_nonfatal_unreached_once();
+
+ /* fall through */
+ case OR_CONN_STATE_TLS_SERVER_RENEGOTIATING:
+ if (!(command_allowed_before_handshake(var_cell->command))) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Received a cell with command %d in unexpected "
+ "orconn state \"%s\" [%d], channel state \"%s\" [%d]; "
+ "closing the connection.",
+ (int)(var_cell->command),
+ conn_state_to_string(CONN_TYPE_OR, TO_CONN(conn)->state),
+ (int)(TO_CONN(conn)->state),
+ channel_state_to_string(TLS_CHAN_TO_BASE(chan)->state),
+ (int)(TLS_CHAN_TO_BASE(chan)->state));
+ /* see above comment about CHANNEL_STATE_ERROR */
+ connection_or_close_for_error(conn, 0);
+ return;
+ } else {
+ if (enter_v3_handshake_with_cell(var_cell, chan) < 0)
+ return;
+ }
+ break;
+ case OR_CONN_STATE_OR_HANDSHAKING_V3:
+ if (var_cell->command != CELL_AUTHENTICATE)
+ or_handshake_state_record_var_cell(conn, conn->handshake_state,
+ var_cell, 1);
+ break; /* Everything is allowed */
+ case OR_CONN_STATE_OPEN:
+ if (conn->link_proto < 3) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Received a variable-length cell with command %d in orconn "
+ "state %s [%d], channel state %s [%d] with link protocol %d; "
+ "ignoring it.",
+ (int)(var_cell->command),
+ conn_state_to_string(CONN_TYPE_OR, TO_CONN(conn)->state),
+ (int)(TO_CONN(conn)->state),
+ channel_state_to_string(TLS_CHAN_TO_BASE(chan)->state),
+ (int)(TLS_CHAN_TO_BASE(chan)->state),
+ (int)(conn->link_proto));
+ return;
+ }
+ break;
+ default:
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Received var-length cell with command %d in unexpected "
+ "orconn state \"%s\" [%d], channel state \"%s\" [%d]; "
+ "ignoring it.",
+ (int)(var_cell->command),
+ conn_state_to_string(CONN_TYPE_OR, TO_CONN(conn)->state),
+ (int)(TO_CONN(conn)->state),
+ channel_state_to_string(TLS_CHAN_TO_BASE(chan)->state),
+ (int)(TLS_CHAN_TO_BASE(chan)->state));
+ return;
+ }
+
+ /* We note that we're on the internet whenever we read a cell. This is
+ * a fast operation. */
+ entry_guards_note_internet_connectivity(get_guard_selection_info());
+
+ /* Now handle the cell */
+
+ switch (var_cell->command) {
+ case CELL_VERSIONS:
+ ++stats_n_versions_cells_processed;
+ PROCESS_CELL(versions, var_cell, chan);
+ break;
+ case CELL_VPADDING:
+ ++stats_n_vpadding_cells_processed;
+ /* Do nothing */
+ break;
+ case CELL_CERTS:
+ ++stats_n_certs_cells_processed;
+ PROCESS_CELL(certs, var_cell, chan);
+ break;
+ case CELL_AUTH_CHALLENGE:
+ ++stats_n_auth_challenge_cells_processed;
+ PROCESS_CELL(auth_challenge, var_cell, chan);
+ break;
+ case CELL_AUTHENTICATE:
+ ++stats_n_authenticate_cells_processed;
+ PROCESS_CELL(authenticate, var_cell, chan);
+ break;
+ case CELL_AUTHORIZE:
+ ++stats_n_authorize_cells_processed;
+ /* Ignored so far. */
+ break;
+ default:
+ log_fn(LOG_INFO, LD_PROTOCOL,
+ "Variable-length cell of unknown type (%d) received.",
+ (int)(var_cell->command));
+ break;
+ }
+}
+
+/**
+ * Update channel marks after connection_or.c has changed an address.
+ *
+ * This is called from connection_or_init_conn_from_address() after the
+ * connection's _base.addr or real_addr fields have potentially been changed
+ * so we can recalculate the local mark. Notably, this happens when incoming
+ * connections are reverse-proxied and we only learn the real address of the
+ * remote router by looking it up in the consensus after we finish the
+ * handshake and know an authenticated identity digest.
+ */
+void
+channel_tls_update_marks(or_connection_t *conn)
+{
+ channel_t *chan = NULL;
+
+ tor_assert(conn);
+ tor_assert(conn->chan);
+
+ chan = TLS_CHAN_TO_BASE(conn->chan);
+
+ if (is_local_addr(&(TO_CONN(conn)->addr))) {
+ if (!channel_is_local(chan)) {
+ log_debug(LD_CHANNEL,
+ "Marking channel %"PRIu64 " at %p as local",
+ (chan->global_identifier), chan);
+ channel_mark_local(chan);
+ }
+ } else {
+ if (channel_is_local(chan)) {
+ log_debug(LD_CHANNEL,
+ "Marking channel %"PRIu64 " at %p as remote",
+ (chan->global_identifier), chan);
+ channel_mark_remote(chan);
+ }
+ }
+}
+
+/**
+ * Check if this cell type is allowed before the handshake is finished.
+ *
+ * Return true if <b>command</b> is a cell command that's allowed to start a
+ * V3 handshake.
+ */
+static int
+command_allowed_before_handshake(uint8_t command)
+{
+ switch (command) {
+ case CELL_VERSIONS:
+ case CELL_VPADDING:
+ case CELL_AUTHORIZE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * Start a V3 handshake on an incoming connection.
+ *
+ * Called when we as a server receive an appropriate cell while waiting
+ * either for a cell or a TLS handshake. Sets the connection's state to
+ * "handshaking_v3", initializes the or_handshake_state field as needed,
+ * and adds the cell to the hash of incoming cells.
+ */
+static int
+enter_v3_handshake_with_cell(var_cell_t *cell, channel_tls_t *chan)
+{
+ int started_here = 0;
+
+ tor_assert(cell);
+ tor_assert(chan);
+ tor_assert(chan->conn);
+
+ started_here = connection_or_nonopen_was_started_here(chan->conn);
+
+ tor_assert(TO_CONN(chan->conn)->state == OR_CONN_STATE_TLS_HANDSHAKING ||
+ TO_CONN(chan->conn)->state ==
+ OR_CONN_STATE_TLS_SERVER_RENEGOTIATING);
+
+ if (started_here) {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Received a cell while TLS-handshaking, not in "
+ "OR_HANDSHAKING_V3, on a connection we originated.");
+ }
+ connection_or_block_renegotiation(chan->conn);
+ chan->conn->base_.state = OR_CONN_STATE_OR_HANDSHAKING_V3;
+ if (connection_init_or_handshake_state(chan->conn, started_here) < 0) {
+ connection_or_close_for_error(chan->conn, 0);
+ return -1;
+ }
+ or_handshake_state_record_var_cell(chan->conn,
+ chan->conn->handshake_state, cell, 1);
+ return 0;
+}
+
+/**
+ * Process a 'versions' cell.
+ *
+ * This function is called to handle an incoming VERSIONS cell; the current
+ * link protocol version must be 0 to indicate that no version has yet been
+ * negotiated. We compare the versions in the cell to the list of versions
+ * we support, pick the highest version we have in common, and continue the
+ * negotiation from there.
+ */
+static void
+channel_tls_process_versions_cell(var_cell_t *cell, channel_tls_t *chan)
+{
+ int highest_supported_version = 0;
+ int started_here = 0;
+
+ tor_assert(cell);
+ tor_assert(chan);
+ tor_assert(chan->conn);
+
+ if ((cell->payload_len % 2) == 1) {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Received a VERSION cell with odd payload length %d; "
+ "closing connection.",cell->payload_len);
+ connection_or_close_for_error(chan->conn, 0);
+ return;
+ }
+
+ started_here = connection_or_nonopen_was_started_here(chan->conn);
+
+ if (chan->conn->link_proto != 0 ||
+ (chan->conn->handshake_state &&
+ chan->conn->handshake_state->received_versions)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Received a VERSIONS cell on a connection with its version "
+ "already set to %d; dropping",
+ (int)(chan->conn->link_proto));
+ return;
+ }
+ switch (chan->conn->base_.state)
+ {
+ case OR_CONN_STATE_OR_HANDSHAKING_V2:
+ case OR_CONN_STATE_OR_HANDSHAKING_V3:
+ break;
+ case OR_CONN_STATE_TLS_HANDSHAKING:
+ case OR_CONN_STATE_TLS_SERVER_RENEGOTIATING:
+ default:
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "VERSIONS cell while in unexpected state");
+ return;
+ }
+
+ tor_assert(chan->conn->handshake_state);
+
+ {
+ int i;
+ const uint8_t *cp = cell->payload;
+ for (i = 0; i < cell->payload_len / 2; ++i, cp += 2) {
+ uint16_t v = ntohs(get_uint16(cp));
+ if (is_or_protocol_version_known(v) && v > highest_supported_version)
+ highest_supported_version = v;
+ }
+ }
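+
+ /* For example (illustrative): a payload of 00 03 00 04 advertises link
+ * protocol versions 3 and 4; if we know both, highest_supported_version
+ * ends up as 4. */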
+ if (!highest_supported_version) {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Couldn't find a version in common between my version list and the "
+ "list in the VERSIONS cell; closing connection.");
+ connection_or_close_for_error(chan->conn, 0);
+ return;
+ } else if (highest_supported_version == 1) {
+ /* Negotiating version 1 makes no sense, since version 1 has no VERSIONS
+ * cells. */
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Used version negotiation protocol to negotiate a v1 connection. "
+ "That's crazily non-compliant. Closing connection.");
+ connection_or_close_for_error(chan->conn, 0);
+ return;
+ } else if (highest_supported_version < 3 &&
+ chan->conn->base_.state == OR_CONN_STATE_OR_HANDSHAKING_V3) {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Negotiated link protocol 2 or lower after doing a v3 TLS "
+ "handshake. Closing connection.");
+ connection_or_close_for_error(chan->conn, 0);
+ return;
+ } else if (highest_supported_version != 2 &&
+ chan->conn->base_.state == OR_CONN_STATE_OR_HANDSHAKING_V2) {
+ /* XXXX This should eventually be a log_protocol_warn */
+ log_fn(LOG_WARN, LD_OR,
+ "Negotiated link with non-2 protocol after doing a v2 TLS "
+ "handshake with %s. Closing connection.",
+ fmt_addr(&chan->conn->base_.addr));
+ connection_or_close_for_error(chan->conn, 0);
+ return;
+ }
+
+ rep_hist_note_negotiated_link_proto(highest_supported_version, started_here);
+
+ chan->conn->link_proto = highest_supported_version;
+ chan->conn->handshake_state->received_versions = 1;
+
+ if (chan->conn->link_proto == 2) {
+ log_info(LD_OR,
+ "Negotiated version %d with %s:%d; sending NETINFO.",
+ highest_supported_version,
+ safe_str_client(chan->conn->base_.address),
+ chan->conn->base_.port);
+
+ if (connection_or_send_netinfo(chan->conn) < 0) {
+ connection_or_close_for_error(chan->conn, 0);
+ return;
+ }
+ } else {
+ const int send_versions = !started_here;
+ /* If we want to authenticate, send a CERTS cell */
+ const int send_certs = !started_here || public_server_mode(get_options());
+ /* If we're a host that got a connection, ask for authentication. */
+ const int send_chall = !started_here;
+ /* If our certs cell will authenticate us, we can send a netinfo cell
+ * right now. */
+ const int send_netinfo = !started_here;
+ const int send_any =
+ send_versions || send_certs || send_chall || send_netinfo;
+ tor_assert(chan->conn->link_proto >= 3);
+
+ log_info(LD_OR,
+ "Negotiated version %d with %s:%d; %s%s%s%s%s",
+ highest_supported_version,
+ safe_str_client(chan->conn->base_.address),
+ chan->conn->base_.port,
+ send_any ? "Sending cells:" : "Waiting for CERTS cell",
+ send_versions ? " VERSIONS" : "",
+ send_certs ? " CERTS" : "",
+ send_chall ? " AUTH_CHALLENGE" : "",
+ send_netinfo ? " NETINFO" : "");
+
+#ifdef DISABLE_V3_LINKPROTO_SERVERSIDE
+ if (1) {
+ connection_or_close_normally(chan->conn, 1);
+ return;
+ }
+#endif /* defined(DISABLE_V3_LINKPROTO_SERVERSIDE) */
+
+ if (send_versions) {
+ if (connection_or_send_versions(chan->conn, 1) < 0) {
+ log_warn(LD_OR, "Couldn't send versions cell");
+ connection_or_close_for_error(chan->conn, 0);
+ return;
+ }
+ }
+
+ /* We set this after sending the versions cell. */
+ /*XXXXX symbolic const.*/
+ TLS_CHAN_TO_BASE(chan)->wide_circ_ids =
+ chan->conn->link_proto >= MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS;
+ chan->conn->wide_circ_ids = TLS_CHAN_TO_BASE(chan)->wide_circ_ids;
+
+ TLS_CHAN_TO_BASE(chan)->padding_enabled =
+ chan->conn->link_proto >= MIN_LINK_PROTO_FOR_CHANNEL_PADDING;
+
+ if (send_certs) {
+ if (connection_or_send_certs_cell(chan->conn) < 0) {
+ log_warn(LD_OR, "Couldn't send certs cell");
+ connection_or_close_for_error(chan->conn, 0);
+ return;
+ }
+ }
+ if (send_chall) {
+ if (connection_or_send_auth_challenge_cell(chan->conn) < 0) {
+ log_warn(LD_OR, "Couldn't send auth_challenge cell");
+ connection_or_close_for_error(chan->conn, 0);
+ return;
+ }
+ }
+ if (send_netinfo) {
+ if (connection_or_send_netinfo(chan->conn) < 0) {
+ log_warn(LD_OR, "Couldn't send netinfo cell");
+ connection_or_close_for_error(chan->conn, 0);
+ return;
+ }
+ }
+ }
+}
+
+/**
+ * Process a 'padding_negotiate' cell.
+ *
+ * This function is called to handle an incoming PADDING_NEGOTIATE cell;
+ * enable or disable padding accordingly, and read and act on its timeout
+ * value contents.
+ */
+static void
+channel_tls_process_padding_negotiate_cell(cell_t *cell, channel_tls_t *chan)
+{
+ channelpadding_negotiate_t *negotiation;
+ tor_assert(cell);
+ tor_assert(chan);
+ tor_assert(chan->conn);
+
+ if (chan->conn->link_proto < MIN_LINK_PROTO_FOR_CHANNEL_PADDING) {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Received a PADDING_NEGOTIATE cell on v%d connection; dropping.",
+ chan->conn->link_proto);
+ return;
+ }
+
+ if (channelpadding_negotiate_parse(&negotiation, cell->payload,
+ CELL_PAYLOAD_SIZE) < 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Received malformed PADDING_NEGOTIATE cell on v%d connection; "
+ "dropping.", chan->conn->link_proto);
+
+ return;
+ }
+
+ channelpadding_update_padding_for_channel(TLS_CHAN_TO_BASE(chan),
+ negotiation);
+
+ channelpadding_negotiate_free(negotiation);
+}
+
+/**
+ * Process a 'netinfo' cell.
+ *
+ * This function is called to handle an incoming NETINFO cell; read and act
+ * on its contents, and set the connection state to "open".
+ */
+static void
+channel_tls_process_netinfo_cell(cell_t *cell, channel_tls_t *chan)
+{
+ time_t timestamp;
+ uint8_t my_addr_type;
+ uint8_t my_addr_len;
+ const uint8_t *my_addr_ptr;
+ const uint8_t *cp, *end;
+ uint8_t n_other_addrs;
+ time_t now = time(NULL);
+ const routerinfo_t *me = router_get_my_routerinfo();
+
+ long apparent_skew = 0;
+ tor_addr_t my_apparent_addr = TOR_ADDR_NULL;
+ int started_here = 0;
+ const char *identity_digest = NULL;
+
+ tor_assert(cell);
+ tor_assert(chan);
+ tor_assert(chan->conn);
+
+ if (chan->conn->link_proto < 2) {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Received a NETINFO cell on %s connection; dropping.",
+ chan->conn->link_proto == 0 ? "non-versioned" : "a v1");
+ return;
+ }
+ if (chan->conn->base_.state != OR_CONN_STATE_OR_HANDSHAKING_V2 &&
+ chan->conn->base_.state != OR_CONN_STATE_OR_HANDSHAKING_V3) {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Received a NETINFO cell on non-handshaking connection; dropping.");
+ return;
+ }
+ tor_assert(chan->conn->handshake_state &&
+ chan->conn->handshake_state->received_versions);
+ started_here = connection_or_nonopen_was_started_here(chan->conn);
+ identity_digest = chan->conn->identity_digest;
+
+ if (chan->conn->base_.state == OR_CONN_STATE_OR_HANDSHAKING_V3) {
+ tor_assert(chan->conn->link_proto >= 3);
+ if (started_here) {
+ if (!(chan->conn->handshake_state->authenticated)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Got a NETINFO cell from server, "
+ "but no authentication. Closing the connection.");
+ connection_or_close_for_error(chan->conn, 0);
+ return;
+ }
+ } else {
+ /* we're the server. If the client never authenticated, we have
+ some housekeeping to do.*/
+ if (!(chan->conn->handshake_state->authenticated)) {
+ tor_assert(tor_digest_is_zero(
+ (const char*)(chan->conn->handshake_state->
+ authenticated_rsa_peer_id)));
+ tor_assert(tor_mem_is_zero(
+ (const char*)(chan->conn->handshake_state->
+ authenticated_ed25519_peer_id.pubkey), 32));
+ /* If the client never authenticated, it's a tor client or bridge
+ * relay, and we must not use it for EXTEND requests (nor could we, as
+ * there are no authenticated peer IDs) */
+ channel_mark_client(TLS_CHAN_TO_BASE(chan));
+ channel_set_circid_type(TLS_CHAN_TO_BASE(chan), NULL,
+ chan->conn->link_proto < MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS);
+
+ connection_or_init_conn_from_address(chan->conn,
+ &(chan->conn->base_.addr),
+ chan->conn->base_.port,
+ /* zero, checked above */
+ (const char*)(chan->conn->handshake_state->
+ authenticated_rsa_peer_id),
+ NULL, /* Ed25519 ID: Also checked as zero */
+ 0);
+ }
+ }
+ }
+
+ /* Decode the cell. */
+ timestamp = ntohl(get_uint32(cell->payload));
+ if (labs(now - chan->conn->handshake_state->sent_versions_at) < 180) {
+ apparent_skew = now - timestamp;
+ }
+
+ my_addr_type = (uint8_t) cell->payload[4];
+ my_addr_len = (uint8_t) cell->payload[5];
+ my_addr_ptr = (uint8_t*) cell->payload + 6;
+ end = cell->payload + CELL_PAYLOAD_SIZE;
+ cp = cell->payload + 6 + my_addr_len;
+
+ /* We used to check:
+ * if (my_addr_len >= CELL_PAYLOAD_SIZE - 6) {
+ *
+ * This is actually never going to happen, since my_addr_len is at most 255,
+ * and CELL_PAYLOAD_SIZE - 6 is 503. So we know that cp is < end. */
+
+ if (my_addr_type == RESOLVED_TYPE_IPV4 && my_addr_len == 4) {
+ tor_addr_from_ipv4n(&my_apparent_addr, get_uint32(my_addr_ptr));
+
+ if (!get_options()->BridgeRelay && me &&
+ get_uint32(my_addr_ptr) == htonl(me->addr)) {
+ TLS_CHAN_TO_BASE(chan)->is_canonical_to_peer = 1;
+ }
+
+ } else if (my_addr_type == RESOLVED_TYPE_IPV6 && my_addr_len == 16) {
+ tor_addr_from_ipv6_bytes(&my_apparent_addr, (const char *) my_addr_ptr);
+
+ if (!get_options()->BridgeRelay && me &&
+ !tor_addr_is_null(&me->ipv6_addr) &&
+ tor_addr_eq(&my_apparent_addr, &me->ipv6_addr)) {
+ TLS_CHAN_TO_BASE(chan)->is_canonical_to_peer = 1;
+ }
+ }
+
+ n_other_addrs = (uint8_t) *cp++;
+ while (n_other_addrs && cp < end-2) {
+ /* Consider all the other addresses; if any matches, this connection is
+ * "canonical." */
+ tor_addr_t addr;
+ const uint8_t *next =
+ decode_address_from_payload(&addr, cp, (int)(end-cp));
+ if (next == NULL) {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Bad address in netinfo cell; closing connection.");
+ connection_or_close_for_error(chan->conn, 0);
+ return;
+ }
+ /* A relay can connect from anywhere and be canonical, so
+ * long as it tells you from where it came. This may sound a bit
+ * concerning... but that's what "canonical" means: that the
+ * address is one that the relay itself has claimed. The relay
+ * might be doing something funny, but nobody else is doing a MITM
+ * on the relay's TCP.
+ */
+ if (tor_addr_eq(&addr, &(chan->conn->real_addr))) {
+ connection_or_set_canonical(chan->conn, 1);
+ break;
+ }
+ cp = next;
+ --n_other_addrs;
+ }
+
+ if (me && !TLS_CHAN_TO_BASE(chan)->is_canonical_to_peer &&
+ channel_is_canonical(TLS_CHAN_TO_BASE(chan))) {
+ const char *descr =
+ TLS_CHAN_TO_BASE(chan)->get_remote_descr(TLS_CHAN_TO_BASE(chan), 0);
+ log_info(LD_OR,
+ "We made a connection to a relay at %s (fp=%s) but we think "
+ "they will not consider this connection canonical. They "
+ "think we are at %s, but we think its %s.",
+ safe_str(descr),
+ safe_str(hex_str(identity_digest, DIGEST_LEN)),
+ safe_str(tor_addr_is_null(&my_apparent_addr) ?
+ "<none>" : fmt_and_decorate_addr(&my_apparent_addr)),
+ safe_str(fmt_addr32(me->addr)));
+ }
+
+ /* Act on apparent skew. */
+ /** Warn when we get a netinfo skew with at least this value. */
+#define NETINFO_NOTICE_SKEW 3600
+ if (labs(apparent_skew) > NETINFO_NOTICE_SKEW &&
+ (started_here ||
+ connection_or_digest_is_known_relay(chan->conn->identity_digest))) {
+ int trusted = router_digest_is_trusted_dir(chan->conn->identity_digest);
+ clock_skew_warning(TO_CONN(chan->conn), apparent_skew, trusted, LD_GENERAL,
+ "NETINFO cell", "OR");
+ }
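+ /* Worked example (illustrative): if our clock reads 12:00:00 but the
+ * cell's timestamp says 10:58:00, apparent_skew is +3720 seconds, which
+ * exceeds NETINFO_NOTICE_SKEW (3600), so we warn -- provided we started
+ * the connection or the peer is a known relay. */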
+
+ /* XXX maybe act on my_apparent_addr, if the source is sufficiently
+ * trustworthy. */
+
+ if (! chan->conn->handshake_state->sent_netinfo) {
+ /* If we were prepared to authenticate, but we never got an AUTH_CHALLENGE
+ * cell, then we would not previously have sent a NETINFO cell. Do so
+ * now. */
+ if (connection_or_send_netinfo(chan->conn) < 0) {
+ connection_or_close_for_error(chan->conn, 0);
+ return;
+ }
+ }
+
+ if (connection_or_set_state_open(chan->conn) < 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Got good NETINFO cell from %s:%d; but "
+ "was unable to make the OR connection become open.",
+ safe_str_client(chan->conn->base_.address),
+ chan->conn->base_.port);
+ connection_or_close_for_error(chan->conn, 0);
+ } else {
+ log_info(LD_OR,
+ "Got good NETINFO cell from %s:%d; OR connection is now "
+ "open, using protocol version %d. Its ID digest is %s. "
+ "Our address is apparently %s.",
+ safe_str_client(chan->conn->base_.address),
+ chan->conn->base_.port,
+ (int)(chan->conn->link_proto),
+ hex_str(identity_digest, DIGEST_LEN),
+ tor_addr_is_null(&my_apparent_addr) ?
+ "<none>" : fmt_and_decorate_addr(&my_apparent_addr));
+ }
+ assert_connection_ok(TO_CONN(chan->conn),time(NULL));
+}
+
+/** Types of certificates that we know how to parse from CERTS cells. Each
+ * type corresponds to a different encoding format. */
+typedef enum cert_encoding_t {
+ CERT_ENCODING_UNKNOWN, /**< We don't recognize this. */
+ CERT_ENCODING_X509, /**< It's an RSA key, signed with RSA, encoded in x509.
+ * (Actually, it might not be RSA. We test that later.) */
+ CERT_ENCODING_ED25519, /**< It's something signed with an Ed25519 key,
+ * encoded as a tor_cert_t. */
+ CERT_ENCODING_RSA_CROSSCERT, /**< It's an Ed key signed with an RSA key. */
+} cert_encoding_t;
+
+/**
+ * Given one of the certificate type codes used in a CERTS cell,
+ * return the corresponding cert_encoding_t that we should use to parse
+ * the certificate.
+ */
+static cert_encoding_t
+certs_cell_typenum_to_cert_type(int typenum)
+{
+ switch (typenum) {
+ case CERTTYPE_RSA1024_ID_LINK:
+ case CERTTYPE_RSA1024_ID_ID:
+ case CERTTYPE_RSA1024_ID_AUTH:
+ return CERT_ENCODING_X509;
+ case CERTTYPE_ED_ID_SIGN:
+ case CERTTYPE_ED_SIGN_LINK:
+ case CERTTYPE_ED_SIGN_AUTH:
+ return CERT_ENCODING_ED25519;
+ case CERTTYPE_RSA1024_ID_EDID:
+ return CERT_ENCODING_RSA_CROSSCERT;
+ default:
+ return CERT_ENCODING_UNKNOWN;
+ }
+}
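+
+/* A minimal usage sketch (illustrative only, not part of this change):
+ *
+ * cert_encoding_t enc =
+ * certs_cell_typenum_to_cert_type(CERTTYPE_ED_ID_SIGN);
+ * tor_assert(enc == CERT_ENCODING_ED25519);
+ */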
+
+/**
+ * Process a CERTS cell from a channel.
+ *
+ * This function is called to process an incoming CERTS cell on a
+ * channel_tls_t:
+ *
+ * If the other side should not have sent us a CERTS cell, or the cell is
+ * malformed, or it is supposed to authenticate the TLS key but it doesn't,
+ * then mark the connection.
+ *
+ * If the cell has a good cert chain and we're doing a v3 handshake, then
+ * store the certificates in or_handshake_state. If this is the client side
+ * of the connection, we then authenticate the server or mark the connection.
+ * If it's the server side, wait for an AUTHENTICATE cell.
+ */
+STATIC void
+channel_tls_process_certs_cell(var_cell_t *cell, channel_tls_t *chan)
+{
+#define MAX_CERT_TYPE_WANTED CERTTYPE_RSA1024_ID_EDID
+ /* These arrays will be sparse, since a cert type can be at most one
+ * of ed/x509 */
+ tor_x509_cert_t *x509_certs[MAX_CERT_TYPE_WANTED + 1];
+ tor_cert_t *ed_certs[MAX_CERT_TYPE_WANTED + 1];
+ uint8_t *rsa_ed_cc_cert = NULL;
+ size_t rsa_ed_cc_cert_len = 0;
+
+ int n_certs, i;
+ certs_cell_t *cc = NULL;
+
+ int send_netinfo = 0, started_here = 0;
+
+ memset(x509_certs, 0, sizeof(x509_certs));
+ memset(ed_certs, 0, sizeof(ed_certs));
+ tor_assert(cell);
+ tor_assert(chan);
+ tor_assert(chan->conn);
+
+#define ERR(s) \
+ do { \
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, \
+ "Received a bad CERTS cell from %s:%d: %s", \
+ safe_str(chan->conn->base_.address), \
+ chan->conn->base_.port, (s)); \
+ connection_or_close_for_error(chan->conn, 0); \
+ goto err; \
+ } while (0)
+
+ /* Can't use connection_or_nonopen_was_started_here(); its conn->tls
+ * check looks like it breaks
+ * test_link_handshake_recv_certs_ok_server(). */
+ started_here = chan->conn->handshake_state->started_here;
+
+ if (chan->conn->base_.state != OR_CONN_STATE_OR_HANDSHAKING_V3)
+ ERR("We're not doing a v3 handshake!");
+ if (chan->conn->link_proto < 3)
+ ERR("We're not using link protocol >= 3");
+ if (chan->conn->handshake_state->received_certs_cell)
+ ERR("We already got one");
+ if (chan->conn->handshake_state->authenticated) {
+ /* Should be unreachable, but let's make sure. */
+ ERR("We're already authenticated!");
+ }
+ if (cell->payload_len < 1)
+ ERR("It had no body");
+ if (cell->circ_id)
+ ERR("It had a nonzero circuit ID");
+
+ if (certs_cell_parse(&cc, cell->payload, cell->payload_len) < 0)
+ ERR("It couldn't be parsed.");
+
+ n_certs = cc->n_certs;
+
+ for (i = 0; i < n_certs; ++i) {
+ certs_cell_cert_t *c = certs_cell_get_certs(cc, i);
+
+ uint16_t cert_type = c->cert_type;
+ uint16_t cert_len = c->cert_len;
+ uint8_t *cert_body = certs_cell_cert_getarray_body(c);
+
+ if (cert_type > MAX_CERT_TYPE_WANTED)
+ continue;
+ const cert_encoding_t ct = certs_cell_typenum_to_cert_type(cert_type);
+ switch (ct) {
+ default:
+ case CERT_ENCODING_UNKNOWN:
+ break;
+ case CERT_ENCODING_X509: {
+ tor_x509_cert_t *x509_cert = tor_x509_cert_decode(cert_body, cert_len);
+ if (!x509_cert) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Received undecodable certificate in CERTS cell from %s:%d",
+ safe_str(chan->conn->base_.address),
+ chan->conn->base_.port);
+ } else {
+ if (x509_certs[cert_type]) {
+ tor_x509_cert_free(x509_cert);
+ ERR("Duplicate x509 certificate");
+ } else {
+ x509_certs[cert_type] = x509_cert;
+ }
+ }
+ break;
+ }
+ case CERT_ENCODING_ED25519: {
+ tor_cert_t *ed_cert = tor_cert_parse(cert_body, cert_len);
+ if (!ed_cert) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Received undecodable Ed certificate "
+ "in CERTS cell from %s:%d",
+ safe_str(chan->conn->base_.address),
+ chan->conn->base_.port);
+ } else {
+ if (ed_certs[cert_type]) {
+ tor_cert_free(ed_cert);
+ ERR("Duplicate Ed25519 certificate");
+ } else {
+ ed_certs[cert_type] = ed_cert;
+ }
+ }
+ break;
+ }
+
+ case CERT_ENCODING_RSA_CROSSCERT: {
+ if (rsa_ed_cc_cert) {
+ ERR("Duplicate RSA->Ed25519 crosscert");
+ } else {
+ rsa_ed_cc_cert = tor_memdup(cert_body, cert_len);
+ rsa_ed_cc_cert_len = cert_len;
+ }
+ break;
+ }
+ }
+ }
+
+ /* Move the certificates we (might) want into the handshake_state->certs
+ * structure. */
+ tor_x509_cert_t *id_cert = x509_certs[CERTTYPE_RSA1024_ID_ID];
+ tor_x509_cert_t *auth_cert = x509_certs[CERTTYPE_RSA1024_ID_AUTH];
+ tor_x509_cert_t *link_cert = x509_certs[CERTTYPE_RSA1024_ID_LINK];
+ chan->conn->handshake_state->certs->auth_cert = auth_cert;
+ chan->conn->handshake_state->certs->link_cert = link_cert;
+ chan->conn->handshake_state->certs->id_cert = id_cert;
+ x509_certs[CERTTYPE_RSA1024_ID_ID] =
+ x509_certs[CERTTYPE_RSA1024_ID_AUTH] =
+ x509_certs[CERTTYPE_RSA1024_ID_LINK] = NULL;
+
+ tor_cert_t *ed_id_sign = ed_certs[CERTTYPE_ED_ID_SIGN];
+ tor_cert_t *ed_sign_link = ed_certs[CERTTYPE_ED_SIGN_LINK];
+ tor_cert_t *ed_sign_auth = ed_certs[CERTTYPE_ED_SIGN_AUTH];
+ chan->conn->handshake_state->certs->ed_id_sign = ed_id_sign;
+ chan->conn->handshake_state->certs->ed_sign_link = ed_sign_link;
+ chan->conn->handshake_state->certs->ed_sign_auth = ed_sign_auth;
+ ed_certs[CERTTYPE_ED_ID_SIGN] =
+ ed_certs[CERTTYPE_ED_SIGN_LINK] =
+ ed_certs[CERTTYPE_ED_SIGN_AUTH] = NULL;
+
+ chan->conn->handshake_state->certs->ed_rsa_crosscert = rsa_ed_cc_cert;
+ chan->conn->handshake_state->certs->ed_rsa_crosscert_len =
+ rsa_ed_cc_cert_len;
+ rsa_ed_cc_cert = NULL;
+
+ int severity;
+ /* Note that this warns more loudly about time and validity if we were
+ * _trying_ to connect to an authority, not necessarily if we _did_ connect
+ * to one. */
+ if (started_here &&
+ router_digest_is_trusted_dir(TLS_CHAN_TO_BASE(chan)->identity_digest))
+ severity = LOG_WARN;
+ else
+ severity = LOG_PROTOCOL_WARN;
+
+ const ed25519_public_key_t *checked_ed_id = NULL;
+ const common_digests_t *checked_rsa_id = NULL;
+ or_handshake_certs_check_both(severity,
+ chan->conn->handshake_state->certs,
+ chan->conn->tls,
+ time(NULL),
+ &checked_ed_id,
+ &checked_rsa_id);
+
+ if (!checked_rsa_id)
+ ERR("Invalid certificate chain!");
+
+ if (started_here) {
+ /* No more information is needed. */
+
+ chan->conn->handshake_state->authenticated = 1;
+ chan->conn->handshake_state->authenticated_rsa = 1;
+ {
+ const common_digests_t *id_digests = checked_rsa_id;
+ crypto_pk_t *identity_rcvd;
+ if (!id_digests)
+ ERR("Couldn't compute digests for key in ID cert");
+
+ identity_rcvd = tor_tls_cert_get_key(id_cert);
+ if (!identity_rcvd) {
+ ERR("Couldn't get RSA key from ID cert.");
+ }
+ memcpy(chan->conn->handshake_state->authenticated_rsa_peer_id,
+ id_digests->d[DIGEST_SHA1], DIGEST_LEN);
+ channel_set_circid_type(TLS_CHAN_TO_BASE(chan), identity_rcvd,
+ chan->conn->link_proto < MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS);
+ crypto_pk_free(identity_rcvd);
+ }
+
+ if (checked_ed_id) {
+ chan->conn->handshake_state->authenticated_ed25519 = 1;
+ memcpy(&chan->conn->handshake_state->authenticated_ed25519_peer_id,
+ checked_ed_id, sizeof(ed25519_public_key_t));
+ }
+
+ log_debug(LD_HANDSHAKE, "calling client_learned_peer_id from "
+ "process_certs_cell");
+
+ if (connection_or_client_learned_peer_id(chan->conn,
+ chan->conn->handshake_state->authenticated_rsa_peer_id,
+ checked_ed_id) < 0)
+ ERR("Problem setting or checking peer id");
+
+ log_info(LD_HANDSHAKE,
+ "Got some good certificates from %s:%d: Authenticated it with "
+ "RSA%s",
+ safe_str(chan->conn->base_.address), chan->conn->base_.port,
+ checked_ed_id ? " and Ed25519" : "");
+
+ if (!public_server_mode(get_options())) {
+ /* If we initiated the connection and we are not a public server, we
+ * aren't planning to authenticate at all. At this point we know who we
+ * are talking to, so we can just send a netinfo now. */
+ send_netinfo = 1;
+ }
+ } else {
+ /* We can't call it authenticated till we see an AUTHENTICATE cell. */
+ log_info(LD_OR,
+ "Got some good RSA%s certificates from %s:%d. "
+ "Waiting for AUTHENTICATE.",
+ checked_ed_id ? " and Ed25519" : "",
+ safe_str(chan->conn->base_.address),
+ chan->conn->base_.port);
+ /* XXXX check more stuff? */
+ }
+
+ chan->conn->handshake_state->received_certs_cell = 1;
+
+ if (send_netinfo) {
+ if (connection_or_send_netinfo(chan->conn) < 0) {
+ log_warn(LD_OR, "Couldn't send netinfo cell");
+ connection_or_close_for_error(chan->conn, 0);
+ goto err;
+ }
+ }
+
+ err:
+ for (unsigned u = 0; u < ARRAY_LENGTH(x509_certs); ++u) {
+ tor_x509_cert_free(x509_certs[u]);
+ }
+ for (unsigned u = 0; u < ARRAY_LENGTH(ed_certs); ++u) {
+ tor_cert_free(ed_certs[u]);
+ }
+ tor_free(rsa_ed_cc_cert);
+ certs_cell_free(cc);
+#undef ERR
+}
+
+/**
+ * Process an AUTH_CHALLENGE cell from a channel_tls_t.
+ *
+ * This function is called to handle an incoming AUTH_CHALLENGE cell on a
+ * channel_tls_t; if we weren't supposed to get one (for example, because we're
+ * not the originator of the channel), or it's ill-formed, or we aren't doing
+ * a v3 handshake, mark the channel. If the cell is well-formed but we don't
+ * want to authenticate, just drop it. If the cell is well-formed *and* we
+ * want to authenticate, send an AUTHENTICATE cell and then a NETINFO cell.
+ */
+STATIC void
+channel_tls_process_auth_challenge_cell(var_cell_t *cell, channel_tls_t *chan)
+{
+ int n_types, i, use_type = -1;
+ auth_challenge_cell_t *ac = NULL;
+
+ tor_assert(cell);
+ tor_assert(chan);
+ tor_assert(chan->conn);
+
+#define ERR(s) \
+ do { \
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, \
+ "Received a bad AUTH_CHALLENGE cell from %s:%d: %s", \
+ safe_str(chan->conn->base_.address), \
+ chan->conn->base_.port, (s)); \
+ connection_or_close_for_error(chan->conn, 0); \
+ goto done; \
+ } while (0)
+
+ if (chan->conn->base_.state != OR_CONN_STATE_OR_HANDSHAKING_V3)
+ ERR("We're not currently doing a v3 handshake");
+ if (chan->conn->link_proto < 3)
+ ERR("We're not using link protocol >= 3");
+ if (!(chan->conn->handshake_state->started_here))
+ ERR("We didn't originate this connection");
+ if (chan->conn->handshake_state->received_auth_challenge)
+ ERR("We already received one");
+ if (!(chan->conn->handshake_state->received_certs_cell))
+ ERR("We haven't gotten a CERTS cell yet");
+ if (cell->circ_id)
+ ERR("It had a nonzero circuit ID");
+
+ if (auth_challenge_cell_parse(&ac, cell->payload, cell->payload_len) < 0)
+ ERR("It was not well-formed.");
+
+ n_types = ac->n_methods;
+
+ /* Now see if there is an authentication type we can use */
+ for (i = 0; i < n_types; ++i) {
+ uint16_t authtype = auth_challenge_cell_get_methods(ac, i);
+ if (authchallenge_type_is_supported(authtype)) {
+ if (use_type == -1 ||
+ authchallenge_type_is_better(authtype, use_type)) {
+ use_type = authtype;
+ }
+ }
+ }
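+ /* For example, if the peer offers both AUTHTYPE_RSA_SHA256_TLSSECRET and
+ * AUTHTYPE_ED25519_SHA256_RFC5705 and we support both, the loop above
+ * keeps whichever one authchallenge_type_is_better() ranks higher
+ * (presumably the Ed25519 method, given the type numbering). */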
+
+ chan->conn->handshake_state->received_auth_challenge = 1;
+
+ if (! public_server_mode(get_options())) {
+ /* If we're not a public server then we don't want to authenticate on a
+ connection we originated, and we already sent a NETINFO cell when we
+ got the CERTS cell. We have nothing more to do. */
+ goto done;
+ }
+
+ if (use_type >= 0) {
+ log_info(LD_OR,
+ "Got an AUTH_CHALLENGE cell from %s:%d: Sending "
+ "authentication type %d",
+ safe_str(chan->conn->base_.address),
+ chan->conn->base_.port,
+ use_type);
+
+ if (connection_or_send_authenticate_cell(chan->conn, use_type) < 0) {
+ log_warn(LD_OR,
+ "Couldn't send authenticate cell");
+ connection_or_close_for_error(chan->conn, 0);
+ goto done;
+ }
+ } else {
+ log_info(LD_OR,
+ "Got an AUTH_CHALLENGE cell from %s:%d, but we don't "
+ "know any of its authentication types. Not authenticating.",
+ safe_str(chan->conn->base_.address),
+ chan->conn->base_.port);
+ }
+
+ if (connection_or_send_netinfo(chan->conn) < 0) {
+ log_warn(LD_OR, "Couldn't send netinfo cell");
+ connection_or_close_for_error(chan->conn, 0);
+ goto done;
+ }
+
+ done:
+ auth_challenge_cell_free(ac);
+
+#undef ERR
+}
+
+/**
+ * Process an AUTHENTICATE cell from a channel_tls_t.
+ *
+ * If it's ill-formed or we weren't supposed to get one or we're not doing a
+ * v3 handshake, then mark the connection. If it does not authenticate the
+ * other side of the connection successfully (because it isn't signed right,
+ * we didn't get a CERTS cell, etc) mark the connection. Otherwise, accept
+ * the identity of the router on the other side of the connection.
+ */
+STATIC void
+channel_tls_process_authenticate_cell(var_cell_t *cell, channel_tls_t *chan)
+{
+ var_cell_t *expected_cell = NULL;
+ const uint8_t *auth;
+ int authlen;
+ int authtype;
+ int bodylen;
+
+ tor_assert(cell);
+ tor_assert(chan);
+ tor_assert(chan->conn);
+
+#define ERR(s) \
+ do { \
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, \
+ "Received a bad AUTHENTICATE cell from %s:%d: %s", \
+ safe_str(chan->conn->base_.address), \
+ chan->conn->base_.port, (s)); \
+ connection_or_close_for_error(chan->conn, 0); \
+ var_cell_free(expected_cell); \
+ return; \
+ } while (0)
+
+ if (chan->conn->base_.state != OR_CONN_STATE_OR_HANDSHAKING_V3)
+ ERR("We're not doing a v3 handshake");
+ if (chan->conn->link_proto < 3)
+ ERR("We're not using link protocol >= 3");
+ if (chan->conn->handshake_state->started_here)
+ ERR("We originated this connection");
+ if (chan->conn->handshake_state->received_authenticate)
+ ERR("We already got one!");
+ if (chan->conn->handshake_state->authenticated) {
+ /* Should be impossible given other checks */
+ ERR("The peer is already authenticated");
+ }
+ if (!(chan->conn->handshake_state->received_certs_cell))
+ ERR("We never got a certs cell");
+ if (chan->conn->handshake_state->certs->id_cert == NULL)
+ ERR("We never got an identity certificate");
+ if (cell->payload_len < 4)
+ ERR("Cell was way too short");
+
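+ /* Per tor-spec, the AUTHENTICATE cell body is an envelope of the form:
+ *
+ * AuthType [2 bytes]
+ * AuthLen [2 bytes]
+ * Authenticator [AuthLen bytes]
+ */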
+ auth = cell->payload;
+ {
+ uint16_t type = ntohs(get_uint16(auth));
+ uint16_t len = ntohs(get_uint16(auth+2));
+ if (4 + len > cell->payload_len)
+ ERR("Authenticator was truncated");
+
+ if (! authchallenge_type_is_supported(type))
+ ERR("Authenticator type was not recognized");
+ authtype = type;
+
+ auth += 4;
+ authlen = len;
+ }
+
+ if (authlen < V3_AUTH_BODY_LEN + 1)
+ ERR("Authenticator was too short");
+
+ expected_cell = connection_or_compute_authenticate_cell_body(
+ chan->conn, authtype, NULL, NULL, 1);
+ if (! expected_cell)
+ ERR("Couldn't compute expected AUTHENTICATE cell body");
+
+ int sig_is_rsa;
+ if (authtype == AUTHTYPE_RSA_SHA256_TLSSECRET ||
+ authtype == AUTHTYPE_RSA_SHA256_RFC5705) {
+ bodylen = V3_AUTH_BODY_LEN;
+ sig_is_rsa = 1;
+ } else {
+ tor_assert(authtype == AUTHTYPE_ED25519_SHA256_RFC5705);
+ /* Our earlier length check incidentally guarantees that we have room
+ * for an ed25519 signature. */
+ tor_assert(V3_AUTH_BODY_LEN > ED25519_SIG_LEN);
+ bodylen = authlen - ED25519_SIG_LEN;
+ sig_is_rsa = 0;
+ }
+ if (expected_cell->payload_len != bodylen+4) {
+ ERR("Expected AUTHENTICATE cell body len not as expected.");
+ }
+
+ /* Length of random part. */
+ if (BUG(bodylen < 24)) {
+ // LCOV_EXCL_START
+ ERR("Bodylen is somehow less than 24, which should really be impossible");
+ // LCOV_EXCL_STOP
+ }
+
+ if (tor_memneq(expected_cell->payload+4, auth, bodylen-24))
+ ERR("Some field in the AUTHENTICATE cell body was not as expected");
+
+ if (sig_is_rsa) {
+ if (chan->conn->handshake_state->certs->ed_id_sign != NULL)
+ ERR("RSA-signed AUTHENTICATE response provided with an ED25519 cert");
+
+ if (chan->conn->handshake_state->certs->auth_cert == NULL)
+ ERR("We never got an RSA authentication certificate");
+
+ crypto_pk_t *pk = tor_tls_cert_get_key(
+ chan->conn->handshake_state->certs->auth_cert);
+ char d[DIGEST256_LEN];
+ char *signed_data;
+ size_t keysize;
+ int signed_len;
+
+ if (! pk) {
+ ERR("Couldn't get RSA key from AUTH cert.");
+ }
+ crypto_digest256(d, (char*)auth, V3_AUTH_BODY_LEN, DIGEST_SHA256);
+
+ keysize = crypto_pk_keysize(pk);
+ signed_data = tor_malloc(keysize);
+ signed_len = crypto_pk_public_checksig(pk, signed_data, keysize,
+ (char*)auth + V3_AUTH_BODY_LEN,
+ authlen - V3_AUTH_BODY_LEN);
+ crypto_pk_free(pk);
+ if (signed_len < 0) {
+ tor_free(signed_data);
+ ERR("RSA signature wasn't valid");
+ }
+ if (signed_len < DIGEST256_LEN) {
+ tor_free(signed_data);
+ ERR("Not enough data was signed");
+ }
+ /* Note that we deliberately allow *more* than DIGEST256_LEN bytes here,
+ * in case they're later used to hold a SHA3 digest or something. */
+ if (tor_memneq(signed_data, d, DIGEST256_LEN)) {
+ tor_free(signed_data);
+ ERR("Signature did not match data to be signed.");
+ }
+ tor_free(signed_data);
+ } else {
+ if (chan->conn->handshake_state->certs->ed_id_sign == NULL)
+ ERR("We never got an Ed25519 identity certificate.");
+ if (chan->conn->handshake_state->certs->ed_sign_auth == NULL)
+ ERR("We never got an Ed25519 authentication certificate.");
+
+ const ed25519_public_key_t *authkey =
+ &chan->conn->handshake_state->certs->ed_sign_auth->signed_key;
+ ed25519_signature_t sig;
+ tor_assert(authlen > ED25519_SIG_LEN);
+ memcpy(&sig.sig, auth + authlen - ED25519_SIG_LEN, ED25519_SIG_LEN);
+ if (ed25519_checksig(&sig, auth, authlen - ED25519_SIG_LEN, authkey)<0) {
+ ERR("Ed25519 signature wasn't valid.");
+ }
+ }
+
+ /* Okay, we are authenticated. */
+ chan->conn->handshake_state->received_authenticate = 1;
+ chan->conn->handshake_state->authenticated = 1;
+ chan->conn->handshake_state->authenticated_rsa = 1;
+ chan->conn->handshake_state->digest_received_data = 0;
+ {
+ tor_x509_cert_t *id_cert = chan->conn->handshake_state->certs->id_cert;
+ crypto_pk_t *identity_rcvd = tor_tls_cert_get_key(id_cert);
+ const common_digests_t *id_digests = tor_x509_cert_get_id_digests(id_cert);
+ const ed25519_public_key_t *ed_identity_received = NULL;
+
+ if (! sig_is_rsa) {
+ chan->conn->handshake_state->authenticated_ed25519 = 1;
+ ed_identity_received =
+ &chan->conn->handshake_state->certs->ed_id_sign->signing_key;
+ memcpy(&chan->conn->handshake_state->authenticated_ed25519_peer_id,
+ ed_identity_received, sizeof(ed25519_public_key_t));
+ }
+
+ /* This must exist; we checked key type when reading the cert. */
+ tor_assert(id_digests);
+
+ memcpy(chan->conn->handshake_state->authenticated_rsa_peer_id,
+ id_digests->d[DIGEST_SHA1], DIGEST_LEN);
+
+ channel_set_circid_type(TLS_CHAN_TO_BASE(chan), identity_rcvd,
+ chan->conn->link_proto < MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS);
+ crypto_pk_free(identity_rcvd);
+
+ log_debug(LD_HANDSHAKE,
+ "Calling connection_or_init_conn_from_address for %s "
+ " from %s, with%s ed25519 id.",
+ safe_str(chan->conn->base_.address),
+ __func__,
+ ed_identity_received ? "" : "out");
+
+ connection_or_init_conn_from_address(chan->conn,
+ &(chan->conn->base_.addr),
+ chan->conn->base_.port,
+ (const char*)(chan->conn->handshake_state->
+ authenticated_rsa_peer_id),
+ ed_identity_received,
+ 0);
+
+ log_debug(LD_HANDSHAKE,
+ "Got an AUTHENTICATE cell from %s:%d, type %d: Looks good.",
+ safe_str(chan->conn->base_.address),
+ chan->conn->base_.port,
+ authtype);
+ }
+
+ var_cell_free(expected_cell);
+
+#undef ERR
+}
diff --git a/src/core/or/channeltls.h b/src/core/or/channeltls.h
new file mode 100644
index 0000000000..1ab899af96
--- /dev/null
+++ b/src/core/or/channeltls.h
@@ -0,0 +1,77 @@
+/* Copyright (c) 2012-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file channeltls.h
+ * \brief Header file for channeltls.c
+ **/
+
+#ifndef TOR_CHANNELTLS_H
+#define TOR_CHANNELTLS_H
+
+#include "or/or.h"
+#include "or/channel.h"
+
+struct ed25519_public_key_t;
+struct curve25519_public_key_t;
+
+#define BASE_CHAN_TO_TLS(c) (channel_tls_from_base((c)))
+#define TLS_CHAN_TO_BASE(c) (channel_tls_to_base((c)))
+
+#define TLS_CHAN_MAGIC 0x8a192427U
+
+#ifdef TOR_CHANNEL_INTERNAL_
+
+struct channel_tls_s {
+ /* Base channel_t struct */
+ channel_t base_;
+ /* or_connection_t pointer */
+ or_connection_t *conn;
+};
+
+#endif /* defined(TOR_CHANNEL_INTERNAL_) */
+
+channel_t * channel_tls_connect(const tor_addr_t *addr, uint16_t port,
+ const char *id_digest,
+ const struct ed25519_public_key_t *ed_id);
+channel_listener_t * channel_tls_get_listener(void);
+channel_listener_t * channel_tls_start_listener(void);
+channel_t * channel_tls_handle_incoming(or_connection_t *orconn);
+
+/* Casts */
+
+channel_t * channel_tls_to_base(channel_tls_t *tlschan);
+channel_tls_t * channel_tls_from_base(channel_t *chan);
+
+/* Things for connection_or.c to call back into */
+void channel_tls_handle_cell(cell_t *cell, or_connection_t *conn);
+void channel_tls_handle_state_change_on_orconn(channel_tls_t *chan,
+ or_connection_t *conn,
+ uint8_t old_state,
+ uint8_t state);
+void channel_tls_handle_var_cell(var_cell_t *var_cell,
+ or_connection_t *conn);
+void channel_tls_update_marks(or_connection_t *conn);
+
+/* Cleanup at shutdown */
+void channel_tls_free_all(void);
+
+extern uint64_t stats_n_authorize_cells_processed;
+extern uint64_t stats_n_authenticate_cells_processed;
+extern uint64_t stats_n_versions_cells_processed;
+extern uint64_t stats_n_netinfo_cells_processed;
+extern uint64_t stats_n_vpadding_cells_processed;
+extern uint64_t stats_n_certs_cells_processed;
+extern uint64_t stats_n_auth_challenge_cells_processed;
+
+#ifdef CHANNELTLS_PRIVATE
+STATIC void channel_tls_process_certs_cell(var_cell_t *cell,
+ channel_tls_t *tlschan);
+STATIC void channel_tls_process_auth_challenge_cell(var_cell_t *cell,
+ channel_tls_t *tlschan);
+STATIC void channel_tls_common_init(channel_tls_t *tlschan);
+STATIC void channel_tls_process_authenticate_cell(var_cell_t *cell,
+ channel_tls_t *tlschan);
+#endif /* defined(CHANNELTLS_PRIVATE) */
+
+#endif /* !defined(TOR_CHANNELTLS_H) */
diff --git a/src/core/or/circuit_st.h b/src/core/or/circuit_st.h
new file mode 100644
index 0000000000..8453efa633
--- /dev/null
+++ b/src/core/or/circuit_st.h
@@ -0,0 +1,182 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef CIRCUIT_ST_H
+#define CIRCUIT_ST_H
+
+#include "or/or.h"
+
+#include "or/cell_queue_st.h"
+
+struct hs_token_t;
+
+/** "magic" value for an origin_circuit_t */
+#define ORIGIN_CIRCUIT_MAGIC 0x35315243u
+/** "magic" value for an or_circuit_t */
+#define OR_CIRCUIT_MAGIC 0x98ABC04Fu
+/** "magic" value for a circuit that would have been freed by circuit_free,
+ * but which we're keeping around until a cpuworker reply arrives. See
+ * circuit_free() for more documentation. */
+#define DEAD_CIRCUIT_MAGIC 0xdeadc14c
+
+/**
+ * A circuit is a path over the onion routing
+ * network. Applications can connect to one end of the circuit, and can
+ * create exit connections at the other end of the circuit. AP and exit
+ * connections have only one circuit associated with them (and thus these
+ * connection types are closed when the circuit is closed), whereas
+ * OR connections multiplex many circuits at once, and stay standing even
+ * when there are no circuits running over them.
+ *
+ * A circuit_t structure can fill one of two roles. First, an or_circuit_t
+ * links two connections together: either an edge connection and an OR
+ * connection, or two OR connections. (When joined to an OR connection, a
+ * circuit_t affects only cells sent to a particular circID on that
+ * connection. When joined to an edge connection, a circuit_t affects all
+ * data.)
+ *
+ * Second, an origin_circuit_t holds the cipher keys and state for sending data
+ * along a given circuit. At the OP, it has a sequence of ciphers, each
+ * of which is shared with a single OR along the circuit. Separate
+ * ciphers are used for data going "forward" (away from the OP) and
+ * "backward" (towards the OP). At the OR, a circuit has only two stream
+ * ciphers: one for data going forward, and one for data going backward.
+ */
+struct circuit_t {
+ uint32_t magic; /**< For memory and type debugging: must equal
+ * ORIGIN_CIRCUIT_MAGIC or OR_CIRCUIT_MAGIC. */
+
+ /** The channel that is next in this circuit. */
+ channel_t *n_chan;
+
+ /**
+ * The circuit_id used in the next (forward) hop of this circuit;
+ * this is unique to n_chan, but this ordered pair is globally
+ * unique:
+ *
+ * (n_chan->global_identifier, n_circ_id)
+ */
+ circid_t n_circ_id;
+
+ /**
+ * Circuit mux associated with n_chan to which this circuit is attached;
+ * NULL if we have no n_chan.
+ */
+ circuitmux_t *n_mux;
+
+ /** Queue of cells waiting to be transmitted on n_chan */
+ cell_queue_t n_chan_cells;
+
+ /**
+ * The hop to which we want to extend this circuit. Should be NULL if
+ * the circuit has attached to a channel.
+ */
+ extend_info_t *n_hop;
+
+ /** True iff we are waiting for n_chan_cells to become less full before
+ * allowing p_streams to add any more cells. (Origin circuit only.) */
+ unsigned int streams_blocked_on_n_chan : 1;
+ /** True iff we are waiting for p_chan_cells to become less full before
+ * allowing n_streams to add any more cells. (OR circuit only.) */
+ unsigned int streams_blocked_on_p_chan : 1;
+
+ /** True iff we have queued a delete backwards on this circuit, but not put
+ * it on the output buffer. */
+ unsigned int p_delete_pending : 1;
+ /** True iff we have queued a delete forwards on this circuit, but not put
+ * it on the output buffer. */
+ unsigned int n_delete_pending : 1;
+
+ /** True iff this circuit has received a DESTROY cell in either direction */
+ unsigned int received_destroy : 1;
+
+ uint8_t state; /**< Current status of this circuit. */
+ uint8_t purpose; /**< Why are we creating this circuit? */
+
+ /** How many relay data cells can we package (read from edge streams)
+ * on this circuit before we receive a circuit-level sendme cell asking
+ * for more? */
+ int package_window;
+ /** How many relay data cells will we deliver (write to edge streams)
+ * on this circuit? When deliver_window gets low, we send some
+ * circuit-level sendme cells to indicate that we're willing to accept
+ * more. */
+ int deliver_window;
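+ /* (Concrete numbers, for illustration: at the origin both windows start
+ * at CIRCWINDOW_START -- 1000 cells as of this code -- and each sendme
+ * moves the corresponding window by CIRCWINDOW_INCREMENT, 100 cells.) */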
+
+ /** Temporary field used during circuits_handle_oom. */
+ uint32_t age_tmp;
+
+ /** For storage while n_chan is pending (state CIRCUIT_STATE_CHAN_WAIT). */
+ struct create_cell_t *n_chan_create_cell;
+
+ /** When did circuit construction actually begin (i.e., when did we send
+ * the CREATE cell or begin cannibalization)?
+ *
+ * Note: This timer will get reset if we decide to cannibalize
+ * a circuit. It may also get reset during certain phases of hidden
+ * service circuit use.
+ *
+ * We keep this timestamp with a higher resolution than most so that the
+ * circuit-build-time tracking code can get millisecond resolution.
+ */
+ struct timeval timestamp_began;
+
+ /** This timestamp marks when the init_circuit_base constructor ran. */
+ struct timeval timestamp_created;
+
+ /** When the circuit was first used, or 0 if the circuit is clean.
+ *
+ * XXXX Note that some code will artificially adjust this value backward
+ * in time in order to indicate that a circuit shouldn't be used for new
+ * streams, but that it can stay alive as long as it has streams on it.
+ * That's a kludge we should fix.
+ *
+ * XXX The CBT code uses this field to record when HS-related
+ * circuits entered certain states. This usage probably won't
+ * interfere with this field's primary purpose, but we should
+ * document it more thoroughly to make sure of that.
+ *
+ * XXX The SocksPort option KeepaliveIsolateSOCKSAuth will artificially
+ * adjust this value forward each time a suitable stream is attached to an
+ * already constructed circuit, potentially keeping the circuit alive
+ * indefinitely.
+ */
+ time_t timestamp_dirty;
+
+ uint16_t marked_for_close; /**< Should we close this circuit at the end of
+ * the main loop? (If true, holds the line number
+ * where this circuit was marked.) */
+ const char *marked_for_close_file; /**< For debugging: in which file was this
+ * circuit marked for close? */
+ /** For what reason (See END_CIRC_REASON...) is this circuit being closed?
+ * This field is set in circuit_mark_for_close and used later in
+ * circuit_about_to_free. */
+ int marked_for_close_reason;
+ /** As marked_for_close_reason, but reflects the underlying reason for
+ * closing this circuit.
+ */
+ int marked_for_close_orig_reason;
+
+ /** Unique ID for measuring tunneled network status requests. */
+ uint64_t dirreq_id;
+
+ /** Index in smartlist of all circuits (global_circuitlist). */
+ int global_circuitlist_idx;
+
+ /** Various statistics about cells being added to or removed from this
+ * circuit's queues; used only if CELL_STATS events are enabled and
+ * cleared after being sent to control port. */
+ smartlist_t *testing_cell_stats;
+
+ /** If set, points to an HS token that this circuit might be carrying.
+ * Used by the HS circuitmap. */
+ struct hs_token_t *hs_token;
+ /** Hashtable node: used to look up the circuit by its HS token using the HS
+ circuitmap. */
+ HT_ENTRY(circuit_t) hs_circuitmap_node;
+};
+
+#endif /* !defined(CIRCUIT_ST_H) */
diff --git a/src/core/or/circuitbuild.c b/src/core/or/circuitbuild.c
new file mode 100644
index 0000000000..39ae7ebf48
--- /dev/null
+++ b/src/core/or/circuitbuild.c
@@ -0,0 +1,3098 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circuitbuild.c
+ *
+ * \brief Implements the details of building circuits (by choosing paths,
+ * constructing/sending create/extend cells, and so on).
+ *
+ * On the client side, this module handles launching circuits. Circuit
+ * launches are started from circuit_establish_circuit(), called from
+ * circuit_launch_by_extend_info(). To choose the path the circuit will
+ * take, onion_extend_cpath() calls into a maze of node selection functions.
+ *
+ * Once the circuit is ready to be launched, the first hop is treated as a
+ * special case with circuit_handle_first_hop(), since it might need to open a
+ * channel. As the channel opens, and later as CREATED and RELAY_EXTENDED
+ * cells arrive, the client will invoke circuit_send_next_onion_skin() to send
+ * CREATE or RELAY_EXTEND cells.
+ *
+ * On the server side, this module also handles the logic of responding to
+ * RELAY_EXTEND requests, using circuit_extend().
+ **/
+
+#define CIRCUITBUILD_PRIVATE
+
+#include "or/or.h"
+#include "or/bridges.h"
+#include "or/channel.h"
+#include "or/circpathbias.h"
+#define CIRCUITBUILD_PRIVATE
+#include "or/circuitbuild.h"
+#include "or/circuitlist.h"
+#include "or/circuitstats.h"
+#include "or/circuituse.h"
+#include "or/command.h"
+#include "or/config.h"
+#include "or/confparse.h"
+#include "or/connection.h"
+#include "or/connection_edge.h"
+#include "or/connection_or.h"
+#include "or/control.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "or/directory.h"
+#include "or/entrynodes.h"
+#include "or/hs_ntor.h"
+#include "or/main.h"
+#include "or/microdesc.h"
+#include "or/networkstatus.h"
+#include "or/nodelist.h"
+#include "or/onion.h"
+#include "or/onion_tap.h"
+#include "or/onion_fast.h"
+#include "or/policies.h"
+#include "or/relay.h"
+#include "or/relay_crypto.h"
+#include "or/rendcommon.h"
+#include "or/rephist.h"
+#include "or/router.h"
+#include "or/routerlist.h"
+#include "or/routerparse.h"
+#include "or/routerset.h"
+#include "or/transports.h"
+
+#include "or/cell_st.h"
+#include "or/cpath_build_state_st.h"
+#include "or/entry_connection_st.h"
+#include "or/extend_info_st.h"
+#include "or/node_st.h"
+#include "or/or_circuit_st.h"
+#include "or/origin_circuit_st.h"
+#include "or/microdesc_st.h"
+#include "or/routerinfo_st.h"
+#include "or/routerstatus_st.h"
+
+static channel_t * channel_connect_for_circuit(const tor_addr_t *addr,
+ uint16_t port,
+ const char *id_digest,
+ const ed25519_public_key_t *ed_id);
+static int circuit_deliver_create_cell(circuit_t *circ,
+ const create_cell_t *create_cell,
+ int relayed);
+static crypt_path_t *onion_next_hop_in_cpath(crypt_path_t *cpath);
+STATIC int onion_append_hop(crypt_path_t **head_ptr, extend_info_t *choice);
+static int circuit_send_first_onion_skin(origin_circuit_t *circ);
+static int circuit_build_no_more_hops(origin_circuit_t *circ);
+static int circuit_send_intermediate_onion_skin(origin_circuit_t *circ,
+ crypt_path_t *hop);
+static const node_t *choose_good_middle_server(uint8_t purpose,
+ cpath_build_state_t *state,
+ crypt_path_t *head,
+ int cur_len);
+
+/** This function tries to get a channel to the specified endpoint,
+ * and then calls command_setup_channel() to give it the right
+ * callbacks.
+ */
+static channel_t *
+channel_connect_for_circuit(const tor_addr_t *addr, uint16_t port,
+ const char *id_digest,
+ const ed25519_public_key_t *ed_id)
+{
+ channel_t *chan;
+
+ chan = channel_connect(addr, port, id_digest, ed_id);
+ if (chan) command_setup_channel(chan);
+
+ return chan;
+}
+
+/** Search for a value for circ_id that we can use on <b>chan</b> for an
+ * outbound circuit, until we get a circ_id that is not in use by any other
+ * circuit on that conn.
+ *
+ * Return it, or 0 if can't get a unique circ_id.
+ */
+STATIC circid_t
+get_unique_circ_id_by_chan(channel_t *chan)
+{
+/* This number is chosen somewhat arbitrarily; see comment below for more
+ * info. When the space is 80% full, it gives a one-in-a-million failure
+ * chance; when the space is 90% full, it gives a one-in-850 chance; and when
+ * the space is 95% full, it gives a one-in-26 failure chance. That seems
+ * okay, though you could make a case IMO for anything between N=32 and
+ * N=256. */
+#define MAX_CIRCID_ATTEMPTS 64
+ int in_use;
+ unsigned n_with_circ = 0, n_pending_destroy = 0, n_weird_pending_destroy = 0;
+ circid_t test_circ_id;
+ circid_t attempts=0;
+ circid_t high_bit, max_range, mask;
+ int64_t pending_destroy_time_total = 0;
+ int64_t pending_destroy_time_max = 0;
+
+ tor_assert(chan);
+
+ if (chan->circ_id_type == CIRC_ID_TYPE_NEITHER) {
+ log_warn(LD_BUG,
+ "Trying to pick a circuit ID for a connection from "
+ "a client with no identity.");
+ return 0;
+ }
+ max_range = (chan->wide_circ_ids) ? (1u<<31) : (1u<<15);
+ mask = max_range - 1;
+ high_bit = (chan->circ_id_type == CIRC_ID_TYPE_HIGHER) ? max_range : 0;
+ do {
+ if (++attempts > MAX_CIRCID_ATTEMPTS) {
+ /* Make sure we don't loop forever because all circuit IDs are used.
+ *
+ * Once, we would try until we had tried every possible circuit ID. But
+ * that's quite expensive. Instead, we try MAX_CIRCID_ATTEMPTS random
+ * circuit IDs, and then give up.
+ *
+ * This potentially causes us to give up early if our circuit ID space
+ * is nearly full. If we have N circuit IDs in use, then we will reject
+ * a new circuit with probability (N / max_range) ^ MAX_CIRCID_ATTEMPTS.
+ * This means that in practice, a few percent of our circuit ID capacity
+ * will go unused.
+ *
+ * The alternative here, though, is to do a linear search over the
+ * whole circuit ID space every time we extend a circuit, which is
+ * not so great either.
+ */
+ int64_t queued_destroys;
+ char *m = rate_limit_log(&chan->last_warned_circ_ids_exhausted,
+ approx_time());
+ if (m == NULL)
+ return 0; /* This message has been rate-limited away. */
+ if (n_pending_destroy)
+ pending_destroy_time_total /= n_pending_destroy;
+ log_warn(LD_CIRC,"No unused circIDs found on channel %s wide "
+ "circID support, with %u inbound and %u outbound circuits. "
+ "Found %u circuit IDs in use by circuits, and %u with "
+ "pending destroy cells. (%u of those were marked bogusly.) "
+ "The ones with pending destroy cells "
+ "have been marked unusable for an average of %ld seconds "
+ "and a maximum of %ld seconds. This channel is %ld seconds "
+ "old. Failing a circuit.%s",
+ chan->wide_circ_ids ? "with" : "without",
+ chan->num_p_circuits, chan->num_n_circuits,
+ n_with_circ, n_pending_destroy, n_weird_pending_destroy,
+ (long)pending_destroy_time_total,
+ (long)pending_destroy_time_max,
+ (long)(approx_time() - chan->timestamp_created),
+ m);
+ tor_free(m);
+
+ if (!chan->cmux) {
+ /* This warning should be impossible. */
+ log_warn(LD_BUG, " This channel somehow has no cmux on it!");
+ return 0;
+ }
+
+ /* analysis so far on 12184 suggests that we're running out of circuit
+ IDs because it looks like we have too many pending destroy
+ cells. Let's see how many we really have pending.
+ */
+ queued_destroys = circuitmux_count_queued_destroy_cells(chan,
+ chan->cmux);
+
+ log_warn(LD_CIRC, " Circuitmux on this channel has %u circuits, "
+ "of which %u are active. It says it has %"PRId64
+ " destroy cells queued.",
+ circuitmux_num_circuits(chan->cmux),
+ circuitmux_num_active_circuits(chan->cmux),
+ (queued_destroys));
+
+ /* Change this into "if (1)" in order to get more information about
+ * possible failure modes here. You'll need to know how to use gdb with
+ * Tor: this will make Tor exit with an assertion failure if the cmux is
+ * corrupt. */
+ if (0)
+ circuitmux_assert_okay(chan->cmux);
+
+ channel_dump_statistics(chan, LOG_WARN);
+
+ return 0;
+ }
+
+ do {
+ crypto_rand((char*) &test_circ_id, sizeof(test_circ_id));
+ test_circ_id &= mask;
+ } while (test_circ_id == 0);
+
+ test_circ_id |= high_bit;
+
+ in_use = circuit_id_in_use_on_channel(test_circ_id, chan);
+ if (in_use == 1)
+ ++n_with_circ;
+ else if (in_use == 2) {
+ time_t since_when;
+ ++n_pending_destroy;
+ since_when =
+ circuit_id_when_marked_unusable_on_channel(test_circ_id, chan);
+ if (since_when) {
+ time_t waiting = approx_time() - since_when;
+ pending_destroy_time_total += waiting;
+ if (waiting > pending_destroy_time_max)
+ pending_destroy_time_max = waiting;
+ } else {
+ ++n_weird_pending_destroy;
+ }
+ }
+ } while (in_use);
+ return test_circ_id;
+}
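+
+/* Worked numbers for the failure-chance comment above (illustrative): the
+ * chance that MAX_CIRCID_ATTEMPTS random draws all hit in-use IDs is
+ * load^MAX_CIRCID_ATTEMPTS, so with N=64:
+ *
+ * 0.80^64 ~= 6.3e-7 (about one in a million)
+ * 0.90^64 ~= 1.2e-3 (about one in 850)
+ * 0.95^64 ~= 3.8e-2 (about one in 26)
+ */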
+
+/** If <b>verbose</b> is false, allocate and return a comma-separated list of
+ * the currently built elements of <b>circ</b>. If <b>verbose</b> is true, also
+ * list information about link status in a more verbose format using spaces.
+ * If <b>verbose_names</b> is false, give hex digests only; if
+ * <b>verbose_names</b> is true, use $DIGEST=Name style names.
+ */
+static char *
+circuit_list_path_impl(origin_circuit_t *circ, int verbose, int verbose_names)
+{
+ crypt_path_t *hop;
+ smartlist_t *elements;
+ const char *states[] = {"closed", "waiting for keys", "open"};
+ char *s;
+
+ elements = smartlist_new();
+
+ if (verbose) {
+ const char *nickname = build_state_get_exit_nickname(circ->build_state);
+ smartlist_add_asprintf(elements, "%s%s circ (length %d%s%s):",
+ circ->build_state->is_internal ? "internal" : "exit",
+ circ->build_state->need_uptime ? " (high-uptime)" : "",
+ circ->build_state->desired_path_len,
+ circ->base_.state == CIRCUIT_STATE_OPEN ? "" : ", last hop ",
+ circ->base_.state == CIRCUIT_STATE_OPEN ? "" :
+ (nickname?nickname:"*unnamed*"));
+ }
+
+ hop = circ->cpath;
+ do {
+ char *elt;
+ const char *id;
+ const node_t *node;
+ if (!hop)
+ break;
+ if (!verbose && hop->state != CPATH_STATE_OPEN)
+ break;
+ if (!hop->extend_info)
+ break;
+ id = hop->extend_info->identity_digest;
+ if (verbose_names) {
+ elt = tor_malloc(MAX_VERBOSE_NICKNAME_LEN+1);
+ if ((node = node_get_by_id(id))) {
+ node_get_verbose_nickname(node, elt);
+ } else if (is_legal_nickname(hop->extend_info->nickname)) {
+ elt[0] = '$';
+ base16_encode(elt+1, HEX_DIGEST_LEN+1, id, DIGEST_LEN);
+ elt[HEX_DIGEST_LEN+1]= '~';
+ strlcpy(elt+HEX_DIGEST_LEN+2,
+ hop->extend_info->nickname, MAX_NICKNAME_LEN+1);
+ } else {
+ elt[0] = '$';
+ base16_encode(elt+1, HEX_DIGEST_LEN+1, id, DIGEST_LEN);
+ }
+ } else { /* ! verbose_names */
+ elt = tor_malloc(HEX_DIGEST_LEN+2);
+ elt[0] = '$';
+ base16_encode(elt+1, HEX_DIGEST_LEN+1, id, DIGEST_LEN);
+ }
+ tor_assert(elt);
+ if (verbose) {
+ tor_assert(hop->state <= 2);
+ smartlist_add_asprintf(elements,"%s(%s)",elt,states[hop->state]);
+ tor_free(elt);
+ } else {
+ smartlist_add(elements, elt);
+ }
+ hop = hop->next;
+ } while (hop != circ->cpath);
+
+ s = smartlist_join_strings(elements, verbose?" ":",", 0, NULL);
+ SMARTLIST_FOREACH(elements, char*, cp, tor_free(cp));
+ smartlist_free(elements);
+ return s;
+}
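+
+/* Example output (hypothetical digests): with verbose_names set, each
+ * element looks like "$<40 hex digits>~nickname"; without it, like
+ * "$<40 hex digits>". Elements are joined with commas, or with spaces
+ * in the verbose format. */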
+
+/** If <b>verbose</b> is false, allocate and return a comma-separated
+ * list of the currently built elements of <b>circ</b>. If
+ * <b>verbose</b> is true, also list information about link status in
+ * a more verbose format using spaces.
+ */
+char *
+circuit_list_path(origin_circuit_t *circ, int verbose)
+{
+ return circuit_list_path_impl(circ, verbose, 0);
+}
+
+/** Allocate and return a comma-separated list of the currently built elements
+ * of <b>circ</b>, giving each as a verbose nickname.
+ */
+char *
+circuit_list_path_for_controller(origin_circuit_t *circ)
+{
+ return circuit_list_path_impl(circ, 0, 1);
+}
+
+/** Log, at severity <b>severity</b>, the nicknames of each router in
+ * <b>circ</b>'s cpath. Also log the length of the cpath, and the intended
+ * exit point.
+ */
+void
+circuit_log_path(int severity, unsigned int domain, origin_circuit_t *circ)
+{
+ char *s = circuit_list_path(circ,1);
+ tor_log(severity,domain,"%s",s);
+ tor_free(s);
+}
+
+/** Return 1 iff every node in circ's cpath definitely supports ntor. */
+static int
+circuit_cpath_supports_ntor(const origin_circuit_t *circ)
+{
+ crypt_path_t *head, *cpath;
+
+ cpath = head = circ->cpath;
+ do {
+ /* if the extend_info is missing, we can't tell if it supports ntor */
+ if (!cpath->extend_info) {
+ return 0;
+ }
+
+ /* if the key is blank, it definitely doesn't support ntor */
+ if (!extend_info_supports_ntor(cpath->extend_info)) {
+ return 0;
+ }
+ cpath = cpath->next;
+ } while (cpath != head);
+
+ return 1;
+}
+
+/** Pick all the entries in our cpath. Stop and return 0 when we're
+ * happy, or return -1 if an error occurs. */
+static int
+onion_populate_cpath(origin_circuit_t *circ)
+{
+ int r = 0;
+
+ /* onion_extend_cpath assumes these are non-NULL */
+ tor_assert(circ);
+ tor_assert(circ->build_state);
+
+ while (r == 0) {
+ r = onion_extend_cpath(circ);
+ if (r < 0) {
+ log_info(LD_CIRC,"Generating cpath hop failed.");
+ return -1;
+ }
+ }
+
+ /* The path is complete */
+ tor_assert(r == 1);
+
+ /* Does every node in this path support ntor? */
+ int path_supports_ntor = circuit_cpath_supports_ntor(circ);
+
+ /* We would like every path to support ntor, but we have to allow for some
+ * edge cases. */
+ tor_assert(circuit_get_cpath_len(circ));
+ if (circuit_can_use_tap(circ)) {
+ /* Circuits from clients to intro points, and hidden services to
+ * rend points do not support ntor, because the hidden service protocol
+ * does not include ntor onion keys. This is also true for Tor2web clients
+ * and Single Onion Services. */
+ return 0;
+ }
+
+ if (circuit_get_cpath_len(circ) == 1) {
+ /* Allow for bootstrapping: when we're fetching directly from a fallback,
+ * authority, or bridge, we have no way of knowing its ntor onion key
+ * before we connect to it. So instead, we try connecting, and end up using
+ * CREATE_FAST. */
+ tor_assert(circ->cpath);
+ tor_assert(circ->cpath->extend_info);
+ const node_t *node = node_get_by_id(
+ circ->cpath->extend_info->identity_digest);
+ /* If we don't know the node and its descriptor, we must be bootstrapping.
+ */
+ if (!node || !node_has_preferred_descriptor(node, 1)) {
+ return 0;
+ }
+ }
+
+ if (BUG(!path_supports_ntor)) {
+ /* If we're building a multi-hop path, and it's not one of the HS or
+ * bootstrapping exceptions, and it doesn't support ntor, something has
+ * gone wrong. */
+ return -1;
+ }
+
+ return 0;
+}
+
+/** Create and return a new origin circuit. Initialize its purpose and
+ * build-state based on our arguments. The <b>flags</b> argument is a
+ * bitfield of CIRCLAUNCH_* flags. */
+origin_circuit_t *
+origin_circuit_init(uint8_t purpose, int flags)
+{
+ /* sets circ->p_circ_id and circ->p_chan */
+ origin_circuit_t *circ = origin_circuit_new();
+ circuit_set_state(TO_CIRCUIT(circ), CIRCUIT_STATE_CHAN_WAIT);
+ circ->build_state = tor_malloc_zero(sizeof(cpath_build_state_t));
+ circ->build_state->onehop_tunnel =
+ ((flags & CIRCLAUNCH_ONEHOP_TUNNEL) ? 1 : 0);
+ circ->build_state->need_uptime =
+ ((flags & CIRCLAUNCH_NEED_UPTIME) ? 1 : 0);
+ circ->build_state->need_capacity =
+ ((flags & CIRCLAUNCH_NEED_CAPACITY) ? 1 : 0);
+ circ->build_state->is_internal =
+ ((flags & CIRCLAUNCH_IS_INTERNAL) ? 1 : 0);
+ circ->base_.purpose = purpose;
+ return circ;
+}
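+
+/* A minimal usage sketch (illustrative; CIRCUIT_PURPOSE_C_GENERAL is the
+ * ordinary client purpose used elsewhere in this codebase):
+ *
+ * origin_circuit_t *circ =
+ * origin_circuit_init(CIRCUIT_PURPOSE_C_GENERAL,
+ * CIRCLAUNCH_NEED_UPTIME | CIRCLAUNCH_IS_INTERNAL);
+ */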
+
+/** Build a new circuit for <b>purpose</b>. If <b>exit_ei</b>
+ * is defined, then use that as your exit router, else choose a suitable
+ * exit node.
+ *
+ * Also launch a connection to the first OR in the chosen path, if
+ * it's not open already.
+ */
+origin_circuit_t *
+circuit_establish_circuit(uint8_t purpose, extend_info_t *exit_ei, int flags)
+{
+ origin_circuit_t *circ;
+ int err_reason = 0;
+ int is_hs_v3_rp_circuit = 0;
+
+ if (flags & CIRCLAUNCH_IS_V3_RP) {
+ is_hs_v3_rp_circuit = 1;
+ }
+
+ circ = origin_circuit_init(purpose, flags);
+
+ if (onion_pick_cpath_exit(circ, exit_ei, is_hs_v3_rp_circuit) < 0 ||
+ onion_populate_cpath(circ) < 0) {
+ circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_NOPATH);
+ return NULL;
+ }
+
+ control_event_circuit_status(circ, CIRC_EVENT_LAUNCHED, 0);
+
+ if ((err_reason = circuit_handle_first_hop(circ)) < 0) {
+ circuit_mark_for_close(TO_CIRCUIT(circ), -err_reason);
+ return NULL;
+ }
+ return circ;
+}
+
+/** Return the guard state associated with <b>circ</b>, which may be NULL. */
+circuit_guard_state_t *
+origin_circuit_get_guard_state(origin_circuit_t *circ)
+{
+ return circ->guard_state;
+}
+
+/** Start establishing the first hop of our circuit. Figure out what
+ * OR we should connect to, and if necessary start the connection to
+ * it. If we're already connected, then send the 'create' cell.
+ * Return 0 for ok, -reason if circ should be marked-for-close. */
+int
+circuit_handle_first_hop(origin_circuit_t *circ)
+{
+ crypt_path_t *firsthop;
+ channel_t *n_chan;
+ int err_reason = 0;
+ const char *msg = NULL;
+ int should_launch = 0;
+ const or_options_t *options = get_options();
+
+ firsthop = onion_next_hop_in_cpath(circ->cpath);
+ tor_assert(firsthop);
+ tor_assert(firsthop->extend_info);
+
+ /* Some bridges are on private addresses. Others pass a dummy private
+ * address to the pluggable transport, which ignores it.
+ * Deny the connection if:
+ * - the address is internal, and
+ * - we're not connecting to a configured bridge, and
+ * - we're not configured to allow extends to private addresses. */
+ if (tor_addr_is_internal(&firsthop->extend_info->addr, 0) &&
+ !extend_info_is_a_configured_bridge(firsthop->extend_info) &&
+ !options->ExtendAllowPrivateAddresses) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Client asked me to connect directly to a private address");
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+
+ /* now see if we're already connected to the first OR in 'route' */
+ log_debug(LD_CIRC,"Looking for firsthop '%s'",
+ fmt_addrport(&firsthop->extend_info->addr,
+ firsthop->extend_info->port));
+
+ n_chan = channel_get_for_extend(firsthop->extend_info->identity_digest,
+ &firsthop->extend_info->ed_identity,
+ &firsthop->extend_info->addr,
+ &msg,
+ &should_launch);
+
+ if (!n_chan) {
+ /* not currently connected in a useful way. */
+ log_info(LD_CIRC, "Next router is %s: %s",
+ safe_str_client(extend_info_describe(firsthop->extend_info)),
+ msg?msg:"???");
+ circ->base_.n_hop = extend_info_dup(firsthop->extend_info);
+
+ if (should_launch) {
+ if (circ->build_state->onehop_tunnel)
+ control_event_bootstrap(BOOTSTRAP_STATUS_CONN_DIR, 0);
+ n_chan = channel_connect_for_circuit(
+ &firsthop->extend_info->addr,
+ firsthop->extend_info->port,
+ firsthop->extend_info->identity_digest,
+ &firsthop->extend_info->ed_identity);
+ if (!n_chan) { /* connect failed, forget the whole thing */
+ log_info(LD_CIRC,"connect to firsthop failed. Closing.");
+ return -END_CIRC_REASON_CONNECTFAILED;
+ }
+ }
+
+ log_debug(LD_CIRC,"connecting in progress (or finished). Good.");
+ /* return success. The onion/circuit/etc will be taken care of
+ * automatically (may already have been) whenever n_chan reaches
+ * OR_CONN_STATE_OPEN.
+ */
+ return 0;
+ } else { /* it's already open. use it. */
+ tor_assert(!circ->base_.n_hop);
+ circ->base_.n_chan = n_chan;
+ log_debug(LD_CIRC,"Conn open. Delivering first onion skin.");
+ if ((err_reason = circuit_send_next_onion_skin(circ)) < 0) {
+ log_info(LD_CIRC,"circuit_send_next_onion_skin failed.");
+ circ->base_.n_chan = NULL;
+ return err_reason;
+ }
+ }
+ return 0;
+}
+
+/** Find any circuits that are waiting on <b>or_conn</b> to become
+ * open and get them to send their create cells forward.
+ *
+ * Status is 1 if connect succeeded, or 0 if connect failed.
+ *
+ * Close_origin_circuits is 1 if we should close all the origin circuits
+ * through this channel, or 0 otherwise. (This happens when we want to retry
+ * an older guard.)
+ */
+void
+circuit_n_chan_done(channel_t *chan, int status, int close_origin_circuits)
+{
+ smartlist_t *pending_circs;
+ int err_reason = 0;
+
+ tor_assert(chan);
+
+ log_debug(LD_CIRC,"chan to %s, status=%d",
+ channel_get_canonical_remote_descr(chan), status);
+
+ pending_circs = smartlist_new();
+ circuit_get_all_pending_on_channel(pending_circs, chan);
+
+ SMARTLIST_FOREACH_BEGIN(pending_circs, circuit_t *, circ)
+ {
+ /* These checks are redundant wrt get_all_pending_on_or_conn, but I'm
+ * leaving them in, in case it's possible for the status of a circuit to
+ * change as we're going down the list. */
+ if (circ->marked_for_close || circ->n_chan || !circ->n_hop ||
+ circ->state != CIRCUIT_STATE_CHAN_WAIT)
+ continue;
+
+ if (tor_digest_is_zero(circ->n_hop->identity_digest)) {
+ /* Look at addr/port. This is an unkeyed connection. */
+ if (!channel_matches_extend_info(chan, circ->n_hop))
+ continue;
+ } else {
+ /* We expected a key. See if it's the right one. */
+ if (tor_memneq(chan->identity_digest,
+ circ->n_hop->identity_digest, DIGEST_LEN))
+ continue;
+ }
+ if (!status) { /* chan failed; close circ */
+ log_info(LD_CIRC,"Channel failed; closing circ.");
+ circuit_mark_for_close(circ, END_CIRC_REASON_CHANNEL_CLOSED);
+ continue;
+ }
+ if (close_origin_circuits && CIRCUIT_IS_ORIGIN(circ)) {
+ log_info(LD_CIRC,"Channel deprecated for origin circs; closing circ.");
+ circuit_mark_for_close(circ, END_CIRC_REASON_CHANNEL_CLOSED);
+ continue;
+ }
+ log_debug(LD_CIRC, "Found circ, sending create cell.");
+ /* circuit_deliver_create_cell will set n_circ_id and add us to
+ * chan_circuid_circuit_map, so we don't need to call
+ * set_circid_chan here. */
+ circ->n_chan = chan;
+ extend_info_free(circ->n_hop);
+ circ->n_hop = NULL;
+
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ if ((err_reason =
+ circuit_send_next_onion_skin(TO_ORIGIN_CIRCUIT(circ))) < 0) {
+ log_info(LD_CIRC,
+ "send_next_onion_skin failed; circuit marked for closing.");
+ circuit_mark_for_close(circ, -err_reason);
+ continue;
+ /* XXX could this be bad, e.g., if next_onion_skin failed because the
+ * conn died? */
+ }
+ } else {
+ /* pull the create cell out of circ->n_chan_create_cell, and send it */
+ tor_assert(circ->n_chan_create_cell);
+ if (circuit_deliver_create_cell(circ, circ->n_chan_create_cell, 1)<0) {
+ circuit_mark_for_close(circ, END_CIRC_REASON_RESOURCELIMIT);
+ continue;
+ }
+ tor_free(circ->n_chan_create_cell);
+ circuit_set_state(circ, CIRCUIT_STATE_OPEN);
+ }
+ }
+ SMARTLIST_FOREACH_END(circ);
+
+ smartlist_free(pending_circs);
+}
+
+/** Find a new circid that isn't currently in use on the circ->n_chan
+ * for the outgoing
+ * circuit <b>circ</b>, and deliver the cell <b>create_cell</b> to this
+ * circuit. If <b>relayed</b> is true, this is a create cell somebody
+ * gave us via an EXTEND cell, so we shouldn't worry if we don't understand
+ * it. Return -1 if we failed to find a suitable circid, else return 0.
+ */
+static int
+circuit_deliver_create_cell(circuit_t *circ, const create_cell_t *create_cell,
+ int relayed)
+{
+ cell_t cell;
+ circid_t id;
+ int r;
+
+ tor_assert(circ);
+ tor_assert(circ->n_chan);
+ tor_assert(create_cell);
+ tor_assert(create_cell->cell_type == CELL_CREATE ||
+ create_cell->cell_type == CELL_CREATE_FAST ||
+ create_cell->cell_type == CELL_CREATE2);
+
+ id = get_unique_circ_id_by_chan(circ->n_chan);
+ if (!id) {
+ static ratelim_t circid_warning_limit = RATELIM_INIT(9600);
+ log_fn_ratelim(&circid_warning_limit, LOG_WARN, LD_CIRC,
+ "failed to get unique circID.");
+ goto error;
+ }
+
+ memset(&cell, 0, sizeof(cell_t));
+ r = relayed ? create_cell_format_relayed(&cell, create_cell)
+ : create_cell_format(&cell, create_cell);
+ if (r < 0) {
+ log_warn(LD_CIRC,"Couldn't format create cell");
+ goto error;
+ }
+ log_debug(LD_CIRC,"Chosen circID %u.", (unsigned)id);
+ circuit_set_n_circid_chan(circ, id, circ->n_chan);
+ cell.circ_id = circ->n_circ_id;
+
+ append_cell_to_circuit_queue(circ, circ->n_chan, &cell,
+ CELL_DIRECTION_OUT, 0);
+
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ /* Update began timestamp for circuits starting their first hop */
+ if (TO_ORIGIN_CIRCUIT(circ)->cpath->state == CPATH_STATE_CLOSED) {
+ if (!CHANNEL_IS_OPEN(circ->n_chan)) {
+ log_warn(LD_CIRC,
+ "Got first hop for a circuit without an opened channel. "
+ "State: %s.", channel_state_to_string(circ->n_chan->state));
+ tor_fragile_assert();
+ }
+
+ tor_gettimeofday(&circ->timestamp_began);
+ }
+
+ /* mark it so it gets better rate limiting treatment. */
+ channel_timestamp_client(circ->n_chan);
+ }
+
+ return 0;
+ error:
+ circ->n_chan = NULL;
+ return -1;
+}
+
+/** We've decided to start our reachability testing. If we have a
+ * routerinfo for ourselves, log this to the user. Return 1 if we did,
+ * or 0 if we chose not to log anything. */
+int
+inform_testing_reachability(void)
+{
+ char dirbuf[128];
+ char *address;
+ const routerinfo_t *me = router_get_my_routerinfo();
+ if (!me)
+ return 0;
+ address = tor_dup_ip(me->addr);
+ control_event_server_status(LOG_NOTICE,
+ "CHECKING_REACHABILITY ORADDRESS=%s:%d",
+ address, me->or_port);
+ if (me->dir_port) {
+ tor_snprintf(dirbuf, sizeof(dirbuf), " and DirPort %s:%d",
+ address, me->dir_port);
+ control_event_server_status(LOG_NOTICE,
+ "CHECKING_REACHABILITY DIRADDRESS=%s:%d",
+ address, me->dir_port);
+ }
+ log_notice(LD_OR, "Now checking whether ORPort %s:%d%s %s reachable... "
+ "(this may take up to %d minutes -- look for log "
+ "messages indicating success)",
+ address, me->or_port,
+ me->dir_port ? dirbuf : "",
+ me->dir_port ? "are" : "is",
+ TIMEOUT_UNTIL_UNREACHABILITY_COMPLAINT/60);
+
+ tor_free(address);
+ return 1;
+}
+
+/** Return true iff we should send a create_fast cell to start building
+ * a given circuit. */
+static inline int
+should_use_create_fast_for_circuit(origin_circuit_t *circ)
+{
+ const or_options_t *options = get_options();
+ tor_assert(circ->cpath);
+ tor_assert(circ->cpath->extend_info);
+
+ if (!circuit_has_usable_onion_key(circ)) {
+ /* We don't have ntor, and we don't have or can't use TAP,
+ * so our hand is forced: only a create_fast will work. */
+ return 1;
+ }
+ if (public_server_mode(options)) {
+ /* We're a server, and we have a usable onion key. We can choose.
+ * Prefer to blend our circuit into the other circuits we are
+ * creating on behalf of others. */
+ return 0;
+ }
+ return networkstatus_get_param(NULL, "usecreatefast", 0, 0, 1);
+}
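+
+/* Worked cases for the decision above (illustrative): a relay in
+ * public_server_mode() with a usable onion key returns 0 and blends in
+ * with a real CREATE/CREATE2; a client with no usable onion key is
+ * forced to return 1 (CREATE_FAST); otherwise a client defers to the
+ * "usecreatefast" consensus parameter, which the call above defaults
+ * to 0. */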
+
+/**
+ * Return true if <b>circ</b> is the type of circuit we want to count
+ * timeouts from.
+ *
+ * In particular, we want to consider any circuit that plans to build
+ * at least 3 hops (but maybe more), but has 3 or fewer hops built
+ * so far.
+ *
+ * We still want to consider circuits before 3 hops, because we need
+ * to decide if we should convert them to a measurement circuit in
+ * circuit_build_times_handle_completed_hop(), rather than letting
+ * slow circuits get killed right away.
+ */
+int
+circuit_timeout_want_to_count_circ(const origin_circuit_t *circ)
+{
+ return !circ->has_opened
+ && circ->build_state->desired_path_len >= DEFAULT_ROUTE_LEN
+ && circuit_get_cpath_opened_len(circ) <= DEFAULT_ROUTE_LEN;
+}
+
+/** Decide whether to use a TAP or ntor handshake for connecting to <b>ei</b>
+ * directly, and set *<b>cell_type_out</b> and *<b>handshake_type_out</b>
+ * accordingly.
+ * Note that TAP handshakes in CREATE cells are only used for direct
+ * connections:
+ * - from Tor2web to intro points not in the client's consensus, and
+ * - from Single Onions to rend points not in the service's consensus.
+ * This is checked in onion_populate_cpath. */
+static void
+circuit_pick_create_handshake(uint8_t *cell_type_out,
+ uint16_t *handshake_type_out,
+ const extend_info_t *ei)
+{
+ /* torspec says: In general, clients SHOULD use CREATE whenever they are
+ * using the TAP handshake, and CREATE2 otherwise. */
+ if (extend_info_supports_ntor(ei)) {
+ *cell_type_out = CELL_CREATE2;
+ *handshake_type_out = ONION_HANDSHAKE_TYPE_NTOR;
+ } else {
+ /* XXXX030 Remove support for deciding to use TAP and EXTEND. */
+ *cell_type_out = CELL_CREATE;
+ *handshake_type_out = ONION_HANDSHAKE_TYPE_TAP;
+ }
+}
+
+/** Decide whether to use a TAP or ntor handshake for extending to <b>ei</b>
+ * and set *<b>handshake_type_out</b> accordingly. Decide whether we should
+ * use an EXTEND2 or an EXTEND cell to do so, and set *<b>cell_type_out</b>
+ * and *<b>create_cell_type_out</b> accordingly.
+ * Note that TAP handshakes in EXTEND cells are only used:
+ * - from clients to intro points, and
+ * - from hidden services to rend points.
+ * This is checked in onion_populate_cpath.
+ */
+static void
+circuit_pick_extend_handshake(uint8_t *cell_type_out,
+ uint8_t *create_cell_type_out,
+ uint16_t *handshake_type_out,
+ const extend_info_t *ei)
+{
+ uint8_t t;
+ circuit_pick_create_handshake(&t, handshake_type_out, ei);
+
+ /* torspec says: Clients SHOULD use the EXTEND format whenever sending a TAP
+ * handshake... In other cases, clients SHOULD use EXTEND2. */
+ if (*handshake_type_out != ONION_HANDSHAKE_TYPE_TAP) {
+ *cell_type_out = RELAY_COMMAND_EXTEND2;
+ *create_cell_type_out = CELL_CREATE2;
+ } else {
+ /* XXXX030 Remove support for deciding to use TAP and EXTEND. */
+ *cell_type_out = RELAY_COMMAND_EXTEND;
+ *create_cell_type_out = CELL_CREATE;
+ }
+}
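+
+/* A minimal sketch of what the two pickers above agree on, assuming a
+ * hypothetical <b>ei_with_ntor</b> that supports the ntor handshake
+ * (kept under #if 0; illustrative only): */
+#if 0
+  uint8_t cell_type, create_cell_type;
+  uint16_t handshake_type;
+  circuit_pick_extend_handshake(&cell_type, &create_cell_type,
+                                &handshake_type, ei_with_ntor);
+  tor_assert(cell_type == RELAY_COMMAND_EXTEND2);
+  tor_assert(create_cell_type == CELL_CREATE2);
+  tor_assert(handshake_type == ONION_HANDSHAKE_TYPE_NTOR);
+#endif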
+
+/**
+ * Return true iff <b>purpose</b> is a purpose for a circuit which is
+ * allowed to have no guard configured, even if the circuit is multihop
+ * and guards are enabled.
+ */
+static int
+circuit_purpose_may_omit_guard(int purpose)
+{
+ switch (purpose) {
+ case CIRCUIT_PURPOSE_TESTING:
+ case CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT:
+ /* Testing circuits may omit guards because they're measuring
+ * liveness or performance, and don't want guards to interfere. */
+ return 1;
+ default:
+ /* All other multihop circuits should use guards if guards are
+ * enabled. */
+ return 0;
+ }
+}
+
+/** This is the backbone function for building circuits.
+ *
+ * If circ's first hop is closed, then we need to build a create
+ * cell and send it forward.
+ *
+ * Otherwise, if circ's cpath still has any non-open hops, we need to
+ * build a relay extend cell and send it forward to the next non-open hop.
+ *
+ * If all hops on the cpath are open, we're done building the circuit
+ * and we should do housekeeping for the newly opened circuit.
+ *
+ * Return -reason if we want to tear down circ, else return 0.
+ */
+int
+circuit_send_next_onion_skin(origin_circuit_t *circ)
+{
+ tor_assert(circ);
+
+ if (circ->cpath->state == CPATH_STATE_CLOSED) {
+ /* Case one: we're on the first hop. */
+ return circuit_send_first_onion_skin(circ);
+ }
+
+ tor_assert(circ->cpath->state == CPATH_STATE_OPEN);
+ tor_assert(circ->base_.state == CIRCUIT_STATE_BUILDING);
+
+ crypt_path_t *hop = onion_next_hop_in_cpath(circ->cpath);
+ circuit_build_times_handle_completed_hop(circ);
+
+ if (hop) {
+ /* Case two: we're on a hop after the first. */
+ return circuit_send_intermediate_onion_skin(circ, hop);
+ }
+
+ /* Case three: the circuit is finished. Do housekeeping tasks on it. */
+ return circuit_build_no_more_hops(circ);
+}
+
+/**
+ * Called from circuit_send_next_onion_skin() when we find ourselves connected
+ * to the first hop in <b>circ</b>: Send a CREATE or CREATE2 or CREATE_FAST
+ * cell to that hop. Return 0 on success; -reason on failure (if the circuit
+ * should be torn down).
+ */
+static int
+circuit_send_first_onion_skin(origin_circuit_t *circ)
+{
+ int fast;
+ int len;
+ const node_t *node;
+ create_cell_t cc;
+ memset(&cc, 0, sizeof(cc));
+
+ log_debug(LD_CIRC,"First skin; sending create cell.");
+
+ if (circ->build_state->onehop_tunnel) {
+ control_event_bootstrap(BOOTSTRAP_STATUS_ONEHOP_CREATE, 0);
+ } else {
+ control_event_bootstrap(BOOTSTRAP_STATUS_CIRCUIT_CREATE, 0);
+
+ /* If this is not a one-hop tunnel, the channel is being used
+ * for traffic that wants anonymity and protection from traffic
+ * analysis (such as netflow record retention). That means we want
+ * to pad it.
+ */
+ if (circ->base_.n_chan->channel_usage < CHANNEL_USED_FOR_FULL_CIRCS)
+ circ->base_.n_chan->channel_usage = CHANNEL_USED_FOR_FULL_CIRCS;
+ }
+
+ node = node_get_by_id(circ->base_.n_chan->identity_digest);
+ fast = should_use_create_fast_for_circuit(circ);
+ if (!fast) {
+ /* We know the right onion key: we should send a create cell. */
+ circuit_pick_create_handshake(&cc.cell_type, &cc.handshake_type,
+ circ->cpath->extend_info);
+ } else {
+ /* We don't know an onion key, so we need to fall back to CREATE_FAST. */
+ cc.cell_type = CELL_CREATE_FAST;
+ cc.handshake_type = ONION_HANDSHAKE_TYPE_FAST;
+ }
+
+ len = onion_skin_create(cc.handshake_type,
+ circ->cpath->extend_info,
+ &circ->cpath->handshake_state,
+ cc.onionskin);
+ if (len < 0) {
+ log_warn(LD_CIRC,"onion_skin_create (first hop) failed.");
+ return - END_CIRC_REASON_INTERNAL;
+ }
+ cc.handshake_len = len;
+
+ if (circuit_deliver_create_cell(TO_CIRCUIT(circ), &cc, 0) < 0)
+ return - END_CIRC_REASON_RESOURCELIMIT;
+
+ circ->cpath->state = CPATH_STATE_AWAITING_KEYS;
+ circuit_set_state(TO_CIRCUIT(circ), CIRCUIT_STATE_BUILDING);
+ log_info(LD_CIRC,"First hop: finished sending %s cell to '%s'",
+ fast ? "CREATE_FAST" : "CREATE",
+ node ? node_describe(node) : "<unnamed>");
+ return 0;
+}
+
+/**
+ * Called from circuit_send_next_onion_skin() when we find that we have no
+ * more hops: mark the circuit as finished, and perform the necessary
+ * bookkeeping. Return 0 on success; -reason on failure (if the circuit
+ * should be torn down).
+ */
+static int
+circuit_build_no_more_hops(origin_circuit_t *circ)
+{
+ guard_usable_t r;
+ if (! circ->guard_state) {
+ if (circuit_get_cpath_len(circ) != 1 &&
+ ! circuit_purpose_may_omit_guard(circ->base_.purpose) &&
+ get_options()->UseEntryGuards) {
+ log_warn(LD_BUG, "%d-hop circuit %p with purpose %d has no "
+ "guard state",
+ circuit_get_cpath_len(circ), circ, circ->base_.purpose);
+ }
+ r = GUARD_USABLE_NOW;
+ } else {
+ r = entry_guard_succeeded(&circ->guard_state);
+ }
+ const int is_usable_for_streams = (r == GUARD_USABLE_NOW);
+ if (r == GUARD_USABLE_NOW) {
+ circuit_set_state(TO_CIRCUIT(circ), CIRCUIT_STATE_OPEN);
+ } else if (r == GUARD_MAYBE_USABLE_LATER) {
+ // Wait till either a better guard succeeds, or till
+ // all better guards fail.
+ circuit_set_state(TO_CIRCUIT(circ), CIRCUIT_STATE_GUARD_WAIT);
+ } else {
+ tor_assert_nonfatal(r == GUARD_USABLE_NEVER);
+ return - END_CIRC_REASON_INTERNAL;
+ }
+
+ /* XXXX #21422 -- the rest of this branch needs careful thought!
+ * Some of the things here need to happen when a circuit becomes
+ * mechanically open; some need to happen when it is actually usable.
+ * I think I got them right, but more checking would be wise. -NM
+ */
+
+ log_info(LD_CIRC,"circuit built!");
+ circuit_reset_failure_count(0);
+
+ if (circ->build_state->onehop_tunnel || circ->has_opened) {
+ control_event_bootstrap(BOOTSTRAP_STATUS_REQUESTING_STATUS, 0);
+ }
+
+ pathbias_count_build_success(circ);
+ if (is_usable_for_streams)
+ circuit_has_opened(circ); /* do other actions as necessary */
+
+ if (!have_completed_a_circuit() && !circ->build_state->onehop_tunnel) {
+ const or_options_t *options = get_options();
+ note_that_we_completed_a_circuit();
+ /* FFFF Log a count of known routers here */
+    log_notice(LD_GENERAL,
+               "Tor has successfully opened a circuit. "
+               "Looks like client functionality is working.");
+    control_event_bootstrap(BOOTSTRAP_STATUS_DONE, 0);
+ control_event_client_status(LOG_NOTICE, "CIRCUIT_ESTABLISHED");
+ clear_broken_connection_map(1);
+ if (server_mode(options) && !check_whether_orport_reachable(options)) {
+ inform_testing_reachability();
+ router_do_reachability_checks(1, 1);
+ }
+ }
+
+ /* We're done with measurement circuits here. Just close them */
+ if (circ->base_.purpose == CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT) {
+ circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_FINISHED);
+ }
+ return 0;
+}
+
+/**
+ * Called from circuit_send_next_onion_skin() when we find that we have a hop
+ * other than the first that we need to extend to: use <b>hop</b>'s
+ * information to extend the circuit another step. Return 0 on success;
+ * -reason on failure (if the circuit should be torn down).
+ */
+static int
+circuit_send_intermediate_onion_skin(origin_circuit_t *circ,
+ crypt_path_t *hop)
+{
+ int len;
+ extend_cell_t ec;
+ memset(&ec, 0, sizeof(ec));
+
+ log_debug(LD_CIRC,"starting to send subsequent skin.");
+
+ if (tor_addr_family(&hop->extend_info->addr) != AF_INET) {
+ log_warn(LD_BUG, "Trying to extend to a non-IPv4 address.");
+ return - END_CIRC_REASON_INTERNAL;
+ }
+
+ circuit_pick_extend_handshake(&ec.cell_type,
+ &ec.create_cell.cell_type,
+ &ec.create_cell.handshake_type,
+ hop->extend_info);
+
+ tor_addr_copy(&ec.orport_ipv4.addr, &hop->extend_info->addr);
+ ec.orport_ipv4.port = hop->extend_info->port;
+ tor_addr_make_unspec(&ec.orport_ipv6.addr);
+ memcpy(ec.node_id, hop->extend_info->identity_digest, DIGEST_LEN);
+ /* Set the ED25519 identity too -- it will only get included
+ * in the extend2 cell if we're configured to use it, though. */
+ ed25519_pubkey_copy(&ec.ed_pubkey, &hop->extend_info->ed_identity);
+
+ len = onion_skin_create(ec.create_cell.handshake_type,
+ hop->extend_info,
+ &hop->handshake_state,
+ ec.create_cell.onionskin);
+ if (len < 0) {
+ log_warn(LD_CIRC,"onion_skin_create failed.");
+ return - END_CIRC_REASON_INTERNAL;
+ }
+ ec.create_cell.handshake_len = len;
+
+ log_info(LD_CIRC,"Sending extend relay cell.");
+ {
+ uint8_t command = 0;
+ uint16_t payload_len=0;
+ uint8_t payload[RELAY_PAYLOAD_SIZE];
+ if (extend_cell_format(&command, &payload_len, payload, &ec)<0) {
+ log_warn(LD_CIRC,"Couldn't format extend cell");
+ return -END_CIRC_REASON_INTERNAL;
+ }
+
+ /* send it to hop->prev, because that relay will transfer
+ * it to a create cell and then send to hop */
+ if (relay_send_command_from_edge(0, TO_CIRCUIT(circ),
+ command,
+ (char*)payload, payload_len,
+ hop->prev) < 0)
+ return 0; /* circuit is closed */
+ }
+ hop->state = CPATH_STATE_AWAITING_KEYS;
+ return 0;
+}
+
+/** Our clock just jumped by <b>seconds_elapsed</b>. If <b>was_idle</b> is
+ * true, then the monotonic time matches; otherwise it doesn't. Assume
+ * something has also gone wrong with our network: notify the user, and
+ * abandon all not-yet-used circuits. */
+void
+circuit_note_clock_jumped(int64_t seconds_elapsed, bool was_idle)
+{
+ int severity = server_mode(get_options()) ? LOG_WARN : LOG_NOTICE;
+  if (was_idle) {
+    tor_log(severity, LD_GENERAL, "Tor has been idle for %"PRId64
+            " seconds; assuming established circuits no longer work.",
+            seconds_elapsed);
+  } else {
+    tor_log(severity, LD_GENERAL,
+            "Your system clock just jumped %"PRId64" seconds %s; "
+            "assuming established circuits no longer work.",
+            seconds_elapsed >= 0 ? seconds_elapsed : -seconds_elapsed,
+            seconds_elapsed >= 0 ? "forward" : "backward");
+  }
+  control_event_general_status(LOG_WARN, "CLOCK_JUMPED TIME=%"PRId64
+                               " IDLE=%d",
+                               seconds_elapsed, was_idle ? 1 : 0);
+ /* so we log when it works again */
+ note_that_we_maybe_cant_complete_circuits();
+ control_event_client_status(severity, "CIRCUIT_NOT_ESTABLISHED REASON=%s",
+ "CLOCK_JUMPED");
+ circuit_mark_all_unused_circs();
+ circuit_mark_all_dirty_circs_as_unusable();
+ if (seconds_elapsed < 0) {
+ /* Restart all the timers in case we jumped a long way into the past. */
+ reset_all_main_loop_timers();
+ }
+}
+
+/** Take the 'extend' <b>cell</b>, pull out addr/port plus the onion
+ * skin and identity digest for the next hop. If we're already connected,
+ * pass the onion skin to the next hop using a create cell; otherwise
+ * launch a new OR connection, and <b>circ</b> will notice when the
+ * connection succeeds or fails.
+ *
+ * Return -1 if we want to warn and tear down the circuit, else return 0.
+ */
+int
+circuit_extend(cell_t *cell, circuit_t *circ)
+{
+ channel_t *n_chan;
+ relay_header_t rh;
+ extend_cell_t ec;
+ const char *msg = NULL;
+ int should_launch = 0;
+
+ if (circ->n_chan) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "n_chan already set. Bug/attack. Closing.");
+ return -1;
+ }
+ if (circ->n_hop) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "conn to next hop already launched. Bug/attack. Closing.");
+ return -1;
+ }
+
+ if (!server_mode(get_options())) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Got an extend cell, but running as a client. Closing.");
+ return -1;
+ }
+
+ relay_header_unpack(&rh, cell->payload);
+
+ if (extend_cell_parse(&ec, rh.command,
+ cell->payload+RELAY_HEADER_SIZE,
+ rh.length) < 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Can't parse extend cell. Closing circuit.");
+ return -1;
+ }
+
+ if (!ec.orport_ipv4.port || tor_addr_is_null(&ec.orport_ipv4.addr)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Client asked me to extend to zero destination port or addr.");
+ return -1;
+ }
+
+ if (tor_addr_is_internal(&ec.orport_ipv4.addr, 0) &&
+ !get_options()->ExtendAllowPrivateAddresses) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Client asked me to extend to a private address");
+ return -1;
+ }
+
+ /* Check if they asked us for 0000..0000. We support using
+ * an empty fingerprint for the first hop (e.g. for a bridge relay),
+ * but we don't want to let clients send us extend cells for empty
+ * fingerprints -- a) because it opens the user up to a mitm attack,
+ * and b) because it lets an attacker force the relay to hold open a
+ * new TLS connection for each extend request. */
+ if (tor_digest_is_zero((const char*)ec.node_id)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Client asked me to extend without specifying an id_digest.");
+ return -1;
+ }
+
+ /* Fill in ed_pubkey if it was not provided and we can infer it from
+ * our networkstatus */
+ if (ed25519_public_key_is_zero(&ec.ed_pubkey)) {
+ const node_t *node = node_get_by_id((const char*)ec.node_id);
+ const ed25519_public_key_t *node_ed_id = NULL;
+ if (node &&
+ node_supports_ed25519_link_authentication(node, 1) &&
+ (node_ed_id = node_get_ed25519_id(node))) {
+ ed25519_pubkey_copy(&ec.ed_pubkey, node_ed_id);
+ }
+ }
+
+ /* Next, check if we're being asked to connect to the hop that the
+ * extend cell came from. There isn't any reason for that, and it can
+ * assist circular-path attacks. */
+ if (tor_memeq(ec.node_id,
+ TO_OR_CIRCUIT(circ)->p_chan->identity_digest,
+ DIGEST_LEN)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Client asked me to extend back to the previous hop.");
+ return -1;
+ }
+
+ /* Check the previous hop Ed25519 ID too */
+ if (! ed25519_public_key_is_zero(&ec.ed_pubkey) &&
+ ed25519_pubkey_eq(&ec.ed_pubkey,
+ &TO_OR_CIRCUIT(circ)->p_chan->ed25519_identity)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Client asked me to extend back to the previous hop "
+ "(by Ed25519 ID).");
+ return -1;
+ }
+
+ n_chan = channel_get_for_extend((const char*)ec.node_id,
+ &ec.ed_pubkey,
+ &ec.orport_ipv4.addr,
+ &msg,
+ &should_launch);
+
+ if (!n_chan) {
+ log_debug(LD_CIRC|LD_OR,"Next router (%s): %s",
+ fmt_addrport(&ec.orport_ipv4.addr,ec.orport_ipv4.port),
+ msg?msg:"????");
+
+ circ->n_hop = extend_info_new(NULL /*nickname*/,
+ (const char*)ec.node_id,
+ &ec.ed_pubkey,
+ NULL, /*onion_key*/
+ NULL, /*curve25519_key*/
+ &ec.orport_ipv4.addr,
+ ec.orport_ipv4.port);
+
+ circ->n_chan_create_cell = tor_memdup(&ec.create_cell,
+ sizeof(ec.create_cell));
+
+ circuit_set_state(circ, CIRCUIT_STATE_CHAN_WAIT);
+
+ if (should_launch) {
+ /* we should try to open a connection */
+ n_chan = channel_connect_for_circuit(&ec.orport_ipv4.addr,
+ ec.orport_ipv4.port,
+ (const char*)ec.node_id,
+ &ec.ed_pubkey);
+ if (!n_chan) {
+ log_info(LD_CIRC,"Launching n_chan failed. Closing circuit.");
+ circuit_mark_for_close(circ, END_CIRC_REASON_CONNECTFAILED);
+ return 0;
+ }
+ log_debug(LD_CIRC,"connecting in progress (or finished). Good.");
+ }
+ /* return success. The onion/circuit/etc will be taken care of
+ * automatically (may already have been) whenever n_chan reaches
+ * OR_CONN_STATE_OPEN.
+ */
+ return 0;
+ }
+
+ tor_assert(!circ->n_hop); /* Connection is already established. */
+ circ->n_chan = n_chan;
+ log_debug(LD_CIRC,
+ "n_chan is %s",
+ channel_get_canonical_remote_descr(n_chan));
+
+ if (circuit_deliver_create_cell(circ, &ec.create_cell, 1) < 0)
+ return -1;
+
+ return 0;
+}
+
+/** Initialize cpath-\>{f|b}_{crypto|digest} from the key material in key_data.
+ *
+ * If <b>is_hs_v3</b> is set, this cpath will be used for next gen hidden
+ * service circuits and <b>key_data</b> must be at least
+ * HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN bytes in length.
+ *
+ * If <b>is_hs_v3</b> is not set, key_data must contain CPATH_KEY_MATERIAL_LEN
+ * bytes, which are used as follows:
+ * - 20 to initialize f_digest
+ * - 20 to initialize b_digest
+ * - 16 to key f_crypto
+ * - 16 to key b_crypto
+ *
+ * (If 'reverse' is true, then f_XX and b_XX are swapped.)
+ *
+ * Return 0 if init was successful, else -1 if it failed.
+ */
+int
+circuit_init_cpath_crypto(crypt_path_t *cpath,
+ const char *key_data, size_t key_data_len,
+ int reverse, int is_hs_v3)
+{
+
+ tor_assert(cpath);
+ return relay_crypto_init(&cpath->crypto, key_data, key_data_len, reverse,
+ is_hs_v3);
+}
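+
+/* Sketch of the non-hs_v3 <b>key_data</b> layout documented above; the
+ * actual slicing happens inside relay_crypto_init(), so the offsets
+ * here are illustrative only: */
+#if 0
+  /* key_data_len == CPATH_KEY_MATERIAL_LEN == 20+20+16+16 == 72 */
+  const char *df = key_data;      /* bytes  0..19 seed f_digest */
+  const char *db = key_data + 20; /* bytes 20..39 seed b_digest */
+  const char *kf = key_data + 40; /* bytes 40..55 key f_crypto  */
+  const char *kb = key_data + 56; /* bytes 56..71 key b_crypto  */
+#endif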
+
+/** A "created" cell <b>reply</b> came back to us on circuit <b>circ</b>.
+ * (The body of <b>reply</b> varies depending on what sort of handshake
+ * this is.)
+ *
+ * Calculate the appropriate keys and digests, make sure KH is
+ * correct, and initialize this hop of the cpath.
+ *
+ * Return - reason if we want to mark circ for close, else return 0.
+ */
+int
+circuit_finish_handshake(origin_circuit_t *circ,
+ const created_cell_t *reply)
+{
+ char keys[CPATH_KEY_MATERIAL_LEN];
+ crypt_path_t *hop;
+ int rv;
+
+ if ((rv = pathbias_count_build_attempt(circ)) < 0) {
+ log_warn(LD_CIRC, "pathbias_count_build_attempt failed: %d", rv);
+ return rv;
+ }
+
+ if (circ->cpath->state == CPATH_STATE_AWAITING_KEYS) {
+ hop = circ->cpath;
+ } else {
+ hop = onion_next_hop_in_cpath(circ->cpath);
+ if (!hop) { /* got an extended when we're all done? */
+ log_warn(LD_PROTOCOL,"got extended when circ already built? Closing.");
+ return - END_CIRC_REASON_TORPROTOCOL;
+ }
+ }
+ tor_assert(hop->state == CPATH_STATE_AWAITING_KEYS);
+
+ {
+ const char *msg = NULL;
+ if (onion_skin_client_handshake(hop->handshake_state.tag,
+ &hop->handshake_state,
+ reply->reply, reply->handshake_len,
+ (uint8_t*)keys, sizeof(keys),
+ (uint8_t*)hop->rend_circ_nonce,
+ &msg) < 0) {
+ if (msg)
+ log_warn(LD_CIRC,"onion_skin_client_handshake failed: %s", msg);
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+ }
+
+ onion_handshake_state_release(&hop->handshake_state);
+
+ if (circuit_init_cpath_crypto(hop, keys, sizeof(keys), 0, 0)<0) {
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+
+ hop->state = CPATH_STATE_OPEN;
+ log_info(LD_CIRC,"Finished building circuit hop:");
+ circuit_log_path(LOG_INFO,LD_CIRC,circ);
+ control_event_circuit_status(circ, CIRC_EVENT_EXTENDED, 0);
+
+ return 0;
+}
+
+/** We received a relay truncated cell on circ.
+ *
+ * Since we don't send truncates currently, getting a truncated
+ * cell means that a connection broke or an extend failed. For now,
+ * just give up: force circ to close, and return 0.
+ */
+int
+circuit_truncated(origin_circuit_t *circ, crypt_path_t *layer, int reason)
+{
+// crypt_path_t *victim;
+// connection_t *stream;
+
+ tor_assert(circ);
+ tor_assert(layer);
+
+ /* XXX Since we don't send truncates currently, getting a truncated
+   * cell means that a connection broke or an extend failed. For now,
+ * just give up.
+ */
+ circuit_mark_for_close(TO_CIRCUIT(circ),
+ END_CIRC_REASON_FLAG_REMOTE|reason);
+ return 0;
+
+#if 0
+ while (layer->next != circ->cpath) {
+ /* we need to clear out layer->next */
+ victim = layer->next;
+ log_debug(LD_CIRC, "Killing a layer of the cpath.");
+
+ for (stream = circ->p_streams; stream; stream=stream->next_stream) {
+ if (stream->cpath_layer == victim) {
+ log_info(LD_APP, "Marking stream %d for close because of truncate.",
+ stream->stream_id);
+ /* no need to send 'end' relay cells,
+ * because the other side's already dead
+ */
+ connection_mark_unattached_ap(stream, END_STREAM_REASON_DESTROY);
+ }
+ }
+
+ layer->next = victim->next;
+ circuit_free_cpath_node(victim);
+ }
+
+ log_info(LD_CIRC, "finished");
+ return 0;
+#endif /* 0 */
+}
+
+/** Given a response payload and keys, initialize this circuit's relay
+ * crypto, then send a created cell back.
+ */
+int
+onionskin_answer(or_circuit_t *circ,
+ const created_cell_t *created_cell,
+ const char *keys, size_t keys_len,
+ const uint8_t *rend_circ_nonce)
+{
+ cell_t cell;
+
+ tor_assert(keys_len == CPATH_KEY_MATERIAL_LEN);
+
+ if (created_cell_format(&cell, created_cell) < 0) {
+ log_warn(LD_BUG,"couldn't format created cell (type=%d, len=%d)",
+ (int)created_cell->cell_type, (int)created_cell->handshake_len);
+ return -1;
+ }
+ cell.circ_id = circ->p_circ_id;
+
+ circuit_set_state(TO_CIRCUIT(circ), CIRCUIT_STATE_OPEN);
+
+ log_debug(LD_CIRC,"init digest forward 0x%.8x, backward 0x%.8x.",
+ (unsigned int)get_uint32(keys),
+ (unsigned int)get_uint32(keys+20));
+ if (relay_crypto_init(&circ->crypto, keys, keys_len, 0, 0)<0) {
+ log_warn(LD_BUG,"Circuit initialization failed");
+ return -1;
+ }
+
+ memcpy(circ->rend_circ_nonce, rend_circ_nonce, DIGEST_LEN);
+
+ int used_create_fast = (created_cell->cell_type == CELL_CREATED_FAST);
+
+ append_cell_to_circuit_queue(TO_CIRCUIT(circ),
+ circ->p_chan, &cell, CELL_DIRECTION_IN, 0);
+ log_debug(LD_CIRC,"Finished sending '%s' cell.",
+ used_create_fast ? "created_fast" : "created");
+
+ /* Ignore the local bit when ExtendAllowPrivateAddresses is set:
+ * it violates the assumption that private addresses are local.
+ * Also, many test networks run on local addresses, and
+ * TestingTorNetwork sets ExtendAllowPrivateAddresses. */
+ if ((!channel_is_local(circ->p_chan)
+ || get_options()->ExtendAllowPrivateAddresses)
+ && !channel_is_outgoing(circ->p_chan)) {
+ /* record that we could process create cells from a non-local conn
+ * that we didn't initiate; presumably this means that create cells
+ * can reach us too. */
+ router_orport_found_reachable();
+ }
+
+ return 0;
+}
+
+/** Helper for new_route_len(). Choose a circuit length for purpose
+ * <b>purpose</b>: DEFAULT_ROUTE_LEN (+ 1 if someone else chose the
+ * exit). If someone else chose the exit, they could be colluding
+ * with the exit, so add a randomly selected node to preserve
+ * anonymity.
+ *
+ * Here, "exit node" sometimes means an OR acting as an internal
+ * endpoint, rather than as a relay to an external endpoint. This
+ * means there need to be at least DEFAULT_ROUTE_LEN routers between
+ * us and the internal endpoint to preserve the same anonymity
+ * properties that we would get when connecting to an external
+ * endpoint. These internal endpoints can include:
+ *
+ * - Connections to a directory of hidden services
+ * (CIRCUIT_PURPOSE_C_GENERAL)
+ *
+ * - A client connecting to an introduction point, which the hidden
+ * service picked (CIRCUIT_PURPOSE_C_INTRODUCING, via
+ * circuit_get_open_circ_or_launch() which rewrites it from
+ * CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT)
+ *
+ * - A hidden service connecting to a rendezvous point, which the
+ * client picked (CIRCUIT_PURPOSE_S_CONNECT_REND, via
+ * rend_service_receive_introduction() and
+ * rend_service_relaunch_rendezvous)
+ *
+ * There are currently two situations where we picked the exit node
+ * ourselves, making DEFAULT_ROUTE_LEN a safe circuit length:
+ *
+ * - We are a hidden service connecting to an introduction point
+ * (CIRCUIT_PURPOSE_S_ESTABLISH_INTRO, via
+ * rend_service_launch_establish_intro())
+ *
+ * - We are a router testing its own reachability
+ * (CIRCUIT_PURPOSE_TESTING, via router_do_reachability_checks())
+ *
+ * onion_pick_cpath_exit() bypasses us (by not calling
+ * new_route_len()) in the one-hop tunnel case, so we don't need to
+ * handle that.
+ */
+int
+route_len_for_purpose(uint8_t purpose, extend_info_t *exit_ei)
+{
+ int routelen = DEFAULT_ROUTE_LEN;
+ int known_purpose = 0;
+
+ if (circuit_should_use_vanguards(purpose)) {
+ /* Clients want an extra hop for rends to avoid linkability.
+ * Services want it for intro points to avoid publishing their
+ * layer3 guards. They want it for hsdir posts to use
+ * their full layer3 guard set for those connections.
+ * Ex: C - G - L2 - L3 - R
+ * S - G - L2 - L3 - HSDIR
+ * S - G - L2 - L3 - I
+ */
+ if (purpose == CIRCUIT_PURPOSE_C_ESTABLISH_REND ||
+ purpose == CIRCUIT_PURPOSE_S_HSDIR_POST ||
+ purpose == CIRCUIT_PURPOSE_HS_VANGUARDS ||
+ purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO)
+ return routelen+1;
+
+ /* If we only have Layer2 vanguards, then we do not need
+ * the extra hop for linkability reasons (see below).
+ * This means all hops can be of the form:
+ * S/C - G - L2 - M - R/HSDir/I
+ */
+ if (get_options()->HSLayer2Nodes && !get_options()->HSLayer3Nodes)
+ return routelen+1;
+
+ /* For connections to hsdirs, clients want two extra hops
+ * when using layer3 guards, to avoid linkability.
+ * Same goes for intro points. Note that the route len
+ * includes the intro point or hsdir, hence the +2.
+ * Ex: C - G - L2 - L3 - M - I
+ * C - G - L2 - L3 - M - HSDIR
+ * S - G - L2 - L3 - M - R
+ */
+ if (purpose == CIRCUIT_PURPOSE_S_CONNECT_REND ||
+ purpose == CIRCUIT_PURPOSE_C_HSDIR_GET ||
+ purpose == CIRCUIT_PURPOSE_C_INTRODUCING)
+ return routelen+2;
+ }
+
+ if (!exit_ei)
+ return routelen;
+
+ switch (purpose) {
+ /* These two purposes connect to a router that we chose, so
+ * DEFAULT_ROUTE_LEN is safe. */
+ case CIRCUIT_PURPOSE_S_ESTABLISH_INTRO:
+ /* hidden service connecting to introduction point */
+ case CIRCUIT_PURPOSE_TESTING:
+ /* router reachability testing */
+ known_purpose = 1;
+ break;
+
+    /* These five purposes connect to a router that someone else
+ * might have chosen, so add an extra hop to protect anonymity. */
+ case CIRCUIT_PURPOSE_C_GENERAL:
+ case CIRCUIT_PURPOSE_C_HSDIR_GET:
+ case CIRCUIT_PURPOSE_S_HSDIR_POST:
+ /* connecting to hidden service directory */
+ case CIRCUIT_PURPOSE_C_INTRODUCING:
+ /* client connecting to introduction point */
+ case CIRCUIT_PURPOSE_S_CONNECT_REND:
+ /* hidden service connecting to rendezvous point */
+ known_purpose = 1;
+ routelen++;
+ break;
+
+ default:
+ /* Got a purpose not listed above along with a chosen exit.
+ * Increase the circuit length by one anyway for safety. */
+ routelen++;
+ break;
+ }
+
+ if (BUG(exit_ei && !known_purpose)) {
+ log_warn(LD_BUG, "Unhandled purpose %d with a chosen exit; "
+ "assuming routelen %d.", purpose, routelen);
+ }
+ return routelen;
+}
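+
+/* Worked examples of the rules above (illustrative; <b>dummy_ei</b> is
+ * a hypothetical non-NULL chosen exit): */
+#if 0
+  extend_info_t dummy_ei;
+  /* We chose this exit ourselves, so DEFAULT_ROUTE_LEN (3) is safe: */
+  tor_assert(route_len_for_purpose(CIRCUIT_PURPOSE_TESTING, &dummy_ei)
+             == DEFAULT_ROUTE_LEN);
+  /* Someone else may have chosen this exit, so add a hop: */
+  tor_assert(route_len_for_purpose(CIRCUIT_PURPOSE_C_GENERAL, &dummy_ei)
+             == DEFAULT_ROUTE_LEN + 1);
+#endif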
+
+/** Choose a length for a circuit of purpose <b>purpose</b> and check
+ * if enough routers are available.
+ *
+ * If the routerlist <b>nodes</b> doesn't have enough routers
+ * to handle the desired path length, return -1.
+ */
+STATIC int
+new_route_len(uint8_t purpose, extend_info_t *exit_ei, smartlist_t *nodes)
+{
+ int num_acceptable_routers;
+ int routelen;
+
+ tor_assert(nodes);
+
+ routelen = route_len_for_purpose(purpose, exit_ei);
+
+ num_acceptable_routers = count_acceptable_nodes(nodes);
+
+ log_debug(LD_CIRC,"Chosen route length %d (%d/%d routers suitable).",
+ routelen, num_acceptable_routers, smartlist_len(nodes));
+
+ if (num_acceptable_routers < routelen) {
+ log_info(LD_CIRC,
+ "Not enough acceptable routers (%d/%d). Discarding this circuit.",
+ num_acceptable_routers, routelen);
+ return -1;
+ }
+
+ return routelen;
+}
+
+/** Return a newly allocated list of uint16_t * for each predicted port not
+ * handled by a current circuit. */
+static smartlist_t *
+circuit_get_unhandled_ports(time_t now)
+{
+ smartlist_t *dest = rep_hist_get_predicted_ports(now);
+ circuit_remove_handled_ports(dest);
+ return dest;
+}
+
+/** Return 1 if we already have circuits present or on the way for
+ * all anticipated ports. Return 0 if we should make more.
+ *
+ * If we're returning 0, set need_uptime and need_capacity to
+ * indicate any requirements that the unhandled ports have.
+ */
+MOCK_IMPL(int,
+circuit_all_predicted_ports_handled, (time_t now, int *need_uptime,
+ int *need_capacity))
+{
+ int i, enough;
+ uint16_t *port;
+ smartlist_t *sl = circuit_get_unhandled_ports(now);
+ smartlist_t *LongLivedServices = get_options()->LongLivedPorts;
+ tor_assert(need_uptime);
+ tor_assert(need_capacity);
+ // Always predict need_capacity
+ *need_capacity = 1;
+ enough = (smartlist_len(sl) == 0);
+ for (i = 0; i < smartlist_len(sl); ++i) {
+ port = smartlist_get(sl, i);
+ if (smartlist_contains_int_as_string(LongLivedServices, *port))
+ *need_uptime = 1;
+ tor_free(port);
+ }
+ smartlist_free(sl);
+ return enough;
+}
+
+/** Return 1 if <b>node</b> can handle one or more of the ports in
+ * <b>needed_ports</b>, else return 0.
+ */
+static int
+node_handles_some_port(const node_t *node, smartlist_t *needed_ports)
+{ /* XXXX MOVE */
+ int i;
+ uint16_t port;
+
+ for (i = 0; i < smartlist_len(needed_ports); ++i) {
+ addr_policy_result_t r;
+ /* alignment issues aren't a worry for this dereference, since
+ needed_ports is explicitly a smartlist of uint16_t's */
+ port = *(uint16_t *)smartlist_get(needed_ports, i);
+ tor_assert(port);
+ if (node)
+ r = compare_tor_addr_to_node_policy(NULL, port, node);
+ else
+ continue;
+ if (r != ADDR_POLICY_REJECTED && r != ADDR_POLICY_PROBABLY_REJECTED)
+ return 1;
+ }
+ return 0;
+}
+
+/** Return true iff <b>conn</b> needs another general circuit to be
+ * built. */
+static int
+ap_stream_wants_exit_attention(connection_t *conn)
+{
+ entry_connection_t *entry;
+ if (conn->type != CONN_TYPE_AP)
+ return 0;
+ entry = TO_ENTRY_CONN(conn);
+
+ if (conn->state == AP_CONN_STATE_CIRCUIT_WAIT &&
+ !conn->marked_for_close &&
+ !(entry->want_onehop) && /* ignore one-hop streams */
+ !(entry->use_begindir) && /* ignore targeted dir fetches */
+ !(entry->chosen_exit_name) && /* ignore defined streams */
+ !connection_edge_is_rendezvous_stream(TO_EDGE_CONN(conn)) &&
+ !circuit_stream_is_being_handled(TO_ENTRY_CONN(conn), 0,
+ MIN_CIRCUITS_HANDLING_STREAM))
+ return 1;
+ return 0;
+}
+
+/** Return a pointer to a suitable router to be the exit node for the
+ * general-purpose circuit we're about to build.
+ *
+ * Look through the connection array, and choose a router that maximizes
+ * the number of pending streams that can exit from this router.
+ *
+ * Return NULL if we can't find any suitable routers.
+ */
+static const node_t *
+choose_good_exit_server_general(router_crn_flags_t flags)
+{
+ int *n_supported;
+ int n_pending_connections = 0;
+ smartlist_t *connections;
+ int best_support = -1;
+ int n_best_support=0;
+ const or_options_t *options = get_options();
+ const smartlist_t *the_nodes;
+ const node_t *selected_node=NULL;
+ const int need_uptime = (flags & CRN_NEED_UPTIME) != 0;
+ const int need_capacity = (flags & CRN_NEED_CAPACITY) != 0;
+ const int direct_conn = (flags & CRN_DIRECT_CONN) != 0;
+
+ connections = get_connection_array();
+
+ /* Count how many connections are waiting for a circuit to be built.
+ * We use this for log messages now, but in the future we may depend on it.
+ */
+ SMARTLIST_FOREACH(connections, connection_t *, conn,
+ {
+ if (ap_stream_wants_exit_attention(conn))
+ ++n_pending_connections;
+ });
+// log_fn(LOG_DEBUG, "Choosing exit node; %d connections are pending",
+// n_pending_connections);
+ /* Now we count, for each of the routers in the directory, how many
+ * of the pending connections could possibly exit from that
+ * router (n_supported[i]). (We can't be sure about cases where we
+ * don't know the IP address of the pending connection.)
+ *
+ * -1 means "Don't use this router at all."
+ */
+ the_nodes = nodelist_get_list();
+ n_supported = tor_calloc(smartlist_len(the_nodes), sizeof(int));
+ SMARTLIST_FOREACH_BEGIN(the_nodes, const node_t *, node) {
+ const int i = node_sl_idx;
+ if (router_digest_is_me(node->identity)) {
+ n_supported[i] = -1;
+// log_fn(LOG_DEBUG,"Skipping node %s -- it's me.", router->nickname);
+ /* XXX there's probably a reverse predecessor attack here, but
+ * it's slow. should we take this out? -RD
+ */
+ continue;
+ }
+ if (!node_has_preferred_descriptor(node, direct_conn)) {
+ n_supported[i] = -1;
+ continue;
+ }
+ if (!node->is_running || node->is_bad_exit) {
+ n_supported[i] = -1;
+ continue; /* skip routers that are known to be down or bad exits */
+ }
+ if (node_get_purpose(node) != ROUTER_PURPOSE_GENERAL) {
+ /* never pick a non-general node as a random exit. */
+ n_supported[i] = -1;
+ continue;
+ }
+ if (routerset_contains_node(options->ExcludeExitNodesUnion_, node)) {
+ n_supported[i] = -1;
+ continue; /* user asked us not to use it, no matter what */
+ }
+ if (options->ExitNodes &&
+ !routerset_contains_node(options->ExitNodes, node)) {
+ n_supported[i] = -1;
+ continue; /* not one of our chosen exit nodes */
+ }
+
+ if (node_is_unreliable(node, need_uptime, need_capacity, 0)) {
+ n_supported[i] = -1;
+ continue; /* skip routers that are not suitable. Don't worry if
+ * this makes us reject all the possible routers: if so,
+                 * we'll retry later in this function with need_uptime and
+ * need_capacity set to 0. */
+ }
+ if (!(node->is_valid)) {
+      /* it's invalid; we don't want it */
+ n_supported[i] = -1;
+// log_fn(LOG_DEBUG,"Skipping node %s (index %d) -- invalid router.",
+// router->nickname, i);
+ continue; /* skip invalid routers */
+ }
+ /* We do not allow relays that allow single hop exits by default. Option
+ * was deprecated in 0.2.9.2-alpha and removed in 0.3.1.0-alpha. */
+ if (node_allows_single_hop_exits(node)) {
+ n_supported[i] = -1;
+ continue;
+ }
+ if (node_exit_policy_rejects_all(node)) {
+ n_supported[i] = -1;
+// log_fn(LOG_DEBUG,"Skipping node %s (index %d) -- it rejects all.",
+// router->nickname, i);
+ continue; /* skip routers that reject all */
+ }
+ n_supported[i] = 0;
+ /* iterate over connections */
+ SMARTLIST_FOREACH_BEGIN(connections, connection_t *, conn) {
+ if (!ap_stream_wants_exit_attention(conn))
+ continue; /* Skip everything but APs in CIRCUIT_WAIT */
+ if (connection_ap_can_use_exit(TO_ENTRY_CONN(conn), node)) {
+ ++n_supported[i];
+// log_fn(LOG_DEBUG,"%s is supported. n_supported[%d] now %d.",
+// router->nickname, i, n_supported[i]);
+ } else {
+// log_fn(LOG_DEBUG,"%s (index %d) would reject this stream.",
+// router->nickname, i);
+ }
+ } SMARTLIST_FOREACH_END(conn);
+ if (n_pending_connections > 0 && n_supported[i] == 0) {
+ /* Leave best_support at -1 if that's where it is, so we can
+ * distinguish it later. */
+ continue;
+ }
+ if (n_supported[i] > best_support) {
+ /* If this router is better than previous ones, remember its index
+ * and goodness, and start counting how many routers are this good. */
+ best_support = n_supported[i]; n_best_support=1;
+// log_fn(LOG_DEBUG,"%s is new best supported option so far.",
+// router->nickname);
+ } else if (n_supported[i] == best_support) {
+ /* If this router is _as good_ as the best one, just increment the
+ * count of equally good routers.*/
+ ++n_best_support;
+ }
+ } SMARTLIST_FOREACH_END(node);
+ log_info(LD_CIRC,
+ "Found %d servers that might support %d/%d pending connections.",
+ n_best_support, best_support >= 0 ? best_support : 0,
+ n_pending_connections);
+
+ /* If any routers definitely support any pending connections, choose one
+ * at random. */
+ if (best_support > 0) {
+ smartlist_t *supporting = smartlist_new();
+
+ SMARTLIST_FOREACH(the_nodes, const node_t *, node, {
+ if (n_supported[node_sl_idx] == best_support)
+ smartlist_add(supporting, (void*)node);
+ });
+
+ selected_node = node_sl_choose_by_bandwidth(supporting, WEIGHT_FOR_EXIT);
+ smartlist_free(supporting);
+ } else {
+ /* Either there are no pending connections, or no routers even seem to
+ * possibly support any of them. Choose a router at random that satisfies
+ * at least one predicted exit port. */
+
+ int attempt;
+ smartlist_t *needed_ports, *supporting;
+
+ if (best_support == -1) {
+ if (need_uptime || need_capacity) {
+ log_info(LD_CIRC,
+ "We couldn't find any live%s%s routers; falling back "
+ "to list of all routers.",
+ need_capacity?", fast":"",
+ need_uptime?", stable":"");
+ tor_free(n_supported);
+ flags &= ~(CRN_NEED_UPTIME|CRN_NEED_CAPACITY);
+ return choose_good_exit_server_general(flags);
+ }
+ log_notice(LD_CIRC, "All routers are down or won't exit%s -- "
+ "choosing a doomed exit at random.",
+ options->ExcludeExitNodesUnion_ ? " or are Excluded" : "");
+ }
+ supporting = smartlist_new();
+ needed_ports = circuit_get_unhandled_ports(time(NULL));
+ for (attempt = 0; attempt < 2; attempt++) {
+ /* try once to pick only from routers that satisfy a needed port,
+ * then if there are none, pick from any that support exiting. */
+ SMARTLIST_FOREACH_BEGIN(the_nodes, const node_t *, node) {
+ if (n_supported[node_sl_idx] != -1 &&
+ (attempt || node_handles_some_port(node, needed_ports))) {
+// log_fn(LOG_DEBUG,"Try %d: '%s' is a possibility.",
+// try, router->nickname);
+ smartlist_add(supporting, (void*)node);
+ }
+ } SMARTLIST_FOREACH_END(node);
+
+ selected_node = node_sl_choose_by_bandwidth(supporting, WEIGHT_FOR_EXIT);
+ if (selected_node)
+ break;
+ smartlist_clear(supporting);
+ /* If we reach this point, we can't actually support any unhandled
+ * predicted ports, so clear all the remaining ones. */
+ if (smartlist_len(needed_ports))
+ rep_hist_remove_predicted_ports(needed_ports);
+ }
+ SMARTLIST_FOREACH(needed_ports, uint16_t *, cp, tor_free(cp));
+ smartlist_free(needed_ports);
+ smartlist_free(supporting);
+ }
+
+ tor_free(n_supported);
+ if (selected_node) {
+ log_info(LD_CIRC, "Chose exit server '%s'", node_describe(selected_node));
+ return selected_node;
+ }
+ if (options->ExitNodes) {
+ log_warn(LD_CIRC,
+ "No exits in ExitNodes%s seem to be running: "
+ "can't choose an exit.",
+ options->ExcludeExitNodesUnion_ ?
+ ", except possibly those excluded by your configuration, " : "");
+ }
+ return NULL;
+}
+
+#if defined(ENABLE_TOR2WEB_MODE) || defined(TOR_UNIT_TESTS)
+/* The config option Tor2webRendezvousPoints has been set and we need
+ * to pick an RP out of that set. Make sure that the RP we choose is
+ * alive, and return it. Return NULL if no usable RP could be found in
+ * Tor2webRendezvousPoints. */
+STATIC const node_t *
+pick_tor2web_rendezvous_node(router_crn_flags_t flags,
+ const or_options_t *options)
+{
+ const node_t *rp_node = NULL;
+ const int need_desc = (flags & CRN_NEED_DESC) != 0;
+ const int pref_addr = (flags & CRN_PREF_ADDR) != 0;
+ const int direct_conn = (flags & CRN_DIRECT_CONN) != 0;
+
+ smartlist_t *whitelisted_live_rps = smartlist_new();
+ smartlist_t *all_live_nodes = smartlist_new();
+
+ tor_assert(options->Tor2webRendezvousPoints);
+
+ /* Add all running nodes to all_live_nodes */
+ router_add_running_nodes_to_smartlist(all_live_nodes,
+ 0, 0, 0,
+ need_desc,
+ pref_addr,
+ direct_conn);
+
+ /* Filter all_live_nodes to only add live *and* whitelisted RPs to
+ * the list whitelisted_live_rps. */
+ SMARTLIST_FOREACH_BEGIN(all_live_nodes, node_t *, live_node) {
+ if (routerset_contains_node(options->Tor2webRendezvousPoints, live_node)) {
+ smartlist_add(whitelisted_live_rps, live_node);
+ }
+ } SMARTLIST_FOREACH_END(live_node);
+
+ /* Honor ExcludeNodes */
+ if (options->ExcludeNodes) {
+ routerset_subtract_nodes(whitelisted_live_rps, options->ExcludeNodes);
+ }
+
+  /* Now pick randomly amongst the whitelisted RPs. No need to waste time
+     doing bandwidth load balancing: for most use cases,
+     'whitelisted_live_rps' contains a single OR anyway. */
+ rp_node = smartlist_choose(whitelisted_live_rps);
+
+ if (!rp_node) {
+    log_warn(LD_REND, "Could not find a Rendezvous Point that suits "
+             "the purposes of Tor2webRendezvousPoints. Choosing a random "
+             "one.");
+ }
+
+ smartlist_free(whitelisted_live_rps);
+ smartlist_free(all_live_nodes);
+
+ return rp_node;
+}
+#endif /* defined(ENABLE_TOR2WEB_MODE) || defined(TOR_UNIT_TESTS) */
+
+/* Pick a Rendezvous Point for our HS circuits according to <b>flags</b>. */
+static const node_t *
+pick_rendezvous_node(router_crn_flags_t flags)
+{
+ const or_options_t *options = get_options();
+
+#ifdef ENABLE_TOR2WEB_MODE
+ /* We want to connect directly to the node if we can */
+ router_crn_flags_t direct_flags = flags;
+ direct_flags |= CRN_PREF_ADDR;
+ direct_flags |= CRN_DIRECT_CONN;
+
+ /* The user wants us to pick specific RPs. */
+ if (options->Tor2webRendezvousPoints) {
+ const node_t *tor2web_rp = pick_tor2web_rendezvous_node(direct_flags,
+ options);
+ if (tor2web_rp) {
+ return tor2web_rp;
+ }
+ }
+
+ /* Else, if no direct, preferred tor2web RP was found, fall back to choosing
+ * a random direct node */
+ const node_t *node = router_choose_random_node(NULL, options->ExcludeNodes,
+ direct_flags);
+ /* Return the direct node (if found), or log a message and fall back to an
+ * indirect connection. */
+ if (node) {
+ return node;
+ } else {
+ log_info(LD_REND,
+ "Unable to find a random rendezvous point that is reachable via "
+ "a direct connection, falling back to a 3-hop path.");
+ }
+#endif /* defined(ENABLE_TOR2WEB_MODE) */
+
+ return router_choose_random_node(NULL, options->ExcludeNodes, flags);
+}
+
+/*
+ * Helper function to pick a configured restricted middle node
+ * (either HSLayer2Nodes or HSLayer3Nodes).
+ *
+ * Make sure that the node we chose is alive, and not excluded,
+ * and return it.
+ *
+ * The exclude_set is a routerset of nodes that the selected node
+ * must not match, and the exclude_list is a simple list of nodes
+ * that the selected node must not be in. Either or both may be
+ * NULL.
+ *
+ * Return NULL if no usable nodes could be found. */
+static const node_t *
+pick_restricted_middle_node(router_crn_flags_t flags,
+ const routerset_t *pick_from,
+ const routerset_t *exclude_set,
+ const smartlist_t *exclude_list,
+ int position_hint)
+{
+ const node_t *middle_node = NULL;
+
+ smartlist_t *whitelisted_live_middles = smartlist_new();
+ smartlist_t *all_live_nodes = smartlist_new();
+
+ tor_assert(pick_from);
+
+ /* Add all running nodes to all_live_nodes */
+ router_add_running_nodes_to_smartlist(all_live_nodes,
+ (flags & CRN_NEED_UPTIME) != 0,
+ (flags & CRN_NEED_CAPACITY) != 0,
+ (flags & CRN_NEED_GUARD) != 0,
+ (flags & CRN_NEED_DESC) != 0,
+ (flags & CRN_PREF_ADDR) != 0,
+ (flags & CRN_DIRECT_CONN) != 0);
+
+ /* Filter all_live_nodes to only add live *and* whitelisted middles
+ * to the list whitelisted_live_middles. */
+ SMARTLIST_FOREACH_BEGIN(all_live_nodes, node_t *, live_node) {
+ if (routerset_contains_node(pick_from, live_node)) {
+ smartlist_add(whitelisted_live_middles, live_node);
+ }
+ } SMARTLIST_FOREACH_END(live_node);
+
+ /* Honor ExcludeNodes */
+ if (exclude_set) {
+ routerset_subtract_nodes(whitelisted_live_middles, exclude_set);
+ }
+
+ if (exclude_list) {
+ smartlist_subtract(whitelisted_live_middles, exclude_list);
+ }
+
+ /**
+ * Max number of restricted nodes before we alert the user and try
+ * to load balance for them.
+ *
+ * The most aggressive vanguard design had 16 nodes at layer3.
+ * Let's give a small ceiling above that. */
+#define MAX_SANE_RESTRICTED_NODES 20
+ /* If the user (or associated tor controller) selected only a few nodes,
+ * assume they took load balancing into account and don't do it for them.
+ *
+ * If there are a lot of nodes in here, assume they did not load balance
+ * and do it for them, but also warn them that they may be Doing It Wrong.
+ */
+ if (smartlist_len(whitelisted_live_middles) <=
+ MAX_SANE_RESTRICTED_NODES) {
+ middle_node = smartlist_choose(whitelisted_live_middles);
+ } else {
+ static ratelim_t pinned_notice_limit = RATELIM_INIT(24*3600);
+ log_fn_ratelim(&pinned_notice_limit, LOG_NOTICE, LD_CIRC,
+ "Your _HSLayer%dNodes setting has resulted "
+ "in %d total nodes. This is a lot of nodes. "
+ "You may want to consider using a Tor controller "
+ "to select and update a smaller set of nodes instead.",
+ position_hint, smartlist_len(whitelisted_live_middles));
+
+    /* NO_WEIGHTING here just means don't take node flags into account
+     * (ie: use consensus measurement only). This is done so that
+     * we don't further surprise the user by entirely skipping nodes
+     * that they specified. */
+ middle_node = node_sl_choose_by_bandwidth(whitelisted_live_middles,
+ NO_WEIGHTING);
+ }
+
+ smartlist_free(whitelisted_live_middles);
+ smartlist_free(all_live_nodes);
+
+ return middle_node;
+}
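+
+/* Worked example of the threshold above (illustrative): a pinned set of
+ * four HSLayer2Nodes relays is chosen from uniformly at random, while a
+ * routerset matching hundreds of relays exceeds
+ * MAX_SANE_RESTRICTED_NODES (20), so we fall back to bandwidth-weighted
+ * selection and emit the rate-limited notice. */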
+
+/** Return a pointer to a suitable router to be the exit node for the
+ * circuit of purpose <b>purpose</b> that we're about to build (or NULL
+ * if no router is suitable).
+ *
+ * For general-purpose circuits, pass it off to
+ * choose_good_exit_server_general()
+ *
+ * For client-side rendezvous circuits, choose a random node, weighted
+ * toward the preferences in 'options'.
+ */
+static const node_t *
+choose_good_exit_server(origin_circuit_t *circ,
+ router_crn_flags_t flags, int is_internal)
+{
+ const or_options_t *options = get_options();
+ flags |= CRN_NEED_DESC;
+
+ switch (TO_CIRCUIT(circ)->purpose) {
+ case CIRCUIT_PURPOSE_C_HSDIR_GET:
+ case CIRCUIT_PURPOSE_S_HSDIR_POST:
+ case CIRCUIT_PURPOSE_HS_VANGUARDS:
+ /* For these three, we want to pick the exit like a middle hop,
+ * since it should be random. */
+ tor_assert_nonfatal(is_internal);
+ /* Falls through */
+ case CIRCUIT_PURPOSE_C_GENERAL:
+ if (is_internal) /* pick it like a middle hop */
+ return router_choose_random_node(NULL, options->ExcludeNodes, flags);
+ else
+ return choose_good_exit_server_general(flags);
+ case CIRCUIT_PURPOSE_C_ESTABLISH_REND:
+ {
+ /* Pick a new RP */
+ const node_t *rendezvous_node = pick_rendezvous_node(flags);
+ log_info(LD_REND, "Picked new RP: %s",
+ safe_str_client(node_describe(rendezvous_node)));
+ return rendezvous_node;
+ }
+ }
+ log_warn(LD_BUG,"Unhandled purpose %d", TO_CIRCUIT(circ)->purpose);
+ tor_fragile_assert();
+ return NULL;
+}
+
+/** Log a warning if the user specified an exit for the circuit that
+ * has been excluded from use by ExcludeNodes or ExcludeExitNodes. */
+static void
+warn_if_last_router_excluded(origin_circuit_t *circ,
+ const extend_info_t *exit_ei)
+{
+ const or_options_t *options = get_options();
+ routerset_t *rs = options->ExcludeNodes;
+ const char *description;
+ uint8_t purpose = circ->base_.purpose;
+
+ if (circ->build_state->onehop_tunnel)
+ return;
+
+ switch (purpose)
+ {
+ default:
+ case CIRCUIT_PURPOSE_OR:
+ case CIRCUIT_PURPOSE_INTRO_POINT:
+ case CIRCUIT_PURPOSE_REND_POINT_WAITING:
+ case CIRCUIT_PURPOSE_REND_ESTABLISHED:
+ log_warn(LD_BUG, "Called on non-origin circuit (purpose %d, %s)",
+ (int)purpose,
+ circuit_purpose_to_string(purpose));
+ return;
+ case CIRCUIT_PURPOSE_S_HSDIR_POST:
+ case CIRCUIT_PURPOSE_C_HSDIR_GET:
+ case CIRCUIT_PURPOSE_C_GENERAL:
+ if (circ->build_state->is_internal)
+ return;
+ description = "requested exit node";
+ rs = options->ExcludeExitNodesUnion_;
+ break;
+ case CIRCUIT_PURPOSE_C_INTRODUCING:
+ case CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT:
+ case CIRCUIT_PURPOSE_C_INTRODUCE_ACKED:
+ case CIRCUIT_PURPOSE_S_ESTABLISH_INTRO:
+ case CIRCUIT_PURPOSE_S_CONNECT_REND:
+ case CIRCUIT_PURPOSE_S_REND_JOINED:
+ case CIRCUIT_PURPOSE_TESTING:
+ return;
+ case CIRCUIT_PURPOSE_C_ESTABLISH_REND:
+ case CIRCUIT_PURPOSE_C_REND_READY:
+ case CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED:
+ case CIRCUIT_PURPOSE_C_REND_JOINED:
+ description = "chosen rendezvous point";
+ break;
+ case CIRCUIT_PURPOSE_CONTROLLER:
+ rs = options->ExcludeExitNodesUnion_;
+ description = "controller-selected circuit target";
+ break;
+ }
+
+ if (routerset_contains_extendinfo(rs, exit_ei)) {
+ /* We should never get here if StrictNodes is set to 1. */
+ if (options->StrictNodes) {
+ log_warn(LD_BUG, "Using %s '%s' which is listed in ExcludeNodes%s, "
+ "even though StrictNodes is set. Please report. "
+ "(Circuit purpose: %s)",
+ description, extend_info_describe(exit_ei),
+ rs==options->ExcludeNodes?"":" or ExcludeExitNodes",
+ circuit_purpose_to_string(purpose));
+ } else {
+ log_warn(LD_CIRC, "Using %s '%s' which is listed in "
+ "ExcludeNodes%s, because no better options were available. To "
+ "prevent this (and possibly break your Tor functionality), "
+ "set the StrictNodes configuration option. "
+ "(Circuit purpose: %s)",
+ description, extend_info_describe(exit_ei),
+ rs==options->ExcludeNodes?"":" or ExcludeExitNodes",
+ circuit_purpose_to_string(purpose));
+ }
+ circuit_log_path(LOG_WARN, LD_CIRC, circ);
+ }
+
+ return;
+}
+
+/** Decide a suitable length for circ's cpath, and pick an exit
+ * router (or use <b>exit_ei</b> if provided). Store these in the
+ * cpath.
+ *
+ * If <b>is_hs_v3_rp_circuit</b> is set, then this exit should be suitable to
+ * be used as an HS v3 rendezvous point.
+ *
+ * Return 0 if ok, -1 if circuit should be closed. */
+STATIC int
+onion_pick_cpath_exit(origin_circuit_t *circ, extend_info_t *exit_ei,
+ int is_hs_v3_rp_circuit)
+{
+ cpath_build_state_t *state = circ->build_state;
+
+ if (state->onehop_tunnel) {
+ log_debug(LD_CIRC, "Launching a one-hop circuit for dir tunnel%s.",
+ (rend_allow_non_anonymous_connection(get_options()) ?
+ ", or intro or rendezvous connection" : ""));
+ state->desired_path_len = 1;
+ } else {
+ int r = new_route_len(circ->base_.purpose, exit_ei, nodelist_get_list());
+ if (r < 1) /* must be at least 1 */
+ return -1;
+ state->desired_path_len = r;
+ }
+
+ if (exit_ei) { /* the circuit-builder pre-requested one */
+ warn_if_last_router_excluded(circ, exit_ei);
+ log_info(LD_CIRC,"Using requested exit node '%s'",
+ extend_info_describe(exit_ei));
+ exit_ei = extend_info_dup(exit_ei);
+ } else { /* we have to decide one */
+ router_crn_flags_t flags = CRN_NEED_DESC;
+ if (state->need_uptime)
+ flags |= CRN_NEED_UPTIME;
+ if (state->need_capacity)
+ flags |= CRN_NEED_CAPACITY;
+ if (is_hs_v3_rp_circuit)
+ flags |= CRN_RENDEZVOUS_V3;
+ if (state->onehop_tunnel)
+ flags |= CRN_DIRECT_CONN;
+ const node_t *node =
+ choose_good_exit_server(circ, flags, state->is_internal);
+ if (!node) {
+ log_warn(LD_CIRC,"Failed to choose an exit server");
+ return -1;
+ }
+ exit_ei = extend_info_from_node(node, state->onehop_tunnel);
+ if (BUG(exit_ei == NULL))
+ return -1;
+ }
+ state->chosen_exit = exit_ei;
+ return 0;
+}
+
+/** Give <b>circ</b> a new exit destination <b>exit_ei</b>, and add a
+ * hop to the cpath reflecting this. Don't send the next extend cell --
+ * the caller will do this if it wants to.
+ */
+int
+circuit_append_new_exit(origin_circuit_t *circ, extend_info_t *exit_ei)
+{
+ cpath_build_state_t *state;
+ tor_assert(exit_ei);
+ tor_assert(circ);
+
+ state = circ->build_state;
+ tor_assert(state);
+ extend_info_free(state->chosen_exit);
+ state->chosen_exit = extend_info_dup(exit_ei);
+
+ ++circ->build_state->desired_path_len;
+ onion_append_hop(&circ->cpath, exit_ei);
+ return 0;
+}
+
+/** Take an open <b>circ</b>, and add a new hop at the end, based on
+ * <b>exit_ei</b>. Set its state back to CIRCUIT_STATE_BUILDING, and then
+ * send the next extend cell to begin connecting to that hop.
+ */
+int
+circuit_extend_to_new_exit(origin_circuit_t *circ, extend_info_t *exit_ei)
+{
+ int err_reason = 0;
+ warn_if_last_router_excluded(circ, exit_ei);
+
+ tor_gettimeofday(&circ->base_.timestamp_began);
+
+ circuit_append_new_exit(circ, exit_ei);
+ circuit_set_state(TO_CIRCUIT(circ), CIRCUIT_STATE_BUILDING);
+ if ((err_reason = circuit_send_next_onion_skin(circ))<0) {
+ log_warn(LD_CIRC, "Couldn't extend circuit to new point %s.",
+ extend_info_describe(exit_ei));
+ circuit_mark_for_close(TO_CIRCUIT(circ), -err_reason);
+ return -1;
+ }
+
+  // XXX: Should cannibalized circuits be dirty or not? Not easy to say...
+
+ return 0;
+}
+
+/** Return the number of routers in <b>routers</b> that are currently up
+ * and available for building circuits through.
+ *
+ * (Note that this function may overcount or undercount, if we have
+ * descriptors that are not the type we would prefer to use for some
+ * particular router. See bug #25885.)
+ */
+MOCK_IMPL(STATIC int,
+count_acceptable_nodes, (smartlist_t *nodes))
+{
+ int num=0;
+
+ SMARTLIST_FOREACH_BEGIN(nodes, const node_t *, node) {
+ // log_debug(LD_CIRC,
+// "Contemplating whether router %d (%s) is a new option.",
+// i, r->nickname);
+ if (! node->is_running)
+// log_debug(LD_CIRC,"Nope, the directory says %d is not running.",i);
+ continue;
+ if (! node->is_valid)
+// log_debug(LD_CIRC,"Nope, the directory says %d is not valid.",i);
+ continue;
+ if (! node_has_any_descriptor(node))
+ continue;
+ /* The node has a descriptor, so we can just check the ntor key directly */
+ if (!node_has_curve25519_onion_key(node))
+ continue;
+ ++num;
+ } SMARTLIST_FOREACH_END(node);
+
+// log_debug(LD_CIRC,"I like %d. num_acceptable_routers now %d.",i, num);
+
+ return num;
+}
+
+/** Add <b>new_hop</b> to the end of the doubly-linked list <b>head_ptr</b>.
+ * This function is used to extend the cpath by another hop.
+ */
+void
+onion_append_to_cpath(crypt_path_t **head_ptr, crypt_path_t *new_hop)
+{
+ if (*head_ptr) {
+ new_hop->next = (*head_ptr);
+ new_hop->prev = (*head_ptr)->prev;
+ (*head_ptr)->prev->next = new_hop;
+ (*head_ptr)->prev = new_hop;
+ } else {
+ *head_ptr = new_hop;
+ new_hop->prev = new_hop->next = new_hop;
+ }
+}
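+
+/* Minimal sketch of the circular-list invariants above (illustrative;
+ * <b>a</b> and <b>b</b> are hypothetical hops): */
+#if 0
+  crypt_path_t a, b, *head = NULL;
+  onion_append_to_cpath(&head, &a); /* a.prev == a.next == &a */
+  onion_append_to_cpath(&head, &b); /* b becomes the tail */
+  tor_assert(head == &a);
+  tor_assert(a.next == &b && a.prev == &b);
+  tor_assert(b.next == &a && b.prev == &a);
+#endif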
+
+#ifdef TOR_UNIT_TESTS
+
+/** Unittest helper function: Count number of hops in cpath linked list. */
+unsigned int
+cpath_get_n_hops(crypt_path_t **head_ptr)
+{
+ unsigned int n_hops = 0;
+ crypt_path_t *tmp;
+
+ if (!*head_ptr) {
+ return 0;
+ }
+
+ tmp = *head_ptr;
+ do {
+ n_hops++;
+ tmp = tmp->next;
+ } while (tmp != *head_ptr);
+
+ return n_hops;
+}
+
+#endif /* defined(TOR_UNIT_TESTS) */
+
+/**
+ * Build the exclude list for vanguard circuits.
+ *
+ * For vanguard circuits we exclude all the already chosen nodes (including the
+ * exit) from being middle hops to prevent the creation of A - B - A subpaths.
+ * We also allow the 4th hop to be the same as the guard node so as not to
+ * leak
+ * guard information to RP/IP/HSDirs.
+ *
+ * For vanguard circuits, we don't apply any subnet or family restrictions.
+ * This is to avoid impossible-to-build circuit paths, or just situations where
+ * our earlier guards prevent us from using most of our later ones.
+ *
+ * The alternative is building the circuit in reverse. Reverse calls to
+ * onion_extend_cpath() (i.e., select outer hops first) would then have the
+ * property that you don't gain information about inner hops by observing
+ * outer ones. See https://trac.torproject.org/projects/tor/ticket/24487
+ * for this.
+ *
+ * (Note further that we still exclude the exit to prevent A - B - A
+ * at the end of the path.) */
+static smartlist_t *
+build_vanguard_middle_exclude_list(uint8_t purpose,
+ cpath_build_state_t *state,
+ crypt_path_t *head,
+ int cur_len)
+{
+ smartlist_t *excluded;
+ const node_t *r;
+ crypt_path_t *cpath;
+ int i;
+
+ (void) purpose;
+
+ excluded = smartlist_new();
+
+ /* Add the exit to the exclude list (note that the exit/last hop is always
+ * chosen first in circuit_establish_circuit()). */
+ if ((r = build_state_get_exit_node(state))) {
+ smartlist_add(excluded, (node_t*)r);
+ }
+
+ /* If we are picking the 4th hop, allow that node to be the guard too.
+ * This prevents us from avoiding the Guard for those hops; avoiding it
+ * would give the adversary information about our guard if they control
+ * the RP, IP, or HSDIR. We don't do this check based on purpose
+ * because we also want to allow HS_VANGUARDS pre-built circuits
+ * to use the guard for that last hop.
+ */
+ if (cur_len == DEFAULT_ROUTE_LEN+1) {
+ /* Skip the first hop for the exclude list below */
+ head = head->next;
+ cur_len--;
+ }
+
+ for (i = 0, cpath = head; cpath && i < cur_len; ++i, cpath=cpath->next) {
+ if ((r = node_get_by_id(cpath->extend_info->identity_digest))) {
+ smartlist_add(excluded, (node_t*)r);
+ }
+ }
+
+ return excluded;
+}
+
+/**
+ * Build a list of nodes to exclude from the choice of this middle
+ * hop, based on already chosen nodes.
+ */
+static smartlist_t *
+build_middle_exclude_list(uint8_t purpose,
+ cpath_build_state_t *state,
+ crypt_path_t *head,
+ int cur_len)
+{
+ smartlist_t *excluded;
+ const node_t *r;
+ crypt_path_t *cpath;
+ int i;
+
+ /* Vanguard circuits have their own path-selection rules. */
+ if (circuit_should_use_vanguards(purpose)) {
+ return build_vanguard_middle_exclude_list(purpose, state, head, cur_len);
+ }
+
+ excluded = smartlist_new();
+
+ /* For non-vanguard circuits, add the exit and its family to the exclude list
+ * (note that the exit/last hop is always chosen first in
+ * circuit_establish_circuit()). */
+ if ((r = build_state_get_exit_node(state))) {
+ nodelist_add_node_and_family(excluded, r);
+ }
+
+ /* also exclude all other already chosen nodes and their family */
+ for (i = 0, cpath = head; cpath && i < cur_len; ++i, cpath=cpath->next) {
+ if ((r = node_get_by_id(cpath->extend_info->identity_digest))) {
+ nodelist_add_node_and_family(excluded, r);
+ }
+ }
+
+ return excluded;
+}
+
+/** Return true if we MUST use vanguards for picking this middle node. */
+static int
+middle_node_must_be_vanguard(const or_options_t *options,
+ uint8_t purpose, int cur_len)
+{
+ /* If this is not a hidden service circuit, don't use vanguards */
+ if (!circuit_purpose_is_hidden_service(purpose)) {
+ return 0;
+ }
+
+ /* If we have sticky L2 nodes, and this is an L2 pick, use vanguards */
+ if (options->HSLayer2Nodes && cur_len == 1) {
+ return 1;
+ }
+
+ /* If we have sticky L3 nodes, and this is an L3 pick, use vanguards */
+ if (options->HSLayer3Nodes && cur_len == 2) {
+ return 1;
+ }
+
+ return 0;
+}
+
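+/* Editorial illustration (hypothetical torrc values, not from this commit):
+ * with a configuration such as
+ *
+ *   HSLayer2Nodes <node-set-A>
+ *   HSLayer3Nodes <node-set-B>
+ *
+ * a hidden-service circuit gets its second hop (cur_len == 1) pinned to
+ * set A and its third hop (cur_len == 2) pinned to set B, while every
+ * other hop is chosen by the regular path-selection code. */
+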
+/** Pick a sticky vanguard middle node or return NULL if not found.
+ * See doc of pick_restricted_middle_node() for argument details. */
+static const node_t *
+pick_vanguard_middle_node(const or_options_t *options,
+ router_crn_flags_t flags, int cur_len,
+ const smartlist_t *excluded)
+{
+ const routerset_t *vanguard_routerset = NULL;
+ const node_t *node = NULL;
+
+ /* Pick the right routerset based on the current hop */
+ if (cur_len == 1) {
+ vanguard_routerset = options->HSLayer2Nodes;
+ } else if (cur_len == 2) {
+ vanguard_routerset = options->HSLayer3Nodes;
+ } else {
+ /* guaranteed by middle_node_must_be_vanguard() */
+ tor_assert_nonfatal_unreached();
+ return NULL;
+ }
+
+ node = pick_restricted_middle_node(flags, vanguard_routerset,
+ options->ExcludeNodes, excluded,
+ cur_len+1);
+
+ if (!node) {
+ static ratelim_t pinned_warning_limit = RATELIM_INIT(300);
+ log_fn_ratelim(&pinned_warning_limit, LOG_WARN, LD_CIRC,
+ "Could not find a node that matches the configured "
+ "_HSLayer%dNodes set", cur_len+1);
+ }
+
+ return node;
+}
+
+/** A helper function used by onion_extend_cpath(). Use <b>purpose</b>
+ * and <b>state</b> and the cpath <b>head</b> (currently populated only
+ * to length <b>cur_len</b>) to decide a suitable middle hop for a
+ * circuit. In particular, make sure we don't pick the exit node or its
+ * family, and make sure we don't duplicate any previous nodes or their
+ * families. */
+static const node_t *
+choose_good_middle_server(uint8_t purpose,
+ cpath_build_state_t *state,
+ crypt_path_t *head,
+ int cur_len)
+{
+ const node_t *choice;
+ smartlist_t *excluded;
+ const or_options_t *options = get_options();
+ router_crn_flags_t flags = CRN_NEED_DESC;
+ tor_assert(CIRCUIT_PURPOSE_MIN_ <= purpose &&
+ purpose <= CIRCUIT_PURPOSE_MAX_);
+
+ log_debug(LD_CIRC, "Contemplating intermediate hop #%d: random choice.",
+ cur_len+1);
+
+ excluded = build_middle_exclude_list(purpose, state, head, cur_len);
+
+ if (state->need_uptime)
+ flags |= CRN_NEED_UPTIME;
+ if (state->need_capacity)
+ flags |= CRN_NEED_CAPACITY;
+
+ /* If a hidden service circuit wants a specific middle node, pin it. */
+ if (middle_node_must_be_vanguard(options, purpose, cur_len)) {
+ log_debug(LD_GENERAL, "Picking a sticky node (cur_len = %d)", cur_len);
+ choice = pick_vanguard_middle_node(options, flags, cur_len, excluded);
+ smartlist_free(excluded);
+ return choice;
+ }
+
+ choice = router_choose_random_node(excluded, options->ExcludeNodes, flags);
+ smartlist_free(excluded);
+ return choice;
+}
+
+/** Pick a good entry server for the circuit to be built according to
+ * <b>state</b>. Don't reuse a chosen exit (if any), don't use this
+ * router (if we're an OR), and respect firewall settings; if we're
+ * configured to use entry guards, return one.
+ *
+ * Set *<b>guard_state_out</b> to information about the guard that
+ * we're selecting, which we'll use later to remember whether the
+ * guard worked or not.
+ */
+const node_t *
+choose_good_entry_server(uint8_t purpose, cpath_build_state_t *state,
+ circuit_guard_state_t **guard_state_out)
+{
+ const node_t *choice;
+ smartlist_t *excluded;
+ const or_options_t *options = get_options();
+ /* If possible, choose an entry server with a preferred address,
+ * otherwise, choose one with an allowed address */
+ router_crn_flags_t flags = (CRN_NEED_GUARD|CRN_NEED_DESC|CRN_PREF_ADDR|
+ CRN_DIRECT_CONN);
+ const node_t *node;
+
+ /* We once used this function to select a node to be a guard; a
+ * 'state == NULL' argument was the signal for that case. We no longer do.
+ */
+ tor_assert_nonfatal(state);
+
+ if (state && options->UseEntryGuards &&
+ (purpose != CIRCUIT_PURPOSE_TESTING || options->BridgeRelay)) {
+ /* This request is for an entry server to use for a regular circuit,
+ * and we use entry guard nodes. Just return one of the guard nodes. */
+ tor_assert(guard_state_out);
+ return guards_choose_guard(state, purpose, guard_state_out);
+ }
+
+ excluded = smartlist_new();
+
+ if (state && (node = build_state_get_exit_node(state))) {
+ /* Exclude the exit node from the state, if we have one. Also exclude its
+ * family. */
+ nodelist_add_node_and_family(excluded, node);
+ }
+
+ if (state) {
+ if (state->need_uptime)
+ flags |= CRN_NEED_UPTIME;
+ if (state->need_capacity)
+ flags |= CRN_NEED_CAPACITY;
+ }
+
+ choice = router_choose_random_node(excluded, options->ExcludeNodes, flags);
+ smartlist_free(excluded);
+ return choice;
+}
+
+/** Return the first non-open hop in cpath, or return NULL if all
+ * hops are open. */
+static crypt_path_t *
+onion_next_hop_in_cpath(crypt_path_t *cpath)
+{
+ crypt_path_t *hop = cpath;
+ do {
+ if (hop->state != CPATH_STATE_OPEN)
+ return hop;
+ hop = hop->next;
+ } while (hop != cpath);
+ return NULL;
+}
+
+/** Choose a suitable next hop for <b>circ</b>'s cpath, based on its
+ * build state, and append the hop's info to the cpath.
+ *
+ * Return 1 if the path is complete, 0 if we successfully added a hop,
+ * and -1 on error.
+ */
+STATIC int
+onion_extend_cpath(origin_circuit_t *circ)
+{
+ uint8_t purpose = circ->base_.purpose;
+ cpath_build_state_t *state = circ->build_state;
+ int cur_len = circuit_get_cpath_len(circ);
+ extend_info_t *info = NULL;
+
+ if (cur_len >= state->desired_path_len) {
+ log_debug(LD_CIRC, "Path is complete: %d steps long",
+ state->desired_path_len);
+ return 1;
+ }
+
+ log_debug(LD_CIRC, "Path is %d long; we want %d", cur_len,
+ state->desired_path_len);
+
+ if (cur_len == state->desired_path_len - 1) { /* Picking last node */
+ info = extend_info_dup(state->chosen_exit);
+ } else if (cur_len == 0) { /* picking first node */
+ const node_t *r = choose_good_entry_server(purpose, state,
+ &circ->guard_state);
+ if (r) {
+ /* If we're a client, use the preferred address rather than the
+ primary address, for potentially connecting to an IPv6 OR
+ port. Servers always want the primary (IPv4) address. */
+ int client = (server_mode(get_options()) == 0);
+ info = extend_info_from_node(r, client);
+ /* Clients can fail to find an allowed address */
+ tor_assert_nonfatal(info || client);
+ }
+ } else {
+ const node_t *r =
+ choose_good_middle_server(purpose, state, circ->cpath, cur_len);
+ if (r) {
+ info = extend_info_from_node(r, 0);
+ tor_assert_nonfatal(info);
+ }
+ }
+
+ if (!info) {
+ log_warn(LD_CIRC,"Failed to find node for hop #%d of our path. Discarding "
+ "this circuit.", cur_len+1);
+ return -1;
+ }
+
+ log_debug(LD_CIRC,"Chose router %s for hop #%d (exit is %s)",
+ extend_info_describe(info),
+ cur_len+1, build_state_get_exit_nickname(state));
+
+ onion_append_hop(&circ->cpath, info);
+ extend_info_free(info);
+ return 0;
+}
+
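+/* Editorial sketch (not part of this commit): given the return contract of
+ * onion_extend_cpath() above -- 1 when the path is complete, 0 when a hop
+ * was appended, -1 on error -- a caller that wanted to grow the whole path
+ * at once could loop like this:
+ *
+ *   int r;
+ *   while ((r = onion_extend_cpath(circ)) == 0)
+ *     ; // each successful call appended exactly one hop
+ *   if (r < 0)
+ *     circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_NOPATH);
+ *
+ * In practice the function is driven one step at a time as each hop of the
+ * circuit finishes its handshake; the loop only illustrates the values. */
+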
+/** Create a new hop, annotate it with information about its
+ * corresponding router <b>choice</b>, and append it to the
+ * end of the cpath <b>head_ptr</b>. */
+STATIC int
+onion_append_hop(crypt_path_t **head_ptr, extend_info_t *choice)
+{
+ crypt_path_t *hop = tor_malloc_zero(sizeof(crypt_path_t));
+
+ /* link hop into the cpath, at the end. */
+ onion_append_to_cpath(head_ptr, hop);
+
+ hop->magic = CRYPT_PATH_MAGIC;
+ hop->state = CPATH_STATE_CLOSED;
+
+ hop->extend_info = extend_info_dup(choice);
+
+ hop->package_window = circuit_initial_package_window();
+ hop->deliver_window = CIRCWINDOW_START;
+
+ return 0;
+}
+
+/** Allocate a new extend_info object based on the various arguments. */
+extend_info_t *
+extend_info_new(const char *nickname,
+ const char *rsa_id_digest,
+ const ed25519_public_key_t *ed_id,
+ crypto_pk_t *onion_key,
+ const curve25519_public_key_t *ntor_key,
+ const tor_addr_t *addr, uint16_t port)
+{
+ extend_info_t *info = tor_malloc_zero(sizeof(extend_info_t));
+ memcpy(info->identity_digest, rsa_id_digest, DIGEST_LEN);
+ if (ed_id && !ed25519_public_key_is_zero(ed_id))
+ memcpy(&info->ed_identity, ed_id, sizeof(ed25519_public_key_t));
+ if (nickname)
+ strlcpy(info->nickname, nickname, sizeof(info->nickname));
+ if (onion_key)
+ info->onion_key = crypto_pk_dup_key(onion_key);
+ if (ntor_key)
+ memcpy(&info->curve25519_onion_key, ntor_key,
+ sizeof(curve25519_public_key_t));
+ tor_addr_copy(&info->addr, addr);
+ info->port = port;
+ return info;
+}
+
+/** Allocate and return a new extend_info that can be used to build a
+ * circuit to or through the node <b>node</b>. Use the primary address
+ * of the node (i.e. its IPv4 address) unless
+ * <b>for_direct_connect</b> is true, in which case the preferred
+ * address is used instead. May return NULL if there is not enough
+ * info about <b>node</b> to extend to it--for example, if the preferred
+ * routerinfo_t or microdesc_t is missing, or if for_direct_connect is
+ * true and none of the node's addresses is allowed by tor's firewall
+ * and IP version config.
+ **/
+extend_info_t *
+extend_info_from_node(const node_t *node, int for_direct_connect)
+{
+ tor_addr_port_t ap;
+ int valid_addr = 0;
+
+ if (!node_has_preferred_descriptor(node, for_direct_connect)) {
+ return NULL;
+ }
+
+ /* Choose a preferred address first, but fall back to an allowed address. */
+ if (for_direct_connect)
+ fascist_firewall_choose_address_node(node, FIREWALL_OR_CONNECTION, 0, &ap);
+ else {
+ node_get_prim_orport(node, &ap);
+ }
+ valid_addr = tor_addr_port_is_valid_ap(&ap, 0);
+
+ if (valid_addr)
+ log_debug(LD_CIRC, "using %s for %s",
+ fmt_addrport(&ap.addr, ap.port),
+ node->ri ? node->ri->nickname : node->rs->nickname);
+ else
+ log_warn(LD_CIRC, "Could not choose valid address for %s",
+ node->ri ? node->ri->nickname : node->rs->nickname);
+
+ /* Every node we connect to or extend to must support ntor. */
+ if (!node_has_curve25519_onion_key(node)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
+ "Attempted to create extend_info for a node that does not support "
+ "ntor: %s", node_describe(node));
+ return NULL;
+ }
+
+ const ed25519_public_key_t *ed_pubkey = NULL;
+
+ /* Don't send the ed25519 pubkey unless the target node actually supports
+ * authenticating with it. */
+ if (node_supports_ed25519_link_authentication(node, 0)) {
+ log_info(LD_CIRC, "Including Ed25519 ID for %s", node_describe(node));
+ ed_pubkey = node_get_ed25519_id(node);
+ } else if (node_get_ed25519_id(node)) {
+ log_info(LD_CIRC, "Not including the ed25519 ID for %s, since it won't "
+ "be able to authenticate it.",
+ node_describe(node));
+ }
+
+ /* Retrieve the curve25519 pubkey. */
+ const curve25519_public_key_t *curve_pubkey =
+ node_get_curve25519_onion_key(node);
+
+ if (valid_addr && node->ri)
+ return extend_info_new(node->ri->nickname,
+ node->identity,
+ ed_pubkey,
+ node->ri->onion_pkey,
+ curve_pubkey,
+ &ap.addr,
+ ap.port);
+ else if (valid_addr && node->rs && node->md)
+ return extend_info_new(node->rs->nickname,
+ node->identity,
+ ed_pubkey,
+ node->md->onion_pkey,
+ curve_pubkey,
+ &ap.addr,
+ ap.port);
+ else
+ return NULL;
+}
+
+/** Release storage held by an extend_info_t struct. */
+void
+extend_info_free_(extend_info_t *info)
+{
+ if (!info)
+ return;
+ crypto_pk_free(info->onion_key);
+ tor_free(info);
+}
+
+/** Allocate and return a new extend_info_t with the same contents as
+ * <b>info</b>. */
+extend_info_t *
+extend_info_dup(extend_info_t *info)
+{
+ extend_info_t *newinfo;
+ tor_assert(info);
+ newinfo = tor_malloc(sizeof(extend_info_t));
+ memcpy(newinfo, info, sizeof(extend_info_t));
+ if (info->onion_key)
+ newinfo->onion_key = crypto_pk_dup_key(info->onion_key);
+ else
+ newinfo->onion_key = NULL;
+ return newinfo;
+}
+
+/** Return the node_t for the chosen exit router in <b>state</b>.
+ * If there is no chosen exit, or if we don't know the node_t for
+ * the chosen exit, return NULL.
+ */
+const node_t *
+build_state_get_exit_node(cpath_build_state_t *state)
+{
+ if (!state || !state->chosen_exit)
+ return NULL;
+ return node_get_by_id(state->chosen_exit->identity_digest);
+}
+
+/** Return the RSA ID digest for the chosen exit router in <b>state</b>.
+ * If there is no chosen exit, return NULL.
+ */
+const uint8_t *
+build_state_get_exit_rsa_id(cpath_build_state_t *state)
+{
+ if (!state || !state->chosen_exit)
+ return NULL;
+ return (const uint8_t *) state->chosen_exit->identity_digest;
+}
+
+/** Return the nickname for the chosen exit router in <b>state</b>. If
+ * there is no chosen exit, or if we don't know the routerinfo_t for the
+ * chosen exit, return NULL.
+ */
+const char *
+build_state_get_exit_nickname(cpath_build_state_t *state)
+{
+ if (!state || !state->chosen_exit)
+ return NULL;
+ return state->chosen_exit->nickname;
+}
+
+/** Return true iff the given address can be used to extend to. */
+int
+extend_info_addr_is_allowed(const tor_addr_t *addr)
+{
+ tor_assert(addr);
+
+ /* Check if we have a private address and if we can extend to it. */
+ if ((tor_addr_is_internal(addr, 0) || tor_addr_is_multicast(addr)) &&
+ !get_options()->ExtendAllowPrivateAddresses) {
+ goto disallow;
+ }
+ /* Allowed! */
+ return 1;
+ disallow:
+ return 0;
+}
+
+/* Does ei have a valid TAP key? */
+int
+extend_info_supports_tap(const extend_info_t* ei)
+{
+ tor_assert(ei);
+ /* Valid TAP keys are not NULL */
+ return ei->onion_key != NULL;
+}
+
+/* Does ei have a valid ntor key? */
+int
+extend_info_supports_ntor(const extend_info_t* ei)
+{
+ tor_assert(ei);
+ /* Valid ntor keys have at least one non-zero byte */
+ return !tor_mem_is_zero(
+ (const char*)ei->curve25519_onion_key.public_key,
+ CURVE25519_PUBKEY_LEN);
+}
+
+/* Is the given circuit purpose allowed to use the deprecated TAP encryption
+ * protocol? The hidden service protocol still uses TAP for some connections,
+ * because ntor onion keys aren't included in HS descriptors or INTRODUCE
+ * cells. */
+static int
+circuit_purpose_can_use_tap_impl(uint8_t purpose)
+{
+ return (purpose == CIRCUIT_PURPOSE_S_CONNECT_REND ||
+ purpose == CIRCUIT_PURPOSE_C_INTRODUCING);
+}
+
+/* Is circ allowed to use the deprecated TAP encryption protocol?
+ * The hidden service protocol still uses TAP for some connections, because
+ * ntor onion keys aren't included in HS descriptors or INTRODUCE cells. */
+int
+circuit_can_use_tap(const origin_circuit_t *circ)
+{
+ tor_assert(circ);
+ tor_assert(circ->cpath);
+ tor_assert(circ->cpath->extend_info);
+ return (circuit_purpose_can_use_tap_impl(circ->base_.purpose) &&
+ extend_info_supports_tap(circ->cpath->extend_info));
+}
+
+/* Does circ have an onion key which it's allowed to use? */
+int
+circuit_has_usable_onion_key(const origin_circuit_t *circ)
+{
+ tor_assert(circ);
+ tor_assert(circ->cpath);
+ tor_assert(circ->cpath->extend_info);
+ return (extend_info_supports_ntor(circ->cpath->extend_info) ||
+ circuit_can_use_tap(circ));
+}
+
+/* Does ei have an onion key which it would prefer to use?
+ * Currently, we prefer ntor keys. */
+int
+extend_info_has_preferred_onion_key(const extend_info_t* ei)
+{
+ tor_assert(ei);
+ return extend_info_supports_ntor(ei);
+}
+
+/** Find the circuits that are waiting to find out whether their guards are
+ * usable, and if any are ready to become usable, mark them open and try
+ * attaching streams as appropriate. */
+void
+circuit_upgrade_circuits_from_guard_wait(void)
+{
+ smartlist_t *to_upgrade =
+ circuit_find_circuits_to_upgrade_from_guard_wait();
+
+ if (to_upgrade == NULL)
+ return;
+
+ log_info(LD_GUARD, "Upgrading %d circuits from 'waiting for better guard' "
+ "to 'open'.", smartlist_len(to_upgrade));
+
+ SMARTLIST_FOREACH_BEGIN(to_upgrade, origin_circuit_t *, circ) {
+ circuit_set_state(TO_CIRCUIT(circ), CIRCUIT_STATE_OPEN);
+ circuit_has_opened(circ);
+ } SMARTLIST_FOREACH_END(circ);
+
+ smartlist_free(to_upgrade);
+}
+
diff --git a/src/core/or/circuitbuild.h b/src/core/or/circuitbuild.h
new file mode 100644
index 0000000000..9f5d99c2a5
--- /dev/null
+++ b/src/core/or/circuitbuild.h
@@ -0,0 +1,107 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circuitbuild.h
+ * \brief Header file for circuitbuild.c.
+ **/
+
+#ifndef TOR_CIRCUITBUILD_H
+#define TOR_CIRCUITBUILD_H
+
+struct ed25519_public_key_t;
+struct curve25519_public_key_t;
+
+int route_len_for_purpose(uint8_t purpose, extend_info_t *exit_ei);
+char *circuit_list_path(origin_circuit_t *circ, int verbose);
+char *circuit_list_path_for_controller(origin_circuit_t *circ);
+void circuit_log_path(int severity, unsigned int domain,
+ origin_circuit_t *circ);
+origin_circuit_t *origin_circuit_init(uint8_t purpose, int flags);
+origin_circuit_t *circuit_establish_circuit(uint8_t purpose,
+ extend_info_t *exit,
+ int flags);
+struct circuit_guard_state_t *origin_circuit_get_guard_state(
+ origin_circuit_t *circ);
+int circuit_handle_first_hop(origin_circuit_t *circ);
+void circuit_n_chan_done(channel_t *chan, int status,
+ int close_origin_circuits);
+int inform_testing_reachability(void);
+int circuit_timeout_want_to_count_circ(const origin_circuit_t *circ);
+int circuit_send_next_onion_skin(origin_circuit_t *circ);
+void circuit_note_clock_jumped(int64_t seconds_elapsed, bool was_idle);
+int circuit_extend(cell_t *cell, circuit_t *circ);
+int circuit_init_cpath_crypto(crypt_path_t *cpath,
+ const char *key_data, size_t key_data_len,
+ int reverse, int is_hs_v3);
+struct created_cell_t;
+int circuit_finish_handshake(origin_circuit_t *circ,
+ const struct created_cell_t *created_cell);
+int circuit_truncated(origin_circuit_t *circ, crypt_path_t *layer,
+ int reason);
+int onionskin_answer(or_circuit_t *circ,
+ const struct created_cell_t *created_cell,
+ const char *keys, size_t keys_len,
+ const uint8_t *rend_circ_nonce);
+MOCK_DECL(int, circuit_all_predicted_ports_handled, (time_t now,
+ int *need_uptime,
+ int *need_capacity));
+
+int circuit_append_new_exit(origin_circuit_t *circ, extend_info_t *info);
+int circuit_extend_to_new_exit(origin_circuit_t *circ, extend_info_t *info);
+void onion_append_to_cpath(crypt_path_t **head_ptr, crypt_path_t *new_hop);
+extend_info_t *extend_info_new(const char *nickname,
+ const char *rsa_id_digest,
+ const struct ed25519_public_key_t *ed_id,
+ crypto_pk_t *onion_key,
+ const struct curve25519_public_key_t *ntor_key,
+ const tor_addr_t *addr, uint16_t port);
+extend_info_t *extend_info_from_node(const node_t *r, int for_direct_connect);
+extend_info_t *extend_info_dup(extend_info_t *info);
+void extend_info_free_(extend_info_t *info);
+#define extend_info_free(info) \
+ FREE_AND_NULL(extend_info_t, extend_info_free_, (info))
+int extend_info_addr_is_allowed(const tor_addr_t *addr);
+int extend_info_supports_tap(const extend_info_t* ei);
+int extend_info_supports_ntor(const extend_info_t* ei);
+int circuit_can_use_tap(const origin_circuit_t *circ);
+int circuit_has_usable_onion_key(const origin_circuit_t *circ);
+int extend_info_has_preferred_onion_key(const extend_info_t* ei);
+const uint8_t *build_state_get_exit_rsa_id(cpath_build_state_t *state);
+const node_t *build_state_get_exit_node(cpath_build_state_t *state);
+const char *build_state_get_exit_nickname(cpath_build_state_t *state);
+
+struct circuit_guard_state_t;
+
+const node_t *choose_good_entry_server(uint8_t purpose,
+ cpath_build_state_t *state,
+ struct circuit_guard_state_t **guard_state_out);
+void circuit_upgrade_circuits_from_guard_wait(void);
+
+#ifdef CIRCUITBUILD_PRIVATE
+STATIC circid_t get_unique_circ_id_by_chan(channel_t *chan);
+STATIC int new_route_len(uint8_t purpose, extend_info_t *exit_ei,
+ smartlist_t *nodes);
+MOCK_DECL(STATIC int, count_acceptable_nodes, (smartlist_t *nodes));
+
+STATIC int onion_extend_cpath(origin_circuit_t *circ);
+
+STATIC int
+onion_pick_cpath_exit(origin_circuit_t *circ, extend_info_t *exit_ei,
+ int is_hs_v3_rp_circuit);
+
+#if defined(ENABLE_TOR2WEB_MODE) || defined(TOR_UNIT_TESTS)
+enum router_crn_flags_t;
+STATIC const node_t *pick_tor2web_rendezvous_node(
+ enum router_crn_flags_t flags,
+ const or_options_t *options);
+unsigned int cpath_get_n_hops(crypt_path_t **head_ptr);
+
+#endif /* defined(ENABLE_TOR2WEB_MODE) || defined(TOR_UNIT_TESTS) */
+
+#endif /* defined(CIRCUITBUILD_PRIVATE) */
+
+#endif /* !defined(TOR_CIRCUITBUILD_H) */
diff --git a/src/core/or/circuitlist.c b/src/core/or/circuitlist.c
new file mode 100644
index 0000000000..d9d12db9b4
--- /dev/null
+++ b/src/core/or/circuitlist.c
@@ -0,0 +1,2742 @@
+/* Copyright 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circuitlist.c
+ *
+ * \brief Manage global structures that list and index circuits, and
+ * look up circuits within them.
+ *
+ * One of the most frequent operations in Tor occurs every time that
+ * a relay cell arrives on a channel. When that happens, we need to
+ * find which circuit it is associated with, based on the channel and the
+ * circuit ID in the relay cell.
+ *
+ * To handle that, we maintain a global list of circuits, and a hashtable
+ * mapping [channel,circID] pairs to circuits. Circuits are added to and
+ * removed from this mapping using circuit_set_p_circid_chan() and
+ * circuit_set_n_circid_chan(). To look up a circuit from this map, most
+ * callers should use circuit_get_by_circid_channel(), though
+ * circuit_get_by_circid_channel_even_if_marked() is appropriate under some
+ * circumstances.
+ *
+ * We also need to allow for the possibility that we have blocked use of a
+ * circuit ID (because we are waiting to send a DESTROY cell), but the
+ * circuit is not there any more. For that case, we allow placeholder
+ * entries in the table, using channel_mark_circid_unusable().
+ *
+ * To efficiently handle a channel that has just opened, we also maintain a
+ * list of the circuits waiting for channels, so we can attach them as
+ * needed without iterating through the whole list of circuits, using
+ * circuit_get_all_pending_on_channel().
+ *
+ * In this module, we also handle the list of circuits that have been
+ * marked for close elsewhere, and close them as needed. (We use this
+ * "mark now, close later" pattern here and elsewhere to avoid
+ * unpredictable recursion if we closed every circuit immediately upon
+ * realizing it needed to close.) See circuit_mark_for_close() for the
+ * mark function, and circuit_close_all_marked() for the close function.
+ *
+ * For hidden services, we need to be able to look up introduction point
+ * circuits and rendezvous circuits by cookie, key, etc. These are
+ * currently handled with linear searches in
+ * circuit_get_ready_rend_circuit_by_rend_data(),
+ * circuit_get_next_by_pk_and_purpose(), and with hash lookups in
+ * circuit_get_rendezvous() and circuit_get_intro_point().
+ *
+ * This module is also the entry point for our out-of-memory handler
+ * logic, which was originally circuit-focused.
+ **/
+#define CIRCUITLIST_PRIVATE
+#include "lib/cc/torint.h" /* TOR_PRIuSZ */
+
+#include "or/or.h"
+#include "or/channel.h"
+#include "or/circpathbias.h"
+#include "or/circuitbuild.h"
+#include "or/circuitlist.h"
+#include "or/circuituse.h"
+#include "or/circuitstats.h"
+#include "or/connection.h"
+#include "or/config.h"
+#include "or/connection_edge.h"
+#include "or/connection_or.h"
+#include "or/control.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "lib/crypt_ops/crypto_util.h"
+#include "lib/crypt_ops/crypto_dh.h"
+#include "or/directory.h"
+#include "or/entrynodes.h"
+#include "or/main.h"
+#include "or/hs_circuit.h"
+#include "or/hs_circuitmap.h"
+#include "or/hs_ident.h"
+#include "or/networkstatus.h"
+#include "or/nodelist.h"
+#include "or/onion.h"
+#include "or/onion_fast.h"
+#include "or/policies.h"
+#include "or/relay.h"
+#include "or/relay_crypto.h"
+#include "or/rendclient.h"
+#include "or/rendcommon.h"
+#include "or/rephist.h"
+#include "or/routerlist.h"
+#include "or/routerset.h"
+#include "or/channelpadding.h"
+#include "lib/compress/compress.h"
+#include "lib/compress/compress_lzma.h"
+#include "lib/compress/compress_zlib.h"
+#include "lib/compress/compress_zstd.h"
+#include "lib/container/buffers.h"
+
+#include "ht.h"
+
+#include "or/cpath_build_state_st.h"
+#include "or/crypt_path_reference_st.h"
+#include "or/dir_connection_st.h"
+#include "or/edge_connection_st.h"
+#include "or/extend_info_st.h"
+#include "or/or_circuit_st.h"
+#include "or/origin_circuit_st.h"
+
+/********* START VARIABLES **********/
+
+/** A global list of all circuits known to this Tor instance. */
+static smartlist_t *global_circuitlist = NULL;
+
+/** A global list of all origin circuits. Every element of this is also
+ * an element of global_circuitlist. */
+static smartlist_t *global_origin_circuit_list = NULL;
+
+/** A list of all the circuits in CIRCUIT_STATE_CHAN_WAIT. */
+static smartlist_t *circuits_pending_chans = NULL;
+
+/** List of all the (origin) circuits whose state is
+ * CIRCUIT_STATE_GUARD_WAIT. */
+static smartlist_t *circuits_pending_other_guards = NULL;
+
+/** A list of all the circuits that have been marked with
+ * circuit_mark_for_close and which are waiting for circuit_about_to_free. */
+static smartlist_t *circuits_pending_close = NULL;
+
+static void circuit_free_cpath_node(crypt_path_t *victim);
+static void cpath_ref_decref(crypt_path_reference_t *cpath_ref);
+static void circuit_about_to_free_atexit(circuit_t *circ);
+static void circuit_about_to_free(circuit_t *circ);
+
+/**
+ * A cached value of the current state of the origin circuit list. Has the
+ * value 1 if we saw any opened circuits recently (since the last call to
+ * circuit_any_opened_circuits(), which gets called around once a second by
+ * circuit_expire_building). 0 otherwise.
+ */
+static int any_opened_circs_cached_val = 0;
+
+/********* END VARIABLES ************/
+
+or_circuit_t *
+TO_OR_CIRCUIT(circuit_t *x)
+{
+ tor_assert(x->magic == OR_CIRCUIT_MAGIC);
+ return DOWNCAST(or_circuit_t, x);
+}
+const or_circuit_t *
+CONST_TO_OR_CIRCUIT(const circuit_t *x)
+{
+ tor_assert(x->magic == OR_CIRCUIT_MAGIC);
+ return DOWNCAST(or_circuit_t, x);
+}
+origin_circuit_t *
+TO_ORIGIN_CIRCUIT(circuit_t *x)
+{
+ tor_assert(x->magic == ORIGIN_CIRCUIT_MAGIC);
+ return DOWNCAST(origin_circuit_t, x);
+}
+const origin_circuit_t *
+CONST_TO_ORIGIN_CIRCUIT(const circuit_t *x)
+{
+ tor_assert(x->magic == ORIGIN_CIRCUIT_MAGIC);
+ return DOWNCAST(origin_circuit_t, x);
+}
+
+/** A map from channel and circuit ID to circuit. (Lookup performance is
+ * very important here, since we need to do it every time a cell arrives.) */
+typedef struct chan_circid_circuit_map_t {
+ HT_ENTRY(chan_circid_circuit_map_t) node;
+ channel_t *chan;
+ circid_t circ_id;
+ circuit_t *circuit;
+ /* For debugging 12184: when was this placeholder item added? */
+ time_t made_placeholder_at;
+} chan_circid_circuit_map_t;
+
+/** Helper for hash tables: compare the channel and circuit ID for a and
+ * b, and return less than, equal to, or greater than zero appropriately.
+ */
+static inline int
+chan_circid_entries_eq_(chan_circid_circuit_map_t *a,
+ chan_circid_circuit_map_t *b)
+{
+ return a->chan == b->chan && a->circ_id == b->circ_id;
+}
+
+/** Helper: return a hash based on circuit ID and the pointer value of
+ * chan in <b>a</b>. */
+static inline unsigned int
+chan_circid_entry_hash_(chan_circid_circuit_map_t *a)
+{
+ /* Try to squeeze the siphash input into 8 bytes to save any extra siphash
+ * rounds. This hash function is in the critical path. */
+ uintptr_t chan = (uintptr_t) (void*) a->chan;
+ uint32_t array[2];
+ array[0] = a->circ_id;
+ /* The low bits of the channel pointer are uninteresting, since the channel
+ * is a pretty big structure. */
+ array[1] = (uint32_t) (chan >> 6);
+ return (unsigned) siphash24g(array, sizeof(array));
+}
+
+/** Map from [chan,circid] to circuit. */
+static HT_HEAD(chan_circid_map, chan_circid_circuit_map_t)
+ chan_circid_map = HT_INITIALIZER();
+HT_PROTOTYPE(chan_circid_map, chan_circid_circuit_map_t, node,
+ chan_circid_entry_hash_, chan_circid_entries_eq_)
+HT_GENERATE2(chan_circid_map, chan_circid_circuit_map_t, node,
+ chan_circid_entry_hash_, chan_circid_entries_eq_, 0.6,
+ tor_reallocarray_, tor_free_)
+
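+/* Editorial sketch (pattern used throughout this file): lookups in this map
+ * fill in a stack-allocated key and hand it to the hashtable macros:
+ *
+ *   chan_circid_circuit_map_t search;
+ *   memset(&search, 0, sizeof(search));
+ *   search.chan = chan;
+ *   search.circ_id = id;
+ *   chan_circid_circuit_map_t *ent =
+ *     HT_FIND(chan_circid_map, &chan_circid_map, &search);
+ *
+ * Only the chan and circ_id fields matter, since the hash and equality
+ * helpers above look at nothing else. */
+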
+/** The most recently returned entry from circuit_get_by_circid_chan;
+ * used to improve performance when many cells arrive in a row from the
+ * same circuit.
+ */
+static chan_circid_circuit_map_t *_last_circid_chan_ent = NULL;
+
+/** Implementation helper for circuit_set_{p,n}_circid_channel: A circuit ID
+ * and/or channel for <b>circ</b> has just changed from <b>old_chan, old_id</b>
+ * to <b>chan, id</b>. Adjust the chan,circid map as appropriate, removing
+ * the old entry (if any) and adding a new one. */
+static void
+circuit_set_circid_chan_helper(circuit_t *circ, int direction,
+ circid_t id,
+ channel_t *chan)
+{
+ chan_circid_circuit_map_t search;
+ chan_circid_circuit_map_t *found;
+ channel_t *old_chan, **chan_ptr;
+ circid_t old_id, *circid_ptr;
+ int make_active, attached = 0;
+
+ if (direction == CELL_DIRECTION_OUT) {
+ chan_ptr = &circ->n_chan;
+ circid_ptr = &circ->n_circ_id;
+ make_active = circ->n_chan_cells.n > 0;
+ } else {
+ or_circuit_t *c = TO_OR_CIRCUIT(circ);
+ chan_ptr = &c->p_chan;
+ circid_ptr = &c->p_circ_id;
+ make_active = c->p_chan_cells.n > 0;
+ }
+ old_chan = *chan_ptr;
+ old_id = *circid_ptr;
+
+ if (id == old_id && chan == old_chan)
+ return;
+
+ if (_last_circid_chan_ent &&
+ ((old_id == _last_circid_chan_ent->circ_id &&
+ old_chan == _last_circid_chan_ent->chan) ||
+ (id == _last_circid_chan_ent->circ_id &&
+ chan == _last_circid_chan_ent->chan))) {
+ _last_circid_chan_ent = NULL;
+ }
+
+ if (old_chan) {
+ /*
+ * If we're changing channels or ID and had an old channel and a
+ * non-zero old ID and weren't marked for close (i.e., we should have been
+ * attached), detach the circuit. ID changes require this because
+ * circuitmux hashes on (channel_id, circuit_id).
+ */
+ if (old_id != 0 && (old_chan != chan || old_id != id) &&
+ !(circ->marked_for_close)) {
+ tor_assert(old_chan->cmux);
+ circuitmux_detach_circuit(old_chan->cmux, circ);
+ }
+
+ /* we may need to remove it from the conn-circid map */
+ search.circ_id = old_id;
+ search.chan = old_chan;
+ found = HT_REMOVE(chan_circid_map, &chan_circid_map, &search);
+ if (found) {
+ tor_free(found);
+ if (direction == CELL_DIRECTION_OUT) {
+ /* One fewer circuits use old_chan as n_chan */
+ --(old_chan->num_n_circuits);
+ } else {
+ /* One fewer circuits use old_chan as p_chan */
+ --(old_chan->num_p_circuits);
+ }
+ }
+ }
+
+ /* Change the values only after we have possibly made the circuit inactive
+ * on the previous chan. */
+ *chan_ptr = chan;
+ *circid_ptr = id;
+
+ if (chan == NULL)
+ return;
+
+ /* now add the new one to the conn-circid map */
+ search.circ_id = id;
+ search.chan = chan;
+ found = HT_FIND(chan_circid_map, &chan_circid_map, &search);
+ if (found) {
+ found->circuit = circ;
+ found->made_placeholder_at = 0;
+ } else {
+ found = tor_malloc_zero(sizeof(chan_circid_circuit_map_t));
+ found->circ_id = id;
+ found->chan = chan;
+ found->circuit = circ;
+ HT_INSERT(chan_circid_map, &chan_circid_map, found);
+ }
+
+ /*
+ * Attach to the circuitmux if we're changing channels or IDs and
+ * have a new channel and ID to use and the circuit is not marked for
+ * close.
+ */
+ if (chan && id != 0 && (old_chan != chan || old_id != id) &&
+ !(circ->marked_for_close)) {
+ tor_assert(chan->cmux);
+ circuitmux_attach_circuit(chan->cmux, circ, direction);
+ attached = 1;
+ }
+
+ /*
+ * This is a no-op if we have no cells, but if we do it marks us active to
+ * the circuitmux
+ */
+ if (make_active && attached)
+ update_circuit_on_cmux(circ, direction);
+
+ /* Adjust circuit counts on new channel */
+ if (direction == CELL_DIRECTION_OUT) {
+ ++chan->num_n_circuits;
+ } else {
+ ++chan->num_p_circuits;
+ }
+}
+
+/** Mark that circuit id <b>id</b> shouldn't be used on channel <b>chan</b>,
+ * even if there is no circuit on the channel. We use this to keep the
+ * circuit id from getting re-used while we have queued but not yet sent
+ * a destroy cell. */
+void
+channel_mark_circid_unusable(channel_t *chan, circid_t id)
+{
+ chan_circid_circuit_map_t search;
+ chan_circid_circuit_map_t *ent;
+
+ /* See if there's an entry there. That wouldn't be good. */
+ memset(&search, 0, sizeof(search));
+ search.chan = chan;
+ search.circ_id = id;
+ ent = HT_FIND(chan_circid_map, &chan_circid_map, &search);
+
+ if (ent && ent->circuit) {
+ /* we have a problem. */
+ log_warn(LD_BUG, "Tried to mark %u unusable on %p, but there was already "
+ "a circuit there.", (unsigned)id, chan);
+ } else if (ent) {
+ /* It's already marked. */
+ if (!ent->made_placeholder_at)
+ ent->made_placeholder_at = approx_time();
+ } else {
+ ent = tor_malloc_zero(sizeof(chan_circid_circuit_map_t));
+ ent->chan = chan;
+ ent->circ_id = id;
+ /* leave circuit at NULL. */
+ ent->made_placeholder_at = approx_time();
+ HT_INSERT(chan_circid_map, &chan_circid_map, ent);
+ }
+}
+
+/** Mark that a circuit id <b>id</b> can be used again on <b>chan</b>.
+ * We use this to re-enable the circuit ID after we've sent a destroy cell.
+ */
+void
+channel_mark_circid_usable(channel_t *chan, circid_t id)
+{
+ chan_circid_circuit_map_t search;
+ chan_circid_circuit_map_t *ent;
+
+ /* Remove the entry, if there is one. A placeholder (no circuit) is
+ * expected here; an entry with a live circuit would be a bug. */
+ memset(&search, 0, sizeof(search));
+ search.chan = chan;
+ search.circ_id = id;
+ ent = HT_REMOVE(chan_circid_map, &chan_circid_map, &search);
+ if (ent && ent->circuit) {
+ log_warn(LD_BUG, "Tried to mark %u usable on %p, but there was already "
+ "a circuit there.", (unsigned)id, chan);
+ return;
+ }
+ if (_last_circid_chan_ent == ent)
+ _last_circid_chan_ent = NULL;
+ tor_free(ent);
+}
+
+/** Called to indicate that a DESTROY is pending on <b>chan</b> with
+ * circuit ID <b>id</b>, but hasn't been sent yet. */
+void
+channel_note_destroy_pending(channel_t *chan, circid_t id)
+{
+ circuit_t *circ = circuit_get_by_circid_channel_even_if_marked(id,chan);
+ if (circ) {
+ if (circ->n_chan == chan && circ->n_circ_id == id) {
+ circ->n_delete_pending = 1;
+ } else {
+ or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
+ if (orcirc->p_chan == chan && orcirc->p_circ_id == id) {
+ circ->p_delete_pending = 1;
+ }
+ }
+ return;
+ }
+ channel_mark_circid_unusable(chan, id);
+}
+
+/** Called to indicate that a DESTROY is no longer pending on <b>chan</b> with
+ * circuit ID <b>id</b> -- typically, because it has been sent. */
+MOCK_IMPL(void,
+channel_note_destroy_not_pending,(channel_t *chan, circid_t id))
+{
+ circuit_t *circ = circuit_get_by_circid_channel_even_if_marked(id,chan);
+ if (circ) {
+ if (circ->n_chan == chan && circ->n_circ_id == id) {
+ circ->n_delete_pending = 0;
+ } else {
+ or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
+ if (orcirc->p_chan == chan && orcirc->p_circ_id == id) {
+ circ->p_delete_pending = 0;
+ }
+ }
+ /* XXXX this shouldn't happen; log a bug here. */
+ return;
+ }
+ channel_mark_circid_usable(chan, id);
+}
+
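+/* Editorial sketch (not part of this commit) of the DESTROY bookkeeping
+ * above, as seen from a single (chan, id) pair:
+ *
+ *   channel_note_destroy_pending(chan, id);     // DESTROY queued
+ *   ...                                         // cell waits in the queue
+ *   channel_note_destroy_not_pending(chan, id); // DESTROY finally sent
+ *
+ * While the destroy is pending the slot stays reserved -- either via the
+ * n/p_delete_pending flag on a live circuit, or via a placeholder map entry
+ * when the circuit is already gone -- so the circuit ID cannot be reused
+ * before the cell has left the queue. */
+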
+/** Set the p_chan field of the circuit <b>or_circ</b>, along
+ * with the corresponding circuit ID, and add the circuit as appropriate
+ * to the (chan,id)-\>circuit map. */
+void
+circuit_set_p_circid_chan(or_circuit_t *or_circ, circid_t id,
+ channel_t *chan)
+{
+ circuit_t *circ = TO_CIRCUIT(or_circ);
+ channel_t *old_chan = or_circ->p_chan;
+ circid_t old_id = or_circ->p_circ_id;
+
+ circuit_set_circid_chan_helper(circ, CELL_DIRECTION_IN, id, chan);
+
+ if (chan) {
+ chan->timestamp_last_had_circuits = approx_time();
+ }
+
+ if (circ->p_delete_pending && old_chan) {
+ channel_mark_circid_unusable(old_chan, old_id);
+ circ->p_delete_pending = 0;
+ }
+}
+
+/** Set the n_chan field of the circuit <b>circ</b>, along
+ * with the corresponding circuit ID, and add the circuit as appropriate
+ * to the (chan,id)-\>circuit map. */
+void
+circuit_set_n_circid_chan(circuit_t *circ, circid_t id,
+ channel_t *chan)
+{
+ channel_t *old_chan = circ->n_chan;
+ circid_t old_id = circ->n_circ_id;
+
+ circuit_set_circid_chan_helper(circ, CELL_DIRECTION_OUT, id, chan);
+
+ if (chan) {
+ chan->timestamp_last_had_circuits = approx_time();
+ }
+
+ if (circ->n_delete_pending && old_chan) {
+ channel_mark_circid_unusable(old_chan, old_id);
+ circ->n_delete_pending = 0;
+ }
+}
+
+/** Change the state of <b>circ</b> to <b>state</b>, adding it to or removing
+ * it from lists as appropriate. */
+void
+circuit_set_state(circuit_t *circ, uint8_t state)
+{
+ tor_assert(circ);
+ if (state == circ->state)
+ return;
+ if (PREDICT_UNLIKELY(!circuits_pending_chans))
+ circuits_pending_chans = smartlist_new();
+ if (PREDICT_UNLIKELY(!circuits_pending_other_guards))
+ circuits_pending_other_guards = smartlist_new();
+ if (circ->state == CIRCUIT_STATE_CHAN_WAIT) {
+ /* remove from waiting-circuit list. */
+ smartlist_remove(circuits_pending_chans, circ);
+ }
+ if (state == CIRCUIT_STATE_CHAN_WAIT) {
+ /* add to waiting-circuit list. */
+ smartlist_add(circuits_pending_chans, circ);
+ }
+ if (circ->state == CIRCUIT_STATE_GUARD_WAIT) {
+ smartlist_remove(circuits_pending_other_guards, circ);
+ }
+ if (state == CIRCUIT_STATE_GUARD_WAIT) {
+ smartlist_add(circuits_pending_other_guards, circ);
+ }
+ if (state == CIRCUIT_STATE_GUARD_WAIT || state == CIRCUIT_STATE_OPEN)
+ tor_assert(!circ->n_chan_create_cell);
+ circ->state = state;
+}
+
+/** Append to <b>out</b> all circuits in state CHAN_WAIT waiting for
+ * the given channel. */
+void
+circuit_get_all_pending_on_channel(smartlist_t *out, channel_t *chan)
+{
+ tor_assert(out);
+ tor_assert(chan);
+
+ if (!circuits_pending_chans)
+ return;
+
+ SMARTLIST_FOREACH_BEGIN(circuits_pending_chans, circuit_t *, circ) {
+ if (circ->marked_for_close)
+ continue;
+ if (!circ->n_hop)
+ continue;
+ tor_assert(circ->state == CIRCUIT_STATE_CHAN_WAIT);
+ if (tor_digest_is_zero(circ->n_hop->identity_digest)) {
+ /* Look at addr/port. This is an unkeyed connection. */
+ if (!channel_matches_extend_info(chan, circ->n_hop))
+ continue;
+ } else {
+ /* We expected a key. See if it's the right one. */
+ if (tor_memneq(chan->identity_digest,
+ circ->n_hop->identity_digest, DIGEST_LEN))
+ continue;
+ }
+ smartlist_add(out, circ);
+ } SMARTLIST_FOREACH_END(circ);
+}
+
+/** Return the number of circuits in state CHAN_WAIT, waiting for the given
+ * channel. */
+int
+circuit_count_pending_on_channel(channel_t *chan)
+{
+ int cnt;
+ smartlist_t *sl = smartlist_new();
+
+ tor_assert(chan);
+
+ circuit_get_all_pending_on_channel(sl, chan);
+ cnt = smartlist_len(sl);
+ smartlist_free(sl);
+ log_debug(LD_CIRC,"or_conn to %s, %d pending circs",
+ channel_get_canonical_remote_descr(chan),
+ cnt);
+ return cnt;
+}
+
+/** Remove <b>origin_circ</b> from the global list of origin circuits.
+ * Called when we are freeing a circuit.
+ */
+static void
+circuit_remove_from_origin_circuit_list(origin_circuit_t *origin_circ)
+{
+ int origin_idx = origin_circ->global_origin_circuit_list_idx;
+ if (origin_idx < 0)
+ return;
+ origin_circuit_t *c2;
+ tor_assert(origin_idx <= smartlist_len(global_origin_circuit_list));
+ c2 = smartlist_get(global_origin_circuit_list, origin_idx);
+ tor_assert(origin_circ == c2);
+ smartlist_del(global_origin_circuit_list, origin_idx);
+ if (origin_idx < smartlist_len(global_origin_circuit_list)) {
+ origin_circuit_t *replacement =
+ smartlist_get(global_origin_circuit_list, origin_idx);
+ replacement->global_origin_circuit_list_idx = origin_idx;
+ }
+ origin_circ->global_origin_circuit_list_idx = -1;
+}
+
+/** Add <b>origin_circ</b> to the global list of origin circuits. Called
+ * when creating the circuit. */
+static void
+circuit_add_to_origin_circuit_list(origin_circuit_t *origin_circ)
+{
+ tor_assert(origin_circ->global_origin_circuit_list_idx == -1);
+ smartlist_t *lst = circuit_get_global_origin_circuit_list();
+ smartlist_add(lst, origin_circ);
+ origin_circ->global_origin_circuit_list_idx = smartlist_len(lst) - 1;
+}
+
+/** Detach from the global circuit list, and deallocate, all
+ * circuits that have been marked for close.
+ */
+void
+circuit_close_all_marked(void)
+{
+ if (circuits_pending_close == NULL)
+ return;
+
+ smartlist_t *lst = circuit_get_global_list();
+ SMARTLIST_FOREACH_BEGIN(circuits_pending_close, circuit_t *, circ) {
+ tor_assert(circ->marked_for_close);
+
+ /* Remove it from the circuit list. */
+ int idx = circ->global_circuitlist_idx;
+ smartlist_del(lst, idx);
+ if (idx < smartlist_len(lst)) {
+ circuit_t *replacement = smartlist_get(lst, idx);
+ replacement->global_circuitlist_idx = idx;
+ }
+ circ->global_circuitlist_idx = -1;
+
+ /* Remove it from the origin circuit list, if appropriate. */
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ circuit_remove_from_origin_circuit_list(TO_ORIGIN_CIRCUIT(circ));
+ }
+
+ circuit_about_to_free(circ);
+ circuit_free(circ);
+ } SMARTLIST_FOREACH_END(circ);
+
+ smartlist_clear(circuits_pending_close);
+}
+
+/** Return a pointer to the global list of circuits. */
+MOCK_IMPL(smartlist_t *,
+circuit_get_global_list,(void))
+{
+ if (NULL == global_circuitlist)
+ global_circuitlist = smartlist_new();
+ return global_circuitlist;
+}
+
+/** Return a pointer to the global list of origin circuits. */
+smartlist_t *
+circuit_get_global_origin_circuit_list(void)
+{
+ if (NULL == global_origin_circuit_list)
+ global_origin_circuit_list = smartlist_new();
+ return global_origin_circuit_list;
+}
+
+/**
+ * Return true if we have any opened general-purpose 3-hop
+ * origin circuits.
+ *
+ * The result from this function is cached for use by
+ * circuit_any_opened_circuits_cached().
+ */
+int
+circuit_any_opened_circuits(void)
+{
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_origin_circuit_list(),
+ const origin_circuit_t *, next_circ) {
+ if (!TO_CIRCUIT(next_circ)->marked_for_close &&
+ next_circ->has_opened &&
+ TO_CIRCUIT(next_circ)->state == CIRCUIT_STATE_OPEN &&
+ TO_CIRCUIT(next_circ)->purpose != CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT &&
+ next_circ->build_state &&
+ next_circ->build_state->desired_path_len == DEFAULT_ROUTE_LEN) {
+ circuit_cache_opened_circuit_state(1);
+ return 1;
+ }
+ } SMARTLIST_FOREACH_END(next_circ);
+
+ circuit_cache_opened_circuit_state(0);
+ return 0;
+}
+
+/**
+ * Cache the "any circuits opened" state, as specified in param
+ * circuits_are_opened. This is a helper function to update
+ * the circuit opened status whenever we happen to look at the
+ * circuit list.
+ */
+void
+circuit_cache_opened_circuit_state(int circuits_are_opened)
+{
+ any_opened_circs_cached_val = circuits_are_opened;
+}
+
+/**
+ * Return true if there were any opened circuits since the last call to
+ * circuit_any_opened_circuits(), or since circuit_expire_building() last
+ * ran (it runs roughly once per second).
+ */
+int
+circuit_any_opened_circuits_cached(void)
+{
+ return any_opened_circs_cached_val;
+}
+
+/** Function to make circ-\>state human-readable */
+const char *
+circuit_state_to_string(int state)
+{
+ static char buf[64];
+ switch (state) {
+ case CIRCUIT_STATE_BUILDING: return "doing handshakes";
+ case CIRCUIT_STATE_ONIONSKIN_PENDING: return "processing the onion";
+ case CIRCUIT_STATE_CHAN_WAIT: return "connecting to server";
+ case CIRCUIT_STATE_GUARD_WAIT: return "waiting to see how other "
+ "guards perform";
+ case CIRCUIT_STATE_OPEN: return "open";
+ default:
+ log_warn(LD_BUG, "Unknown circuit state %d", state);
+ tor_snprintf(buf, sizeof(buf), "unknown state [%d]", state);
+ return buf;
+ }
+}
+
+/** Map a circuit purpose to a string suitable to be displayed to a
+ * controller. */
+const char *
+circuit_purpose_to_controller_string(uint8_t purpose)
+{
+ static char buf[32];
+ switch (purpose) {
+ case CIRCUIT_PURPOSE_OR:
+ case CIRCUIT_PURPOSE_INTRO_POINT:
+ case CIRCUIT_PURPOSE_REND_POINT_WAITING:
+ case CIRCUIT_PURPOSE_REND_ESTABLISHED:
+ return "SERVER"; /* A controller should never see these, actually. */
+
+ case CIRCUIT_PURPOSE_C_GENERAL:
+ return "GENERAL";
+
+ case CIRCUIT_PURPOSE_C_HSDIR_GET:
+ return "HS_CLIENT_HSDIR";
+
+ case CIRCUIT_PURPOSE_C_INTRODUCING:
+ case CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT:
+ case CIRCUIT_PURPOSE_C_INTRODUCE_ACKED:
+ return "HS_CLIENT_INTRO";
+
+ case CIRCUIT_PURPOSE_C_ESTABLISH_REND:
+ case CIRCUIT_PURPOSE_C_REND_READY:
+ case CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED:
+ case CIRCUIT_PURPOSE_C_REND_JOINED:
+ return "HS_CLIENT_REND";
+
+ case CIRCUIT_PURPOSE_S_HSDIR_POST:
+ return "HS_SERVICE_HSDIR";
+
+ case CIRCUIT_PURPOSE_S_ESTABLISH_INTRO:
+ case CIRCUIT_PURPOSE_S_INTRO:
+ return "HS_SERVICE_INTRO";
+
+ case CIRCUIT_PURPOSE_S_CONNECT_REND:
+ case CIRCUIT_PURPOSE_S_REND_JOINED:
+ return "HS_SERVICE_REND";
+
+ case CIRCUIT_PURPOSE_TESTING:
+ return "TESTING";
+ case CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT:
+ return "MEASURE_TIMEOUT";
+ case CIRCUIT_PURPOSE_CONTROLLER:
+ return "CONTROLLER";
+ case CIRCUIT_PURPOSE_PATH_BIAS_TESTING:
+ return "PATH_BIAS_TESTING";
+ case CIRCUIT_PURPOSE_HS_VANGUARDS:
+ return "HS_VANGUARDS";
+
+ default:
+ tor_snprintf(buf, sizeof(buf), "UNKNOWN_%d", (int)purpose);
+ return buf;
+ }
+}
+
+/** Return a string specifying the state of the hidden-service circuit
+ * purpose <b>purpose</b>, or NULL if <b>purpose</b> is not a
+ * hidden-service-related circuit purpose. */
+const char *
+circuit_purpose_to_controller_hs_state_string(uint8_t purpose)
+{
+ switch (purpose)
+ {
+ default:
+ log_fn(LOG_WARN, LD_BUG,
+ "Unrecognized circuit purpose: %d",
+ (int)purpose);
+ tor_fragile_assert();
+ /* fall through */
+
+ case CIRCUIT_PURPOSE_OR:
+ case CIRCUIT_PURPOSE_C_GENERAL:
+ case CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT:
+ case CIRCUIT_PURPOSE_TESTING:
+ case CIRCUIT_PURPOSE_CONTROLLER:
+ case CIRCUIT_PURPOSE_PATH_BIAS_TESTING:
+ case CIRCUIT_PURPOSE_HS_VANGUARDS:
+ return NULL;
+
+ case CIRCUIT_PURPOSE_INTRO_POINT:
+ return "OR_HSSI_ESTABLISHED";
+ case CIRCUIT_PURPOSE_REND_POINT_WAITING:
+ return "OR_HSCR_ESTABLISHED";
+ case CIRCUIT_PURPOSE_REND_ESTABLISHED:
+ return "OR_HS_R_JOINED";
+
+ case CIRCUIT_PURPOSE_C_HSDIR_GET:
+ case CIRCUIT_PURPOSE_C_INTRODUCING:
+ return "HSCI_CONNECTING";
+ case CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT:
+ return "HSCI_INTRO_SENT";
+ case CIRCUIT_PURPOSE_C_INTRODUCE_ACKED:
+ return "HSCI_DONE";
+
+ case CIRCUIT_PURPOSE_C_ESTABLISH_REND:
+ return "HSCR_CONNECTING";
+ case CIRCUIT_PURPOSE_C_REND_READY:
+ return "HSCR_ESTABLISHED_IDLE";
+ case CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED:
+ return "HSCR_ESTABLISHED_WAITING";
+ case CIRCUIT_PURPOSE_C_REND_JOINED:
+ return "HSCR_JOINED";
+
+ case CIRCUIT_PURPOSE_S_HSDIR_POST:
+ case CIRCUIT_PURPOSE_S_ESTABLISH_INTRO:
+ return "HSSI_CONNECTING";
+ case CIRCUIT_PURPOSE_S_INTRO:
+ return "HSSI_ESTABLISHED";
+
+ case CIRCUIT_PURPOSE_S_CONNECT_REND:
+ return "HSSR_CONNECTING";
+ case CIRCUIT_PURPOSE_S_REND_JOINED:
+ return "HSSR_JOINED";
+ }
+}
+
+/** Return a human-readable string for the circuit purpose <b>purpose</b>. */
+const char *
+circuit_purpose_to_string(uint8_t purpose)
+{
+ static char buf[32];
+
+ switch (purpose)
+ {
+ case CIRCUIT_PURPOSE_OR:
+ return "Circuit at relay";
+ case CIRCUIT_PURPOSE_INTRO_POINT:
+ return "Acting as intro point";
+ case CIRCUIT_PURPOSE_REND_POINT_WAITING:
+ return "Acting as rendezvous (pending)";
+ case CIRCUIT_PURPOSE_REND_ESTABLISHED:
+ return "Acting as rendezvous (established)";
+ case CIRCUIT_PURPOSE_C_GENERAL:
+ return "General-purpose client";
+ case CIRCUIT_PURPOSE_C_INTRODUCING:
+ return "Hidden service client: Connecting to intro point";
+ case CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT:
+ return "Hidden service client: Waiting for ack from intro point";
+ case CIRCUIT_PURPOSE_C_INTRODUCE_ACKED:
+ return "Hidden service client: Received ack from intro point";
+ case CIRCUIT_PURPOSE_C_ESTABLISH_REND:
+ return "Hidden service client: Establishing rendezvous point";
+ case CIRCUIT_PURPOSE_C_REND_READY:
+ return "Hidden service client: Pending rendezvous point";
+ case CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED:
+ return "Hidden service client: Pending rendezvous point (ack received)";
+ case CIRCUIT_PURPOSE_C_REND_JOINED:
+ return "Hidden service client: Active rendezvous point";
+ case CIRCUIT_PURPOSE_C_HSDIR_GET:
+ return "Hidden service client: Fetching HS descriptor";
+
+ case CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT:
+ return "Measuring circuit timeout";
+
+ case CIRCUIT_PURPOSE_S_ESTABLISH_INTRO:
+ return "Hidden service: Establishing introduction point";
+ case CIRCUIT_PURPOSE_S_INTRO:
+ return "Hidden service: Introduction point";
+ case CIRCUIT_PURPOSE_S_CONNECT_REND:
+ return "Hidden service: Connecting to rendezvous point";
+ case CIRCUIT_PURPOSE_S_REND_JOINED:
+ return "Hidden service: Active rendezvous point";
+ case CIRCUIT_PURPOSE_S_HSDIR_POST:
+ return "Hidden service: Uploading HS descriptor";
+
+ case CIRCUIT_PURPOSE_TESTING:
+ return "Testing circuit";
+
+ case CIRCUIT_PURPOSE_CONTROLLER:
+ return "Circuit made by controller";
+
+ case CIRCUIT_PURPOSE_PATH_BIAS_TESTING:
+ return "Path-bias testing circuit";
+
+ case CIRCUIT_PURPOSE_HS_VANGUARDS:
+ return "Hidden service: Pre-built vanguard circuit";
+
+ default:
+ tor_snprintf(buf, sizeof(buf), "UNKNOWN_%d", (int)purpose);
+ return buf;
+ }
+}
+
+/** Pick a reasonable package_window for our circuits to start out with.
+ * Originally this was hard-coded at 1000, but now the consensus votes
+ * on the answer. See proposal 168. */
+int32_t
+circuit_initial_package_window(void)
+{
+ int32_t num = networkstatus_get_param(NULL, "circwindow", CIRCWINDOW_START,
+ CIRCWINDOW_START_MIN,
+ CIRCWINDOW_START_MAX);
+ /* If the consensus tells us a negative number, we'd assert. */
+ if (num < 0)
+ num = CIRCWINDOW_START;
+ return num;
+}
+
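+/* Editorial example (bounds assumed from or.h, which is not part of this
+ * diff): networkstatus_get_param() clamps the consensus "circwindow" value
+ * into [CIRCWINDOW_START_MIN, CIRCWINDOW_START_MAX] and falls back to
+ * CIRCWINDOW_START when the parameter is absent, so an in-range vote such
+ * as circwindow=500 is used as-is while an out-of-range vote is pinned to
+ * the nearest bound instead of tripping the assert mentioned above. */
+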
+/** Initialize the common elements in a circuit_t, and add it to the global
+ * list. */
+static void
+init_circuit_base(circuit_t *circ)
+{
+ tor_gettimeofday(&circ->timestamp_created);
+
+ // Gets reset when we send CREATE_FAST.
+ // circuit_expire_building() expects these to be equal
+ // until the orconn is built.
+ circ->timestamp_began = circ->timestamp_created;
+
+ circ->package_window = circuit_initial_package_window();
+ circ->deliver_window = CIRCWINDOW_START;
+ cell_queue_init(&circ->n_chan_cells);
+
+ smartlist_add(circuit_get_global_list(), circ);
+ circ->global_circuitlist_idx = smartlist_len(circuit_get_global_list()) - 1;
+}
+
+/** If we haven't yet decided on a good timeout value for circuit
+ * building, we close idle circuits aggressively so we can get more
+ * data points. These are the default, min, and max consensus values. */
+#define DFLT_IDLE_TIMEOUT_WHILE_LEARNING (3*60)
+#define MIN_IDLE_TIMEOUT_WHILE_LEARNING (10)
+#define MAX_IDLE_TIMEOUT_WHILE_LEARNING (1000*60)
+
+/** Allocate space for a new origin circuit and initialize it. Add it to
+ * the global circuit list and the global origin circuit list.
+ */
+origin_circuit_t *
+origin_circuit_new(void)
+{
+ origin_circuit_t *circ;
+ /* never zero, since a global ID of 0 is treated specially by the
+ * controller */
+ static uint32_t n_circuits_allocated = 1;
+
+ circ = tor_malloc_zero(sizeof(origin_circuit_t));
+ circ->base_.magic = ORIGIN_CIRCUIT_MAGIC;
+
+ circ->next_stream_id = crypto_rand_int(1<<16);
+ circ->global_identifier = n_circuits_allocated++;
+ circ->remaining_relay_early_cells = MAX_RELAY_EARLY_CELLS_PER_CIRCUIT;
+ circ->remaining_relay_early_cells -= crypto_rand_int(2);
+
+ init_circuit_base(TO_CIRCUIT(circ));
+
+ /* Add to origin-list. */
+ circ->global_origin_circuit_list_idx = -1;
+ circuit_add_to_origin_circuit_list(circ);
+
+ circuit_build_times_update_last_circ(get_circuit_build_times_mutable());
+
+ if (! circuit_build_times_disabled(get_options()) &&
+ circuit_build_times_needs_circuits(get_circuit_build_times())) {
+ /* Circuits should be shorter lived if we need more of them
+ * for learning a good build timeout */
+ circ->circuit_idle_timeout =
+ networkstatus_get_param(NULL, "cbtlearntimeout",
+ DFLT_IDLE_TIMEOUT_WHILE_LEARNING,
+ MIN_IDLE_TIMEOUT_WHILE_LEARNING,
+ MAX_IDLE_TIMEOUT_WHILE_LEARNING);
+ } else {
+ // This should always be larger than the current port prediction time
+ // remaining, or else we'll end up with the case where a circuit times out
+ // and another one is built, effectively doubling the timeout window.
+ //
+ // We also randomize it by up to 5% more (i.e., 5% of 0 to 3600 seconds,
+ // depending on how much circuit prediction time is remaining) so that
+ // we don't close a bunch of unused circuits all at the same time.
+ int prediction_time_remaining =
+ predicted_ports_prediction_time_remaining(time(NULL));
+ circ->circuit_idle_timeout = prediction_time_remaining+1+
+ crypto_rand_int(1+prediction_time_remaining/20);
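+ /* Editorial worked example: with 3600 seconds of prediction time
+ * remaining, this picks 3601 + crypto_rand_int(181), i.e. a timeout in
+ * [3601, 3781] -- at most roughly 5% above the remaining window. */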
+
+ if (circ->circuit_idle_timeout <= 0) {
+ log_warn(LD_BUG,
+ "Circuit chose a negative idle timeout of %d based on "
+ "%d seconds of predictive building remaining.",
+ circ->circuit_idle_timeout,
+ prediction_time_remaining);
+ circ->circuit_idle_timeout =
+ networkstatus_get_param(NULL, "cbtlearntimeout",
+ DFLT_IDLE_TIMEOUT_WHILE_LEARNING,
+ MIN_IDLE_TIMEOUT_WHILE_LEARNING,
+ MAX_IDLE_TIMEOUT_WHILE_LEARNING);
+ }
+
+ log_info(LD_CIRC,
+ "Circuit %"PRIu32" chose an idle timeout of %d based on "
+ "%d seconds of predictive building remaining.",
+ (circ->global_identifier),
+ circ->circuit_idle_timeout,
+ prediction_time_remaining);
+ }
+
+ return circ;
+}
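+
+/* A standalone sketch (illustration only) of the idle-timeout jitter
+ * computed above: the timeout is the remaining prediction time plus one
+ * second plus a random slack of up to ~5% of that time, so idle circuits
+ * don't all expire at once. Assumes a nonnegative input. `toy_rand_int`
+ * is a hypothetical stand-in for crypto_rand_int(); rand() is biased and
+ * not suitable for real use. */
+#include <stdlib.h>
+static int
+toy_rand_int(int max)
+{
+ return rand() % max; /* biased; illustration only */
+}
+static int
+jittered_idle_timeout(int prediction_time_remaining)
+{
+ return prediction_time_remaining + 1 +
+ toy_rand_int(1 + prediction_time_remaining / 20);
+}
+/* With 3600 seconds remaining, the result lands in [3601, 3781]. */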
+
+/** Allocate a new or_circuit_t, connected to <b>p_chan</b> as
+ * <b>p_circ_id</b>. If <b>p_chan</b> is NULL, the circuit is unattached. */
+or_circuit_t *
+or_circuit_new(circid_t p_circ_id, channel_t *p_chan)
+{
+ /* CircIDs */
+ or_circuit_t *circ;
+
+ circ = tor_malloc_zero(sizeof(or_circuit_t));
+ circ->base_.magic = OR_CIRCUIT_MAGIC;
+
+ if (p_chan)
+ circuit_set_p_circid_chan(circ, p_circ_id, p_chan);
+
+ circ->remaining_relay_early_cells = MAX_RELAY_EARLY_CELLS_PER_CIRCUIT;
+ cell_queue_init(&circ->p_chan_cells);
+
+ init_circuit_base(TO_CIRCUIT(circ));
+
+ return circ;
+}
+
+/** Free all storage held in circ->testing_cell_stats */
+void
+circuit_clear_testing_cell_stats(circuit_t *circ)
+{
+ if (!circ || !circ->testing_cell_stats)
+ return;
+ SMARTLIST_FOREACH(circ->testing_cell_stats, testing_cell_stats_entry_t *,
+ ent, tor_free(ent));
+ smartlist_free(circ->testing_cell_stats);
+ circ->testing_cell_stats = NULL;
+}
+
+/** Deallocate space associated with circ.
+ */
+STATIC void
+circuit_free_(circuit_t *circ)
+{
+ circid_t n_circ_id = 0;
+ void *mem;
+ size_t memlen;
+ int should_free = 1;
+ if (!circ)
+ return;
+
+ /* We keep a copy of this so we can log its value before it gets unset. */
+ n_circ_id = circ->n_circ_id;
+
+ circuit_clear_testing_cell_stats(circ);
+
+ /* Clean up anything HS v3 related on this circuit. We also do this when
+ * the circuit is closed. This avoids any code path that frees registered
+ * circuits without closing them first. This needs to happen before the
+ * hs identifier is freed. */
+ hs_circ_cleanup(circ);
+
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
+ mem = ocirc;
+ memlen = sizeof(origin_circuit_t);
+ tor_assert(circ->magic == ORIGIN_CIRCUIT_MAGIC);
+
+ circuit_remove_from_origin_circuit_list(ocirc);
+
+ if (ocirc->build_state) {
+ extend_info_free(ocirc->build_state->chosen_exit);
+ circuit_free_cpath_node(ocirc->build_state->pending_final_cpath);
+ cpath_ref_decref(ocirc->build_state->service_pending_final_cpath_ref);
+ }
+ tor_free(ocirc->build_state);
+
+ /* Cancel before freeing, if we haven't already succeeded or failed. */
+ if (ocirc->guard_state) {
+ entry_guard_cancel(&ocirc->guard_state);
+ }
+ circuit_guard_state_free(ocirc->guard_state);
+
+ circuit_clear_cpath(ocirc);
+
+ crypto_pk_free(ocirc->intro_key);
+ rend_data_free(ocirc->rend_data);
+
+ /* Finally, free the circuit's HS identifier and set it to NULL so that
+ * repeated cleanup is safe. */
+ hs_ident_circuit_free(ocirc->hs_ident);
+ ocirc->hs_ident = NULL;
+
+ tor_free(ocirc->dest_address);
+ if (ocirc->socks_username) {
+ memwipe(ocirc->socks_username, 0x12, ocirc->socks_username_len);
+ tor_free(ocirc->socks_username);
+ }
+ if (ocirc->socks_password) {
+ memwipe(ocirc->socks_password, 0x06, ocirc->socks_password_len);
+ tor_free(ocirc->socks_password);
+ }
+ addr_policy_list_free(ocirc->prepend_policy);
+ } else {
+ or_circuit_t *ocirc = TO_OR_CIRCUIT(circ);
+ /* Remember cell statistics for this circuit before deallocating. */
+ if (get_options()->CellStatistics)
+ rep_hist_buffer_stats_add_circ(circ, time(NULL));
+ mem = ocirc;
+ memlen = sizeof(or_circuit_t);
+ tor_assert(circ->magic == OR_CIRCUIT_MAGIC);
+
+ should_free = (ocirc->workqueue_entry == NULL);
+
+ relay_crypto_clear(&ocirc->crypto);
+
+ if (ocirc->rend_splice) {
+ or_circuit_t *other = ocirc->rend_splice;
+ tor_assert(other->base_.magic == OR_CIRCUIT_MAGIC);
+ other->rend_splice = NULL;
+ }
+
+ /* remove from map. */
+ circuit_set_p_circid_chan(ocirc, 0, NULL);
+
+ /* Clear cell queue _after_ removing it from the map. Otherwise our
+ * "active" checks will be violated. */
+ cell_queue_clear(&ocirc->p_chan_cells);
+ }
+
+ extend_info_free(circ->n_hop);
+ tor_free(circ->n_chan_create_cell);
+
+ if (circ->global_circuitlist_idx != -1) {
+ int idx = circ->global_circuitlist_idx;
+ circuit_t *c2 = smartlist_get(global_circuitlist, idx);
+ tor_assert(c2 == circ);
+ smartlist_del(global_circuitlist, idx);
+ if (idx < smartlist_len(global_circuitlist)) {
+ c2 = smartlist_get(global_circuitlist, idx);
+ c2->global_circuitlist_idx = idx;
+ }
+ }
+
+ /* Remove from map. */
+ circuit_set_n_circid_chan(circ, 0, NULL);
+
+ /* Clear cell queue _after_ removing it from the map. Otherwise our
+ * "active" checks will be violated. */
+ cell_queue_clear(&circ->n_chan_cells);
+
+ log_info(LD_CIRC, "Circuit %u (id: %" PRIu32 ") has been freed.",
+ n_circ_id,
+ CIRCUIT_IS_ORIGIN(circ) ?
+ TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
+
+ if (should_free) {
+ memwipe(mem, 0xAA, memlen); /* poison memory */
+ tor_free(mem);
+ } else {
+ /* If we made it here, this is an or_circuit_t that still has a pending
+ * cpuworker request which we weren't able to cancel. Instead, set up
+ * the magic value so that when the reply comes back, we'll know to discard
+ * the reply and free this structure.
+ */
+ memwipe(mem, 0xAA, memlen);
+ circ->magic = DEAD_CIRCUIT_MAGIC;
+ }
+}
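+
+/* A standalone sketch (illustration only) of the deferred-free pattern
+ * above: when a pending worker request cannot be cancelled, we poison the
+ * object and swap its magic for a sentinel, so the late reply can detect
+ * a dead object instead of touching freed memory. All names here are
+ * hypothetical. */
+#include <stdlib.h>
+#include <string.h>
+#define TOY_DEAD_MAGIC 0xdeadbeefu
+struct toy_job {
+ unsigned magic; /* set to a live magic at allocation time */
+ char payload[64];
+};
+static void
+toy_job_retire(struct toy_job *job, int reply_still_pending)
+{
+ memset(job->payload, 0xAA, sizeof(job->payload)); /* poison */
+ if (reply_still_pending)
+ job->magic = TOY_DEAD_MAGIC; /* the reply handler frees it later */
+ else
+ free(job);
+}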
+
+/** Deallocate the linked list circ-><b>cpath</b>, and remove the cpath from
+ * <b>circ</b>. */
+void
+circuit_clear_cpath(origin_circuit_t *circ)
+{
+ crypt_path_t *victim, *head, *cpath;
+
+ head = cpath = circ->cpath;
+
+ if (!cpath)
+ return;
+
+ /* it's a circular list, so we have to notice when we've
+ * gone through it once. */
+ while (cpath->next && cpath->next != head) {
+ victim = cpath;
+ cpath = victim->next;
+ circuit_free_cpath_node(victim);
+ }
+
+ circuit_free_cpath_node(cpath);
+
+ circ->cpath = NULL;
+}
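+
+/* A standalone sketch (illustration only) of the circular-list teardown
+ * above: walk until the next pointer wraps back to the head, freeing as
+ * we go, then free the final node. */
+#include <stdlib.h>
+struct toy_ring {
+ struct toy_ring *next;
+};
+static void
+toy_ring_free_all(struct toy_ring *head)
+{
+ struct toy_ring *cur = head, *victim;
+ if (!cur)
+ return;
+ while (cur->next && cur->next != head) {
+ victim = cur;
+ cur = victim->next;
+ free(victim);
+ }
+ free(cur);
+}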
+
+/** Release all storage held by circuits. */
+void
+circuit_free_all(void)
+{
+ smartlist_t *lst = circuit_get_global_list();
+
+ SMARTLIST_FOREACH_BEGIN(lst, circuit_t *, tmp) {
+ if (! CIRCUIT_IS_ORIGIN(tmp)) {
+ or_circuit_t *or_circ = TO_OR_CIRCUIT(tmp);
+ while (or_circ->resolving_streams) {
+ edge_connection_t *next_conn;
+ next_conn = or_circ->resolving_streams->next_stream;
+ connection_free_(TO_CONN(or_circ->resolving_streams));
+ or_circ->resolving_streams = next_conn;
+ }
+ }
+ tmp->global_circuitlist_idx = -1;
+ circuit_about_to_free_atexit(tmp);
+ circuit_free(tmp);
+ SMARTLIST_DEL_CURRENT(lst, tmp);
+ } SMARTLIST_FOREACH_END(tmp);
+
+ smartlist_free(lst);
+ global_circuitlist = NULL;
+
+ smartlist_free(global_origin_circuit_list);
+ global_origin_circuit_list = NULL;
+
+ smartlist_free(circuits_pending_chans);
+ circuits_pending_chans = NULL;
+
+ smartlist_free(circuits_pending_close);
+ circuits_pending_close = NULL;
+
+ smartlist_free(circuits_pending_other_guards);
+ circuits_pending_other_guards = NULL;
+
+ {
+ chan_circid_circuit_map_t **elt, **next, *c;
+ for (elt = HT_START(chan_circid_map, &chan_circid_map);
+ elt;
+ elt = next) {
+ c = *elt;
+ next = HT_NEXT_RMV(chan_circid_map, &chan_circid_map, elt);
+
+ tor_assert(c->circuit == NULL);
+ tor_free(c);
+ }
+ }
+ HT_CLEAR(chan_circid_map, &chan_circid_map);
+}
+
+/** Deallocate space associated with the cpath node <b>victim</b>. */
+static void
+circuit_free_cpath_node(crypt_path_t *victim)
+{
+ if (!victim)
+ return;
+
+ relay_crypto_clear(&victim->crypto);
+ onion_handshake_state_release(&victim->handshake_state);
+ crypto_dh_free(victim->rend_dh_handshake_state);
+ extend_info_free(victim->extend_info);
+
+ memwipe(victim, 0xBB, sizeof(crypt_path_t)); /* poison memory */
+ tor_free(victim);
+}
+
+/** Release a crypt_path_reference_t*, which may be NULL. */
+static void
+cpath_ref_decref(crypt_path_reference_t *cpath_ref)
+{
+ if (cpath_ref != NULL) {
+ if (--(cpath_ref->refcount) == 0) {
+ circuit_free_cpath_node(cpath_ref->cpath);
+ tor_free(cpath_ref);
+ }
+ }
+}
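+
+/* A standalone sketch (illustration only) of the reference-count drop
+ * above: NULL is tolerated, and the last holder to decrement frees both
+ * the payload and the wrapper. Names are hypothetical. */
+#include <stdlib.h>
+struct toy_ref {
+ unsigned refcount;
+ void *payload;
+};
+static void
+toy_ref_decref(struct toy_ref *ref)
+{
+ if (ref != NULL && --(ref->refcount) == 0) {
+ free(ref->payload);
+ free(ref);
+ }
+}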
+
+/** A helper function for circuit_dump_by_conn() below. Log a bunch
+ * of information about circuit <b>circ</b>.
+ */
+static void
+circuit_dump_conn_details(int severity,
+ circuit_t *circ,
+ int conn_array_index,
+ const char *type,
+ circid_t this_circid,
+ circid_t other_circid)
+{
+ tor_log(severity, LD_CIRC, "Conn %d has %s circuit: circID %u "
+ "(other side %u), state %d (%s), born %ld:",
+ conn_array_index, type, (unsigned)this_circid, (unsigned)other_circid,
+ circ->state, circuit_state_to_string(circ->state),
+ (long)circ->timestamp_began.tv_sec);
+ if (CIRCUIT_IS_ORIGIN(circ)) { /* circ starts at this node */
+ circuit_log_path(severity, LD_CIRC, TO_ORIGIN_CIRCUIT(circ));
+ }
+}
+
+/** Log, at severity <b>severity</b>, information about each circuit
+ * that is connected to <b>conn</b>.
+ */
+void
+circuit_dump_by_conn(connection_t *conn, int severity)
+{
+ edge_connection_t *tmpconn;
+
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
+ circid_t n_circ_id = circ->n_circ_id, p_circ_id = 0;
+
+ if (circ->marked_for_close) {
+ continue;
+ }
+
+ if (!CIRCUIT_IS_ORIGIN(circ)) {
+ p_circ_id = TO_OR_CIRCUIT(circ)->p_circ_id;
+ }
+
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ for (tmpconn=TO_ORIGIN_CIRCUIT(circ)->p_streams; tmpconn;
+ tmpconn=tmpconn->next_stream) {
+ if (TO_CONN(tmpconn) == conn) {
+ circuit_dump_conn_details(severity, circ, conn->conn_array_index,
+ "App-ward", p_circ_id, n_circ_id);
+ }
+ }
+ }
+
+ if (! CIRCUIT_IS_ORIGIN(circ)) {
+ for (tmpconn=TO_OR_CIRCUIT(circ)->n_streams; tmpconn;
+ tmpconn=tmpconn->next_stream) {
+ if (TO_CONN(tmpconn) == conn) {
+ circuit_dump_conn_details(severity, circ, conn->conn_array_index,
+ "Exit-ward", n_circ_id, p_circ_id);
+ }
+ }
+ }
+ }
+ SMARTLIST_FOREACH_END(circ);
+}
+
+/** Return the circuit whose global ID is <b>id</b>, or NULL if no
+ * such circuit exists. */
+origin_circuit_t *
+circuit_get_by_global_id(uint32_t id)
+{
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
+ if (CIRCUIT_IS_ORIGIN(circ) &&
+ TO_ORIGIN_CIRCUIT(circ)->global_identifier == id) {
+ if (circ->marked_for_close)
+ return NULL;
+ else
+ return TO_ORIGIN_CIRCUIT(circ);
+ }
+ }
+ SMARTLIST_FOREACH_END(circ);
+ return NULL;
+}
+
+/** Return a circ such that:
+ * - circ-\>n_circ_id or circ-\>p_circ_id is equal to <b>circ_id</b>, and
+ * - circ is attached to <b>chan</b>, either as p_chan or n_chan.
+ * Return NULL if no such circuit exists.
+ *
+ * If <b>found_entry_out</b> is provided, set it to true if we have an
+ * entry (a live circuit or a placeholder) for circid/chan, and to false
+ * otherwise.
+ */
+static inline circuit_t *
+circuit_get_by_circid_channel_impl(circid_t circ_id, channel_t *chan,
+ int *found_entry_out)
+{
+ chan_circid_circuit_map_t search;
+ chan_circid_circuit_map_t *found;
+
+ if (_last_circid_chan_ent &&
+ circ_id == _last_circid_chan_ent->circ_id &&
+ chan == _last_circid_chan_ent->chan) {
+ found = _last_circid_chan_ent;
+ } else {
+ search.circ_id = circ_id;
+ search.chan = chan;
+ found = HT_FIND(chan_circid_map, &chan_circid_map, &search);
+ _last_circid_chan_ent = found;
+ }
+ if (found && found->circuit) {
+ log_debug(LD_CIRC,
+ "circuit_get_by_circid_channel_impl() returning circuit %p for"
+ " circ_id %u, channel ID %"PRIu64 " (%p)",
+ found->circuit, (unsigned)circ_id,
+ (chan->global_identifier), chan);
+ if (found_entry_out)
+ *found_entry_out = 1;
+ return found->circuit;
+ }
+
+ log_debug(LD_CIRC,
+ "circuit_get_by_circid_channel_impl() found %s for"
+ " circ_id %u, channel ID %"PRIu64 " (%p)",
+ found ? "placeholder" : "nothing",
+ (unsigned)circ_id,
+ (chan->global_identifier), chan);
+
+ if (found_entry_out)
+ *found_entry_out = found ? 1 : 0;
+
+ return NULL;
+ /* The rest of this checks for bugs. Disabled by default. */
+ /* We comment it out because coverity complains otherwise.
+ {
+ circuit_t *circ;
+ TOR_LIST_FOREACH(circ, &global_circuitlist, head) {
+ if (! CIRCUIT_IS_ORIGIN(circ)) {
+ or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
+ if (or_circ->p_chan == chan && or_circ->p_circ_id == circ_id) {
+ log_warn(LD_BUG,
+ "circuit matches p_chan, but not in hash table (Bug!)");
+ return circ;
+ }
+ }
+ if (circ->n_chan == chan && circ->n_circ_id == circ_id) {
+ log_warn(LD_BUG,
+ "circuit matches n_chan, but not in hash table (Bug!)");
+ return circ;
+ }
+ }
+ return NULL;
+ } */
+}
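+
+/* A standalone sketch (illustration only) of the one-entry lookup cache
+ * used above: remember the last entry we returned and test it before the
+ * full lookup, which pays off when the same key is queried repeatedly,
+ * as happens per-cell on a busy circuit. All names are hypothetical. */
+struct toy_ent {
+ int key;
+ int value;
+};
+static struct toy_ent toy_table[8];
+static struct toy_ent *toy_last_hit = NULL;
+static struct toy_ent *
+toy_slow_find(int key)
+{
+ for (int i = 0; i < 8; ++i) {
+ if (toy_table[i].key == key)
+ return &toy_table[i];
+ }
+ return NULL;
+}
+static struct toy_ent *
+toy_cached_find(int key)
+{
+ if (toy_last_hit && toy_last_hit->key == key)
+ return toy_last_hit;
+ toy_last_hit = toy_slow_find(key);
+ return toy_last_hit;
+}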
+
+/** Return a circ such that:
+ * - circ-\>n_circ_id or circ-\>p_circ_id is equal to <b>circ_id</b>, and
+ * - circ is attached to <b>chan</b>, either as p_chan or n_chan.
+ * - circ is not marked for close.
+ * Return NULL if no such circuit exists.
+ */
+circuit_t *
+circuit_get_by_circid_channel(circid_t circ_id, channel_t *chan)
+{
+ circuit_t *circ = circuit_get_by_circid_channel_impl(circ_id, chan, NULL);
+ if (!circ || circ->marked_for_close)
+ return NULL;
+ else
+ return circ;
+}
+
+/** Return a circ such that:
+ * - circ-\>n_circ_id or circ-\>p_circ_id is equal to <b>circ_id</b>, and
+ * - circ is attached to <b>chan</b>, either as p_chan or n_chan.
+ * Return NULL if no such circuit exists.
+ */
+circuit_t *
+circuit_get_by_circid_channel_even_if_marked(circid_t circ_id,
+ channel_t *chan)
+{
+ return circuit_get_by_circid_channel_impl(circ_id, chan, NULL);
+}
+
+/** Return true iff the circuit ID <b>circ_id</b> is currently used by a
+ * circuit, marked or not, on <b>chan</b>, or if the circ ID is reserved until
+ * a queued destroy cell can be sent.
+ *
+ * (Return 1 if the circuit is present, marked or not; Return 2
+ * if the circuit ID is pending a destroy.)
+ **/
+int
+circuit_id_in_use_on_channel(circid_t circ_id, channel_t *chan)
+{
+ int found = 0;
+ if (circuit_get_by_circid_channel_impl(circ_id, chan, &found) != NULL)
+ return 1;
+ if (found)
+ return 2;
+ return 0;
+}
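+
+/* A standalone sketch (illustration only) of consuming the tri-state
+ * result above while hunting for a usable ID: 0 means free, 1 means a
+ * circuit (marked or not) holds it, and 2 means it is reserved for a
+ * queued destroy cell. `toy_id_state` is a hypothetical stand-in for the
+ * real per-channel lookup. */
+static int
+toy_id_state(int id)
+{
+ return id % 3; /* hypothetical: pretend every third ID is free */
+}
+static int
+toy_find_free_id(int start, int limit)
+{
+ for (int id = start; id < limit; ++id) {
+ if (toy_id_state(id) == 0)
+ return id; /* unused: safe to allocate */
+ /* 1 or 2: in use or reserved for a destroy; keep looking */
+ }
+ return -1;
+}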
+
+/** Helper for debugging 12184. Returns the time since which 'circ_id' has
+ * been marked unusable on 'chan'. */
+time_t
+circuit_id_when_marked_unusable_on_channel(circid_t circ_id, channel_t *chan)
+{
+ chan_circid_circuit_map_t search;
+ chan_circid_circuit_map_t *found;
+
+ memset(&search, 0, sizeof(search));
+ search.circ_id = circ_id;
+ search.chan = chan;
+
+ found = HT_FIND(chan_circid_map, &chan_circid_map, &search);
+
+ if (! found || found->circuit)
+ return 0;
+
+ return found->made_placeholder_at;
+}
+
+/** Return the circuit that a given edge connection is using. */
+circuit_t *
+circuit_get_by_edge_conn(edge_connection_t *conn)
+{
+ circuit_t *circ;
+
+ circ = conn->on_circuit;
+ tor_assert(!circ ||
+ (CIRCUIT_IS_ORIGIN(circ) ? circ->magic == ORIGIN_CIRCUIT_MAGIC
+ : circ->magic == OR_CIRCUIT_MAGIC));
+
+ return circ;
+}
+
+/** For each circuit that has <b>chan</b> as n_chan or p_chan, unlink the
+ * circuit from the chan,circid map, and mark it for close if it hasn't
+ * been marked already.
+ */
+void
+circuit_unlink_all_from_channel(channel_t *chan, int reason)
+{
+ smartlist_t *detached = smartlist_new();
+
+/* #define DEBUG_CIRCUIT_UNLINK_ALL */
+
+ channel_unlink_all_circuits(chan, detached);
+
+#ifdef DEBUG_CIRCUIT_UNLINK_ALL
+ {
+ smartlist_t *detached_2 = smartlist_new();
+ int mismatch = 0, badlen = 0;
+
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
+ if (circ->n_chan == chan ||
+ (!CIRCUIT_IS_ORIGIN(circ) &&
+ TO_OR_CIRCUIT(circ)->p_chan == chan)) {
+ smartlist_add(detached_2, circ);
+ }
+ }
+ SMARTLIST_FOREACH_END(circ);
+
+ if (smartlist_len(detached) != smartlist_len(detached_2)) {
+ log_warn(LD_BUG, "List of detached circuits had the wrong length! "
+ "(got %d, should have gotten %d)",
+ (int)smartlist_len(detached),
+ (int)smartlist_len(detached_2));
+ badlen = 1;
+ }
+ smartlist_sort_pointers(detached);
+ smartlist_sort_pointers(detached_2);
+
+ SMARTLIST_FOREACH(detached, circuit_t *, c,
+ if (c != smartlist_get(detached_2, c_sl_idx))
+ mismatch = 1;
+ );
+
+ if (mismatch)
+ log_warn(LD_BUG, "Mismatch in list of detached circuits.");
+
+ if (badlen || mismatch) {
+ smartlist_free(detached);
+ detached = detached_2;
+ } else {
+ log_notice(LD_CIRC, "List of %d circuits was as expected.",
+ (int)smartlist_len(detached));
+ smartlist_free(detached_2);
+ }
+ }
+#endif /* defined(DEBUG_CIRCUIT_UNLINK_ALL) */
+
+ SMARTLIST_FOREACH_BEGIN(detached, circuit_t *, circ) {
+ int mark = 0;
+ if (circ->n_chan == chan) {
+
+ circuit_set_n_circid_chan(circ, 0, NULL);
+ mark = 1;
+
+ /* If we didn't request this closure, pass the remote
+ * bit to mark_for_close. */
+ if (chan->reason_for_closing != CHANNEL_CLOSE_REQUESTED)
+ reason |= END_CIRC_REASON_FLAG_REMOTE;
+ }
+ if (! CIRCUIT_IS_ORIGIN(circ)) {
+ or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
+ if (or_circ->p_chan == chan) {
+ circuit_set_p_circid_chan(or_circ, 0, NULL);
+ mark = 1;
+ }
+ }
+ if (!mark) {
+ log_warn(LD_BUG, "Circuit on detached list which I had no reason "
+ "to mark");
+ continue;
+ }
+ if (!circ->marked_for_close)
+ circuit_mark_for_close(circ, reason);
+ } SMARTLIST_FOREACH_END(circ);
+
+ smartlist_free(detached);
+}
+
+/** Return a circ such that
+ * - circ-\>rend_data-\>onion_address is equal to
+ * <b>rend_data</b>-\>onion_address,
+ * - circ-\>rend_data-\>rend_cookie is equal to
+ * <b>rend_data</b>-\>rend_cookie, and
+ * - circ-\>purpose is equal to CIRCUIT_PURPOSE_C_REND_READY.
+ *
+ * Return NULL if no such circuit exists.
+ */
+origin_circuit_t *
+circuit_get_ready_rend_circ_by_rend_data(const rend_data_t *rend_data)
+{
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
+ if (!circ->marked_for_close &&
+ circ->purpose == CIRCUIT_PURPOSE_C_REND_READY) {
+ origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
+ if (ocirc->rend_data == NULL) {
+ continue;
+ }
+ if (!rend_cmp_service_ids(rend_data_get_address(rend_data),
+ rend_data_get_address(ocirc->rend_data)) &&
+ tor_memeq(ocirc->rend_data->rend_cookie,
+ rend_data->rend_cookie,
+ REND_COOKIE_LEN))
+ return ocirc;
+ }
+ }
+ SMARTLIST_FOREACH_END(circ);
+ return NULL;
+}
+
+/** Return the first service introduction circuit originating from the global
+ * circuit list after <b>start</b> or at the start of the list if <b>start</b>
+ * is NULL. Return NULL if no circuit is found.
+ *
+ * A service introduction point circuit has a purpose of either
+ * CIRCUIT_PURPOSE_S_ESTABLISH_INTRO or CIRCUIT_PURPOSE_S_INTRO. This does not
+ * return a circuit marked for close and its state must be open. */
+origin_circuit_t *
+circuit_get_next_service_intro_circ(origin_circuit_t *start)
+{
+ int idx = 0;
+ smartlist_t *lst = circuit_get_global_list();
+
+ if (start) {
+ idx = TO_CIRCUIT(start)->global_circuitlist_idx + 1;
+ }
+
+ for ( ; idx < smartlist_len(lst); ++idx) {
+ circuit_t *circ = smartlist_get(lst, idx);
+
+ /* Skip circuits that are marked for close, whose state is not open, or
+ * whose purpose does not match a service intro point. */
+ if (circ->marked_for_close || circ->state != CIRCUIT_STATE_OPEN ||
+ (circ->purpose != CIRCUIT_PURPOSE_S_ESTABLISH_INTRO &&
+ circ->purpose != CIRCUIT_PURPOSE_S_INTRO)) {
+ continue;
+ }
+ /* The purposes we are looking for are only for origin circuits so the
+ * following is valid. */
+ return TO_ORIGIN_CIRCUIT(circ);
+ }
+ /* Not found. */
+ return NULL;
+}
+
+/** Return the first service rendezvous circuit originating from the global
+ * circuit list after <b>start</b> or at the start of the list if <b>start</b>
+ * is NULL. Return NULL if no circuit is found.
+ *
+ * A service rendezvous point circuit has a purpose of either
+ * CIRCUIT_PURPOSE_S_CONNECT_REND or CIRCUIT_PURPOSE_S_REND_JOINED. This does
+ * not return a circuit marked for close and its state must be open. */
+origin_circuit_t *
+circuit_get_next_service_rp_circ(origin_circuit_t *start)
+{
+ int idx = 0;
+ smartlist_t *lst = circuit_get_global_list();
+
+ if (start) {
+ idx = TO_CIRCUIT(start)->global_circuitlist_idx + 1;
+ }
+
+ for ( ; idx < smartlist_len(lst); ++idx) {
+ circuit_t *circ = smartlist_get(lst, idx);
+
+ /* Skip circuits that are marked for close, whose state is not open, or
+ * whose purpose does not match a service rendezvous point. */
+ if (circ->marked_for_close || circ->state != CIRCUIT_STATE_OPEN ||
+ (circ->purpose != CIRCUIT_PURPOSE_S_CONNECT_REND &&
+ circ->purpose != CIRCUIT_PURPOSE_S_REND_JOINED)) {
+ continue;
+ }
+ /* The purposes we are looking for are only for origin circuits so the
+ * following is valid. */
+ return TO_ORIGIN_CIRCUIT(circ);
+ }
+ /* Not found. */
+ return NULL;
+}
+
+/** Return the first circuit originating here in global_circuitlist after
+ * <b>start</b> whose purpose is <b>purpose</b>, and where <b>digest</b> (if
+ * set) matches the private key digest of the rend data associated with the
+ * circuit. Return NULL if no circuit is found. If <b>start</b> is NULL,
+ * begin at the start of the list.
+ */
+origin_circuit_t *
+circuit_get_next_by_pk_and_purpose(origin_circuit_t *start,
+ const uint8_t *digest, uint8_t purpose)
+{
+ int idx;
+ smartlist_t *lst = circuit_get_global_list();
+ tor_assert(CIRCUIT_PURPOSE_IS_ORIGIN(purpose));
+ if (start == NULL)
+ idx = 0;
+ else
+ idx = TO_CIRCUIT(start)->global_circuitlist_idx + 1;
+
+ for ( ; idx < smartlist_len(lst); ++idx) {
+ circuit_t *circ = smartlist_get(lst, idx);
+ origin_circuit_t *ocirc;
+
+ if (circ->marked_for_close)
+ continue;
+ if (circ->purpose != purpose)
+ continue;
+ /* At this point we should be able to get a valid origin circuit because
+ * the origin purpose we are looking for matches this circuit. */
+ if (BUG(!CIRCUIT_PURPOSE_IS_ORIGIN(circ->purpose))) {
+ break;
+ }
+ ocirc = TO_ORIGIN_CIRCUIT(circ);
+ if (!digest)
+ return ocirc;
+ if (rend_circuit_pk_digest_eq(ocirc, digest)) {
+ return ocirc;
+ }
+ }
+ return NULL;
+}
+
+/** We might cannibalize this circuit: Return true if its last hop can be used
+ * as a v3 rendezvous point. */
+static int
+circuit_can_be_cannibalized_for_v3_rp(const origin_circuit_t *circ)
+{
+ if (!circ->build_state) {
+ return 0;
+ }
+
+ extend_info_t *chosen_exit = circ->build_state->chosen_exit;
+ if (BUG(!chosen_exit)) {
+ return 0;
+ }
+
+ const node_t *rp_node = node_get_by_id(chosen_exit->identity_digest);
+ if (rp_node) {
+ if (node_supports_v3_rendezvous_point(rp_node)) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/** We are trying to create a circuit of purpose <b>purpose</b> and we are
+ * looking for cannibalizable circuits. Return the circuit purpose we would be
+ * willing to cannibalize. */
+static uint8_t
+get_circuit_purpose_needed_to_cannibalize(uint8_t purpose)
+{
+ if (circuit_should_use_vanguards(purpose)) {
+ /* If we are using vanguards, then we should only cannibalize vanguard
+ * circuits so that we get the same path construction logic. */
+ return CIRCUIT_PURPOSE_HS_VANGUARDS;
+ } else {
+ /* If no vanguards are used just get a general circuit! */
+ return CIRCUIT_PURPOSE_C_GENERAL;
+ }
+}
+
+/** Return a circuit that is open, has the purpose we are willing to
+ * cannibalize for <b>purpose_to_produce</b> (CIRCUIT_PURPOSE_C_GENERAL or
+ * CIRCUIT_PURPOSE_HS_VANGUARDS), has a timestamp_dirty value of 0, has
+ * flags matching the CIRCLAUNCH_* flags in <b>flags</b>, and if info is
+ * defined, does not already use info as any of its hops; or NULL if no
+ * circuit fits this description.
+ *
+ * The <b>purpose_to_produce</b> argument refers to the purpose of the
+ * circuit we want to create, not the purpose of the circuit we want to
+ * cannibalize.
+ *
+ * If !CIRCLAUNCH_NEED_UPTIME, prefer returning non-uptime circuits.
+ *
+ * To "cannibalize" a circuit means to extend it an extra hop, and use it
+ * for some other purpose than we had originally intended. We do this when
+ * we want to perform some low-bandwidth task at a specific relay, and we
+ * would like the circuit to complete as soon as possible. (If we were going
+ * to use a lot of bandwidth, we wouldn't want a circuit with an extra hop.
+ * If we didn't care about circuit completion latency, we would just build
+ * a new circuit.)
+ */
+origin_circuit_t *
+circuit_find_to_cannibalize(uint8_t purpose_to_produce, extend_info_t *info,
+ int flags)
+{
+ origin_circuit_t *best=NULL;
+ int need_uptime = (flags & CIRCLAUNCH_NEED_UPTIME) != 0;
+ int need_capacity = (flags & CIRCLAUNCH_NEED_CAPACITY) != 0;
+ int internal = (flags & CIRCLAUNCH_IS_INTERNAL) != 0;
+ const or_options_t *options = get_options();
+ /* We want the circuit we are trying to cannibalize to have this purpose */
+ int purpose_to_search_for;
+
+ /* Make sure we're not trying to create a onehop circ by
+ * cannibalization. */
+ tor_assert(!(flags & CIRCLAUNCH_ONEHOP_TUNNEL));
+
+ purpose_to_search_for = get_circuit_purpose_needed_to_cannibalize(
+ purpose_to_produce);
+
+ tor_assert_nonfatal(purpose_to_search_for == CIRCUIT_PURPOSE_C_GENERAL ||
+ purpose_to_search_for == CIRCUIT_PURPOSE_HS_VANGUARDS);
+
+ log_debug(LD_CIRC,
+ "Hunting for a circ to cannibalize: purpose %d, uptime %d, "
+ "capacity %d, internal %d",
+ purpose_to_produce, need_uptime, need_capacity, internal);
+
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ_) {
+ if (CIRCUIT_IS_ORIGIN(circ_) &&
+ circ_->state == CIRCUIT_STATE_OPEN &&
+ !circ_->marked_for_close &&
+ circ_->purpose == purpose_to_search_for &&
+ !circ_->timestamp_dirty) {
+ origin_circuit_t *circ = TO_ORIGIN_CIRCUIT(circ_);
+
+ /* Only cannibalize from reasonable length circuits. If we
+ * want C_GENERAL, then only choose 3 hop circs. If we want
+ * HS_VANGUARDS, only choose 4 hop circs.
+ */
+ if (circ->build_state->desired_path_len !=
+ route_len_for_purpose(purpose_to_search_for, NULL)) {
+ goto next;
+ }
+
+ /* Ignore any circuits for which we can't use the Guard. It is possible
+ * that the Guard was removed from the sampled set after the circuit
+ * was created, so avoid using it. */
+ if (!entry_guard_could_succeed(circ->guard_state)) {
+ goto next;
+ }
+
+ if ((!need_uptime || circ->build_state->need_uptime) &&
+ (!need_capacity || circ->build_state->need_capacity) &&
+ (internal == circ->build_state->is_internal) &&
+ !circ->unusable_for_new_conns &&
+ circ->remaining_relay_early_cells &&
+ !circ->build_state->onehop_tunnel &&
+ !circ->isolation_values_set) {
+ if (info) {
+ /* need to make sure we don't duplicate hops */
+ crypt_path_t *hop = circ->cpath;
+ const node_t *ri1 = node_get_by_id(info->identity_digest);
+ do {
+ const node_t *ri2;
+ if (tor_memeq(hop->extend_info->identity_digest,
+ info->identity_digest, DIGEST_LEN))
+ goto next;
+ if (ri1 &&
+ (ri2 = node_get_by_id(hop->extend_info->identity_digest))
+ && nodes_in_same_family(ri1, ri2))
+ goto next;
+ hop=hop->next;
+ } while (hop!=circ->cpath);
+ }
+ if (options->ExcludeNodes) {
+ /* Make sure no existing nodes in the circuit are excluded for
+ * general use. (This may be possible if StrictNodes is 0, and we
+ * thought we needed to use an otherwise excluded node for, say, a
+ * directory operation.) */
+ crypt_path_t *hop = circ->cpath;
+ do {
+ if (routerset_contains_extendinfo(options->ExcludeNodes,
+ hop->extend_info))
+ goto next;
+ hop = hop->next;
+ } while (hop != circ->cpath);
+ }
+
+ if ((flags & CIRCLAUNCH_IS_V3_RP) &&
+ !circuit_can_be_cannibalized_for_v3_rp(circ)) {
+ log_debug(LD_GENERAL, "Skipping uncannibalizable circuit for v3 "
+ "rendezvous point.");
+ goto next;
+ }
+
+ if (!best || (best->build_state->need_uptime && !need_uptime))
+ best = circ;
+ next: ;
+ }
+ }
+ }
+ SMARTLIST_FOREACH_END(circ_);
+ return best;
+}
+
+/**
+ * Check whether any of the origin circuits that are waiting to see if
+ * their guard is good enough to use can be upgraded to "ready". If so,
+ * return a new smartlist containing them. Otherwise return NULL.
+ */
+smartlist_t *
+circuit_find_circuits_to_upgrade_from_guard_wait(void)
+{
+ /* Only if some circuit is actually waiting on an upgrade should we
+ * run the algorithm. */
+ if (! circuits_pending_other_guards ||
+ smartlist_len(circuits_pending_other_guards)==0)
+ return NULL;
+ /* Only if we have some origin circuits should we run the algorithm. */
+ if (!global_origin_circuit_list)
+ return NULL;
+
+ /* Okay; we can pass our circuit list to entrynodes.c.*/
+ smartlist_t *result = smartlist_new();
+ int circuits_upgraded = entry_guards_upgrade_waiting_circuits(
+ get_guard_selection_info(),
+ global_origin_circuit_list,
+ result);
+ if (circuits_upgraded && smartlist_len(result)) {
+ return result;
+ } else {
+ smartlist_free(result);
+ return NULL;
+ }
+}
+
+/** Return the number of hops in <b>circ</b>'s path. If circ has no entries,
+ * or is NULL, returns 0. */
+int
+circuit_get_cpath_len(origin_circuit_t *circ)
+{
+ int n = 0;
+ if (circ && circ->cpath) {
+ crypt_path_t *cpath, *cpath_next = NULL;
+ for (cpath = circ->cpath; cpath_next != circ->cpath; cpath = cpath_next) {
+ cpath_next = cpath->next;
+ ++n;
+ }
+ }
+ return n;
+}
+
+/** Return the number of opened hops in <b>circ</b>'s path.
+ * If circ has no entries, or is NULL, returns 0. */
+int
+circuit_get_cpath_opened_len(const origin_circuit_t *circ)
+{
+ int n = 0;
+ if (circ && circ->cpath) {
+ crypt_path_t *cpath, *cpath_next = NULL;
+ for (cpath = circ->cpath;
+ cpath->state == CPATH_STATE_OPEN
+ && cpath_next != circ->cpath;
+ cpath = cpath_next) {
+ cpath_next = cpath->next;
+ ++n;
+ }
+ }
+ return n;
+}
+
+/** Return the <b>hopnum</b>th hop in <b>circ</b>->cpath, or NULL if there
+ * aren't that many hops in the list. <b>hopnum</b> starts at 1.
+ * Returns NULL if <b>hopnum</b> is 0 or negative. */
+crypt_path_t *
+circuit_get_cpath_hop(origin_circuit_t *circ, int hopnum)
+{
+ if (circ && circ->cpath && hopnum > 0) {
+ crypt_path_t *cpath, *cpath_next = NULL;
+ for (cpath = circ->cpath; cpath_next != circ->cpath; cpath = cpath_next) {
+ cpath_next = cpath->next;
+ if (--hopnum <= 0)
+ return cpath;
+ }
+ }
+ return NULL;
+}
+
+/** Go through the circuitlist; mark-for-close each circuit that starts
+ * at us but has not yet been used. */
+void
+circuit_mark_all_unused_circs(void)
+{
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
+ if (CIRCUIT_IS_ORIGIN(circ) &&
+ !circ->marked_for_close &&
+ !circ->timestamp_dirty)
+ circuit_mark_for_close(circ, END_CIRC_REASON_FINISHED);
+ }
+ SMARTLIST_FOREACH_END(circ);
+}
+
+/** Go through the circuitlist; for each circuit that starts at us
+ * and is dirty, frob its timestamp_dirty so we won't use it for any
+ * new streams.
+ *
+ * This is useful for letting the user change pseudonyms, so new
+ * streams will not be linkable to old streams.
+ */
+void
+circuit_mark_all_dirty_circs_as_unusable(void)
+{
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
+ if (CIRCUIT_IS_ORIGIN(circ) &&
+ !circ->marked_for_close &&
+ circ->timestamp_dirty) {
+ mark_circuit_unusable_for_new_conns(TO_ORIGIN_CIRCUIT(circ));
+ }
+ }
+ SMARTLIST_FOREACH_END(circ);
+}
+
+/** Mark <b>circ</b> to be closed next time we call
+ * circuit_close_all_marked(). Do any cleanup needed:
+ * - If state is onionskin_pending, remove circ from the onion_pending
+ * list.
+ * - If circ isn't open yet: call circuit_build_failed() if we're
+ * the origin.
+ * - If purpose is C_INTRODUCE_ACK_WAIT, report the intro point
+ * failure we just had to the hidden service client module.
+ * - If purpose is C_INTRODUCING and <b>reason</b> isn't TIMEOUT,
+ * report to the hidden service client module that the intro point
+ * we just tried may be unreachable.
+ * - Send appropriate destroys and edge_destroys for conns and
+ * streams attached to circ.
+ * - If circ->rend_splice is set (we are the midpoint of a joined
+ * rendezvous stream), then mark the other circuit to close as well.
+ */
+MOCK_IMPL(void,
+circuit_mark_for_close_, (circuit_t *circ, int reason, int line,
+ const char *file))
+{
+ int orig_reason = reason; /* Passed to the controller */
+ assert_circuit_ok(circ);
+ tor_assert(line);
+ tor_assert(file);
+
+ if (circ->marked_for_close) {
+ log_warn(LD_BUG,
+ "Duplicate call to circuit_mark_for_close at %s:%d"
+ " (first at %s:%d)", file, line,
+ circ->marked_for_close_file, circ->marked_for_close);
+ return;
+ }
+ if (reason == END_CIRC_AT_ORIGIN) {
+ if (!CIRCUIT_IS_ORIGIN(circ)) {
+ log_warn(LD_BUG, "Specified 'at-origin' non-reason for ending circuit, "
+ "but circuit was not at origin. (called %s:%d, purpose=%d)",
+ file, line, circ->purpose);
+ }
+ reason = END_CIRC_REASON_NONE;
+ }
+
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ if (pathbias_check_close(TO_ORIGIN_CIRCUIT(circ), reason) == -1) {
+ /* Don't close it yet, we need to test it first */
+ return;
+ }
+
+ /* We don't send reasons when closing circuits at the origin. */
+ reason = END_CIRC_REASON_NONE;
+ }
+
+ if (reason & END_CIRC_REASON_FLAG_REMOTE)
+ reason &= ~END_CIRC_REASON_FLAG_REMOTE;
+
+ if (reason < END_CIRC_REASON_MIN_ || reason > END_CIRC_REASON_MAX_) {
+ if (!(orig_reason & END_CIRC_REASON_FLAG_REMOTE))
+ log_warn(LD_BUG, "Reason %d out of range at %s:%d", reason, file, line);
+ reason = END_CIRC_REASON_NONE;
+ }
+
+ circ->marked_for_close = line;
+ circ->marked_for_close_file = file;
+ circ->marked_for_close_reason = reason;
+ circ->marked_for_close_orig_reason = orig_reason;
+
+ if (!CIRCUIT_IS_ORIGIN(circ)) {
+ or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
+ if (or_circ->rend_splice) {
+ if (!or_circ->rend_splice->base_.marked_for_close) {
+ /* do this after marking this circuit, to avoid infinite recursion. */
+ circuit_mark_for_close(TO_CIRCUIT(or_circ->rend_splice), reason);
+ }
+ or_circ->rend_splice = NULL;
+ }
+ }
+
+ /* Notify the HS subsystem that this circuit is closing. */
+ hs_circ_cleanup(circ);
+
+ if (circuits_pending_close == NULL)
+ circuits_pending_close = smartlist_new();
+
+ smartlist_add(circuits_pending_close, circ);
+ mainloop_schedule_postloop_cleanup();
+
+ log_info(LD_GENERAL, "Circuit %u (id: %" PRIu32 ") marked for close at "
+ "%s:%d (orig reason: %d, new reason: %d)",
+ circ->n_circ_id,
+ CIRCUIT_IS_ORIGIN(circ) ?
+ TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0,
+ file, line, orig_reason, reason);
+}
+
+/** Called immediately before freeing a marked circuit <b>circ</b> from
+ * circuit_free_all() while shutting down Tor; this is a safe-at-shutdown
+ * version of circuit_about_to_free(). It's important that it at least
+ * do circuitmux_detach_circuit() when appropriate.
+ */
+static void
+circuit_about_to_free_atexit(circuit_t *circ)
+{
+
+ if (circ->n_chan) {
+ circuit_clear_cell_queue(circ, circ->n_chan);
+ circuitmux_detach_circuit(circ->n_chan->cmux, circ);
+ circuit_set_n_circid_chan(circ, 0, NULL);
+ }
+
+ if (! CIRCUIT_IS_ORIGIN(circ)) {
+ or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
+
+ if (or_circ->p_chan) {
+ circuit_clear_cell_queue(circ, or_circ->p_chan);
+ circuitmux_detach_circuit(or_circ->p_chan->cmux, circ);
+ circuit_set_p_circid_chan(or_circ, 0, NULL);
+ }
+ }
+}
+
+/** Called immediately before freeing a marked circuit <b>circ</b>.
+ * Disconnects the circuit from other data structures, launches events
+ * as appropriate, and performs other housekeeping.
+ */
+static void
+circuit_about_to_free(circuit_t *circ)
+{
+
+ int reason = circ->marked_for_close_reason;
+ int orig_reason = circ->marked_for_close_orig_reason;
+
+ if (circ->state == CIRCUIT_STATE_ONIONSKIN_PENDING) {
+ onion_pending_remove(TO_OR_CIRCUIT(circ));
+ }
+ /* If the circuit ever became OPEN, we sent it to the reputation history
+ * module then. If it isn't OPEN, we send it there now to remember which
+ * links worked and which didn't.
+ */
+ if (circ->state != CIRCUIT_STATE_OPEN &&
+ circ->state != CIRCUIT_STATE_GUARD_WAIT) {
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
+ circuit_build_failed(ocirc); /* take actions if necessary */
+ }
+ }
+ if (circ->state == CIRCUIT_STATE_CHAN_WAIT) {
+ if (circuits_pending_chans)
+ smartlist_remove(circuits_pending_chans, circ);
+ }
+ if (circuits_pending_other_guards) {
+ smartlist_remove(circuits_pending_other_guards, circ);
+ }
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ control_event_circuit_status(TO_ORIGIN_CIRCUIT(circ),
+ (circ->state == CIRCUIT_STATE_OPEN ||
+ circ->state == CIRCUIT_STATE_GUARD_WAIT) ?
+ CIRC_EVENT_CLOSED:CIRC_EVENT_FAILED,
+ orig_reason);
+ }
+
+ if (circ->purpose == CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT) {
+ origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
+ int timed_out = (reason == END_CIRC_REASON_TIMEOUT);
+ tor_assert(circ->state == CIRCUIT_STATE_OPEN);
+ tor_assert(ocirc->build_state->chosen_exit);
+ if (orig_reason != END_CIRC_REASON_IP_NOW_REDUNDANT &&
+ ocirc->rend_data) {
+ /* treat this like getting a nack from it */
+ log_info(LD_REND, "Failed intro circ %s to %s (awaiting ack). %s",
+ safe_str_client(rend_data_get_address(ocirc->rend_data)),
+ safe_str_client(build_state_get_exit_nickname(ocirc->build_state)),
+ timed_out ? "Recording timeout." : "Removing from descriptor.");
+ rend_client_report_intro_point_failure(ocirc->build_state->chosen_exit,
+ ocirc->rend_data,
+ timed_out ?
+ INTRO_POINT_FAILURE_TIMEOUT :
+ INTRO_POINT_FAILURE_GENERIC);
+ }
+ } else if (circ->purpose == CIRCUIT_PURPOSE_C_INTRODUCING &&
+ reason != END_CIRC_REASON_TIMEOUT) {
+ origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
+ if (ocirc->build_state->chosen_exit && ocirc->rend_data) {
+ if (orig_reason != END_CIRC_REASON_IP_NOW_REDUNDANT &&
+ ocirc->rend_data) {
+ log_info(LD_REND, "Failed intro circ %s to %s "
+ "(building circuit to intro point). "
+ "Marking intro point as possibly unreachable.",
+ safe_str_client(rend_data_get_address(ocirc->rend_data)),
+ safe_str_client(build_state_get_exit_nickname(
+ ocirc->build_state)));
+ rend_client_report_intro_point_failure(ocirc->build_state->chosen_exit,
+ ocirc->rend_data,
+ INTRO_POINT_FAILURE_UNREACHABLE);
+ }
+ }
+ }
+
+ if (circ->n_chan) {
+ circuit_clear_cell_queue(circ, circ->n_chan);
+ /* Only send destroy if the channel isn't closing anyway */
+ if (!CHANNEL_CONDEMNED(circ->n_chan)) {
+ channel_send_destroy(circ->n_circ_id, circ->n_chan, reason);
+ }
+ circuitmux_detach_circuit(circ->n_chan->cmux, circ);
+ circuit_set_n_circid_chan(circ, 0, NULL);
+ }
+
+ if (! CIRCUIT_IS_ORIGIN(circ)) {
+ or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
+ edge_connection_t *conn;
+ for (conn=or_circ->n_streams; conn; conn=conn->next_stream)
+ connection_edge_destroy(or_circ->p_circ_id, conn);
+ or_circ->n_streams = NULL;
+
+ while (or_circ->resolving_streams) {
+ conn = or_circ->resolving_streams;
+ or_circ->resolving_streams = conn->next_stream;
+ if (!conn->base_.marked_for_close) {
+ /* The client will see a DESTROY, and infer that the connections
+ * are closing because the circuit is getting torn down. No need
+ * to send an end cell. */
+ conn->edge_has_sent_end = 1;
+ conn->end_reason = END_STREAM_REASON_DESTROY;
+ conn->end_reason |= END_STREAM_REASON_FLAG_ALREADY_SENT_CLOSED;
+ connection_mark_for_close(TO_CONN(conn));
+ }
+ conn->on_circuit = NULL;
+ }
+
+ if (or_circ->p_chan) {
+ circuit_clear_cell_queue(circ, or_circ->p_chan);
+ /* Only send destroy if the channel isn't closing anyway */
+ if (!CHANNEL_CONDEMNED(or_circ->p_chan)) {
+ channel_send_destroy(or_circ->p_circ_id, or_circ->p_chan, reason);
+ }
+ circuitmux_detach_circuit(or_circ->p_chan->cmux, circ);
+ circuit_set_p_circid_chan(or_circ, 0, NULL);
+ }
+ } else {
+ origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
+ edge_connection_t *conn;
+ for (conn=ocirc->p_streams; conn; conn=conn->next_stream)
+ connection_edge_destroy(circ->n_circ_id, conn);
+ ocirc->p_streams = NULL;
+ }
+}
+
+/** Given a marked circuit <b>circ</b>, aggressively free its cell queues to
+ * recover memory. */
+static void
+marked_circuit_free_cells(circuit_t *circ)
+{
+ if (!circ->marked_for_close) {
+ log_warn(LD_BUG, "Called on non-marked circuit");
+ return;
+ }
+ cell_queue_clear(&circ->n_chan_cells);
+ if (circ->n_mux)
+ circuitmux_clear_num_cells(circ->n_mux, circ);
+ if (! CIRCUIT_IS_ORIGIN(circ)) {
+ or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
+ cell_queue_clear(&orcirc->p_chan_cells);
+ if (orcirc->p_mux)
+ circuitmux_clear_num_cells(orcirc->p_mux, circ);
+ }
+}
+
+static size_t
+single_conn_free_bytes(connection_t *conn)
+{
+ size_t result = 0;
+ if (conn->inbuf) {
+ result += buf_allocation(conn->inbuf);
+ buf_clear(conn->inbuf);
+ }
+ if (conn->outbuf) {
+ result += buf_allocation(conn->outbuf);
+ buf_clear(conn->outbuf);
+ conn->outbuf_flushlen = 0;
+ }
+ if (conn->type == CONN_TYPE_DIR) {
+ dir_connection_t *dir_conn = TO_DIR_CONN(conn);
+ if (dir_conn->compress_state) {
+ result += tor_compress_state_size(dir_conn->compress_state);
+ tor_compress_free(dir_conn->compress_state);
+ dir_conn->compress_state = NULL;
+ }
+ }
+ return result;
+}
+
+/** Aggressively free buffer contents on all the buffers of all streams in the
+ * list starting at <b>stream</b>. Return the number of bytes recovered. */
+static size_t
+marked_circuit_streams_free_bytes(edge_connection_t *stream)
+{
+ size_t result = 0;
+ for ( ; stream; stream = stream->next_stream) {
+ connection_t *conn = TO_CONN(stream);
+ result += single_conn_free_bytes(conn);
+ if (conn->linked_conn) {
+ result += single_conn_free_bytes(conn->linked_conn);
+ }
+ }
+ return result;
+}
+
+/** Aggressively free buffer contents on all the buffers of all streams on
+ * circuit <b>c</b>. Return the number of bytes recovered. */
+static size_t
+marked_circuit_free_stream_bytes(circuit_t *c)
+{
+ if (CIRCUIT_IS_ORIGIN(c)) {
+ return marked_circuit_streams_free_bytes(TO_ORIGIN_CIRCUIT(c)->p_streams);
+ } else {
+ return marked_circuit_streams_free_bytes(TO_OR_CIRCUIT(c)->n_streams);
+ }
+}
+
+/** Return the number of cells used by the circuit <b>c</b>'s cell queues. */
+STATIC size_t
+n_cells_in_circ_queues(const circuit_t *c)
+{
+ size_t n = c->n_chan_cells.n;
+ if (! CIRCUIT_IS_ORIGIN(c)) {
+ circuit_t *cc = (circuit_t *) c;
+ n += TO_OR_CIRCUIT(cc)->p_chan_cells.n;
+ }
+ return n;
+}
+
+/**
+ * Return the age of the oldest cell queued on <b>c</b>, in timestamp units.
+ * Return 0 if there are no cells queued on c. Requires that <b>now</b> be
+ * the current coarse timestamp.
+ *
+ * This function will return incorrect results if the oldest cell queued on
+ * the circuit is older than about 2**32 msec (about 49 days) old.
+ */
+STATIC uint32_t
+circuit_max_queued_cell_age(const circuit_t *c, uint32_t now)
+{
+ uint32_t age = 0;
+ packed_cell_t *cell;
+
+ if (NULL != (cell = TOR_SIMPLEQ_FIRST(&c->n_chan_cells.head)))
+ age = now - cell->inserted_timestamp;
+
+ if (! CIRCUIT_IS_ORIGIN(c)) {
+ const or_circuit_t *orcirc = CONST_TO_OR_CIRCUIT(c);
+ if (NULL != (cell = TOR_SIMPLEQ_FIRST(&orcirc->p_chan_cells.head))) {
+ uint32_t age2 = now - cell->inserted_timestamp;
+ if (age2 > age)
+ return age2;
+ }
+ }
+ return age;
+}
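+
+/* A standalone sketch (illustration only) of why the subtraction above
+ * tolerates timestamp wrap: unsigned 32-bit subtraction is exact modulo
+ * 2**32, so "now - inserted" stays correct across a wrap and only breaks
+ * once the true age exceeds ~49 days. */
+#include <stdint.h>
+#include <assert.h>
+int
+main(void)
+{
+ uint32_t inserted = 0xFFFFFFF0u; /* stamped just before the wrap */
+ uint32_t now = 0x00000010u; /* sampled just after the wrap */
+ assert((uint32_t)(now - inserted) == 0x20u); /* 32 units, as expected */
+ return 0;
+}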
+
+/** Return the age of the oldest buffer chunk on <b>conn</b>, where age is
+ * taken in timestamp units before the time <b>now_ts</b>. If the connection
+ * has no data, treat it as having age zero.
+ **/
+static uint32_t
+conn_get_buffer_age(const connection_t *conn, uint32_t now_ts)
+{
+ uint32_t age = 0, age2;
+ if (conn->outbuf) {
+ age2 = buf_get_oldest_chunk_timestamp(conn->outbuf, now_ts);
+ if (age2 > age)
+ age = age2;
+ }
+ if (conn->inbuf) {
+ age2 = buf_get_oldest_chunk_timestamp(conn->inbuf, now_ts);
+ if (age2 > age)
+ age = age2;
+ }
+ return age;
+}
+
+/** Return the age in timestamp units of the oldest buffer chunk on any stream
+ * in the linked list <b>stream</b>, where age is taken in timestamp units
+ * before the timestamp <b>now</b>. */
+static uint32_t
+circuit_get_streams_max_data_age(const edge_connection_t *stream, uint32_t now)
+{
+ uint32_t age = 0, age2;
+ for (; stream; stream = stream->next_stream) {
+ const connection_t *conn = TO_CONN(stream);
+ age2 = conn_get_buffer_age(conn, now);
+ if (age2 > age)
+ age = age2;
+ if (conn->linked_conn) {
+ age2 = conn_get_buffer_age(conn->linked_conn, now);
+ if (age2 > age)
+ age = age2;
+ }
+ }
+ return age;
+}
+
+/** Return the age in timestamp units of the oldest buffer chunk on any stream
+ * attached to the circuit <b>c</b>, where age is taken before the timestamp
+ * <b>now</b>. */
+STATIC uint32_t
+circuit_max_queued_data_age(const circuit_t *c, uint32_t now)
+{
+ if (CIRCUIT_IS_ORIGIN(c)) {
+ return circuit_get_streams_max_data_age(
+ CONST_TO_ORIGIN_CIRCUIT(c)->p_streams, now);
+ } else {
+ return circuit_get_streams_max_data_age(
+ CONST_TO_OR_CIRCUIT(c)->n_streams, now);
+ }
+}
+
+/** Return the age of the oldest cell or stream buffer chunk on the circuit
+ * <b>c</b>, where age is taken in timestamp units before the timestamp
+ * <b>now</b> */
+STATIC uint32_t
+circuit_max_queued_item_age(const circuit_t *c, uint32_t now)
+{
+ uint32_t cell_age = circuit_max_queued_cell_age(c, now);
+ uint32_t data_age = circuit_max_queued_data_age(c, now);
+ if (cell_age > data_age)
+ return cell_age;
+ else
+ return data_age;
+}
+
+/** Helper to sort a list of circuit_t by age of oldest item, in descending
+ * order. */
+static int
+circuits_compare_by_oldest_queued_item_(const void **a_, const void **b_)
+{
+ const circuit_t *a = *a_;
+ const circuit_t *b = *b_;
+ uint32_t age_a = a->age_tmp;
+ uint32_t age_b = b->age_tmp;
+
+ if (age_a < age_b)
+ return 1;
+ else if (age_a == age_b)
+ return 0;
+ else
+ return -1;
+}
+
+static uint32_t now_ts_for_buf_cmp;
+
+/** Helper to sort a list of connection_t by age of oldest buffered data, in
+ * descending order. */
+static int
+conns_compare_by_buffer_age_(const void **a_, const void **b_)
+{
+ const connection_t *a = *a_;
+ const connection_t *b = *b_;
+ time_t age_a = conn_get_buffer_age(a, now_ts_for_buf_cmp);
+ time_t age_b = conn_get_buffer_age(b, now_ts_for_buf_cmp);
+
+ if (age_a < age_b)
+ return 1;
+ else if (age_a == age_b)
+ return 0;
+ else
+ return -1;
+}
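+
+/* A standalone sketch (illustration only) of the descending-order
+ * comparator convention both helpers above follow: return negative when
+ * the first element should sort earlier, so "oldest first" means
+ * returning -1 for the larger age. (The smartlist comparators receive
+ * pointers to the stored pointers; plain values are sorted here.) */
+#include <stdio.h>
+#include <stdlib.h>
+static int
+toy_cmp_desc(const void *a_, const void *b_)
+{
+ unsigned a = *(const unsigned *)a_;
+ unsigned b = *(const unsigned *)b_;
+ if (a < b)
+ return 1;
+ else if (a == b)
+ return 0;
+ else
+ return -1;
+}
+int
+main(void)
+{
+ unsigned ages[] = { 3, 9, 1 };
+ qsort(ages, 3, sizeof(unsigned), toy_cmp_desc);
+ printf("%u %u %u\n", ages[0], ages[1], ages[2]); /* prints "9 3 1" */
+ return 0;
+}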
+
+#define FRACTION_OF_DATA_TO_RETAIN_ON_OOM 0.90
+
+/** We're out of memory for cells, having allocated <b>current_allocation</b>
+ * bytes' worth. Kill the 'worst' circuits until we're under
+ * FRACTION_OF_DATA_TO_RETAIN_ON_OOM of our maximum usage. */
+void
+circuits_handle_oom(size_t current_allocation)
+{
+ smartlist_t *circlist;
+ smartlist_t *connection_array = get_connection_array();
+ int conn_idx;
+ size_t mem_to_recover;
+ size_t mem_recovered=0;
+ int n_circuits_killed=0;
+ int n_dirconns_killed=0;
+ uint32_t now_ts;
+ log_notice(LD_GENERAL, "We're low on memory (cell queues total alloc:"
+ " %"TOR_PRIuSZ" buffer total alloc: %" TOR_PRIuSZ ","
+ " tor compress total alloc: %" TOR_PRIuSZ
+ " (zlib: %" TOR_PRIuSZ ", zstd: %" TOR_PRIuSZ ","
+ " lzma: %" TOR_PRIuSZ "),"
+ " rendezvous cache total alloc: %" TOR_PRIuSZ "). Killing"
+ " circuits withover-long queues. (This behavior is controlled by"
+ " MaxMemInQueues.)",
+ cell_queues_get_total_allocation(),
+ buf_get_total_allocation(),
+ tor_compress_get_total_allocation(),
+ tor_zlib_get_total_allocation(),
+ tor_zstd_get_total_allocation(),
+ tor_lzma_get_total_allocation(),
+ rend_cache_get_total_allocation());
+
+ {
+ size_t mem_target = (size_t)(get_options()->MaxMemInQueues *
+ FRACTION_OF_DATA_TO_RETAIN_ON_OOM);
+ if (current_allocation <= mem_target)
+ return;
+ mem_to_recover = current_allocation - mem_target;
+ }
+
+ now_ts = monotime_coarse_get_stamp();
+
+ circlist = circuit_get_global_list();
+ SMARTLIST_FOREACH_BEGIN(circlist, circuit_t *, circ) {
+ circ->age_tmp = circuit_max_queued_item_age(circ, now_ts);
+ } SMARTLIST_FOREACH_END(circ);
+
+ /* This is O(n log n); there are faster algorithms we could use instead.
+ * Let's hope this doesn't happen enough to be in the critical path. */
+ smartlist_sort(circlist, circuits_compare_by_oldest_queued_item_);
+
+ /* Fix up the indices before we run into trouble */
+ SMARTLIST_FOREACH_BEGIN(circlist, circuit_t *, circ) {
+ circ->global_circuitlist_idx = circ_sl_idx;
+ } SMARTLIST_FOREACH_END(circ);
+
+ /* Now sort the connection array ... */
+ now_ts_for_buf_cmp = now_ts;
+ smartlist_sort(connection_array, conns_compare_by_buffer_age_);
+ now_ts_for_buf_cmp = 0;
+
+ /* Fix up the connection array to its new order. */
+ SMARTLIST_FOREACH_BEGIN(connection_array, connection_t *, conn) {
+ conn->conn_array_index = conn_sl_idx;
+ } SMARTLIST_FOREACH_END(conn);
+
+ /* Okay, now the worst circuits and connections are at the front of their
+ * respective lists. Let's mark them, and reclaim their storage
+ * aggressively. */
+ conn_idx = 0;
+ SMARTLIST_FOREACH_BEGIN(circlist, circuit_t *, circ) {
+ size_t n;
+ size_t freed;
+
+ /* Free storage in any non-linked directory connections that have buffered
+ * data older than this circuit. */
+ while (conn_idx < smartlist_len(connection_array)) {
+ connection_t *conn = smartlist_get(connection_array, conn_idx);
+ uint32_t conn_age = conn_get_buffer_age(conn, now_ts);
+ if (conn_age < circ->age_tmp) {
+ break;
+ }
+ if (conn->type == CONN_TYPE_DIR && conn->linked_conn == NULL) {
+ if (!conn->marked_for_close)
+ connection_mark_for_close(conn);
+ mem_recovered += single_conn_free_bytes(conn);
+
+ ++n_dirconns_killed;
+
+ if (mem_recovered >= mem_to_recover)
+ goto done_recovering_mem;
+ }
+ ++conn_idx;
+ }
+
+ /* Now, kill the circuit. */
+ n = n_cells_in_circ_queues(circ);
+ if (! circ->marked_for_close) {
+ circuit_mark_for_close(circ, END_CIRC_REASON_RESOURCELIMIT);
+ }
+ marked_circuit_free_cells(circ);
+ freed = marked_circuit_free_stream_bytes(circ);
+
+ ++n_circuits_killed;
+
+ mem_recovered += n * packed_cell_mem_cost();
+ mem_recovered += freed;
+
+ if (mem_recovered >= mem_to_recover)
+ goto done_recovering_mem;
+ } SMARTLIST_FOREACH_END(circ);
+
+ done_recovering_mem:
+
+ log_notice(LD_GENERAL, "Removed %"TOR_PRIuSZ" bytes by killing %d circuits; "
+ "%d circuits remain alive. Also killed %d non-linked directory "
+ "connections.",
+ mem_recovered,
+ n_circuits_killed,
+ smartlist_len(circlist) - n_circuits_killed,
+ n_dirconns_killed);
+}
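+
+/* A standalone sketch (illustration only) of the recovery target above:
+ * we keep FRACTION_OF_DATA_TO_RETAIN_ON_OOM of the configured cap and
+ * reclaim everything beyond that. With a 1 GB cap and 1.2 GB allocated,
+ * the target is 0.9 GB and 0.3 GB must be recovered. */
+#include <stddef.h>
+static size_t
+toy_bytes_to_recover(size_t current, size_t max_in_queues, double retain)
+{
+ size_t target = (size_t)(max_in_queues * retain);
+ return (current <= target) ? 0 : current - target;
+}
+/* toy_bytes_to_recover(1200000000, 1000000000, 0.90) == 300000000 */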
+
+/** Verify that cpath layer <b>cp</b> has all of its invariants
+ * correct. Trigger an assert if anything is invalid.
+ */
+void
+assert_cpath_layer_ok(const crypt_path_t *cp)
+{
+// tor_assert(cp->addr); /* these are zero for rendezvous extra-hops */
+// tor_assert(cp->port);
+ tor_assert(cp);
+ tor_assert(cp->magic == CRYPT_PATH_MAGIC);
+ switch (cp->state)
+ {
+ case CPATH_STATE_OPEN:
+ relay_crypto_assert_ok(&cp->crypto);
+ /* fall through */
+ case CPATH_STATE_CLOSED:
+ /*XXXX Assert that there's no handshake_state either. */
+ tor_assert(!cp->rend_dh_handshake_state);
+ break;
+ case CPATH_STATE_AWAITING_KEYS:
+ /* tor_assert(cp->dh_handshake_state); */
+ break;
+ default:
+ log_fn(LOG_ERR, LD_BUG, "Unexpected state %d", cp->state);
+ tor_assert(0);
+ }
+ tor_assert(cp->package_window >= 0);
+ tor_assert(cp->deliver_window >= 0);
+}
+
+/** Verify that cpath <b>cp</b> has all of its invariants
+ * correct. Trigger an assert if anything is invalid.
+ */
+static void
+assert_cpath_ok(const crypt_path_t *cp)
+{
+ const crypt_path_t *start = cp;
+
+ do {
+ assert_cpath_layer_ok(cp);
+ /* layers must be in sequence of: "open* awaiting? closed*" */
+ if (cp != start) {
+ if (cp->state == CPATH_STATE_AWAITING_KEYS) {
+ tor_assert(cp->prev->state == CPATH_STATE_OPEN);
+ } else if (cp->state == CPATH_STATE_OPEN) {
+ tor_assert(cp->prev->state == CPATH_STATE_OPEN);
+ }
+ }
+ cp = cp->next;
+ tor_assert(cp);
+ } while (cp != start);
+}
+
+/** Verify that circuit <b>c</b> has all of its invariants
+ * correct. Trigger an assert if anything is invalid.
+ */
+MOCK_IMPL(void,
+assert_circuit_ok,(const circuit_t *c))
+{
+ edge_connection_t *conn;
+ const or_circuit_t *or_circ = NULL;
+ const origin_circuit_t *origin_circ = NULL;
+
+ tor_assert(c);
+ tor_assert(c->magic == ORIGIN_CIRCUIT_MAGIC || c->magic == OR_CIRCUIT_MAGIC);
+ tor_assert(c->purpose >= CIRCUIT_PURPOSE_MIN_ &&
+ c->purpose <= CIRCUIT_PURPOSE_MAX_);
+
+ if (CIRCUIT_IS_ORIGIN(c))
+ origin_circ = CONST_TO_ORIGIN_CIRCUIT(c);
+ else
+ or_circ = CONST_TO_OR_CIRCUIT(c);
+
+ if (c->n_chan) {
+ tor_assert(!c->n_hop);
+
+ if (c->n_circ_id) {
+ /* We use the _impl variant here to make sure we don't fail on marked
+ * circuits, which would not be returned by the regular function. */
+ circuit_t *c2 = circuit_get_by_circid_channel_impl(c->n_circ_id,
+ c->n_chan, NULL);
+ tor_assert(c == c2);
+ }
+ }
+ if (or_circ && or_circ->p_chan) {
+ if (or_circ->p_circ_id) {
+ /* ibid */
+ circuit_t *c2 =
+ circuit_get_by_circid_channel_impl(or_circ->p_circ_id,
+ or_circ->p_chan, NULL);
+ tor_assert(c == c2);
+ }
+ }
+ if (or_circ)
+ for (conn = or_circ->n_streams; conn; conn = conn->next_stream)
+ tor_assert(conn->base_.type == CONN_TYPE_EXIT);
+
+ tor_assert(c->deliver_window >= 0);
+ tor_assert(c->package_window >= 0);
+ if (c->state == CIRCUIT_STATE_OPEN ||
+ c->state == CIRCUIT_STATE_GUARD_WAIT) {
+ tor_assert(!c->n_chan_create_cell);
+ if (or_circ) {
+ relay_crypto_assert_ok(&or_circ->crypto);
+ }
+ }
+ if (c->state == CIRCUIT_STATE_CHAN_WAIT && !c->marked_for_close) {
+ tor_assert(circuits_pending_chans &&
+ smartlist_contains(circuits_pending_chans, c));
+ } else {
+ tor_assert(!circuits_pending_chans ||
+ !smartlist_contains(circuits_pending_chans, c));
+ }
+ if (origin_circ && origin_circ->cpath) {
+ assert_cpath_ok(origin_circ->cpath);
+ }
+ if (c->purpose == CIRCUIT_PURPOSE_REND_ESTABLISHED) {
+ tor_assert(or_circ);
+ if (!c->marked_for_close) {
+ tor_assert(or_circ->rend_splice);
+ tor_assert(or_circ->rend_splice->rend_splice == or_circ);
+ }
+ tor_assert(or_circ->rend_splice != or_circ);
+ } else {
+ tor_assert(!or_circ || !or_circ->rend_splice);
+ }
+}
diff --git a/src/core/or/circuitlist.h b/src/core/or/circuitlist.h
new file mode 100644
index 0000000000..7c9bc0199a
--- /dev/null
+++ b/src/core/or/circuitlist.h
@@ -0,0 +1,247 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circuitlist.h
+ * \brief Header file for circuitlist.c.
+ **/
+
+#ifndef TOR_CIRCUITLIST_H
+#define TOR_CIRCUITLIST_H
+
+#include "lib/testsupport/testsupport.h"
+#include "or/hs_ident.h"
+
+/** Circuit state: I'm the origin, still haven't done all my handshakes. */
+#define CIRCUIT_STATE_BUILDING 0
+/** Circuit state: Waiting to process the onionskin. */
+#define CIRCUIT_STATE_ONIONSKIN_PENDING 1
+/** Circuit state: I'd like to deliver a create, but my n_chan is still
+ * connecting. */
+#define CIRCUIT_STATE_CHAN_WAIT 2
+/** Circuit state: the circuit is open but we don't want to actually use it
+ * until we find out if a better guard will be available.
+ */
+#define CIRCUIT_STATE_GUARD_WAIT 3
+/** Circuit state: onionskin(s) processed, ready to send/receive cells. */
+#define CIRCUIT_STATE_OPEN 4
+
+#define CIRCUIT_PURPOSE_MIN_ 1
+
+/* these circuits were initiated elsewhere */
+#define CIRCUIT_PURPOSE_OR_MIN_ 1
+/** OR-side circuit purpose: normal circuit, at OR. */
+#define CIRCUIT_PURPOSE_OR 1
+/** OR-side circuit purpose: At OR, from the service, waiting for intro from
+ * clients. */
+#define CIRCUIT_PURPOSE_INTRO_POINT 2
+/** OR-side circuit purpose: At OR, from the client, waiting for the service.
+ */
+#define CIRCUIT_PURPOSE_REND_POINT_WAITING 3
+/** OR-side circuit purpose: At OR, both circuits have this purpose. */
+#define CIRCUIT_PURPOSE_REND_ESTABLISHED 4
+#define CIRCUIT_PURPOSE_OR_MAX_ 4
+
+/* these circuits originate at this node */
+
+/* here's how circ client-side purposes work:
+ * normal circuits are C_GENERAL.
+ * circuits that are c_introducing are either on their way to
+ * becoming open, or they are open and waiting for a
+ * suitable rendcirc before they send the intro.
+ * circuits that are c_introduce_ack_wait have sent the intro,
+ * but haven't gotten a response yet.
+ * circuits that are c_establish_rend are either on their way
+ * to becoming open, or they are open and have sent the
+ * establish_rendezvous cell but haven't received an ack.
+ * circuits that are c_rend_ready are open and have received a
+ * rend ack, but haven't heard from the service yet. if they have a
+ * buildstate->pending_final_cpath then they're expecting a
+ * cell from the service, else they're not.
+ * circuits that are c_rend_ready_intro_acked are open, and
+ * some intro circ has sent its intro and received an ack.
+ * circuits that are c_rend_joined are open, have heard from
+ * the service, and are talking to it.
+ */
+/** Client-side circuit purpose: Normal circuit, with cpath. */
+#define CIRCUIT_PURPOSE_C_GENERAL 5
+#define CIRCUIT_PURPOSE_C_HS_MIN_ 6
+/** Client-side circuit purpose: at the client, connecting to intro point. */
+#define CIRCUIT_PURPOSE_C_INTRODUCING 6
+/** Client-side circuit purpose: at the client, sent INTRODUCE1 to intro point,
+ * waiting for ACK/NAK. */
+#define CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT 7
+/** Client-side circuit purpose: at the client, introduced and acked, closing.
+ */
+#define CIRCUIT_PURPOSE_C_INTRODUCE_ACKED 8
+/** Client-side circuit purpose: at the client, waiting for ack. */
+#define CIRCUIT_PURPOSE_C_ESTABLISH_REND 9
+/** Client-side circuit purpose: at the client, waiting for the service. */
+#define CIRCUIT_PURPOSE_C_REND_READY 10
+/** Client-side circuit purpose: at the client, waiting for the service,
+ * INTRODUCE has been acknowledged. */
+#define CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED 11
+/** Client-side circuit purpose: at the client, rendezvous established. */
+#define CIRCUIT_PURPOSE_C_REND_JOINED 12
+/** This circuit is used for getting hsdirs */
+#define CIRCUIT_PURPOSE_C_HSDIR_GET 13
+#define CIRCUIT_PURPOSE_C_HS_MAX_ 13
+/** This circuit is used for build time measurement only */
+#define CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT 14
+#define CIRCUIT_PURPOSE_C_MAX_ 14
+
+#define CIRCUIT_PURPOSE_S_HS_MIN_ 15
+/** Hidden-service-side circuit purpose: at the service, waiting for
+ * introductions. */
+#define CIRCUIT_PURPOSE_S_ESTABLISH_INTRO 15
+/** Hidden-service-side circuit purpose: at the service, successfully
+ * established intro. */
+#define CIRCUIT_PURPOSE_S_INTRO 16
+/** Hidden-service-side circuit purpose: at the service, connecting to rend
+ * point. */
+#define CIRCUIT_PURPOSE_S_CONNECT_REND 17
+/** Hidden-service-side circuit purpose: at the service, rendezvous
+ * established. */
+#define CIRCUIT_PURPOSE_S_REND_JOINED 18
+/** This circuit is used for uploading hsdirs */
+#define CIRCUIT_PURPOSE_S_HSDIR_POST 19
+#define CIRCUIT_PURPOSE_S_HS_MAX_ 19
+
+/** A testing circuit; not meant to be used for actual traffic. */
+#define CIRCUIT_PURPOSE_TESTING 20
+/** A controller made this circuit and Tor should not use it. */
+#define CIRCUIT_PURPOSE_CONTROLLER 21
+/** This circuit is used for path bias probing only */
+#define CIRCUIT_PURPOSE_PATH_BIAS_TESTING 22
+
+/** This circuit is used for vanguards/restricted paths.
+ *
+ * This type of circuit is *only* created preemptively and never
+ * on-demand. When an HS operation needs to take place (e.g. connect to an
+ * intro point), these circuits are then cannibalized and repurposed to the
+ * actual needed HS purpose. */
+#define CIRCUIT_PURPOSE_HS_VANGUARDS 23
+
+#define CIRCUIT_PURPOSE_MAX_ 23
+/** A catch-all for unrecognized purposes. Currently we don't expect
+ * to make or see any circuits with this purpose. */
+#define CIRCUIT_PURPOSE_UNKNOWN 255
+
+/** True iff the circuit purpose <b>p</b> is for a circuit that
+ * originated at this node. */
+#define CIRCUIT_PURPOSE_IS_ORIGIN(p) ((p)>CIRCUIT_PURPOSE_OR_MAX_)
+/** True iff the circuit purpose <b>p</b> is for a circuit that originated
+ * here to serve as a client. (Hidden services don't count here.) */
+#define CIRCUIT_PURPOSE_IS_CLIENT(p) \
+ ((p)> CIRCUIT_PURPOSE_OR_MAX_ && \
+ (p)<=CIRCUIT_PURPOSE_C_MAX_)
+/** True iff the circuit_t <b>c</b> is actually an origin_circuit_t. */
+#define CIRCUIT_IS_ORIGIN(c) (CIRCUIT_PURPOSE_IS_ORIGIN((c)->purpose))
+/** True iff the circuit purpose <b>p</b> is for an established rendezvous
+ * circuit. */
+#define CIRCUIT_PURPOSE_IS_ESTABLISHED_REND(p) \
+ ((p) == CIRCUIT_PURPOSE_C_REND_JOINED || \
+ (p) == CIRCUIT_PURPOSE_S_REND_JOINED)
+/** True iff the circuit_t c is actually an or_circuit_t */
+#define CIRCUIT_IS_ORCIRC(c) (((circuit_t *)(c))->magic == OR_CIRCUIT_MAGIC)
+
+/** True iff this circuit purpose should count towards the global
+ * pending rate limit (set by MaxClientCircuitsPending). We count all
+ * general purpose circuits, as well as the first step of client onion
+ * service connections (HSDir gets). */
+#define CIRCUIT_PURPOSE_COUNTS_TOWARDS_MAXPENDING(p) \
+ ((p) == CIRCUIT_PURPOSE_C_GENERAL || \
+ (p) == CIRCUIT_PURPOSE_C_HSDIR_GET)
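+
+/* Worked examples of the predicates above, using values from the #defines
+ * in this header:
+ *
+ *   CIRCUIT_PURPOSE_IS_ORIGIN(CIRCUIT_PURPOSE_OR)        == 0  (1 <= 4)
+ *   CIRCUIT_PURPOSE_IS_ORIGIN(CIRCUIT_PURPOSE_C_GENERAL) == 1  (5 > 4)
+ *   CIRCUIT_PURPOSE_IS_CLIENT(CIRCUIT_PURPOSE_C_GENERAL) == 1  (4 < 5 <= 14)
+ *   CIRCUIT_PURPOSE_IS_CLIENT(CIRCUIT_PURPOSE_S_INTRO)   == 0  (16 > 14)
+ */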
+
+/** Convert a circuit_t* to a pointer to the enclosing or_circuit_t. Assert
+ * if the cast is impossible. */
+or_circuit_t *TO_OR_CIRCUIT(circuit_t *);
+const or_circuit_t *CONST_TO_OR_CIRCUIT(const circuit_t *);
+/** Convert a circuit_t* to a pointer to the enclosing origin_circuit_t.
+ * Assert if the cast is impossible. */
+origin_circuit_t *TO_ORIGIN_CIRCUIT(circuit_t *);
+const origin_circuit_t *CONST_TO_ORIGIN_CIRCUIT(const circuit_t *);
+
+MOCK_DECL(smartlist_t *, circuit_get_global_list, (void));
+smartlist_t *circuit_get_global_origin_circuit_list(void);
+int circuit_any_opened_circuits(void);
+int circuit_any_opened_circuits_cached(void);
+void circuit_cache_opened_circuit_state(int circuits_are_opened);
+
+const char *circuit_state_to_string(int state);
+const char *circuit_purpose_to_controller_string(uint8_t purpose);
+const char *circuit_purpose_to_controller_hs_state_string(uint8_t purpose);
+const char *circuit_purpose_to_string(uint8_t purpose);
+void circuit_dump_by_conn(connection_t *conn, int severity);
+void circuit_set_p_circid_chan(or_circuit_t *circ, circid_t id,
+ channel_t *chan);
+void circuit_set_n_circid_chan(circuit_t *circ, circid_t id,
+ channel_t *chan);
+void channel_mark_circid_unusable(channel_t *chan, circid_t id);
+void channel_mark_circid_usable(channel_t *chan, circid_t id);
+time_t circuit_id_when_marked_unusable_on_channel(circid_t circ_id,
+ channel_t *chan);
+void circuit_set_state(circuit_t *circ, uint8_t state);
+void circuit_close_all_marked(void);
+int32_t circuit_initial_package_window(void);
+origin_circuit_t *origin_circuit_new(void);
+or_circuit_t *or_circuit_new(circid_t p_circ_id, channel_t *p_chan);
+circuit_t *circuit_get_by_circid_channel(circid_t circ_id,
+ channel_t *chan);
+circuit_t *
+circuit_get_by_circid_channel_even_if_marked(circid_t circ_id,
+ channel_t *chan);
+int circuit_id_in_use_on_channel(circid_t circ_id, channel_t *chan);
+circuit_t *circuit_get_by_edge_conn(edge_connection_t *conn);
+void circuit_unlink_all_from_channel(channel_t *chan, int reason);
+origin_circuit_t *circuit_get_by_global_id(uint32_t id);
+origin_circuit_t *circuit_get_ready_rend_circ_by_rend_data(
+ const rend_data_t *rend_data);
+origin_circuit_t *circuit_get_next_by_pk_and_purpose(origin_circuit_t *start,
+ const uint8_t *digest, uint8_t purpose);
+origin_circuit_t *circuit_get_next_service_intro_circ(origin_circuit_t *start);
+origin_circuit_t *circuit_get_next_service_rp_circ(origin_circuit_t *start);
+origin_circuit_t *circuit_get_next_service_hsdir_circ(origin_circuit_t *start);
+origin_circuit_t *circuit_find_to_cannibalize(uint8_t purpose,
+ extend_info_t *info, int flags);
+void circuit_mark_all_unused_circs(void);
+void circuit_mark_all_dirty_circs_as_unusable(void);
+MOCK_DECL(void, circuit_mark_for_close_, (circuit_t *circ, int reason,
+ int line, const char *file));
+int circuit_get_cpath_len(origin_circuit_t *circ);
+int circuit_get_cpath_opened_len(const origin_circuit_t *);
+void circuit_clear_cpath(origin_circuit_t *circ);
+crypt_path_t *circuit_get_cpath_hop(origin_circuit_t *circ, int hopnum);
+void circuit_get_all_pending_on_channel(smartlist_t *out,
+ channel_t *chan);
+int circuit_count_pending_on_channel(channel_t *chan);
+
+#define circuit_mark_for_close(c, reason) \
+ circuit_mark_for_close_((c), (reason), __LINE__, SHORT_FILE__)
+
+void assert_cpath_layer_ok(const crypt_path_t *cp);
+MOCK_DECL(void, assert_circuit_ok,(const circuit_t *c));
+void circuit_free_all(void);
+void circuits_handle_oom(size_t current_allocation);
+
+void circuit_clear_testing_cell_stats(circuit_t *circ);
+
+void channel_note_destroy_pending(channel_t *chan, circid_t id);
+MOCK_DECL(void, channel_note_destroy_not_pending,
+ (channel_t *chan, circid_t id));
+
+smartlist_t *circuit_find_circuits_to_upgrade_from_guard_wait(void);
+
+#ifdef CIRCUITLIST_PRIVATE
+STATIC void circuit_free_(circuit_t *circ);
+#define circuit_free(circ) FREE_AND_NULL(circuit_t, circuit_free_, (circ))
+STATIC size_t n_cells_in_circ_queues(const circuit_t *c);
+STATIC uint32_t circuit_max_queued_data_age(const circuit_t *c, uint32_t now);
+STATIC uint32_t circuit_max_queued_cell_age(const circuit_t *c, uint32_t now);
+STATIC uint32_t circuit_max_queued_item_age(const circuit_t *c, uint32_t now);
+#endif /* defined(CIRCUITLIST_PRIVATE) */
+
+#endif /* !defined(TOR_CIRCUITLIST_H) */
diff --git a/src/core/or/circuitmux.c b/src/core/or/circuitmux.c
new file mode 100644
index 0000000000..9e0126b8f1
--- /dev/null
+++ b/src/core/or/circuitmux.c
@@ -0,0 +1,1364 @@
+/* * Copyright (c) 2012-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circuitmux.c
+ * \brief Circuit mux/cell selection abstraction
+ *
+ * A circuitmux is responsible for <b>MU</b>ltiple<b>X</b>ing all of the
+ * circuits that are writing on a single channel. It keeps track of which of
+ * these circuits has something to write (aka, "active" circuits), and which
+ * one should write next. A circuitmux corresponds 1:1 with a channel.
+ *
+ * There can be different implementations of the circuitmux's rules (which
+ * decide which circuit is next to write).
+ *
+ * A circuitmux exposes three distinct
+ * interfaces to other components:
+ *
+ * To channels, which each have a circuitmux_t, the supported operations
+ * (invoked from relay.c) are:
+ *
+ * circuitmux_get_first_active_circuit():
+ *
+ * Pick one of the circuitmux's active circuits to send cells from.
+ *
+ * circuitmux_notify_xmit_cells():
+ *
+ * Notify the circuitmux that cells have been sent on a circuit.
+ *
+ * To circuits, the exposed operations are:
+ *
+ * circuitmux_attach_circuit():
+ *
+ * Attach a circuit to the circuitmux; this will allocate any policy-
+ * specific data wanted for this circuit and add it to the active
+ * circuits list if it has queued cells.
+ *
+ * circuitmux_detach_circuit():
+ *
+ * Detach a circuit from the circuitmux, freeing associated structures.
+ *
+ * circuitmux_clear_num_cells():
+ *
+ * Clear the circuitmux's cell counter for this circuit.
+ *
+ * circuitmux_set_num_cells():
+ *
+ * Set the circuitmux's cell counter for this circuit. One of
+ * circuitmux_clear_num_cells() or circuitmux_set_num_cells() MUST be
+ * called when the number of cells queued on a circuit changes.
+ *
+ * See circuitmux.h for the circuitmux_policy_t data structure, which contains
+ * a table of function pointers implementing a circuit selection policy, and
+ * circuitmux_ewma.c for an example of a circuitmux policy. Circuitmux
+ * policies can be manipulated with:
+ *
+ * circuitmux_get_policy():
+ *
+ * Return the current policy for a circuitmux_t, if any.
+ *
+ * circuitmux_clear_policy():
+ *
+ * Remove a policy installed on a circuitmux_t, freeing all associated
+ * data. The circuitmux will revert to the built-in round-robin behavior.
+ *
+ * circuitmux_set_policy():
+ *
+ * Install a policy on a circuitmux_t; the appropriate callbacks will be
+ * made to attach all existing circuits to the new policy.
+ **/
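+
+/* A minimal usage sketch of the circuit-side interface described above
+ * (hypothetical caller; assumes `cmux`, `circ`, and the circuit's outbound
+ * cell queue are in scope):
+ *
+ *   circuitmux_attach_circuit(cmux, circ, CELL_DIRECTION_OUT);
+ *   ...whenever the circuit's queued-cell count changes...
+ *   circuitmux_set_num_cells(cmux, circ, circ->n_chan_cells.n);
+ *   ...when the circuit goes away or changes channel...
+ *   circuitmux_detach_circuit(cmux, circ);
+ */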
+
+#include "or/or.h"
+#include "or/channel.h"
+#include "or/circuitlist.h"
+#include "or/circuitmux.h"
+#include "or/relay.h"
+
+#include "or/cell_queue_st.h"
+#include "or/destroy_cell_queue_st.h"
+#include "or/or_circuit_st.h"
+
+/*
+ * Private typedefs for circuitmux.c
+ */
+
+/*
+ * Map of muxinfos for circuitmux_t to use; struct is defined below (name
+ * of struct must match HT_HEAD line).
+ */
+typedef struct chanid_circid_muxinfo_map chanid_circid_muxinfo_map_t;
+
+/*
+ * Hash table entry (yeah, calling it chanid_circid_muxinfo_s seems to
+ * break the hash table code).
+ */
+typedef struct chanid_circid_muxinfo_t chanid_circid_muxinfo_t;
+
+/*
+ * Anything the mux wants to store per-circuit in the map; right now just
+ * a count of queued cells.
+ */
+
+typedef struct circuit_muxinfo_s circuit_muxinfo_t;
+
+/*
+ * Structures for circuitmux.c
+ */
+
+struct circuitmux_s {
+ /* Keep count of attached, active circuits */
+ unsigned int n_circuits, n_active_circuits;
+
+ /* Total number of queued cells on all circuits */
+ unsigned int n_cells;
+
+ /*
+ * Map from (channel ID, circuit ID) pairs to circuit_muxinfo_t
+ */
+ chanid_circid_muxinfo_map_t *chanid_circid_map;
+
+ /** List of queued destroy cells */
+ destroy_cell_queue_t destroy_cell_queue;
+ /** Boolean: True iff the last call to circuitmux_get_first_active_circuit()
+ * returned the destroy queue. Used to force alternation between
+ * destroy/non-destroy cells.
+ *
+ * XXXX There is no reason to think that alternating is a particularly good
+ * approach -- it's just designed to prevent destroys from starving other
+ * cells completely.
+ */
+ unsigned int last_cell_was_destroy : 1;
+ /** Destroy counter: increment this when a destroy gets queued, decrement
+ * when we unqueue it, so we can test to make sure they don't starve.
+ */
+ int64_t destroy_ctr;
+
+ /*
+ * Circuitmux policy; if this is non-NULL, it can override the built-
+ * in round-robin active circuits behavior. This is how EWMA works in
+ * the new circuitmux_t world.
+ */
+ const circuitmux_policy_t *policy;
+
+ /* Policy-specific data */
+ circuitmux_policy_data_t *policy_data;
+};
+
+/*
+ * This struct holds whatever we want to store per attached circuit on a
+ * circuitmux_t; right now, just the count of queued cells and the direction.
+ */
+
+struct circuit_muxinfo_s {
+ /* Count of cells on this circuit at last update */
+ unsigned int cell_count;
+ /* Direction of flow */
+ cell_direction_t direction;
+ /* Policy-specific data */
+ circuitmux_policy_circ_data_t *policy_data;
+ /* Mark bit for consistency checker */
+ unsigned int mark:1;
+};
+
+/*
+ * A map from channel ID and circuit ID to a circuit_muxinfo_t for that
+ * circuit.
+ */
+
+struct chanid_circid_muxinfo_t {
+ HT_ENTRY(chanid_circid_muxinfo_t) node;
+ uint64_t chan_id;
+ circid_t circ_id;
+ circuit_muxinfo_t muxinfo;
+};
+
+/*
+ * Static function declarations
+ */
+
+static inline int
+chanid_circid_entries_eq(chanid_circid_muxinfo_t *a,
+ chanid_circid_muxinfo_t *b);
+static inline unsigned int
+chanid_circid_entry_hash(chanid_circid_muxinfo_t *a);
+static chanid_circid_muxinfo_t *
+circuitmux_find_map_entry(circuitmux_t *cmux, circuit_t *circ);
+static void
+circuitmux_make_circuit_active(circuitmux_t *cmux, circuit_t *circ);
+static void
+circuitmux_make_circuit_inactive(circuitmux_t *cmux, circuit_t *circ);
+
+/* Static global variables */
+
+/** Count the destroy balance to debug destroy queue logic */
+static int64_t global_destroy_ctr = 0;
+
+/* Function definitions */
+
+/**
+ * Helper for the chanid_circid_muxinfo_map hash table: return true iff
+ * a and b have the same channel ID and circuit ID.
+ */
+
+static inline int
+chanid_circid_entries_eq(chanid_circid_muxinfo_t *a,
+ chanid_circid_muxinfo_t *b)
+{
+ return a->chan_id == b->chan_id && a->circ_id == b->circ_id;
+}
+
+/**
+ * Helper: return a hash based on circuit ID and channel ID in a.
+ */
+
+static inline unsigned int
+chanid_circid_entry_hash(chanid_circid_muxinfo_t *a)
+{
+ return (((unsigned int)(a->circ_id) << 8) ^
+ ((unsigned int)((a->chan_id >> 32) & 0xffffffff)) ^
+ ((unsigned int)(a->chan_id & 0xffffffff)));
+}
+
+/* Declare the struct chanid_circid_muxinfo_map type */
+HT_HEAD(chanid_circid_muxinfo_map, chanid_circid_muxinfo_t);
+
+/* Emit a bunch of hash table stuff */
+HT_PROTOTYPE(chanid_circid_muxinfo_map, chanid_circid_muxinfo_t, node,
+ chanid_circid_entry_hash, chanid_circid_entries_eq)
+HT_GENERATE2(chanid_circid_muxinfo_map, chanid_circid_muxinfo_t, node,
+ chanid_circid_entry_hash, chanid_circid_entries_eq, 0.6,
+ tor_reallocarray_, tor_free_)
+
+/*
+ * Circuitmux alloc/free functions
+ */
+
+/**
+ * Allocate a new circuitmux_t
+ */
+
+circuitmux_t *
+circuitmux_alloc(void)
+{
+ circuitmux_t *rv = NULL;
+
+ rv = tor_malloc_zero(sizeof(*rv));
+ rv->chanid_circid_map = tor_malloc_zero(sizeof(*( rv->chanid_circid_map)));
+ HT_INIT(chanid_circid_muxinfo_map, rv->chanid_circid_map);
+ destroy_cell_queue_init(&rv->destroy_cell_queue);
+
+ return rv;
+}
+
+/**
+ * Detach all circuits from a circuitmux (use before circuitmux_free())
+ *
+ * If <b>detached_out</b> is non-NULL, add every detached circuit_t to
+ * detached_out.
+ */
+
+void
+circuitmux_detach_all_circuits(circuitmux_t *cmux, smartlist_t *detached_out)
+{
+ chanid_circid_muxinfo_t **i = NULL, *to_remove;
+ channel_t *chan = NULL;
+ circuit_t *circ = NULL;
+
+ tor_assert(cmux);
+
+ i = HT_START(chanid_circid_muxinfo_map, cmux->chanid_circid_map);
+ while (i) {
+ to_remove = *i;
+
+ if (! to_remove) {
+ log_warn(LD_BUG, "Somehow, an HT iterator gave us a NULL pointer.");
+ break;
+ } else {
+ /* Find a channel and circuit */
+ chan = channel_find_by_global_id(to_remove->chan_id);
+ if (chan) {
+ circ =
+ circuit_get_by_circid_channel_even_if_marked(to_remove->circ_id,
+ chan);
+ if (circ) {
+ /* Clear the circuit's mux for this direction */
+ if (to_remove->muxinfo.direction == CELL_DIRECTION_OUT) {
+ /*
+ * Update active_circuits et al.; this does policy notifies, so
+ * comes before freeing policy data
+ */
+
+ if (to_remove->muxinfo.cell_count > 0) {
+ circuitmux_make_circuit_inactive(cmux, circ);
+ }
+
+ /* Clear n_mux */
+ circ->n_mux = NULL;
+
+ if (detached_out)
+ smartlist_add(detached_out, circ);
+ } else if (circ->magic == OR_CIRCUIT_MAGIC) {
+ /*
+ * Update active_circuits et al.; this does policy notifies, so
+ * comes before freeing policy data
+ */
+
+ if (to_remove->muxinfo.cell_count > 0) {
+ circuitmux_make_circuit_inactive(cmux, circ);
+ }
+
+ /*
+ * It has a sensible p_chan and direction == CELL_DIRECTION_IN,
+ * so clear p_mux.
+ */
+ TO_OR_CIRCUIT(circ)->p_mux = NULL;
+
+ if (detached_out)
+ smartlist_add(detached_out, circ);
+ } else {
+ /* Complain and move on */
+ log_warn(LD_CIRC,
+ "Circuit %u/channel %"PRIu64 " had direction == "
+ "CELL_DIRECTION_IN, but isn't an or_circuit_t",
+ (unsigned)to_remove->circ_id,
+ (to_remove->chan_id));
+ }
+
+ /* Free policy-specific data if we have it */
+ if (to_remove->muxinfo.policy_data) {
+ /*
+ * If we have policy data, assert that we have the means to
+ * free it
+ */
+ tor_assert(cmux->policy);
+ tor_assert(cmux->policy->free_circ_data);
+ /* Call free_circ_data() */
+ cmux->policy->free_circ_data(cmux,
+ cmux->policy_data,
+ circ,
+ to_remove->muxinfo.policy_data);
+ to_remove->muxinfo.policy_data = NULL;
+ }
+ } else {
+ /* Complain and move on */
+ log_warn(LD_CIRC,
+ "Couldn't find circuit %u (for channel %"PRIu64 ")",
+ (unsigned)to_remove->circ_id,
+ (to_remove->chan_id));
+ }
+ } else {
+ /* Complain and move on */
+ log_warn(LD_CIRC,
+ "Couldn't find channel %"PRIu64 " (for circuit id %u)",
+ (to_remove->chan_id),
+ (unsigned)to_remove->circ_id);
+ }
+
+ /* Assert that we don't have un-freed policy data for this circuit */
+ tor_assert(to_remove->muxinfo.policy_data == NULL);
+ }
+
+ i = HT_NEXT_RMV(chanid_circid_muxinfo_map, cmux->chanid_circid_map, i);
+
+ /* Free it */
+ tor_free(to_remove);
+ }
+
+ cmux->n_circuits = 0;
+ cmux->n_active_circuits = 0;
+ cmux->n_cells = 0;
+}
+
+/** Reclaim all circuit IDs currently marked as unusable on <b>chan</b> because
+ * of pending destroy cells in <b>cmux</b>.
+ *
+ * This function must be called AFTER circuits are unlinked from the (channel,
+ * circuit-id) map with circuit_unlink_all_from_channel(), but before calling
+ * circuitmux_free().
+ */
+void
+circuitmux_mark_destroyed_circids_usable(circuitmux_t *cmux, channel_t *chan)
+{
+ destroy_cell_t *cell;
+ TOR_SIMPLEQ_FOREACH(cell, &cmux->destroy_cell_queue.head, next) {
+ channel_mark_circid_usable(chan, cell->circid);
+ }
+}
+
+/**
+ * Free a circuitmux_t; the circuits must be detached first with
+ * circuitmux_detach_all_circuits().
+ */
+
+void
+circuitmux_free_(circuitmux_t *cmux)
+{
+ if (!cmux) return;
+
+ tor_assert(cmux->n_circuits == 0);
+ tor_assert(cmux->n_active_circuits == 0);
+
+ /*
+ * Free policy-specific data if we have any; we don't
+ * need to do circuitmux_set_policy(cmux, NULL) to cover
+ * the circuits because they would have been handled in
+ * circuitmux_detach_all_circuits() before this was
+ * called.
+ */
+ if (cmux->policy && cmux->policy->free_cmux_data) {
+ if (cmux->policy_data) {
+ cmux->policy->free_cmux_data(cmux, cmux->policy_data);
+ cmux->policy_data = NULL;
+ }
+ } else tor_assert(cmux->policy_data == NULL);
+
+ if (cmux->chanid_circid_map) {
+ HT_CLEAR(chanid_circid_muxinfo_map, cmux->chanid_circid_map);
+ tor_free(cmux->chanid_circid_map);
+ }
+
+ /*
+ * We're throwing away some destroys; log the counter and
+ * adjust the global counter by the queue size.
+ */
+ if (cmux->destroy_cell_queue.n > 0) {
+ cmux->destroy_ctr -= cmux->destroy_cell_queue.n;
+ global_destroy_ctr -= cmux->destroy_cell_queue.n;
+ log_debug(LD_CIRC,
+ "Freeing cmux at %p with %u queued destroys; the last cmux "
+ "destroy balance was %"PRId64", global is %"PRId64,
+ cmux, cmux->destroy_cell_queue.n,
+ (cmux->destroy_ctr),
+ (global_destroy_ctr));
+ } else {
+ log_debug(LD_CIRC,
+ "Freeing cmux at %p with no queued destroys, the cmux destroy "
+ "balance was %"PRId64", global is %"PRId64,
+ cmux,
+ (cmux->destroy_ctr),
+ (global_destroy_ctr));
+ }
+
+ destroy_cell_queue_clear(&cmux->destroy_cell_queue);
+
+ tor_free(cmux);
+}
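+
+/* Teardown ordering, as a sketch (mirroring what channel shutdown code is
+ * expected to do; the exact call site may differ):
+ *
+ *   circuit_unlink_all_from_channel(chan, END_CIRC_REASON_CHANNEL_CLOSED);
+ *   circuitmux_detach_all_circuits(cmux, NULL);
+ *   circuitmux_mark_destroyed_circids_usable(cmux, chan);
+ *   circuitmux_free(cmux);
+ */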
+
+/*
+ * Circuitmux policy control functions
+ */
+
+/**
+ * Remove any policy installed on cmux; all policy data will be freed and
+ * cmux behavior will revert to the built-in round-robin active_circuits
+ * mechanism.
+ */
+
+void
+circuitmux_clear_policy(circuitmux_t *cmux)
+{
+ tor_assert(cmux);
+
+ /* Internally, this is just setting policy to NULL */
+ circuitmux_set_policy(cmux, NULL);
+}
+
+/**
+ * Return the policy currently installed on a circuitmux_t
+ */
+
+MOCK_IMPL(const circuitmux_policy_t *,
+circuitmux_get_policy, (circuitmux_t *cmux))
+{
+ tor_assert(cmux);
+
+ return cmux->policy;
+}
+
+/**
+ * Set policy; allocate for new policy, detach all circuits from old policy
+ * if any, attach them to new policy, and free old policy data.
+ */
+
+void
+circuitmux_set_policy(circuitmux_t *cmux,
+ const circuitmux_policy_t *pol)
+{
+ const circuitmux_policy_t *old_pol = NULL, *new_pol = NULL;
+ circuitmux_policy_data_t *old_pol_data = NULL, *new_pol_data = NULL;
+ chanid_circid_muxinfo_t **i = NULL;
+ channel_t *chan = NULL;
+ uint64_t last_chan_id_searched = 0;
+ circuit_t *circ = NULL;
+
+ tor_assert(cmux);
+
+ /* Set up variables */
+ old_pol = cmux->policy;
+ old_pol_data = cmux->policy_data;
+ new_pol = pol;
+
+ /* Check if this is the trivial case */
+ if (old_pol == new_pol) return;
+
+ /* Allocate data for new policy, if any */
+ if (new_pol && new_pol->alloc_cmux_data) {
+ /*
+ * If alloc_cmux_data is not null, then we expect to get some policy
+ * data. Assert that we also have free_cmux_data so we can free it
+ * when the time comes, and allocate it.
+ */
+ tor_assert(new_pol->free_cmux_data);
+ new_pol_data = new_pol->alloc_cmux_data(cmux);
+ tor_assert(new_pol_data);
+ }
+
+ /* Install new policy and new policy data on cmux */
+ cmux->policy = new_pol;
+ cmux->policy_data = new_pol_data;
+
+ /* Iterate over all circuits, attaching/detaching each one */
+ i = HT_START(chanid_circid_muxinfo_map, cmux->chanid_circid_map);
+ while (i) {
+ /* Assert that this entry isn't NULL */
+ tor_assert(*i);
+
+ /*
+ * Get the channel; since normal case is all circuits on the mux share a
+ * channel, we cache last_chan_id_searched
+ */
+ if (!chan || last_chan_id_searched != (*i)->chan_id) {
+ chan = channel_find_by_global_id((*i)->chan_id);
+ last_chan_id_searched = (*i)->chan_id;
+ }
+ tor_assert(chan);
+
+ /* Get the circuit */
+ circ = circuit_get_by_circid_channel_even_if_marked((*i)->circ_id, chan);
+ tor_assert(circ);
+
+ /* Tell the old policy this circuit is going inactive, if it's active */
+ if (old_pol && old_pol->notify_circ_inactive &&
+ (*i)->muxinfo.cell_count > 0) {
+ old_pol->notify_circ_inactive(cmux, old_pol_data, circ,
+ (*i)->muxinfo.policy_data);
+ }
+
+ /* Need to free old policy data? */
+ if ((*i)->muxinfo.policy_data) {
+ /* Assert that we have the means to free it if we have policy data */
+ tor_assert(old_pol);
+ tor_assert(old_pol->free_circ_data);
+ /* Free it */
+ old_pol->free_circ_data(cmux, old_pol_data, circ,
+ (*i)->muxinfo.policy_data);
+ (*i)->muxinfo.policy_data = NULL;
+ }
+
+ /* Need to allocate new policy data? */
+ if (new_pol && new_pol->alloc_circ_data) {
+ /*
+ * If alloc_circ_data is not null, we expect to get some per-circuit
+ * policy data. Assert that we also have free_circ_data so we can
+ * free it when the time comes, and allocate it.
+ */
+ tor_assert(new_pol->free_circ_data);
+ (*i)->muxinfo.policy_data =
+ new_pol->alloc_circ_data(cmux, new_pol_data, circ,
+ (*i)->muxinfo.direction,
+ (*i)->muxinfo.cell_count);
+ }
+
+ /* Need to make active on new policy? */
+ if (new_pol && new_pol->notify_circ_active &&
+ (*i)->muxinfo.cell_count > 0) {
+ new_pol->notify_circ_active(cmux, new_pol_data, circ,
+ (*i)->muxinfo.policy_data);
+ }
+
+ /* Advance to next circuit map entry */
+ i = HT_NEXT(chanid_circid_muxinfo_map, cmux->chanid_circid_map, i);
+ }
+
+ /* Free data for old policy, if any */
+ if (old_pol_data) {
+ /*
+ * If we had old policy data, we should have an old policy and a free
+ * function for it.
+ */
+ tor_assert(old_pol);
+ tor_assert(old_pol->free_cmux_data);
+ old_pol->free_cmux_data(cmux, old_pol_data);
+ old_pol_data = NULL;
+ }
+}
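+
+/* Typical usage sketch: install the EWMA policy defined in
+ * circuitmux_ewma.c on a freshly allocated cmux:
+ *
+ *   circuitmux_set_policy(cmux, &ewma_policy);
+ *
+ * Passing NULL is how circuitmux_clear_policy() reverts to the built-in
+ * round-robin behavior. */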
+
+/*
+ * Circuitmux/circuit attachment status inquiry functions
+ */
+
+/**
+ * Query the direction of an attached circuit
+ */
+
+cell_direction_t
+circuitmux_attached_circuit_direction(circuitmux_t *cmux, circuit_t *circ)
+{
+ chanid_circid_muxinfo_t *hashent = NULL;
+
+ /* Try to find a map entry */
+ hashent = circuitmux_find_map_entry(cmux, circ);
+
+ /*
+ * This function should only be called on attached circuits; assert that
+ * we had a map entry.
+ */
+ tor_assert(hashent);
+
+ /* Return the direction from the map entry */
+ return hashent->muxinfo.direction;
+}
+
+/**
+ * Find an entry in the cmux's map for this circuit or return NULL if there
+ * is none.
+ */
+
+static chanid_circid_muxinfo_t *
+circuitmux_find_map_entry(circuitmux_t *cmux, circuit_t *circ)
+{
+ chanid_circid_muxinfo_t search, *hashent = NULL;
+
+ /* Sanity-check parameters */
+ tor_assert(cmux);
+ tor_assert(cmux->chanid_circid_map);
+ tor_assert(circ);
+
+ /* Check if we have n_chan */
+ if (circ->n_chan) {
+ /* Okay, let's see if it's attached for n_chan/n_circ_id */
+ search.chan_id = circ->n_chan->global_identifier;
+ search.circ_id = circ->n_circ_id;
+
+ /* Query */
+ hashent = HT_FIND(chanid_circid_muxinfo_map, cmux->chanid_circid_map,
+ &search);
+ }
+
+ /* Found something? */
+ if (hashent) {
+ /*
+ * Assert that the direction makes sense for a hashent we found by
+ * n_chan/n_circ_id before we return it.
+ */
+ tor_assert(hashent->muxinfo.direction == CELL_DIRECTION_OUT);
+ } else {
+ /* Not there, have we got a p_chan/p_circ_id to try? */
+ if (circ->magic == OR_CIRCUIT_MAGIC) {
+ search.circ_id = TO_OR_CIRCUIT(circ)->p_circ_id;
+ /* Check for p_chan */
+ if (TO_OR_CIRCUIT(circ)->p_chan) {
+ search.chan_id = TO_OR_CIRCUIT(circ)->p_chan->global_identifier;
+ /* Okay, search for that */
+ hashent = HT_FIND(chanid_circid_muxinfo_map, cmux->chanid_circid_map,
+ &search);
+ /* Find anything? */
+ if (hashent) {
+ /* Assert that the direction makes sense before we return it */
+ tor_assert(hashent->muxinfo.direction == CELL_DIRECTION_IN);
+ }
+ }
+ }
+ }
+
+ /* Okay, hashent is it if it was there */
+ return hashent;
+}
+
+/**
+ * Query whether a circuit is attached to a circuitmux
+ */
+
+int
+circuitmux_is_circuit_attached(circuitmux_t *cmux, circuit_t *circ)
+{
+ chanid_circid_muxinfo_t *hashent = NULL;
+
+ /* Look if it's in the circuit map */
+ hashent = circuitmux_find_map_entry(cmux, circ);
+
+ return (hashent != NULL);
+}
+
+/**
+ * Query whether a circuit is active on a circuitmux
+ */
+
+int
+circuitmux_is_circuit_active(circuitmux_t *cmux, circuit_t *circ)
+{
+ chanid_circid_muxinfo_t *hashent = NULL;
+ int is_active = 0;
+
+ tor_assert(cmux);
+ tor_assert(circ);
+
+ /* Look if it's in the circuit map */
+ hashent = circuitmux_find_map_entry(cmux, circ);
+ if (hashent) {
+ /* Check the number of cells on this circuit */
+ is_active = (hashent->muxinfo.cell_count > 0);
+ }
+ /* else not attached, so not active */
+
+ return is_active;
+}
+
+/**
+ * Query number of available cells for a circuit on a circuitmux
+ */
+
+unsigned int
+circuitmux_num_cells_for_circuit(circuitmux_t *cmux, circuit_t *circ)
+{
+ chanid_circid_muxinfo_t *hashent = NULL;
+ unsigned int n_cells = 0;
+
+ tor_assert(cmux);
+ tor_assert(circ);
+
+ /* Look if it's in the circuit map */
+ hashent = circuitmux_find_map_entry(cmux, circ);
+ if (hashent) {
+ /* Just get the cell count for this circuit */
+ n_cells = hashent->muxinfo.cell_count;
+ }
+ /* else not attached, so 0 cells */
+
+ return n_cells;
+}
+
+/**
+ * Query total number of available cells on a circuitmux
+ */
+
+MOCK_IMPL(unsigned int,
+circuitmux_num_cells, (circuitmux_t *cmux))
+{
+ tor_assert(cmux);
+
+ return cmux->n_cells + cmux->destroy_cell_queue.n;
+}
+
+/**
+ * Query total number of circuits active on a circuitmux
+ */
+
+unsigned int
+circuitmux_num_active_circuits(circuitmux_t *cmux)
+{
+ tor_assert(cmux);
+
+ return cmux->n_active_circuits;
+}
+
+/**
+ * Query total number of circuits attached to a circuitmux
+ */
+
+unsigned int
+circuitmux_num_circuits(circuitmux_t *cmux)
+{
+ tor_assert(cmux);
+
+ return cmux->n_circuits;
+}
+
+/*
+ * Functions for circuit code to call to update circuit status
+ */
+
+/**
+ * Attach a circuit to a circuitmux, for the specified direction.
+ */
+
+MOCK_IMPL(void,
+circuitmux_attach_circuit,(circuitmux_t *cmux, circuit_t *circ,
+ cell_direction_t direction))
+{
+ channel_t *chan = NULL;
+ uint64_t channel_id;
+ circid_t circ_id;
+ chanid_circid_muxinfo_t search, *hashent = NULL;
+ unsigned int cell_count;
+
+ tor_assert(cmux);
+ tor_assert(circ);
+ tor_assert(direction == CELL_DIRECTION_IN ||
+ direction == CELL_DIRECTION_OUT);
+
+ /*
+ * Figure out which channel we're using, and get the circuit's current
+ * cell count and circuit ID; assert that the circuit is not already
+ * attached to another mux.
+ */
+ if (direction == CELL_DIRECTION_OUT) {
+ /* It's n_chan */
+ chan = circ->n_chan;
+ cell_count = circ->n_chan_cells.n;
+ circ_id = circ->n_circ_id;
+ } else {
+ /* We want p_chan */
+ chan = TO_OR_CIRCUIT(circ)->p_chan;
+ cell_count = TO_OR_CIRCUIT(circ)->p_chan_cells.n;
+ circ_id = TO_OR_CIRCUIT(circ)->p_circ_id;
+ }
+ /* Assert that we did get a channel */
+ tor_assert(chan);
+ /* Assert that the circuit ID is sensible */
+ tor_assert(circ_id != 0);
+
+ /* Get the channel ID */
+ channel_id = chan->global_identifier;
+
+ /* See if we already have this one */
+ search.chan_id = channel_id;
+ search.circ_id = circ_id;
+ hashent = HT_FIND(chanid_circid_muxinfo_map, cmux->chanid_circid_map,
+ &search);
+
+ if (hashent) {
+ /*
+ * This circuit was already attached to this cmux; make sure the
+ * directions match and update the cell count and active circuit count.
+ */
+ log_info(LD_CIRC,
+ "Circuit %u on channel %"PRIu64 " was already attached to "
+ "cmux %p (trying to attach to %p)",
+ (unsigned)circ_id, (channel_id),
+ ((direction == CELL_DIRECTION_OUT) ?
+ circ->n_mux : TO_OR_CIRCUIT(circ)->p_mux),
+ cmux);
+
+ /*
+ * The mux pointer on this circuit and the direction in result should
+ * match; otherwise assert.
+ */
+ if (direction == CELL_DIRECTION_OUT) tor_assert(circ->n_mux == cmux);
+ else tor_assert(TO_OR_CIRCUIT(circ)->p_mux == cmux);
+ tor_assert(hashent->muxinfo.direction == direction);
+
+ /*
+ * Looks okay; just update the cell count and active circuits if we must
+ */
+ if (hashent->muxinfo.cell_count > 0 && cell_count == 0) {
+ --(cmux->n_active_circuits);
+ circuitmux_make_circuit_inactive(cmux, circ);
+ } else if (hashent->muxinfo.cell_count == 0 && cell_count > 0) {
+ ++(cmux->n_active_circuits);
+ circuitmux_make_circuit_active(cmux, circ);
+ }
+ cmux->n_cells -= hashent->muxinfo.cell_count;
+ cmux->n_cells += cell_count;
+ hashent->muxinfo.cell_count = cell_count;
+ } else {
+ /*
+ * New circuit; add an entry and update the circuit/active circuit
+ * counts.
+ */
+ log_debug(LD_CIRC,
+ "Attaching circuit %u on channel %"PRIu64 " to cmux %p",
+ (unsigned)circ_id, (channel_id), cmux);
+
+ /*
+ * Assert that the circuit doesn't already have a mux for this
+ * direction.
+ */
+ if (direction == CELL_DIRECTION_OUT) tor_assert(circ->n_mux == NULL);
+ else tor_assert(TO_OR_CIRCUIT(circ)->p_mux == NULL);
+
+ /* Insert it in the map */
+ hashent = tor_malloc_zero(sizeof(*hashent));
+ hashent->chan_id = channel_id;
+ hashent->circ_id = circ_id;
+ hashent->muxinfo.cell_count = cell_count;
+ hashent->muxinfo.direction = direction;
+ /* Allocate policy specific circuit data if we need it */
+ if (cmux->policy->alloc_circ_data) {
+ /* Assert that we have the means to free policy-specific data */
+ tor_assert(cmux->policy->free_circ_data);
+ /* Allocate it */
+ hashent->muxinfo.policy_data =
+ cmux->policy->alloc_circ_data(cmux,
+ cmux->policy_data,
+ circ,
+ direction,
+ cell_count);
+ /* If we wanted policy data, it's an error not to get any */
+ tor_assert(hashent->muxinfo.policy_data);
+ }
+ HT_INSERT(chanid_circid_muxinfo_map, cmux->chanid_circid_map,
+ hashent);
+
+ /* Set the circuit's mux for this direction */
+ if (direction == CELL_DIRECTION_OUT) circ->n_mux = cmux;
+ else TO_OR_CIRCUIT(circ)->p_mux = cmux;
+
+ /* Update counters */
+ ++(cmux->n_circuits);
+ if (cell_count > 0) {
+ ++(cmux->n_active_circuits);
+ circuitmux_make_circuit_active(cmux, circ);
+ }
+ cmux->n_cells += cell_count;
+ }
+}
+
+/**
+ * Detach a circuit from a circuitmux and update all counters as needed;
+ * no-op if not attached.
+ */
+
+MOCK_IMPL(void,
+circuitmux_detach_circuit,(circuitmux_t *cmux, circuit_t *circ))
+{
+ chanid_circid_muxinfo_t search, *hashent = NULL;
+ /*
+ * Use this to keep track of whether we found it for n_chan or
+ * p_chan for consistency checking.
+ *
+ * The 0 initializer is not a valid cell_direction_t value.
+ * We assert that it has been replaced with a valid value before it is used.
+ */
+ cell_direction_t last_searched_direction = 0;
+
+ tor_assert(cmux);
+ tor_assert(cmux->chanid_circid_map);
+ tor_assert(circ);
+
+ /* See if we have it for n_chan/n_circ_id */
+ if (circ->n_chan) {
+ search.chan_id = circ->n_chan->global_identifier;
+ search.circ_id = circ->n_circ_id;
+ hashent = HT_FIND(chanid_circid_muxinfo_map, cmux->chanid_circid_map,
+ &search);
+ last_searched_direction = CELL_DIRECTION_OUT;
+ }
+
+ /* Got one? If not, see if it's an or_circuit_t and try p_chan/p_circ_id */
+ if (!hashent) {
+ if (circ->magic == OR_CIRCUIT_MAGIC) {
+ search.circ_id = TO_OR_CIRCUIT(circ)->p_circ_id;
+ if (TO_OR_CIRCUIT(circ)->p_chan) {
+ search.chan_id = TO_OR_CIRCUIT(circ)->p_chan->global_identifier;
+ hashent = HT_FIND(chanid_circid_muxinfo_map,
+ cmux->chanid_circid_map,
+ &search);
+ last_searched_direction = CELL_DIRECTION_IN;
+ }
+ }
+ }
+
+ tor_assert(last_searched_direction == CELL_DIRECTION_OUT
+ || last_searched_direction == CELL_DIRECTION_IN);
+
+ /*
+ * If hashent isn't NULL, we have a circuit to detach; don't remove it from
+ * the map until later, or circuitmux_make_circuit_inactive() will break,
+ * since it needs to look this entry up.
+ */
+ if (hashent) {
+ /* Update counters */
+ --(cmux->n_circuits);
+ if (hashent->muxinfo.cell_count > 0) {
+ --(cmux->n_active_circuits);
+ /* This does policy notifies, so comes before freeing policy data */
+ circuitmux_make_circuit_inactive(cmux, circ);
+ }
+ cmux->n_cells -= hashent->muxinfo.cell_count;
+
+ /* Free policy-specific data if we have it */
+ if (hashent->muxinfo.policy_data) {
+ /* If we have policy data, assert that we have the means to free it */
+ tor_assert(cmux->policy);
+ tor_assert(cmux->policy->free_circ_data);
+ /* Call free_circ_data() */
+ cmux->policy->free_circ_data(cmux,
+ cmux->policy_data,
+ circ,
+ hashent->muxinfo.policy_data);
+ hashent->muxinfo.policy_data = NULL;
+ }
+
+ /* Consistency check: the direction must match the direction searched */
+ tor_assert(last_searched_direction == hashent->muxinfo.direction);
+ /* Clear the circuit's mux for this direction */
+ if (last_searched_direction == CELL_DIRECTION_OUT) circ->n_mux = NULL;
+ else TO_OR_CIRCUIT(circ)->p_mux = NULL;
+
+ /* Now remove it from the map */
+ HT_REMOVE(chanid_circid_muxinfo_map, cmux->chanid_circid_map, hashent);
+
+ /* Free the hash entry */
+ tor_free(hashent);
+ }
+}
+
+/**
+ * Make a circuit active; update active list and policy-specific info, but
+ * we don't mess with the counters or hash table here.
+ */
+
+static void
+circuitmux_make_circuit_active(circuitmux_t *cmux, circuit_t *circ)
+{
+ tor_assert(cmux);
+ tor_assert(cmux->policy);
+ tor_assert(circ);
+
+ /* Policy-specific notification */
+ if (cmux->policy->notify_circ_active) {
+ /* Okay, we need to check the circuit for policy data now */
+ chanid_circid_muxinfo_t *hashent = circuitmux_find_map_entry(cmux, circ);
+ /* We should have found something */
+ tor_assert(hashent);
+ /* Notify */
+ cmux->policy->notify_circ_active(cmux, cmux->policy_data,
+ circ, hashent->muxinfo.policy_data);
+ }
+}
+
+/**
+ * Make a circuit inactive; update active list and policy-specific info, but
+ * we don't mess with the counters or hash table here.
+ */
+
+static void
+circuitmux_make_circuit_inactive(circuitmux_t *cmux, circuit_t *circ)
+{
+ tor_assert(cmux);
+ tor_assert(cmux->policy);
+ tor_assert(circ);
+
+ /* Policy-specific notification */
+ if (cmux->policy->notify_circ_inactive) {
+ /* Okay, we need to check the circuit for policy data now */
+ chanid_circid_muxinfo_t *hashent = circuitmux_find_map_entry(cmux, circ);
+ /* We should have found something */
+ tor_assert(hashent);
+ /* Notify */
+ cmux->policy->notify_circ_inactive(cmux, cmux->policy_data,
+ circ, hashent->muxinfo.policy_data);
+ }
+}
+
+/**
+ * Clear the cell counter for a circuit on a circuitmux
+ */
+
+void
+circuitmux_clear_num_cells(circuitmux_t *cmux, circuit_t *circ)
+{
+ /* This is the same as setting the cell count to zero */
+ circuitmux_set_num_cells(cmux, circ, 0);
+}
+
+/**
+ * Set the cell counter for a circuit on a circuitmux
+ */
+
+void
+circuitmux_set_num_cells(circuitmux_t *cmux, circuit_t *circ,
+ unsigned int n_cells)
+{
+ chanid_circid_muxinfo_t *hashent = NULL;
+
+ tor_assert(cmux);
+ tor_assert(circ);
+
+ /* Search for this circuit's entry */
+ hashent = circuitmux_find_map_entry(cmux, circ);
+ /* Assert that we found one */
+ tor_assert(hashent);
+
+ /* Update cmux cell counter */
+ cmux->n_cells -= hashent->muxinfo.cell_count;
+ cmux->n_cells += n_cells;
+
+ /* Do we need to notify a cmux policy? */
+ if (cmux->policy->notify_set_n_cells) {
+ /* Call notify_set_n_cells */
+ cmux->policy->notify_set_n_cells(cmux,
+ cmux->policy_data,
+ circ,
+ hashent->muxinfo.policy_data,
+ n_cells);
+ }
+
+ /*
+ * Update cmux active circuit counter: is the old cell count > 0 and the
+ * new cell count == 0 ?
+ */
+ if (hashent->muxinfo.cell_count > 0 && n_cells == 0) {
+ --(cmux->n_active_circuits);
+ hashent->muxinfo.cell_count = n_cells;
+ circuitmux_make_circuit_inactive(cmux, circ);
+ /* Is the old cell count == 0 and the new cell count > 0 ? */
+ } else if (hashent->muxinfo.cell_count == 0 && n_cells > 0) {
+ ++(cmux->n_active_circuits);
+ hashent->muxinfo.cell_count = n_cells;
+ circuitmux_make_circuit_active(cmux, circ);
+ } else {
+ hashent->muxinfo.cell_count = n_cells;
+ }
+}
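+
+/* Per the contract in the file-level comment, the owner must resynchronize
+ * the mux whenever a circuit's cell queue changes length. A sketch of a
+ * queueing path (hypothetical caller; `queue` is the circuit's cell queue
+ * for this direction):
+ *
+ *   append the cell to queue, then:
+ *   circuitmux_set_num_cells(cmux, circ, queue->n);
+ */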
+
+/*
+ * Functions for channel code to call to get a circuit to transmit from or
+ * notify that cells have been transmitted.
+ */
+
+/**
+ * Pick a circuit to send from, using the active circuits list or a
+ * circuitmux policy if one is available. This is called from channel.c.
+ *
+ * If we would rather send a destroy cell, return NULL and set
+ * *<b>destroy_queue_out</b> to the destroy queue.
+ *
+ * If we have nothing to send, set *<b>destroy_queue_out</b> to NULL and
+ * return NULL.
+ */
+
+circuit_t *
+circuitmux_get_first_active_circuit(circuitmux_t *cmux,
+ destroy_cell_queue_t **destroy_queue_out)
+{
+ circuit_t *circ = NULL;
+
+ tor_assert(cmux);
+ tor_assert(cmux->policy);
+ /* This callback is mandatory. */
+ tor_assert(cmux->policy->pick_active_circuit);
+ tor_assert(destroy_queue_out);
+
+ *destroy_queue_out = NULL;
+
+ if (cmux->destroy_cell_queue.n &&
+ (!cmux->last_cell_was_destroy || cmux->n_active_circuits == 0)) {
+ /* We have destroy cells to send, and either we just sent a relay cell,
+ * or we have no relay cells to send. */
+
+ /* XXXX We should let the cmux policy have some say in this eventually. */
+ /* XXXX Alternating is not a terribly brilliant approach here. */
+ *destroy_queue_out = &cmux->destroy_cell_queue;
+
+ cmux->last_cell_was_destroy = 1;
+ } else if (cmux->n_active_circuits > 0) {
+ /* We also must have a cell available for this to be the case */
+ tor_assert(cmux->n_cells > 0);
+ /* Ask the policy to pick the next circuit to send from */
+ circ = cmux->policy->pick_active_circuit(cmux, cmux->policy_data);
+ cmux->last_cell_was_destroy = 0;
+ } else {
+ tor_assert(cmux->n_cells == 0);
+ tor_assert(cmux->destroy_cell_queue.n == 0);
+ }
+
+ return circ;
+}
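+
+/* Channel-side usage sketch (simplified; real callers live in channel.c and
+ * relay.c, and handle flushing details this sketch omits):
+ *
+ *   destroy_cell_queue_t *destroy_queue = NULL;
+ *   circuit_t *circ =
+ *     circuitmux_get_first_active_circuit(cmux, &destroy_queue);
+ *   if (destroy_queue) {
+ *     send one destroy cell, then circuitmux_notify_xmit_destroy(cmux);
+ *   } else if (circ) {
+ *     send n cells from circ, then
+ *     circuitmux_notify_xmit_cells(cmux, circ, n);
+ *   }
+ */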
+
+/**
+ * Notify the circuitmux that cells have been sent on a circuit; this
+ * is called from channel.c.
+ */
+
+void
+circuitmux_notify_xmit_cells(circuitmux_t *cmux, circuit_t *circ,
+ unsigned int n_cells)
+{
+ chanid_circid_muxinfo_t *hashent = NULL;
+ int becomes_inactive = 0;
+
+ tor_assert(cmux);
+ tor_assert(circ);
+
+ if (n_cells == 0) return;
+
+ /*
+ * To handle this, we have to:
+ *
+ * 1.) Adjust the circuit's cell counter in the cmux hash table
+ * 2.) Move the circuit to the tail of the active_circuits linked list
+ * for this cmux, or make the circuit inactive if the cell count
+ * went to zero.
+ * 3.) Call cmux->policy->notify_xmit_cells(), if any
+ */
+
+ /* Find the hash entry */
+ hashent = circuitmux_find_map_entry(cmux, circ);
+ /* Assert that we found one */
+ tor_assert(hashent);
+
+ /* Adjust the cell counter and assert that we had that many cells to send */
+ tor_assert(n_cells <= hashent->muxinfo.cell_count);
+ hashent->muxinfo.cell_count -= n_cells;
+ /* Do we need to make the circuit inactive? */
+ if (hashent->muxinfo.cell_count == 0) becomes_inactive = 1;
+ /* Adjust the mux cell counter */
+ cmux->n_cells -= n_cells;
+
+ /*
+ * We call notify_xmit_cells() before making the circuit inactive if needed,
+ * so the policy can always count on this coming in on an active circuit.
+ */
+ if (cmux->policy->notify_xmit_cells) {
+ cmux->policy->notify_xmit_cells(cmux, cmux->policy_data, circ,
+ hashent->muxinfo.policy_data,
+ n_cells);
+ }
+
+ /*
+ * Now make the circuit inactive if needed; this will call the policy's
+ * notify_circ_inactive() if present.
+ */
+ if (becomes_inactive) {
+ --(cmux->n_active_circuits);
+ circuitmux_make_circuit_inactive(cmux, circ);
+ }
+}
+
+/**
+ * Notify the circuitmux that a destroy was sent, so we can update
+ * the counter.
+ */
+
+void
+circuitmux_notify_xmit_destroy(circuitmux_t *cmux)
+{
+ tor_assert(cmux);
+
+ --(cmux->destroy_ctr);
+ --(global_destroy_ctr);
+ log_debug(LD_CIRC,
+ "Cmux at %p sent a destroy, cmux counter is now %"PRId64", "
+ "global counter is now %"PRId64,
+ cmux,
+ (cmux->destroy_ctr),
+ (global_destroy_ctr));
+}
+
+/** Queue a destroy cell for circuit <b>circ_id</b> on <b>cmux</b>, update
+ * the destroy counters, and prime <b>chan</b>'s buffer if nothing is
+ * queued on it, so the destroy actually gets flushed. */
+void
+circuitmux_append_destroy_cell(channel_t *chan,
+ circuitmux_t *cmux,
+ circid_t circ_id,
+ uint8_t reason)
+{
+ destroy_cell_queue_append(&cmux->destroy_cell_queue, circ_id, reason);
+
+ /* Destroy entering the queue, update counters */
+ ++(cmux->destroy_ctr);
+ ++global_destroy_ctr;
+ log_debug(LD_CIRC,
+ "Cmux at %p queued a destroy for circ %u, cmux counter is now "
+ "%"PRId64", global counter is now %"PRId64,
+ cmux, circ_id,
+ (cmux->destroy_ctr),
+ (global_destroy_ctr));
+
+ /* XXXX Duplicate code from append_cell_to_circuit_queue */
+ if (!channel_has_queued_writes(chan)) {
+ /* There is no data at all waiting to be sent on the outbuf. Add a
+ * cell, so that we can notice when it gets flushed, flushed_some can
+ * get called, and we can start putting more data onto the buffer then.
+ */
+ log_debug(LD_GENERAL, "Primed a buffer.");
+ channel_flush_from_first_active_circuit(chan, 1);
+ }
+}
+
+/** Return the number of destroy cells queued on <b>cmux</b>, cross-checking
+ * the counters against the queue itself and logging any discrepancy as a
+ * bug. For debugging 12184. This runs slowly. */
+int64_t
+circuitmux_count_queued_destroy_cells(const channel_t *chan,
+ const circuitmux_t *cmux)
+{
+ int64_t n_destroy_cells = cmux->destroy_ctr;
+ int64_t destroy_queue_size = cmux->destroy_cell_queue.n;
+
+ int64_t manual_total = 0;
+ int64_t manual_total_in_map = 0;
+ destroy_cell_t *cell;
+
+ TOR_SIMPLEQ_FOREACH(cell, &cmux->destroy_cell_queue.head, next) {
+ circid_t id;
+ ++manual_total;
+
+ id = cell->circid;
+ if (circuit_id_in_use_on_channel(id, (channel_t*)chan))
+ ++manual_total_in_map;
+ }
+
+ if (n_destroy_cells != destroy_queue_size ||
+ n_destroy_cells != manual_total ||
+ n_destroy_cells != manual_total_in_map) {
+ log_warn(LD_BUG, " Discrepancy in counts for queued destroy cells on "
+ "circuitmux. n=%"PRId64". queue_size=%"PRId64". "
+ "manual_total=%"PRId64". manual_total_in_map=%"PRId64".",
+ (n_destroy_cells),
+ (destroy_queue_size),
+ (manual_total),
+ (manual_total_in_map));
+ }
+
+ return n_destroy_cells;
+}
+
+/**
+ * Compare cmuxes to see which is more preferred; return < 0 if
+ * cmux_1 has higher priority (i.e., cmux_1 < cmux_2 in the scheduler's
+ * sort order), > 0 if cmux_2 has higher priority, or 0 if they are
+ * equally preferred.
+ *
+ * If the cmuxes have different cmux policies or the policy does not
+ * support the cmp_cmux method, return 0.
+ */
+
+MOCK_IMPL(int,
+circuitmux_compare_muxes, (circuitmux_t *cmux_1, circuitmux_t *cmux_2))
+{
+ const circuitmux_policy_t *policy;
+
+ tor_assert(cmux_1);
+ tor_assert(cmux_2);
+
+ if (cmux_1 == cmux_2) {
+ /* Equivalent because they're the same cmux */
+ return 0;
+ }
+
+ if (cmux_1->policy && cmux_2->policy) {
+ if (cmux_1->policy == cmux_2->policy) {
+ policy = cmux_1->policy;
+
+ if (policy->cmp_cmux) {
+ /* Okay, we can compare! */
+ return policy->cmp_cmux(cmux_1, cmux_1->policy_data,
+ cmux_2, cmux_2->policy_data);
+ } else {
+ /*
+ * Equivalent because the policy doesn't know how to compare between
+ * muxes.
+ */
+ return 0;
+ }
+ } else {
+ /* Equivalent because they have different policies */
+ return 0;
+ }
+ } else {
+ /* Equivalent because one or both are missing a policy */
+ return 0;
+ }
+}
+
diff --git a/src/core/or/circuitmux.h b/src/core/or/circuitmux.h
new file mode 100644
index 0000000000..e94cc354c7
--- /dev/null
+++ b/src/core/or/circuitmux.h
@@ -0,0 +1,162 @@
+/* * Copyright (c) 2012-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circuitmux.h
+ * \brief Header file for circuitmux.c
+ **/
+
+#ifndef TOR_CIRCUITMUX_H
+#define TOR_CIRCUITMUX_H
+
+#include "or/or.h"
+#include "lib/testsupport/testsupport.h"
+
+typedef struct circuitmux_policy_s circuitmux_policy_t;
+typedef struct circuitmux_policy_data_s circuitmux_policy_data_t;
+typedef struct circuitmux_policy_circ_data_s circuitmux_policy_circ_data_t;
+
+struct circuitmux_policy_s {
+ /* Allocate cmux-wide policy-specific data */
+ circuitmux_policy_data_t * (*alloc_cmux_data)(circuitmux_t *cmux);
+ /* Free cmux-wide policy-specific data */
+ void (*free_cmux_data)(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data);
+ /* Allocate circuit policy-specific data for a newly attached circuit */
+ circuitmux_policy_circ_data_t *
+ (*alloc_circ_data)(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data,
+ circuit_t *circ,
+ cell_direction_t direction,
+ unsigned int cell_count);
+ /* Free circuit policy-specific data */
+ void (*free_circ_data)(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data,
+ circuit_t *circ,
+ circuitmux_policy_circ_data_t *pol_circ_data);
+ /* Notify that a circuit has become active/inactive */
+ void (*notify_circ_active)(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data,
+ circuit_t *circ,
+ circuitmux_policy_circ_data_t *pol_circ_data);
+ void (*notify_circ_inactive)(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data,
+ circuit_t *circ,
+ circuitmux_policy_circ_data_t *pol_circ_data);
+ /* Notify of arriving/transmitted cells on a circuit */
+ void (*notify_set_n_cells)(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data,
+ circuit_t *circ,
+ circuitmux_policy_circ_data_t *pol_circ_data,
+ unsigned int n_cells);
+ void (*notify_xmit_cells)(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data,
+ circuit_t *circ,
+ circuitmux_policy_circ_data_t *pol_circ_data,
+ unsigned int n_cells);
+ /* Choose a circuit */
+ circuit_t * (*pick_active_circuit)(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data);
+ /* Optional: channel comparator for use by the scheduler */
+ int (*cmp_cmux)(circuitmux_t *cmux_1, circuitmux_policy_data_t *pol_data_1,
+ circuitmux_t *cmux_2, circuitmux_policy_data_t *pol_data_2);
+};
+
+/*
+ * Circuitmux policy implementations can subclass this to store circuitmux-
+ * wide data; it just has the magic number in the base struct.
+ */
+
+struct circuitmux_policy_data_s {
+ uint32_t magic;
+};
+
+/*
+ * Circuitmux policy implementations can subclass this to store circuit-
+ * specific data; it just has the magic number in the base struct.
+ */
+
+struct circuitmux_policy_circ_data_s {
+ uint32_t magic;
+};
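+
+/* Policy implementations subclass these by embedding the base struct as
+ * their first member and setting its magic number, e.g. (hypothetical
+ * policy, mirroring the pattern circuitmux_ewma.c uses):
+ *
+ *   struct my_policy_data_s {
+ *     circuitmux_policy_data_t base_;
+ *     ... policy-wide fields ...
+ *   };
+ *
+ * Downcasts can then check base_.magic before recovering the subtype. */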
+
+/*
+ * Upcast #defines for the above types
+ */
+
+/**
+ * Convert a circuitmux_policy_data_t subtype to a circuitmux_policy_data_t.
+ */
+
+#define TO_CMUX_POL_DATA(x) (&((x)->base_))
+
+/**
+ * Convert a circuitmux_policy_circ_data_t subtype to a
+ * circuitmux_policy_circ_data_t.
+ */
+
+#define TO_CMUX_POL_CIRC_DATA(x) (&((x)->base_))
+
+/* Consistency check */
+void circuitmux_assert_okay(circuitmux_t *cmux);
+
+/* Create/destroy */
+circuitmux_t * circuitmux_alloc(void);
+void circuitmux_detach_all_circuits(circuitmux_t *cmux,
+ smartlist_t *detached_out);
+void circuitmux_free_(circuitmux_t *cmux);
+#define circuitmux_free(cmux) \
+ FREE_AND_NULL(circuitmux_t, circuitmux_free_, (cmux))
+
+/* Policy control */
+void circuitmux_clear_policy(circuitmux_t *cmux);
+MOCK_DECL(const circuitmux_policy_t *,
+ circuitmux_get_policy, (circuitmux_t *cmux));
+void circuitmux_set_policy(circuitmux_t *cmux,
+ const circuitmux_policy_t *pol);
+
+/* Status inquiries */
+cell_direction_t circuitmux_attached_circuit_direction(
+ circuitmux_t *cmux,
+ circuit_t *circ);
+int circuitmux_is_circuit_attached(circuitmux_t *cmux, circuit_t *circ);
+int circuitmux_is_circuit_active(circuitmux_t *cmux, circuit_t *circ);
+unsigned int circuitmux_num_cells_for_circuit(circuitmux_t *cmux,
+ circuit_t *circ);
+MOCK_DECL(unsigned int, circuitmux_num_cells, (circuitmux_t *cmux));
+unsigned int circuitmux_num_circuits(circuitmux_t *cmux);
+unsigned int circuitmux_num_active_circuits(circuitmux_t *cmux);
+
+/* Debugging interface - slow. */
+int64_t circuitmux_count_queued_destroy_cells(const channel_t *chan,
+ const circuitmux_t *cmux);
+
+/* Channel interface */
+circuit_t * circuitmux_get_first_active_circuit(circuitmux_t *cmux,
+ destroy_cell_queue_t **destroy_queue_out);
+void circuitmux_notify_xmit_cells(circuitmux_t *cmux, circuit_t *circ,
+ unsigned int n_cells);
+void circuitmux_notify_xmit_destroy(circuitmux_t *cmux);
+
+/* Circuit interface */
+MOCK_DECL(void, circuitmux_attach_circuit, (circuitmux_t *cmux,
+ circuit_t *circ,
+ cell_direction_t direction));
+MOCK_DECL(void, circuitmux_detach_circuit,
+ (circuitmux_t *cmux, circuit_t *circ));
+void circuitmux_clear_num_cells(circuitmux_t *cmux, circuit_t *circ);
+void circuitmux_set_num_cells(circuitmux_t *cmux, circuit_t *circ,
+ unsigned int n_cells);
+
+void circuitmux_append_destroy_cell(channel_t *chan,
+ circuitmux_t *cmux, circid_t circ_id,
+ uint8_t reason);
+void circuitmux_mark_destroyed_circids_usable(circuitmux_t *cmux,
+ channel_t *chan);
+
+/* Optional interchannel comparisons for scheduling */
+MOCK_DECL(int, circuitmux_compare_muxes,
+ (circuitmux_t *cmux_1, circuitmux_t *cmux_2));
+
+#endif /* !defined(TOR_CIRCUITMUX_H) */
+
diff --git a/src/core/or/circuitmux_ewma.c b/src/core/or/circuitmux_ewma.c
new file mode 100644
index 0000000000..d600602a7e
--- /dev/null
+++ b/src/core/or/circuitmux_ewma.c
@@ -0,0 +1,829 @@
+/* * Copyright (c) 2012-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circuitmux_ewma.c
+ * \brief EWMA circuit selection as a circuitmux_t policy
+ *
+ * The "EWMA" in this module stands for the "exponentially weighted moving
+ * average" of the number of cells sent on each circuit. The goal is to
+ * prioritize cells on circuits that have been quiet recently, by looking at
+ * those that have sent few cells over time, prioritizing recent times
+ * more than older ones.
+ *
+ * Specifically, a cell sent at time "now" has weight 1, but a time X ticks
+ * before now has weight ewma_scale_factor ^ X , where ewma_scale_factor is
+ * between 0.0 and 1.0.
+ *
+ * For efficiency, we do not re-scale these averages every time we send a
+ * cell: that would be horribly inefficient. Instead, we keep the cell
+ * count on all circuits on the same circuitmux scaled relative to a single
+ * tick. When we add a new cell, we scale its weight depending on the time
+ * that has elapsed since the tick. We do re-scale the circuits on the
+ * circuitmux periodically, so that we don't overflow a double.
+ *
+ * This module should be used through the interfaces in circuitmux.c, which it
+ * implements.
+ *
+ **/
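+
+/* A worked example of the weighting (illustrative numbers only): with
+ * ewma_scale_factor = 0.5, a cell sent three ticks before now contributes
+ * 0.5^3 = 0.125 to the current count, while a cell sent this tick
+ * contributes 1.0; recent traffic therefore dominates the average. */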
+
+#define CIRCUITMUX_EWMA_PRIVATE
+
+#include "orconfig.h"
+
+#include <math.h>
+
+#include "or/or.h"
+#include "or/circuitmux.h"
+#include "or/circuitmux_ewma.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "or/networkstatus.h"
+#include "or/or_options_st.h"
+
+/*** EWMA parameter #defines ***/
+
+/** How long does a tick last (seconds)? */
+#define EWMA_TICK_LEN 10
+
+/** The default cell-count halflife, in seconds, if it hasn't been
+ * overridden by a consensus or a configuration setting. Zero means
+ * "disabled". */
+#define EWMA_DEFAULT_HALFLIFE 0.0
+
+/*** Some useful constant #defines ***/
+
+/** Any halflife smaller than this number of seconds is considered to be
+ * "disabled". */
+#define EPSILON 0.00001
+/** The natural logarithm of 0.5. */
+#define LOG_ONEHALF -0.69314718055994529
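+
+/* How a configured halflife H (in seconds) relates to the per-tick scale
+ * factor, as a sketch (the actual conversion happens elsewhere in this
+ * module when the options or the consensus change):
+ *
+ *   ticks_per_halflife = H / EWMA_TICK_LEN;
+ *   ewma_scale_factor = exp(LOG_ONEHALF / ticks_per_halflife);
+ *
+ * For example, H = 30 gives 3 ticks per halflife, so the scale factor is
+ * exp(-0.693/3) ~= 0.794 per tick. */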
+
+/*** EWMA structures ***/
+
+typedef struct cell_ewma_s cell_ewma_t;
+typedef struct ewma_policy_data_s ewma_policy_data_t;
+typedef struct ewma_policy_circ_data_s ewma_policy_circ_data_t;
+
+/**
+ * The cell_ewma_t structure keeps track of how many cells a circuit has
+ * transferred recently. It keeps an EWMA (exponentially weighted moving
+ * average) of the number of cells flushed from the circuit queue onto a
+ * connection in channel_flush_from_first_active_circuit().
+ */
+
+struct cell_ewma_s {
+ /** The last 'tick' at which we recalibrated cell_count.
+ *
+ * A cell sent at exactly the start of this tick has weight 1.0. Cells sent
+ * since the start of this tick have weight greater than 1.0; ones sent
+ * earlier have less weight. */
+ unsigned int last_adjusted_tick;
+ /** The EWMA of the cell count. */
+ double cell_count;
+ /** True iff this is the cell count for a circuit's previous
+ * channel. */
+ unsigned int is_for_p_chan : 1;
+ /** The position of the circuit within the OR connection's priority
+ * queue. */
+ int heap_index;
+};
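+
+/* Recalibrating a cell_ewma_t to a newer tick is, roughly, a single
+ * multiplication (see scale_single_cell_ewma(), declared below, for the
+ * real version):
+ *
+ *   ewma->cell_count *= pow(ewma_scale_factor,
+ *                           cur_tick - ewma->last_adjusted_tick);
+ *   ewma->last_adjusted_tick = cur_tick;
+ */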
+
+struct ewma_policy_data_s {
+ circuitmux_policy_data_t base_;
+
+ /**
+ * Priority queue of cell_ewma_t for circuits with queued cells waiting
+ * for room to free up on the channel that owns this circuitmux. Kept
+ * in heap order according to EWMA. This was formerly in channel_t, and
+ * in or_connection_t before that.
+ */
+ smartlist_t *active_circuit_pqueue;
+
+ /**
+ * The tick on which the cell_ewma_ts in active_circuit_pqueue last had
+ * their ewma values rescaled. This was formerly in channel_t, and in
+ * or_connection_t before that.
+ */
+ unsigned int active_circuit_pqueue_last_recalibrated;
+};
+
+struct ewma_policy_circ_data_s {
+ circuitmux_policy_circ_data_t base_;
+
+ /**
+ * The EWMA count for the number of cells flushed from this circuit
+ * onto this circuitmux. Used to determine which circuit to flush
+ * from next. This was formerly in circuit_t and or_circuit_t.
+ */
+ cell_ewma_t cell_ewma;
+
+ /**
+ * Pointer back to the circuit_t this is for; since we're separating
+ * out circuit selection policy like this, we can't attach cell_ewma_t
+ * to the circuit_t any more, so we can't use SUBTYPE_P directly to a
+ * circuit_t like before; instead get it here.
+ */
+ circuit_t *circ;
+};
+
+#define EWMA_POL_DATA_MAGIC 0x2fd8b16aU
+#define EWMA_POL_CIRC_DATA_MAGIC 0x761e7747U
+
+/*** Downcasts for the above types ***/
+
+static ewma_policy_data_t *
+TO_EWMA_POL_DATA(circuitmux_policy_data_t *);
+
+static ewma_policy_circ_data_t *
+TO_EWMA_POL_CIRC_DATA(circuitmux_policy_circ_data_t *);
+
+/**
+ * Downcast a circuitmux_policy_data_t to an ewma_policy_data_t and assert
+ * if the cast is impossible.
+ */
+
+static inline ewma_policy_data_t *
+TO_EWMA_POL_DATA(circuitmux_policy_data_t *pol)
+{
+ if (!pol) return NULL;
+ else {
+ tor_assert(pol->magic == EWMA_POL_DATA_MAGIC);
+ return DOWNCAST(ewma_policy_data_t, pol);
+ }
+}
+
+/**
+ * Downcast a circuitmux_policy_circ_data_t to an ewma_policy_circ_data_t
+ * and assert if the cast is impossible.
+ */
+
+static inline ewma_policy_circ_data_t *
+TO_EWMA_POL_CIRC_DATA(circuitmux_policy_circ_data_t *pol)
+{
+ if (!pol) return NULL;
+ else {
+ tor_assert(pol->magic == EWMA_POL_CIRC_DATA_MAGIC);
+ return DOWNCAST(ewma_policy_circ_data_t, pol);
+ }
+}
+
+/*** Static declarations for circuitmux_ewma.c ***/
+
+static void add_cell_ewma(ewma_policy_data_t *pol, cell_ewma_t *ewma);
+static int compare_cell_ewma_counts(const void *p1, const void *p2);
+static circuit_t * cell_ewma_to_circuit(cell_ewma_t *ewma);
+static inline double get_scale_factor(unsigned from_tick, unsigned to_tick);
+static cell_ewma_t * pop_first_cell_ewma(ewma_policy_data_t *pol);
+static void remove_cell_ewma(ewma_policy_data_t *pol, cell_ewma_t *ewma);
+static void scale_single_cell_ewma(cell_ewma_t *ewma, unsigned cur_tick);
+static void scale_active_circuits(ewma_policy_data_t *pol,
+ unsigned cur_tick);
+
+/*** Circuitmux policy methods ***/
+
+static circuitmux_policy_data_t * ewma_alloc_cmux_data(circuitmux_t *cmux);
+static void ewma_free_cmux_data(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data);
+static circuitmux_policy_circ_data_t *
+ewma_alloc_circ_data(circuitmux_t *cmux, circuitmux_policy_data_t *pol_data,
+ circuit_t *circ, cell_direction_t direction,
+ unsigned int cell_count);
+static void
+ewma_free_circ_data(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data,
+ circuit_t *circ,
+ circuitmux_policy_circ_data_t *pol_circ_data);
+static void
+ewma_notify_circ_active(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data,
+ circuit_t *circ,
+ circuitmux_policy_circ_data_t *pol_circ_data);
+static void
+ewma_notify_circ_inactive(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data,
+ circuit_t *circ,
+ circuitmux_policy_circ_data_t *pol_circ_data);
+static void
+ewma_notify_xmit_cells(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data,
+ circuit_t *circ,
+ circuitmux_policy_circ_data_t *pol_circ_data,
+ unsigned int n_cells);
+static circuit_t *
+ewma_pick_active_circuit(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data);
+static int
+ewma_cmp_cmux(circuitmux_t *cmux_1, circuitmux_policy_data_t *pol_data_1,
+ circuitmux_t *cmux_2, circuitmux_policy_data_t *pol_data_2);
+
+/*** EWMA global variables ***/
+
+/** The per-tick scale factor to be used when computing cell-count EWMA
+ * values. (A cell sent N ticks before the start of the current tick
+ * has value ewma_scale_factor ** N.)
+ */
+static double ewma_scale_factor = 0.1;
+
+/*** EWMA circuitmux_policy_t method table ***/
+
+circuitmux_policy_t ewma_policy = {
+ /*.alloc_cmux_data =*/ ewma_alloc_cmux_data,
+ /*.free_cmux_data =*/ ewma_free_cmux_data,
+ /*.alloc_circ_data =*/ ewma_alloc_circ_data,
+ /*.free_circ_data =*/ ewma_free_circ_data,
+ /*.notify_circ_active =*/ ewma_notify_circ_active,
+ /*.notify_circ_inactive =*/ ewma_notify_circ_inactive,
+ /*.notify_set_n_cells =*/ NULL, /* EWMA doesn't need this */
+ /*.notify_xmit_cells =*/ ewma_notify_xmit_cells,
+ /*.pick_active_circuit =*/ ewma_pick_active_circuit,
+ /*.cmp_cmux =*/ ewma_cmp_cmux
+};
+
+/** Have we initialized the ewma tick-counting logic? */
+static int ewma_ticks_initialized = 0;
+/** At what monotime_coarse_t did the current tick begin? */
+static monotime_coarse_t start_of_current_tick;
+/** What is the number of the current tick? */
+static unsigned current_tick_num;
+
+/*** EWMA method implementations using the below EWMA helper functions ***/
+
+/** Compute and return the current cell_ewma tick. */
+static inline unsigned int
+cell_ewma_get_tick(void)
+{
+ monotime_coarse_t now;
+ monotime_coarse_get(&now);
+ int32_t msec_diff = monotime_coarse_diff_msec32(&start_of_current_tick,
+ &now);
+ return current_tick_num + msec_diff / (1000*EWMA_TICK_LEN);
+}
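+
+/* Illustrative example (assumed numbers): with EWMA_TICK_LEN of 10 seconds,
+ * if current_tick_num is 42 and 23,500 msec have elapsed since
+ * start_of_current_tick, then msec_diff / (1000*EWMA_TICK_LEN) is 2 and this
+ * function returns tick 44. Note that it does not advance
+ * start_of_current_tick; that bookkeeping happens in
+ * cell_ewma_get_current_tick_and_fraction() below. */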
+
+/**
+ * Allocate an ewma_policy_data_t and upcast it to a circuitmux_policy_data_t;
+ * this is called when setting the policy on a circuitmux_t to ewma_policy.
+ */
+
+static circuitmux_policy_data_t *
+ewma_alloc_cmux_data(circuitmux_t *cmux)
+{
+ ewma_policy_data_t *pol = NULL;
+
+ tor_assert(cmux);
+
+ pol = tor_malloc_zero(sizeof(*pol));
+ pol->base_.magic = EWMA_POL_DATA_MAGIC;
+ pol->active_circuit_pqueue = smartlist_new();
+ pol->active_circuit_pqueue_last_recalibrated = cell_ewma_get_tick();
+
+ return TO_CMUX_POL_DATA(pol);
+}
+
+/**
+ * Free an ewma_policy_data_t allocated with ewma_alloc_cmux_data()
+ */
+
+static void
+ewma_free_cmux_data(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data)
+{
+ ewma_policy_data_t *pol = NULL;
+
+ tor_assert(cmux);
+ if (!pol_data) return;
+
+ pol = TO_EWMA_POL_DATA(pol_data);
+
+ smartlist_free(pol->active_circuit_pqueue);
+ tor_free(pol);
+}
+
+/**
+ * Allocate an ewma_policy_circ_data_t and upcast it to a
+ * circuitmux_policy_data_t; this is called when attaching a circuit to a
+ * circuitmux_t with ewma_policy.
+ */
+
+static circuitmux_policy_circ_data_t *
+ewma_alloc_circ_data(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data,
+ circuit_t *circ,
+ cell_direction_t direction,
+ unsigned int cell_count)
+{
+ ewma_policy_circ_data_t *cdata = NULL;
+
+ tor_assert(cmux);
+ tor_assert(pol_data);
+ tor_assert(circ);
+ tor_assert(direction == CELL_DIRECTION_OUT ||
+ direction == CELL_DIRECTION_IN);
+ /* Shut the compiler up without triggering -Wtautological-compare */
+ (void)cell_count;
+
+ cdata = tor_malloc_zero(sizeof(*cdata));
+ cdata->base_.magic = EWMA_POL_CIRC_DATA_MAGIC;
+ cdata->circ = circ;
+
+ /*
+ * Initialize the cell_ewma_t structure (formerly in
+ * init_circuit_base())
+ */
+ cdata->cell_ewma.last_adjusted_tick = cell_ewma_get_tick();
+ cdata->cell_ewma.cell_count = 0.0;
+ cdata->cell_ewma.heap_index = -1;
+ if (direction == CELL_DIRECTION_IN) {
+ cdata->cell_ewma.is_for_p_chan = 1;
+ } else {
+ cdata->cell_ewma.is_for_p_chan = 0;
+ }
+
+ return TO_CMUX_POL_CIRC_DATA(cdata);
+}
+
+/**
+ * Free an ewma_policy_circ_data_t allocated with ewma_alloc_circ_data()
+ */
+
+static void
+ewma_free_circ_data(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data,
+ circuit_t *circ,
+ circuitmux_policy_circ_data_t *pol_circ_data)
+
+{
+ ewma_policy_circ_data_t *cdata = NULL;
+
+ tor_assert(cmux);
+ tor_assert(circ);
+ tor_assert(pol_data);
+
+ if (!pol_circ_data) return;
+
+ cdata = TO_EWMA_POL_CIRC_DATA(pol_circ_data);
+
+ tor_free(cdata);
+}
+
+/**
+ * Handle circuit activation; this inserts the circuit's cell_ewma into
+ * the active_circuits_pqueue.
+ */
+
+static void
+ewma_notify_circ_active(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data,
+ circuit_t *circ,
+ circuitmux_policy_circ_data_t *pol_circ_data)
+{
+ ewma_policy_data_t *pol = NULL;
+ ewma_policy_circ_data_t *cdata = NULL;
+
+ tor_assert(cmux);
+ tor_assert(pol_data);
+ tor_assert(circ);
+ tor_assert(pol_circ_data);
+
+ pol = TO_EWMA_POL_DATA(pol_data);
+ cdata = TO_EWMA_POL_CIRC_DATA(pol_circ_data);
+
+ add_cell_ewma(pol, &(cdata->cell_ewma));
+}
+
+/**
+ * Handle circuit deactivation; this removes the circuit's cell_ewma from
+ * the active_circuits_pqueue.
+ */
+
+static void
+ewma_notify_circ_inactive(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data,
+ circuit_t *circ,
+ circuitmux_policy_circ_data_t *pol_circ_data)
+{
+ ewma_policy_data_t *pol = NULL;
+ ewma_policy_circ_data_t *cdata = NULL;
+
+ tor_assert(cmux);
+ tor_assert(pol_data);
+ tor_assert(circ);
+ tor_assert(pol_circ_data);
+
+ pol = TO_EWMA_POL_DATA(pol_data);
+ cdata = TO_EWMA_POL_CIRC_DATA(pol_circ_data);
+
+ remove_cell_ewma(pol, &(cdata->cell_ewma));
+}
+
+/**
+ * Update cell_ewma for this circuit after we've sent some cells, and
+ * remove/reinsert it in the queue. This used to be done (brokenly,
+ * see bug 6816) in channel_flush_from_first_active_circuit().
+ */
+
+static void
+ewma_notify_xmit_cells(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data,
+ circuit_t *circ,
+ circuitmux_policy_circ_data_t *pol_circ_data,
+ unsigned int n_cells)
+{
+ ewma_policy_data_t *pol = NULL;
+ ewma_policy_circ_data_t *cdata = NULL;
+ unsigned int tick;
+ double fractional_tick, ewma_increment;
+ cell_ewma_t *cell_ewma, *tmp;
+
+ tor_assert(cmux);
+ tor_assert(pol_data);
+ tor_assert(circ);
+ tor_assert(pol_circ_data);
+ tor_assert(n_cells > 0);
+
+ pol = TO_EWMA_POL_DATA(pol_data);
+ cdata = TO_EWMA_POL_CIRC_DATA(pol_circ_data);
+
+ /* Rescale the EWMAs if needed */
+ tick = cell_ewma_get_current_tick_and_fraction(&fractional_tick);
+
+ if (tick != pol->active_circuit_pqueue_last_recalibrated) {
+ scale_active_circuits(pol, tick);
+ }
+
+ /* How much do we adjust the cell count in cell_ewma by? */
+ ewma_increment =
+ ((double)(n_cells)) * pow(ewma_scale_factor, -fractional_tick);
+
+ /* Do the adjustment */
+ cell_ewma = &(cdata->cell_ewma);
+ cell_ewma->cell_count += ewma_increment;
+
+ /*
+ * Since we just sent on this circuit, it should be at the head of
+ * the queue. Pop the head, assert that it matches, then re-add.
+ */
+ tmp = pop_first_cell_ewma(pol);
+ tor_assert(tmp == cell_ewma);
+ add_cell_ewma(pol, cell_ewma);
+}
+
+/**
+ * Pick the preferred circuit to send from; this will be the one with
+ * the lowest EWMA value in the priority queue. This used to be done
+ * in channel_flush_from_first_active_circuit().
+ */
+
+static circuit_t *
+ewma_pick_active_circuit(circuitmux_t *cmux,
+ circuitmux_policy_data_t *pol_data)
+{
+ ewma_policy_data_t *pol = NULL;
+ circuit_t *circ = NULL;
+ cell_ewma_t *cell_ewma = NULL;
+
+ tor_assert(cmux);
+ tor_assert(pol_data);
+
+ pol = TO_EWMA_POL_DATA(pol_data);
+
+ if (smartlist_len(pol->active_circuit_pqueue) > 0) {
+ /* Get the head of the queue */
+ cell_ewma = smartlist_get(pol->active_circuit_pqueue, 0);
+ circ = cell_ewma_to_circuit(cell_ewma);
+ }
+
+ return circ;
+}
+
+/**
+ * Compare two EWMA cmuxes, and return -1, 0 or 1 to indicate which should
+ * be more preferred - see circuitmux_compare_muxes() of circuitmux.c.
+ */
+
+static int
+ewma_cmp_cmux(circuitmux_t *cmux_1, circuitmux_policy_data_t *pol_data_1,
+ circuitmux_t *cmux_2, circuitmux_policy_data_t *pol_data_2)
+{
+ ewma_policy_data_t *p1 = NULL, *p2 = NULL;
+ cell_ewma_t *ce1 = NULL, *ce2 = NULL;
+
+ tor_assert(cmux_1);
+ tor_assert(pol_data_1);
+ tor_assert(cmux_2);
+ tor_assert(pol_data_2);
+
+ p1 = TO_EWMA_POL_DATA(pol_data_1);
+ p2 = TO_EWMA_POL_DATA(pol_data_2);
+
+ if (p1 != p2) {
+ /* Get the head cell_ewma_t from each queue */
+ if (smartlist_len(p1->active_circuit_pqueue) > 0) {
+ ce1 = smartlist_get(p1->active_circuit_pqueue, 0);
+ }
+
+ if (smartlist_len(p2->active_circuit_pqueue) > 0) {
+ ce2 = smartlist_get(p2->active_circuit_pqueue, 0);
+ }
+
+ /* Got both of them? */
+ if (ce1 != NULL && ce2 != NULL) {
+ /* Pick whichever one has the better best circuit */
+ return compare_cell_ewma_counts(ce1, ce2);
+ } else {
+ if (ce1 != NULL ) {
+ /* We only have a circuit on cmux_1, so prefer it */
+ return -1;
+ } else if (ce2 != NULL) {
+ /* We only have a circuit on cmux_2, so prefer it */
+ return 1;
+ } else {
+ /* No circuits at all; no preference */
+ return 0;
+ }
+ }
+ } else {
+ /* We got identical params */
+ return 0;
+ }
+}
+
+/** Helper for sorting cell_ewma_t values in their priority queue. */
+static int
+compare_cell_ewma_counts(const void *p1, const void *p2)
+{
+ const cell_ewma_t *e1 = p1, *e2 = p2;
+
+ if (e1->cell_count < e2->cell_count)
+ return -1;
+ else if (e1->cell_count > e2->cell_count)
+ return 1;
+ else
+ return 0;
+}
+
+/** Given a cell_ewma_t, return a pointer to the circuit containing it. */
+static circuit_t *
+cell_ewma_to_circuit(cell_ewma_t *ewma)
+{
+ ewma_policy_circ_data_t *cdata = NULL;
+
+ tor_assert(ewma);
+ cdata = SUBTYPE_P(ewma, ewma_policy_circ_data_t, cell_ewma);
+ tor_assert(cdata);
+
+ return cdata->circ;
+}
+
+/* ==== Functions for scaling cell_ewma_t ====
+
+ When choosing which cells to relay first, we favor circuits that have been
+ quiet recently. This gives better latency on connections that aren't
+ pushing lots of data, and makes the network feel more interactive.
+
+   Conceptually, we take an exponentially weighted moving average of the
+   number of cells a circuit has sent, and allow active circuits (those with
+   cells to relay) to send cells in reverse order of their exponentially
+   weighted moving average (EWMA) cell count. [That is, a cell sent N seconds
+   ago 'counts' F^N times as much as a cell sent now, for 0<F<1.0, and we
+   favor the circuit that has sent the fewest cells.]
+
+ If 'double' had infinite precision, we could do this simply by counting a
+ cell sent at startup as having weight 1.0, and a cell sent N seconds later
+ as having weight F^-N. This way, we would never need to re-scale
+ any already-sent cells.
+
+ To prevent double from overflowing, we could count a cell sent now as
+ having weight 1.0 and a cell sent N seconds ago as having weight F^N.
+ This, however, would mean we'd need to re-scale *ALL* old circuits every
+ time we wanted to send a cell.
+
+ So as a compromise, we divide time into 'ticks' (currently, 10-second
+ increments) and say that a cell sent at the start of a current tick is
+ worth 1.0, a cell sent N seconds before the start of the current tick is
+ worth F^N, and a cell sent N seconds after the start of the current tick is
+ worth F^-N. This way we don't overflow, and we don't need to constantly
+ rescale.
+ */
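+
+/* Worked example (assuming the default 30-second halflife from
+ * CMUX_PRIORITY_HALFLIFE_MSEC_DEFAULT below): the per-tick scale factor is
+ * F = 0.5^(EWMA_TICK_LEN/30) = 0.5^(1/3) ~= 0.794. A cell sent at the start
+ * of the current tick has weight 1.0; one sent at the start of the previous
+ * tick has weight F ~= 0.794; one sent three ticks (one halflife) ago has
+ * weight F^3 = 0.5; and one sent 5 seconds *after* the start of the current
+ * tick has weight F^-0.5 ~= 1.122. */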
+
+/**
+ * Initialize the system that tells which ewma tick we are in.
+ */
+STATIC void
+cell_ewma_initialize_ticks(void)
+{
+ if (ewma_ticks_initialized)
+ return;
+ monotime_coarse_get(&start_of_current_tick);
+ crypto_rand((char*)&current_tick_num, sizeof(current_tick_num));
+ ewma_ticks_initialized = 1;
+}
+
+/** Compute the current cell_ewma tick and the fraction of the tick that has
+ * elapsed between the start of the tick and the current time. Return the
+ * former and store the latter in *<b>remainder_out</b>.
+ *
+ * These tick values are not meant to be shared between Tor instances, or used
+ * for other purposes. */
+STATIC unsigned
+cell_ewma_get_current_tick_and_fraction(double *remainder_out)
+{
+ if (BUG(!ewma_ticks_initialized)) {
+ cell_ewma_initialize_ticks(); // LCOV_EXCL_LINE
+ }
+ monotime_coarse_t now;
+ monotime_coarse_get(&now);
+ int32_t msec_diff = monotime_coarse_diff_msec32(&start_of_current_tick,
+ &now);
+ if (msec_diff > (1000*EWMA_TICK_LEN)) {
+ unsigned ticks_difference = msec_diff / (1000*EWMA_TICK_LEN);
+ monotime_coarse_add_msec(&start_of_current_tick,
+ &start_of_current_tick,
+ ticks_difference * 1000 * EWMA_TICK_LEN);
+ current_tick_num += ticks_difference;
+ msec_diff %= 1000*EWMA_TICK_LEN;
+ }
+ *remainder_out = ((double)msec_diff) / (1.0e3 * EWMA_TICK_LEN);
+ return current_tick_num;
+}
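+
+/* Worked example (assumed numbers): with 10-second ticks, suppose 23,500
+ * msec have passed since start_of_current_tick. Then ticks_difference is 2,
+ * start_of_current_tick advances by 20,000 msec, current_tick_num grows by
+ * 2, and the leftover 3,500 msec yields *remainder_out = 0.35. */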
+
+/* Default value for the CircuitPriorityHalflifeMsec consensus parameter in
+ * msec. */
+#define CMUX_PRIORITY_HALFLIFE_MSEC_DEFAULT 30000
+/* Minimum and maximum value for the CircuitPriorityHalflifeMsec consensus
+ * parameter. */
+#define CMUX_PRIORITY_HALFLIFE_MSEC_MIN 1
+#define CMUX_PRIORITY_HALFLIFE_MSEC_MAX INT32_MAX
+
+/* Return the value of the circuit priority halflife from the options if
+ * available or else from the consensus (in that order). If none can be found,
+ * a default value is returned.
+ *
+ * The source_msg points to a string describing from where the value was
+ * picked so it can be used for logging. */
+static double
+get_circuit_priority_halflife(const or_options_t *options,
+ const networkstatus_t *consensus,
+ const char **source_msg)
+{
+ int32_t halflife_ms;
+ double halflife;
+ /* Compute the default value now. We might need it. */
+ double halflife_default =
+ ((double) CMUX_PRIORITY_HALFLIFE_MSEC_DEFAULT) / 1000.0;
+
+ /* Try to get it from configuration file first. */
+ if (options && options->CircuitPriorityHalflife >= -EPSILON) {
+ halflife = options->CircuitPriorityHalflife;
+ *source_msg = "CircuitPriorityHalflife in configuration";
+ goto end;
+ }
+
+ /* Try to get the msec value from the consensus. */
+ halflife_ms = networkstatus_get_param(consensus,
+ "CircuitPriorityHalflifeMsec",
+ CMUX_PRIORITY_HALFLIFE_MSEC_DEFAULT,
+ CMUX_PRIORITY_HALFLIFE_MSEC_MIN,
+ CMUX_PRIORITY_HALFLIFE_MSEC_MAX);
+ halflife = ((double) halflife_ms) / 1000.0;
+ *source_msg = "CircuitPriorityHalflifeMsec in consensus";
+
+ end:
+  /* Never return a halflife below EPSILON: such a value would be treated as
+   * "disabled", and we can't have that here. */
+  if (halflife < EPSILON) {
+    log_warn(LD_CONFIG, "CircuitPriorityHalflife is too small (%f). "
+             "Adjusting to the default value: %f.",
+             halflife, halflife_default);
+ halflife = halflife_default;
+ }
+ return halflife;
+}
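+
+/* Usage sketch (hypothetical torrc line, not part of this file): putting
+ * "CircuitPriorityHalflife 30" in the configuration makes the option test
+ * above succeed, so we return 30.0 seconds and never consult the consensus.
+ * If the option is unset (assuming an unset option is negative, as the
+ * -EPSILON test implies), we fall through to the CircuitPriorityHalflifeMsec
+ * consensus parameter, so e.g. 30000 msec becomes a 30.0-second halflife. */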
+
+/** Adjust the global cell scale factor based on <b>options</b> */
+void
+cmux_ewma_set_options(const or_options_t *options,
+ const networkstatus_t *consensus)
+{
+ double halflife;
+ const char *source;
+
+ cell_ewma_initialize_ticks();
+
+  /* Both options and consensus can be NULL. Either way, we are guaranteed to
+   * get back either a valid configured value or the default one. */
+ halflife = get_circuit_priority_halflife(options, consensus, &source);
+
+ /* convert halflife into halflife-per-tick. */
+ halflife /= EWMA_TICK_LEN;
+ /* compute per-tick scale factor. */
+ ewma_scale_factor = exp( LOG_ONEHALF / halflife );
+ log_info(LD_OR,
+ "Enabled cell_ewma algorithm because of value in %s; "
+ "scale factor is %f per %d seconds",
+ source, ewma_scale_factor, EWMA_TICK_LEN);
+}
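+
+/* Worked example: with the default 30-second halflife, halflife/EWMA_TICK_LEN
+ * is 3 ticks, so ewma_scale_factor = exp(LOG_ONEHALF/3) ~= 0.794. After
+ * three ticks a cell's weight is 0.794^3 ~= 0.5, i.e. one halflife has
+ * elapsed. */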
+
+/** Return the multiplier necessary to convert the value of a cell sent in
+ * 'from_tick' to one sent in 'to_tick'. */
+static inline double
+get_scale_factor(unsigned from_tick, unsigned to_tick)
+{
+ /* This math can wrap around, but that's okay: unsigned overflow is
+ well-defined */
+ int diff = (int)(to_tick - from_tick);
+ return pow(ewma_scale_factor, diff);
+}
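+
+/* Example (assuming ewma_scale_factor ~= 0.794): converting a count from
+ * tick 5 to tick 7 gives diff = 2 and a factor of 0.794^2 ~= 0.63, shrinking
+ * the old value. Converting to an *earlier* tick gives a negative diff and a
+ * factor greater than 1. */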
+
+/** Adjust the cell count of <b>ewma</b> so that it is scaled with respect to
+ * <b>cur_tick</b> */
+static void
+scale_single_cell_ewma(cell_ewma_t *ewma, unsigned cur_tick)
+{
+ double factor = get_scale_factor(ewma->last_adjusted_tick, cur_tick);
+ ewma->cell_count *= factor;
+ ewma->last_adjusted_tick = cur_tick;
+}
+
+/** Adjust the cell count of every active circuit in <b>pol</b>'s priority
+ * queue so that they are scaled with respect to <b>cur_tick</b> */
+static void
+scale_active_circuits(ewma_policy_data_t *pol, unsigned cur_tick)
+{
+ double factor;
+
+ tor_assert(pol);
+ tor_assert(pol->active_circuit_pqueue);
+
+ factor =
+ get_scale_factor(
+ pol->active_circuit_pqueue_last_recalibrated,
+ cur_tick);
+  /* Ordinarily it isn't okay to change the value of an element in a heap,
+   * but it's okay here, since we are preserving the order. */
+ SMARTLIST_FOREACH_BEGIN(
+ pol->active_circuit_pqueue,
+ cell_ewma_t *, e) {
+ tor_assert(e->last_adjusted_tick ==
+ pol->active_circuit_pqueue_last_recalibrated);
+ e->cell_count *= factor;
+ e->last_adjusted_tick = cur_tick;
+ } SMARTLIST_FOREACH_END(e);
+ pol->active_circuit_pqueue_last_recalibrated = cur_tick;
+}
+
+/** Rescale <b>ewma</b> to the same scale as <b>pol</b>, and add it to
+ * <b>pol</b>'s priority queue of active circuits */
+static void
+add_cell_ewma(ewma_policy_data_t *pol, cell_ewma_t *ewma)
+{
+ tor_assert(pol);
+ tor_assert(pol->active_circuit_pqueue);
+ tor_assert(ewma);
+ tor_assert(ewma->heap_index == -1);
+
+ scale_single_cell_ewma(
+ ewma,
+ pol->active_circuit_pqueue_last_recalibrated);
+
+ smartlist_pqueue_add(pol->active_circuit_pqueue,
+ compare_cell_ewma_counts,
+ offsetof(cell_ewma_t, heap_index),
+ ewma);
+}
+
+/** Remove <b>ewma</b> from <b>pol</b>'s priority queue of active circuits */
+static void
+remove_cell_ewma(ewma_policy_data_t *pol, cell_ewma_t *ewma)
+{
+ tor_assert(pol);
+ tor_assert(pol->active_circuit_pqueue);
+ tor_assert(ewma);
+ tor_assert(ewma->heap_index != -1);
+
+ smartlist_pqueue_remove(pol->active_circuit_pqueue,
+ compare_cell_ewma_counts,
+ offsetof(cell_ewma_t, heap_index),
+ ewma);
+}
+
+/** Remove and return the first cell_ewma_t from pol's priority queue of
+ * active circuits. Requires that the priority queue is nonempty. */
+static cell_ewma_t *
+pop_first_cell_ewma(ewma_policy_data_t *pol)
+{
+ tor_assert(pol);
+ tor_assert(pol->active_circuit_pqueue);
+
+ return smartlist_pqueue_pop(pol->active_circuit_pqueue,
+ compare_cell_ewma_counts,
+ offsetof(cell_ewma_t, heap_index));
+}
+
+/**
+ * Drop all resources held by circuitmux_ewma.c, and deinitialize the
+ * module. */
+void
+circuitmux_ewma_free_all(void)
+{
+ ewma_ticks_initialized = 0;
+}
diff --git a/src/core/or/circuitmux_ewma.h b/src/core/or/circuitmux_ewma.h
new file mode 100644
index 0000000000..1214b0264b
--- /dev/null
+++ b/src/core/or/circuitmux_ewma.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2012-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circuitmux_ewma.h
+ * \brief Header file for circuitmux_ewma.c
+ **/
+
+#ifndef TOR_CIRCUITMUX_EWMA_H
+#define TOR_CIRCUITMUX_EWMA_H
+
+#include "or/or.h"
+#include "or/circuitmux.h"
+
+/* The public EWMA policy callbacks object. */
+extern circuitmux_policy_t ewma_policy;
+
+/* Externally visible EWMA functions */
+void cmux_ewma_set_options(const or_options_t *options,
+ const networkstatus_t *consensus);
+
+void circuitmux_ewma_free_all(void);
+
+#ifdef CIRCUITMUX_EWMA_PRIVATE
+STATIC unsigned cell_ewma_get_current_tick_and_fraction(double *remainder_out);
+STATIC void cell_ewma_initialize_ticks(void);
+#endif
+
+#endif /* !defined(TOR_CIRCUITMUX_EWMA_H) */
+
diff --git a/src/core/or/circuitstats.c b/src/core/or/circuitstats.c
new file mode 100644
index 0000000000..d275399e97
--- /dev/null
+++ b/src/core/or/circuitstats.c
@@ -0,0 +1,1952 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circuitstats.c
+ *
+ * \brief Maintains and analyzes statistics about circuit built times, so we
+ * can tell how long we may need to wait for a fast circuit to be constructed.
+ *
+ * By keeping these statistics, a client learns when it should time out a slow
+ * circuit for being too slow, and when it should keep a circuit open in order
+ * to wait for it to complete.
+ *
+ * The information here is kept in a circuit_build_times_t structure, which is
+ * currently a singleton, but doesn't need to be. It's updated by calls to
+ * circuit_build_times_count_timeout() from circuituse.c,
+ * circuit_build_times_count_close() from circuituse.c, and
+ * circuit_build_times_add_time() from circuitbuild.c, and inspected by other
+ * calls into this module, mostly from circuitlist.c. Observations are
+ * persisted to disk via the or_state_t-related calls.
+ */
+
+#define CIRCUITSTATS_PRIVATE
+
+#include "or/or.h"
+#include "or/circuitbuild.h"
+#include "or/circuitstats.h"
+#include "or/config.h"
+#include "or/confparse.h"
+#include "or/control.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "or/main.h"
+#include "or/networkstatus.h"
+#include "or/rendclient.h"
+#include "or/rendservice.h"
+#include "or/router.h"
+#include "or/statefile.h"
+#include "or/circuitlist.h"
+#include "or/circuituse.h"
+#include "lib/math/fp.h"
+#include "lib/time/tvdiff.h"
+#include "lib/encoding/confline.h"
+
+#include "or/crypt_path_st.h"
+#include "or/origin_circuit_st.h"
+#include "or/or_state_st.h"
+
+#undef log
+#include <math.h>
+
+static void cbt_control_event_buildtimeout_set(
+ const circuit_build_times_t *cbt,
+ buildtimeout_set_event_t type);
+static void circuit_build_times_scale_circ_counts(circuit_build_times_t *cbt);
+
+#define CBT_BIN_TO_MS(bin) ((bin)*CBT_BIN_WIDTH + (CBT_BIN_WIDTH/2))
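+
+/* CBT_BIN_TO_MS maps a bin index to the midpoint of its time range. For
+ * example, assuming a CBT_BIN_WIDTH of 50 msec (its actual value lives in
+ * circuitstats.h, which is not part of this diff), bin 0 covers 0-49 msec
+ * and maps to 25 msec, and bin 10 maps to 525 msec. */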
+
+/** Global list of circuit build times */
+// XXXX: Add this as a member for entry_guard_t instead of global?
+// Then we could do per-guard statistics, as guards are likely to
+// vary in their own latency. The downside of this is that guards
+// can change frequently, so we'd be building a lot more circuits
+// most likely.
+static circuit_build_times_t circ_times;
+
+#ifdef TOR_UNIT_TESTS
+/** If set, we're running the unit tests: we should avoid clobbering
+ * our state file or accessing get_options() or get_or_state() */
+static int unit_tests = 0;
+#else
+#define unit_tests 0
+#endif /* defined(TOR_UNIT_TESTS) */
+
+/** Return a pointer to the data structure describing our current circuit
+ * build time history and computations. */
+const circuit_build_times_t *
+get_circuit_build_times(void)
+{
+ return &circ_times;
+}
+
+/** As get_circuit_build_times, but return a mutable pointer. */
+circuit_build_times_t *
+get_circuit_build_times_mutable(void)
+{
+ return &circ_times;
+}
+
+/** Return the time to wait before actually closing an under-construction
+ * circuit, in milliseconds. */
+double
+get_circuit_build_close_time_ms(void)
+{
+ return circ_times.close_ms;
+}
+
+/** Return the time to wait before giving up on an under-construction circuit,
+ * in milliseconds. */
+double
+get_circuit_build_timeout_ms(void)
+{
+ return circ_times.timeout_ms;
+}
+
+/**
+ * This function decides if CBT learning should be disabled. It returns
+ * true if one or more of the following conditions are met:
+ *
+ * 1. If the cbtdisabled consensus parameter is set.
+ * 2. If the torrc option LearnCircuitBuildTimeout is false.
+ * 3. If we are a directory authority.
+ * 4. If we fail to write circuit build time history to our state file.
+ * 5. If we are compiled or configured in Tor2web mode.
+ * 6. If we are configured in Single Onion mode.
+ */
+int
+circuit_build_times_disabled(const or_options_t *options)
+{
+ return circuit_build_times_disabled_(options, 0);
+}
+
+/** As circuit_build_times_disabled, but allow the caller to ignore the
+ * cbtdisabled consensus parameter via <b>ignore_consensus</b>. */
+int
+circuit_build_times_disabled_(const or_options_t *options,
+ int ignore_consensus)
+{
+ if (unit_tests) {
+ return 0;
+ } else {
+ int consensus_disabled =
+ ignore_consensus ? 0 : networkstatus_get_param(NULL, "cbtdisabled",
+ 0, 0, 1);
+ int config_disabled = !options->LearnCircuitBuildTimeout;
+ int dirauth_disabled = authdir_mode(options);
+ int state_disabled = did_last_state_file_write_fail() ? 1 : 0;
+ /* LearnCircuitBuildTimeout and Tor2web/Single Onion Services are
+ * incompatible in two ways:
+ *
+ * - LearnCircuitBuildTimeout results in a low CBT, which
+ * Single Onion use of one-hop intro and rendezvous circuits lowers
+ * much further, producing *far* too many timeouts.
+ *
+ * - The adaptive CBT code does not update its timeout estimate
+ * using build times for single-hop circuits.
+ *
+ * If we fix both of these issues someday, we should test
+ * these modes with LearnCircuitBuildTimeout on again. */
+ int tor2web_disabled = rend_client_allow_non_anonymous_connection(options);
+ int single_onion_disabled = rend_service_allow_non_anonymous_connection(
+ options);
+
+ if (consensus_disabled || config_disabled || dirauth_disabled ||
+ state_disabled || tor2web_disabled || single_onion_disabled) {
+#if 0
+ log_debug(LD_CIRC,
+ "CircuitBuildTime learning is disabled. "
+ "Consensus=%d, Config=%d, AuthDir=%d, StateFile=%d",
+ consensus_disabled, config_disabled, dirauth_disabled,
+ state_disabled);
+#endif /* 0 */
+ return 1;
+ } else {
+#if 0
+ log_debug(LD_CIRC,
+ "CircuitBuildTime learning is not disabled. "
+ "Consensus=%d, Config=%d, AuthDir=%d, StateFile=%d",
+ consensus_disabled, config_disabled, dirauth_disabled,
+ state_disabled);
+#endif /* 0 */
+ return 0;
+ }
+ }
+}
+
+/**
+ * Retrieve and bounds-check the cbtmaxtimeouts consensus parameter.
+ *
+ * Effect: When this many timeouts happen in the last 'cbtrecentcount'
+ * circuit attempts, the client should discard all of its history and
+ * begin learning a fresh timeout value.
+ */
+static int32_t
+circuit_build_times_max_timeouts(void)
+{
+ int32_t cbt_maxtimeouts;
+
+ cbt_maxtimeouts = networkstatus_get_param(NULL, "cbtmaxtimeouts",
+ CBT_DEFAULT_MAX_RECENT_TIMEOUT_COUNT,
+ CBT_MIN_MAX_RECENT_TIMEOUT_COUNT,
+ CBT_MAX_MAX_RECENT_TIMEOUT_COUNT);
+
+ if (!(get_options()->LearnCircuitBuildTimeout)) {
+ log_debug(LD_BUG,
+ "circuit_build_times_max_timeouts() called, cbtmaxtimeouts is"
+ " %d",
+ cbt_maxtimeouts);
+ }
+
+ return cbt_maxtimeouts;
+}
+
+/**
+ * Retrieve and bounds-check the cbtnummodes consensus parameter.
+ *
+ * Effect: This value governs how many modes to use in the weighted
+ * average calculation of Pareto parameter Xm. A value of 3 introduces
+ * some bias (2-5% of CDF) under ideal conditions, but allows for better
+ * performance in the event that a client chooses guard nodes of radically
+ * different performance characteristics.
+ */
+static int32_t
+circuit_build_times_default_num_xm_modes(void)
+{
+ int32_t num = networkstatus_get_param(NULL, "cbtnummodes",
+ CBT_DEFAULT_NUM_XM_MODES,
+ CBT_MIN_NUM_XM_MODES,
+ CBT_MAX_NUM_XM_MODES);
+
+ if (!(get_options()->LearnCircuitBuildTimeout)) {
+ log_debug(LD_BUG,
+ "circuit_build_times_default_num_xm_modes() called, cbtnummodes"
+ " is %d",
+ num);
+ }
+
+ return num;
+}
+
+/**
+ * Retrieve and bounds-check the cbtmincircs consensus parameter.
+ *
+ * Effect: This is the minimum number of circuits to build before
+ * computing a timeout.
+ */
+static int32_t
+circuit_build_times_min_circs_to_observe(void)
+{
+ int32_t num = networkstatus_get_param(NULL, "cbtmincircs",
+ CBT_DEFAULT_MIN_CIRCUITS_TO_OBSERVE,
+ CBT_MIN_MIN_CIRCUITS_TO_OBSERVE,
+ CBT_MAX_MIN_CIRCUITS_TO_OBSERVE);
+
+ if (!(get_options()->LearnCircuitBuildTimeout)) {
+ log_debug(LD_BUG,
+ "circuit_build_times_min_circs_to_observe() called, cbtmincircs"
+ " is %d",
+ num);
+ }
+
+ return num;
+}
+
+/** Return true iff <b>cbt</b> has recorded enough build times that we
+ * want to start acting on the timeout it implies. */
+int
+circuit_build_times_enough_to_compute(const circuit_build_times_t *cbt)
+{
+ return cbt->total_build_times >= circuit_build_times_min_circs_to_observe();
+}
+
+/**
+ * Retrieve and bounds-check the cbtquantile consensus parameter.
+ *
+ * Effect: This is the position on the quantile curve to use to set the
+ * timeout value. It is a percent (10-99).
+ */
+double
+circuit_build_times_quantile_cutoff(void)
+{
+ int32_t num = networkstatus_get_param(NULL, "cbtquantile",
+ CBT_DEFAULT_QUANTILE_CUTOFF,
+ CBT_MIN_QUANTILE_CUTOFF,
+ CBT_MAX_QUANTILE_CUTOFF);
+
+ if (!(get_options()->LearnCircuitBuildTimeout)) {
+ log_debug(LD_BUG,
+ "circuit_build_times_quantile_cutoff() called, cbtquantile"
+ " is %d",
+ num);
+ }
+
+ return num/100.0;
+}
+
+/**
+ * Retrieve and bounds-check the cbtclosequantile consensus parameter.
+ *
+ * Effect: This is the position on the quantile curve to use to set the
+ * timeout value to use to actually close circuits. It is a percent
+ * (0-99).
+ */
+static double
+circuit_build_times_close_quantile(void)
+{
+ int32_t param;
+ /* Cast is safe - circuit_build_times_quantile_cutoff() is capped */
+ int32_t min = (int)tor_lround(100*circuit_build_times_quantile_cutoff());
+ param = networkstatus_get_param(NULL, "cbtclosequantile",
+ CBT_DEFAULT_CLOSE_QUANTILE,
+ CBT_MIN_CLOSE_QUANTILE,
+ CBT_MAX_CLOSE_QUANTILE);
+
+ if (!(get_options()->LearnCircuitBuildTimeout)) {
+ log_debug(LD_BUG,
+ "circuit_build_times_close_quantile() called, cbtclosequantile"
+ " is %d", param);
+ }
+
+ if (param < min) {
+ log_warn(LD_DIR, "Consensus parameter cbtclosequantile is "
+ "too small, raising to %d", min);
+ param = min;
+ }
+ return param / 100.0;
+}
+
+/**
+ * Retrieve and bounds-check the cbttestfreq consensus parameter.
+ *
+ * Effect: Describes how often, in seconds, to build a test circuit to
+ * gather timeout values. Only applies if fewer than 'cbtmincircs'
+ * circuits have been recorded.
+ */
+static int32_t
+circuit_build_times_test_frequency(void)
+{
+ int32_t num = networkstatus_get_param(NULL, "cbttestfreq",
+ CBT_DEFAULT_TEST_FREQUENCY,
+ CBT_MIN_TEST_FREQUENCY,
+ CBT_MAX_TEST_FREQUENCY);
+
+ if (!(get_options()->LearnCircuitBuildTimeout)) {
+ log_debug(LD_BUG,
+ "circuit_build_times_test_frequency() called, cbttestfreq is %d",
+ num);
+ }
+
+ return num;
+}
+
+/**
+ * Retrieve and bounds-check the cbtmintimeout consensus parameter.
+ *
+ * Effect: This is the minimum allowed timeout value in milliseconds.
+ * The minimum is to prevent rounding to 0 (we only check once
+ * per second).
+ */
+static int32_t
+circuit_build_times_min_timeout(void)
+{
+ int32_t num = networkstatus_get_param(NULL, "cbtmintimeout",
+ CBT_DEFAULT_TIMEOUT_MIN_VALUE,
+ CBT_MIN_TIMEOUT_MIN_VALUE,
+ CBT_MAX_TIMEOUT_MIN_VALUE);
+
+ if (!(get_options()->LearnCircuitBuildTimeout)) {
+ log_debug(LD_BUG,
+ "circuit_build_times_min_timeout() called, cbtmintimeout is %d",
+ num);
+ }
+ return num;
+}
+
+/**
+ * Retrieve and bounds-check the cbtinitialtimeout consensus parameter.
+ *
+ * Effect: This is the timeout value to use before computing a timeout,
+ * in milliseconds.
+ */
+int32_t
+circuit_build_times_initial_timeout(void)
+{
+ int32_t min = circuit_build_times_min_timeout();
+ int32_t param = networkstatus_get_param(NULL, "cbtinitialtimeout",
+ CBT_DEFAULT_TIMEOUT_INITIAL_VALUE,
+ CBT_MIN_TIMEOUT_INITIAL_VALUE,
+ CBT_MAX_TIMEOUT_INITIAL_VALUE);
+
+ if (!(get_options()->LearnCircuitBuildTimeout)) {
+ log_debug(LD_BUG,
+ "circuit_build_times_initial_timeout() called, "
+ "cbtinitialtimeout is %d",
+ param);
+ }
+
+ if (param < min) {
+ log_warn(LD_DIR, "Consensus parameter cbtinitialtimeout is too small, "
+ "raising to %d", min);
+ param = min;
+ }
+ return param;
+}
+
+/**
+ * Retrieve and bounds-check the cbtrecentcount consensus parameter.
+ *
+ * Effect: This is the number of circuit build times to keep track of
+ * for deciding if we hit cbtmaxtimeouts and need to reset our state
+ * and learn a new timeout.
+ */
+static int32_t
+circuit_build_times_recent_circuit_count(networkstatus_t *ns)
+{
+ int32_t num;
+ num = networkstatus_get_param(ns, "cbtrecentcount",
+ CBT_DEFAULT_RECENT_CIRCUITS,
+ CBT_MIN_RECENT_CIRCUITS,
+ CBT_MAX_RECENT_CIRCUITS);
+
+ if (!(get_options()->LearnCircuitBuildTimeout)) {
+ log_debug(LD_BUG,
+ "circuit_build_times_recent_circuit_count() called, "
+ "cbtrecentcount is %d",
+ num);
+ }
+
+ return num;
+}
+
+/**
+ * This function is called when we get a consensus update.
+ *
+ * It checks to see if we have changed any consensus parameters
+ * that require reallocation or discard of previous stats.
+ */
+void
+circuit_build_times_new_consensus_params(circuit_build_times_t *cbt,
+ networkstatus_t *ns)
+{
+ int32_t num;
+
+ /*
+ * First check if we're doing adaptive timeouts at all; nothing to
+ * update if we aren't.
+ */
+
+ if (!circuit_build_times_disabled(get_options())) {
+ num = circuit_build_times_recent_circuit_count(ns);
+
+ if (num > 0) {
+ if (num != cbt->liveness.num_recent_circs) {
+ int8_t *recent_circs;
+ if (cbt->liveness.num_recent_circs > 0) {
+ log_notice(LD_CIRC, "The Tor Directory Consensus has changed how "
+ "many circuits we must track to detect network failures "
+ "from %d to %d.", cbt->liveness.num_recent_circs, num);
+ } else {
+ log_notice(LD_CIRC, "Upon receiving a consensus directory, "
+ "re-enabling circuit-based network failure detection.");
+ }
+
+ tor_assert(cbt->liveness.timeouts_after_firsthop ||
+ cbt->liveness.num_recent_circs == 0);
+
+ /*
+ * Technically this is a circular array that we are reallocating
+ * and memcopying. However, since it only consists of either 1s
+ * or 0s, and is only used in a statistical test to determine when
+ * we should discard our history after a sufficient number of 1's
+ * have been reached, it is fine if order is not preserved or
+ * elements are lost.
+ *
+ * cbtrecentcount should only be changing in cases of severe network
+ * distress anyway, so memory correctness here is paramount over
+ * doing acrobatics to preserve the array.
+ */
+ recent_circs = tor_calloc(num, sizeof(int8_t));
+ if (cbt->liveness.timeouts_after_firsthop &&
+ cbt->liveness.num_recent_circs > 0) {
+ memcpy(recent_circs, cbt->liveness.timeouts_after_firsthop,
+ sizeof(int8_t)*MIN(num, cbt->liveness.num_recent_circs));
+ }
+
+ // Adjust the index if it needs it.
+ if (num < cbt->liveness.num_recent_circs) {
+ cbt->liveness.after_firsthop_idx = MIN(num-1,
+ cbt->liveness.after_firsthop_idx);
+ }
+
+ tor_free(cbt->liveness.timeouts_after_firsthop);
+ cbt->liveness.timeouts_after_firsthop = recent_circs;
+ cbt->liveness.num_recent_circs = num;
+ }
+ /* else no change, nothing to do */
+ } else { /* num == 0 */
+ /*
+ * Weird. This probably shouldn't happen, so log a warning, but try
+ * to do something sensible anyway.
+ */
+
+ log_warn(LD_CIRC,
+ "The cbtrecentcircs consensus parameter came back zero! "
+ "This disables adaptive timeouts since we can't keep track of "
+ "any recent circuits.");
+
+ circuit_build_times_free_timeouts(cbt);
+ }
+ } else {
+ /*
+ * Adaptive timeouts are disabled; this might be because of the
+     * LearnCircuitBuildTimeout config parameter, and hence permanent, or
+ * the cbtdisabled consensus parameter, so it may be a new condition.
+ * Treat it like getting num == 0 above and free the circuit history
+ * if we have any.
+ */
+
+ circuit_build_times_free_timeouts(cbt);
+ }
+}
+
+/**
+ * Return the initial default or configured timeout in milliseconds
+ */
+static double
+circuit_build_times_get_initial_timeout(void)
+{
+ double timeout;
+ const or_options_t *options = get_options();
+
+  /*
+   * If CircuitBuildTimeout is set in the configuration, always start from
+   * it, no questions asked; otherwise fall back to the initial timeout from
+   * the consensus or its default.
+   */
+ if (!unit_tests && options->CircuitBuildTimeout) {
+ timeout = options->CircuitBuildTimeout*1000;
+ if (!circuit_build_times_disabled(options) &&
+ timeout < circuit_build_times_min_timeout()) {
+ log_warn(LD_CIRC, "Config CircuitBuildTimeout too low. Setting to %ds",
+ circuit_build_times_min_timeout()/1000);
+ timeout = circuit_build_times_min_timeout();
+ }
+ } else {
+ timeout = circuit_build_times_initial_timeout();
+ }
+
+ return timeout;
+}
+
+/**
+ * Reset the build time state.
+ *
+ * Leave estimated parameters, timeout and network liveness intact
+ * for future use.
+ */
+STATIC void
+circuit_build_times_reset(circuit_build_times_t *cbt)
+{
+ memset(cbt->circuit_build_times, 0, sizeof(cbt->circuit_build_times));
+ cbt->total_build_times = 0;
+ cbt->build_times_idx = 0;
+ cbt->have_computed_timeout = 0;
+
+ // Reset timeout and close counts
+ cbt->num_circ_succeeded = 0;
+ cbt->num_circ_closed = 0;
+ cbt->num_circ_timeouts = 0;
+}
+
+/**
+ * Initialize the buildtimes structure for first use.
+ *
+ * Sets the initial timeout values based on either the config setting,
+ * the consensus param, or the default (CBT_DEFAULT_TIMEOUT_INITIAL_VALUE).
+ */
+void
+circuit_build_times_init(circuit_build_times_t *cbt)
+{
+ memset(cbt, 0, sizeof(*cbt));
+ /*
+ * Check if we really are using adaptive timeouts, and don't keep
+ * track of this stuff if not.
+ */
+ if (!circuit_build_times_disabled(get_options())) {
+ cbt->liveness.num_recent_circs =
+ circuit_build_times_recent_circuit_count(NULL);
+ cbt->liveness.timeouts_after_firsthop =
+ tor_calloc(cbt->liveness.num_recent_circs, sizeof(int8_t));
+ } else {
+ cbt->liveness.num_recent_circs = 0;
+ cbt->liveness.timeouts_after_firsthop = NULL;
+ }
+ cbt->close_ms = cbt->timeout_ms = circuit_build_times_get_initial_timeout();
+ cbt_control_event_buildtimeout_set(cbt, BUILDTIMEOUT_SET_EVENT_RESET);
+}
+
+/**
+ * Free the saved timeouts, e.g. because the cbtdisabled consensus parameter
+ * got turned on.
+ */
+
+void
+circuit_build_times_free_timeouts(circuit_build_times_t *cbt)
+{
+ if (!cbt) return;
+
+ if (cbt->liveness.timeouts_after_firsthop) {
+ tor_free(cbt->liveness.timeouts_after_firsthop);
+ }
+
+ cbt->liveness.num_recent_circs = 0;
+}
+
+#if 0
+/**
+ * Rewind our build time history by n positions.
+ */
+static void
+circuit_build_times_rewind_history(circuit_build_times_t *cbt, int n)
+{
+ int i = 0;
+
+ cbt->build_times_idx -= n;
+ cbt->build_times_idx %= CBT_NCIRCUITS_TO_OBSERVE;
+
+ for (i = 0; i < n; i++) {
+ cbt->circuit_build_times[(i+cbt->build_times_idx)
+ %CBT_NCIRCUITS_TO_OBSERVE]=0;
+ }
+
+ if (cbt->total_build_times > n) {
+ cbt->total_build_times -= n;
+ } else {
+ cbt->total_build_times = 0;
+ }
+
+ log_info(LD_CIRC,
+ "Rewound history by %d places. Current index: %d. "
+ "Total: %d", n, cbt->build_times_idx, cbt->total_build_times);
+}
+#endif /* 0 */
+
+/**
+ * Mark this circuit as timed out, but change its purpose
+ * so that it continues to build, allowing us to measure
+ * its full build time.
+ */
+void
+circuit_build_times_mark_circ_as_measurement_only(origin_circuit_t *circ)
+{
+ control_event_circuit_status(circ,
+ CIRC_EVENT_FAILED,
+ END_CIRC_REASON_TIMEOUT);
+ circuit_change_purpose(TO_CIRCUIT(circ),
+ CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT);
+ /* Record this event to check for too many timeouts
+ * in a row. This function does not record a time value yet
+ * (we do that later); it only counts the fact that we did
+ * have a timeout. We also want to avoid double-counting
+ * already "relaxed" circuits, which are counted in
+ * circuit_expire_building(). */
+ if (!circ->relaxed_timeout) {
+ int first_hop_succeeded = circ->cpath &&
+ circ->cpath->state == CPATH_STATE_OPEN;
+
+ circuit_build_times_count_timeout(
+ get_circuit_build_times_mutable(),
+ first_hop_succeeded);
+ }
+}
+
+/**
+ * Perform the build time work that needs to be done when a circuit
+ * completes a hop.
+ *
+ * This function decides if we should record a circuit's build time
+ * in our histogram data and other statistics, and if so, records it.
+ * It also will mark circuits that have already timed out as
+ * measurement-only circuits, so they can continue to build but
+ * not get used.
+ *
+ * For this, we want to consider circuits that will eventually make
+ * it to the third hop. For circuits longer than 3 hops, we want to
+ * record their build time when they reach the third hop, but let
+ * them continue (and not count them later). For circuits that are
+ * exactly 3 hops, this will count them when they are completed. We
+ * do this so that CBT is always gathering statistics on circuits
+ * of the same length, regardless of their type.
+ */
+void
+circuit_build_times_handle_completed_hop(origin_circuit_t *circ)
+{
+ struct timeval end;
+ long timediff;
+
+ /* If circuit build times are disabled, let circuit_expire_building()
+   * handle it. */
+ if (circuit_build_times_disabled(get_options())) {
+ return;
+ }
+
+  /* Is this a circuit for which the timeout applies in a straightforward
+ * way? If so, handle it below. If not, just return (and let
+ * circuit_expire_building() eventually take care of it).
+ */
+ if (!circuit_timeout_want_to_count_circ(circ)) {
+ return;
+ }
+
+ tor_gettimeofday(&end);
+ timediff = tv_mdiff(&circ->base_.timestamp_began, &end);
+
+ /* Check if we would have timed out already. If so, change the
+ * purpose here. But don't do any timeout handling here if there
+ * are no circuits opened yet. Save it for circuit_expire_building()
+ * (to allow it to handle timeout "relaxing" over there). */
+ if (timediff > get_circuit_build_timeout_ms() &&
+ circuit_any_opened_circuits_cached()) {
+
+ /* Circuits are allowed to last longer for measurement.
+ * Switch their purpose and wait. */
+ if (circ->base_.purpose != CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT) {
+ log_info(LD_CIRC,
+ "Deciding to timeout circuit %"PRIu32"\n",
+ (circ->global_identifier));
+ circuit_build_times_mark_circ_as_measurement_only(circ);
+ }
+ }
+
+ /* If the circuit is built to exactly the DEFAULT_ROUTE_LEN,
+ * add it to our buildtimes. */
+ if (circuit_get_cpath_opened_len(circ) == DEFAULT_ROUTE_LEN) {
+ /* If the circuit build time is much greater than we would have cut
+ * it off at, we probably had a suspend event along this codepath,
+ * and we should discard the value.
+ */
+ if (timediff < 0 ||
+ timediff > 2*get_circuit_build_close_time_ms()+1000) {
+ log_notice(LD_CIRC, "Strange value for circuit build time: %ldmsec. "
+ "Assuming clock jump. Purpose %d (%s)", timediff,
+ circ->base_.purpose,
+ circuit_purpose_to_string(circ->base_.purpose));
+ } else {
+ /* Only count circuit times if the network is live */
+ if (circuit_build_times_network_check_live(
+ get_circuit_build_times())) {
+ circuit_build_times_add_time(get_circuit_build_times_mutable(),
+ (build_time_t)timediff);
+ circuit_build_times_set_timeout(get_circuit_build_times_mutable());
+ }
+
+ if (circ->base_.purpose != CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT) {
+ circuit_build_times_network_circ_success(
+ get_circuit_build_times_mutable());
+ }
+ }
+ }
+}
+
+/**
+ * Add a new build time value <b>btime</b> to the set of build times. Time
+ * units are milliseconds.
+ *
+ * The build times in <b>cbt</b> are stored in a circular array, so we loop
+ * around to the beginning when the array is full.
+ */
+int
+circuit_build_times_add_time(circuit_build_times_t *cbt, build_time_t btime)
+{
+ if (btime <= 0 || btime > CBT_BUILD_TIME_MAX) {
+ log_warn(LD_BUG, "Circuit build time is too large (%u)."
+ "This is probably a bug.", btime);
+ tor_fragile_assert();
+ return -1;
+ }
+
+ log_debug(LD_CIRC, "Adding circuit build time %u", btime);
+
+ cbt->circuit_build_times[cbt->build_times_idx] = btime;
+ cbt->build_times_idx = (cbt->build_times_idx + 1) % CBT_NCIRCUITS_TO_OBSERVE;
+ if (cbt->total_build_times < CBT_NCIRCUITS_TO_OBSERVE)
+ cbt->total_build_times++;
+
+ if ((cbt->total_build_times % CBT_SAVE_STATE_EVERY) == 0) {
+ /* Save state every n circuit builds */
+ if (!unit_tests && !get_options()->AvoidDiskWrites)
+ or_state_mark_dirty(get_or_state(), 0);
+ }
+
+ return 0;
+}
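+
+/* Example of the circular behavior: once total_build_times reaches
+ * CBT_NCIRCUITS_TO_OBSERVE, build_times_idx wraps back to 0 and each new
+ * observation overwrites the oldest one, so the array always holds the most
+ * recent CBT_NCIRCUITS_TO_OBSERVE times. */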
+
+/**
+ * Return maximum circuit build time
+ */
+static build_time_t
+circuit_build_times_max(const circuit_build_times_t *cbt)
+{
+ int i = 0;
+ build_time_t max_build_time = 0;
+ for (i = 0; i < CBT_NCIRCUITS_TO_OBSERVE; i++) {
+ if (cbt->circuit_build_times[i] > max_build_time
+ && cbt->circuit_build_times[i] != CBT_BUILD_ABANDONED)
+ max_build_time = cbt->circuit_build_times[i];
+ }
+ return max_build_time;
+}
+
+#if 0
+/** Return minimum circuit build time */
+build_time_t
+circuit_build_times_min(circuit_build_times_t *cbt)
+{
+ int i = 0;
+ build_time_t min_build_time = CBT_BUILD_TIME_MAX;
+ for (i = 0; i < CBT_NCIRCUITS_TO_OBSERVE; i++) {
+ if (cbt->circuit_build_times[i] && /* 0 <-> uninitialized */
+ cbt->circuit_build_times[i] < min_build_time)
+ min_build_time = cbt->circuit_build_times[i];
+ }
+ if (min_build_time == CBT_BUILD_TIME_MAX) {
+ log_warn(LD_CIRC, "No build times less than CBT_BUILD_TIME_MAX!");
+ }
+ return min_build_time;
+}
+#endif /* 0 */
+
+/**
+ * Calculate and return a histogram for the set of build times.
+ *
+ * Returns an allocated array of histogram bins representing
+ * the frequency of index*CBT_BIN_WIDTH millisecond
+ * build times. Also outputs the number of bins in nbins.
+ *
+ * The return value must be freed by the caller.
+ */
+static uint32_t *
+circuit_build_times_create_histogram(const circuit_build_times_t *cbt,
+ build_time_t *nbins)
+{
+ uint32_t *histogram;
+ build_time_t max_build_time = circuit_build_times_max(cbt);
+ int i, c;
+
+ *nbins = 1 + (max_build_time / CBT_BIN_WIDTH);
+ histogram = tor_calloc(*nbins, sizeof(build_time_t));
+
+ // calculate histogram
+ for (i = 0; i < CBT_NCIRCUITS_TO_OBSERVE; i++) {
+ if (cbt->circuit_build_times[i] == 0
+ || cbt->circuit_build_times[i] == CBT_BUILD_ABANDONED)
+ continue; /* 0 <-> uninitialized */
+
+ c = (cbt->circuit_build_times[i] / CBT_BIN_WIDTH);
+ histogram[c]++;
+ }
+
+ return histogram;
+}
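+
+/* Worked example (assuming a CBT_BIN_WIDTH of 50 msec): with observed build
+ * times {510, 560, 1020}, max_build_time is 1020, so *nbins = 1 + 1020/50 =
+ * 21. The three observations land in bins 10, 11 and 20 respectively, each
+ * with a count of 1; all other bins stay zero. */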
+
+/**
+ * Return the Pareto start-of-curve parameter Xm.
+ *
+ * Because we are not a true Pareto curve, we compute this as the
+ * weighted average of the N most frequent build time bins. N is either
+ * 1 if we don't have enough circuit build time data collected, or
+ * determined by the consensus parameter cbtnummodes (default 3).
+ */
+static build_time_t
+circuit_build_times_get_xm(circuit_build_times_t *cbt)
+{
+ build_time_t i, nbins;
+ build_time_t *nth_max_bin;
+ int32_t bin_counts=0;
+ build_time_t ret = 0;
+ uint32_t *histogram = circuit_build_times_create_histogram(cbt, &nbins);
+ int n=0;
+ int num_modes = circuit_build_times_default_num_xm_modes();
+
+ tor_assert(nbins > 0);
+ tor_assert(num_modes > 0);
+
+ // Only use one mode if < 1000 buildtimes. Not enough data
+ // for multiple.
+ if (cbt->total_build_times < CBT_NCIRCUITS_TO_OBSERVE)
+ num_modes = 1;
+
+ nth_max_bin = tor_calloc(num_modes, sizeof(build_time_t));
+
+ /* Determine the N most common build times */
+ for (i = 0; i < nbins; i++) {
+ if (histogram[i] >= histogram[nth_max_bin[0]]) {
+ nth_max_bin[0] = i;
+ }
+
+ for (n = 1; n < num_modes; n++) {
+ if (histogram[i] >= histogram[nth_max_bin[n]] &&
+ (!histogram[nth_max_bin[n-1]]
+ || histogram[i] < histogram[nth_max_bin[n-1]])) {
+ nth_max_bin[n] = i;
+ }
+ }
+ }
+
+ for (n = 0; n < num_modes; n++) {
+ bin_counts += histogram[nth_max_bin[n]];
+ ret += CBT_BIN_TO_MS(nth_max_bin[n])*histogram[nth_max_bin[n]];
+ log_info(LD_CIRC, "Xm mode #%d: %u %u", n, CBT_BIN_TO_MS(nth_max_bin[n]),
+ histogram[nth_max_bin[n]]);
+ }
+
+ /* bin_counts can become zero if all of our last CBT_NCIRCUITS_TO_OBSERVE
+ * circuits were abandoned before they completed. This shouldn't happen,
+ * though. We should have reset/re-learned a lower timeout first. */
+ if (bin_counts == 0) {
+ ret = 0;
+ log_warn(LD_CIRC,
+ "No valid circuit build time data out of %d times, %u modes, "
+ "have_timeout=%d, %lfms", cbt->total_build_times, num_modes,
+ cbt->have_computed_timeout, cbt->timeout_ms);
+ goto done;
+ }
+
+ tor_assert(bin_counts > 0);
+
+ ret /= bin_counts;
+
+ done:
+ tor_free(histogram);
+ tor_free(nth_max_bin);
+
+ return ret;
+}
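+
+/* Worked example (assumed numbers): suppose num_modes is 3 and the three
+ * most frequent bins have midpoints/counts of 525ms/40, 575ms/35 and
+ * 475ms/25. Then Xm = (525*40 + 575*35 + 475*25) / (40+35+25) = 53000/100
+ * = 530 msec. */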
+
+/**
+ * Output a histogram of current circuit build times to
+ * the or_state_t state structure.
+ */
+void
+circuit_build_times_update_state(const circuit_build_times_t *cbt,
+ or_state_t *state)
+{
+ uint32_t *histogram;
+ build_time_t i = 0;
+ build_time_t nbins = 0;
+ config_line_t **next, *line;
+
+ histogram = circuit_build_times_create_histogram(cbt, &nbins);
+ // write to state
+ config_free_lines(state->BuildtimeHistogram);
+ next = &state->BuildtimeHistogram;
+ *next = NULL;
+
+ state->TotalBuildTimes = cbt->total_build_times;
+ state->CircuitBuildAbandonedCount = 0;
+
+ for (i = 0; i < CBT_NCIRCUITS_TO_OBSERVE; i++) {
+ if (cbt->circuit_build_times[i] == CBT_BUILD_ABANDONED)
+ state->CircuitBuildAbandonedCount++;
+ }
+
+ for (i = 0; i < nbins; i++) {
+ // compress the histogram by skipping the blanks
+ if (histogram[i] == 0) continue;
+ *next = line = tor_malloc_zero(sizeof(config_line_t));
+ line->key = tor_strdup("CircuitBuildTimeBin");
+ tor_asprintf(&line->value, "%d %d",
+ CBT_BIN_TO_MS(i), histogram[i]);
+ next = &(line->next);
+ }
+
+ if (!unit_tests) {
+ if (!get_options()->AvoidDiskWrites)
+ or_state_mark_dirty(get_or_state(), 0);
+ }
+
+ tor_free(histogram);
+}
+
+/**
+ * Shuffle the build times array.
+ *
+ * Adapted from http://en.wikipedia.org/wiki/Fisher-Yates_shuffle
+ */
+static void
+circuit_build_times_shuffle_and_store_array(circuit_build_times_t *cbt,
+ build_time_t *raw_times,
+ uint32_t num_times)
+{
+ uint32_t n = num_times;
+ if (num_times > CBT_NCIRCUITS_TO_OBSERVE) {
+ log_notice(LD_CIRC, "The number of circuit times that this Tor version "
+ "uses to calculate build times is less than the number stored "
+ "in your state file. Decreasing the circuit time history from "
+ "%lu to %d.", (unsigned long)num_times,
+ CBT_NCIRCUITS_TO_OBSERVE);
+ }
+
+ if (n > INT_MAX-1) {
+ log_warn(LD_CIRC, "For some insane reasons, you had %lu circuit build "
+ "observations in your state file. That's far too many; probably "
+ "there's a bug here.", (unsigned long)n);
+ n = INT_MAX-1;
+ }
+
+ /* This code can only be run on a compact array */
+ while (n-- > 1) {
+ int k = crypto_rand_int(n + 1); /* 0 <= k <= n. */
+ build_time_t tmp = raw_times[k];
+ raw_times[k] = raw_times[n];
+ raw_times[n] = tmp;
+ }
+
+ /* Since the times are now shuffled, take a random CBT_NCIRCUITS_TO_OBSERVE
+   * subset (i.e., the first CBT_NCIRCUITS_TO_OBSERVE values). */
+ for (n = 0; n < MIN(num_times, CBT_NCIRCUITS_TO_OBSERVE); n++) {
+ circuit_build_times_add_time(cbt, raw_times[n]);
+ }
+}
+
+/**
+ * Filter old synthetic timeouts that were created before the
+ * new right-censored Pareto calculation was deployed.
+ *
+ * Once all clients before 0.2.1.13-alpha are gone, this code
+ * will be unused.
+ */
+static int
+circuit_build_times_filter_timeouts(circuit_build_times_t *cbt)
+{
+ int num_filtered=0, i=0;
+ double timeout_rate = 0;
+ build_time_t max_timeout = 0;
+
+ timeout_rate = circuit_build_times_timeout_rate(cbt);
+ max_timeout = (build_time_t)cbt->close_ms;
+
+ for (i = 0; i < CBT_NCIRCUITS_TO_OBSERVE; i++) {
+ if (cbt->circuit_build_times[i] > max_timeout) {
+ build_time_t replaced = cbt->circuit_build_times[i];
+ num_filtered++;
+ cbt->circuit_build_times[i] = CBT_BUILD_ABANDONED;
+
+ log_debug(LD_CIRC, "Replaced timeout %d with %d", replaced,
+ cbt->circuit_build_times[i]);
+ }
+ }
+
+ log_info(LD_CIRC,
+ "We had %d timeouts out of %d build times, "
+ "and filtered %d above the max of %u",
+ (int)(cbt->total_build_times*timeout_rate),
+ cbt->total_build_times, num_filtered, max_timeout);
+
+ return num_filtered;
+}
+
+/**
+ * Load histogram from <b>state</b>, shuffling the resulting array
+ * after we do so. Use this result to estimate parameters and
+ * calculate the timeout.
+ *
+ * Return -1 on error.
+ */
+int
+circuit_build_times_parse_state(circuit_build_times_t *cbt,
+ or_state_t *state)
+{
+ int tot_values = 0;
+ uint32_t loaded_cnt = 0, N = 0;
+ config_line_t *line;
+ int i;
+ build_time_t *loaded_times;
+ int err = 0;
+ circuit_build_times_init(cbt);
+
+ if (circuit_build_times_disabled(get_options())) {
+ return 0;
+ }
+
+ /* build_time_t 0 means uninitialized */
+ loaded_times = tor_calloc(state->TotalBuildTimes, sizeof(build_time_t));
+
+ for (line = state->BuildtimeHistogram; line; line = line->next) {
+ smartlist_t *args = smartlist_new();
+ smartlist_split_string(args, line->value, " ",
+ SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, 0);
+ if (smartlist_len(args) < 2) {
+ log_warn(LD_GENERAL, "Unable to parse circuit build times: "
+ "Too few arguments to CircuitBuildTime");
+ err = 1;
+ SMARTLIST_FOREACH(args, char*, cp, tor_free(cp));
+ smartlist_free(args);
+ break;
+ } else {
+ const char *ms_str = smartlist_get(args,0);
+ const char *count_str = smartlist_get(args,1);
+ uint32_t count, k;
+ build_time_t ms;
+ int ok;
+ ms = (build_time_t)tor_parse_ulong(ms_str, 10, 0,
+ CBT_BUILD_TIME_MAX, &ok, NULL);
+ if (!ok) {
+ log_warn(LD_GENERAL, "Unable to parse circuit build times: "
+ "Unparsable bin number");
+ err = 1;
+ SMARTLIST_FOREACH(args, char*, cp, tor_free(cp));
+ smartlist_free(args);
+ break;
+ }
+ count = (uint32_t)tor_parse_ulong(count_str, 10, 0,
+ UINT32_MAX, &ok, NULL);
+ if (!ok) {
+ log_warn(LD_GENERAL, "Unable to parse circuit build times: "
+ "Unparsable bin count");
+ err = 1;
+ SMARTLIST_FOREACH(args, char*, cp, tor_free(cp));
+ smartlist_free(args);
+ break;
+ }
+
+        if (loaded_cnt + count + (unsigned)state->CircuitBuildAbandonedCount
+ > (unsigned) state->TotalBuildTimes) {
+ log_warn(LD_CIRC,
+ "Too many build times in state file. "
+ "Stopping short before %d",
+ loaded_cnt+count);
+ SMARTLIST_FOREACH(args, char*, cp, tor_free(cp));
+ smartlist_free(args);
+ break;
+ }
+
+ for (k = 0; k < count; k++) {
+ loaded_times[loaded_cnt++] = ms;
+ }
+ N++;
+ SMARTLIST_FOREACH(args, char*, cp, tor_free(cp));
+ smartlist_free(args);
+ }
+ }
+
+ log_info(LD_CIRC,
+ "Adding %d timeouts.", state->CircuitBuildAbandonedCount);
+ for (i=0; i < state->CircuitBuildAbandonedCount; i++) {
+ loaded_times[loaded_cnt++] = CBT_BUILD_ABANDONED;
+ }
+
+ if (loaded_cnt != (unsigned)state->TotalBuildTimes) {
+ log_warn(LD_CIRC,
+ "Corrupt state file? Build times count mismatch. "
+ "Read %d times, but file says %d", loaded_cnt,
+ state->TotalBuildTimes);
+ err = 1;
+ circuit_build_times_reset(cbt);
+ goto done;
+ }
+
+ circuit_build_times_shuffle_and_store_array(cbt, loaded_times, loaded_cnt);
+
+ /* Verify that we didn't overwrite any indexes */
+ for (i=0; i < CBT_NCIRCUITS_TO_OBSERVE; i++) {
+ if (!cbt->circuit_build_times[i])
+ break;
+ tot_values++;
+ }
+ log_info(LD_CIRC,
+ "Loaded %d/%d values from %d lines in circuit time histogram",
+ tot_values, cbt->total_build_times, N);
+
+ if (cbt->total_build_times != tot_values
+ || cbt->total_build_times > CBT_NCIRCUITS_TO_OBSERVE) {
+ log_warn(LD_CIRC,
+ "Corrupt state file? Shuffled build times mismatch. "
+ "Read %d times, but file says %d", tot_values,
+ state->TotalBuildTimes);
+ err = 1;
+ circuit_build_times_reset(cbt);
+ goto done;
+ }
+
+ circuit_build_times_set_timeout(cbt);
+
+ if (!state->CircuitBuildAbandonedCount && cbt->total_build_times) {
+ circuit_build_times_filter_timeouts(cbt);
+ }
+
+ done:
+ tor_free(loaded_times);
+ return err ? -1 : 0;
+}
+
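+/* To illustrate the format parsed above (the numbers here are made up):
+ * each BuildtimeHistogram line carries a "<bin-milliseconds> <count>"
+ * pair, so a line whose value is
+ *
+ *   1025 12
+ *
+ * parses to ms=1025, count=12 and expands into twelve identical entries
+ * in loaded_times[]. Those entries, plus CircuitBuildAbandonedCount
+ * synthetic CBT_BUILD_ABANDONED entries, are then shuffled into the
+ * circular array. */
+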
+/**
+ * Estimates the Xm and Alpha parameters using
+ * http://en.wikipedia.org/wiki/Pareto_distribution#Parameter_estimation
+ *
+ * The notable difference is that we use the mode instead of the min to
+ * estimate Xm. This is because our distribution is Fréchet-like. We claim
+ * this is an acceptable approximation because we are only concerned with
+ * the accuracy of the CDF of the tail.
+ */
+STATIC int
+circuit_build_times_update_alpha(circuit_build_times_t *cbt)
+{
+ build_time_t *x=cbt->circuit_build_times;
+ double a = 0;
+ int n=0,i=0,abandoned_count=0;
+ build_time_t max_time=0;
+
+ /* http://en.wikipedia.org/wiki/Pareto_distribution#Parameter_estimation */
+ /* We sort of cheat here and make our samples slightly more Pareto-like
+ * and less Fréchet-like. */
+ cbt->Xm = circuit_build_times_get_xm(cbt);
+
+ /* If Xm came back 0, then too many circuits were abandoned. */
+ if (cbt->Xm == 0)
+ return 0;
+
+ tor_assert(cbt->Xm > 0);
+
+ for (i=0; i< CBT_NCIRCUITS_TO_OBSERVE; i++) {
+ if (!x[i]) {
+ continue;
+ }
+
+ if (x[i] < cbt->Xm) {
+ a += tor_mathlog(cbt->Xm);
+ } else if (x[i] == CBT_BUILD_ABANDONED) {
+ abandoned_count++;
+ } else {
+ a += tor_mathlog(x[i]);
+ if (x[i] > max_time)
+ max_time = x[i];
+ }
+ n++;
+ }
+
+ /*
+ * We log an error and assert here because this can only happen
+ * in codepaths other than startup. The startup state parsing code
+ * performs this same check, and resets state if it hits it. If we
+ * hit it at runtime, something serious has gone wrong.
+ */
+ if (n!=cbt->total_build_times) {
+ log_err(LD_CIRC, "Discrepancy in build times count: %d vs %d", n,
+ cbt->total_build_times);
+ }
+ tor_assert(n==cbt->total_build_times);
+
+ if (max_time <= 0) {
+ /* This can happen if Xm is actually the *maximum* value in the set.
+ * It can also happen if we've abandoned every single circuit somehow.
+ * In either case, tell the caller not to compute a new build timeout. */
+ log_warn(LD_BUG,
+ "Could not determine largest build time (%d). "
+ "Xm is %dms and we've abandoned %d out of %d circuits.", max_time,
+ cbt->Xm, abandoned_count, n);
+ return 0;
+ }
+
+ a += abandoned_count*tor_mathlog(max_time);
+
+ a -= n*tor_mathlog(cbt->Xm);
+ // Estimator comes from Eq #4 in:
+ // "Bayesian estimation based on trimmed samples from Pareto populations"
+ // by Arturo J. Fernández. We are right-censored only.
+ a = (n-abandoned_count)/a;
+
+ cbt->alpha = a;
+
+ return 1;
+}
+
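+/* For reference, the estimator computed above has the closed form
+ *
+ *   alpha = (n - m) /
+ *           (sum over uncensored i of ln(max(x_i, Xm))
+ *            + m*ln(max_time) - n*ln(Xm))
+ *
+ * where n is the total number of build times and m is the number of
+ * right-censored (abandoned) ones. This restates the loop above; it is
+ * not an independent derivation. */
+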
+/**
+ * This is the Pareto Quantile Function. It calculates the point x
+ * in the distribution such that F(x) = quantile (i.e., quantile*100%
+ * of the mass of the density function is below x on the curve).
+ *
+ * We use it to calculate the timeout and also to generate synthetic
+ * values of time for circuits that timeout before completion.
+ *
+ * See http://en.wikipedia.org/wiki/Quantile_function,
+ * http://en.wikipedia.org/wiki/Inverse_transform_sampling and
+ * http://en.wikipedia.org/wiki/Pareto_distribution#Generating_a_
+ * random_sample_from_Pareto_distribution
+ * That's right. I'll cite Wikipedia all day long.
+ *
+ * Return value is in milliseconds, clamped to INT32_MAX.
+ */
+STATIC double
+circuit_build_times_calculate_timeout(circuit_build_times_t *cbt,
+ double quantile)
+{
+ double ret;
+ tor_assert(quantile >= 0);
+ tor_assert(1.0-quantile > 0);
+ tor_assert(cbt->Xm > 0);
+
+ /* If either alpha or p is 0, we would divide by zero, yielding an
+ * infinite (double) result, which would be clamped to INT32_MAX.
+ * Instead, initialise ret to INT32_MAX, and skip over these
+ * potentially illegal/trapping divides by zero.
+ */
+ ret = INT32_MAX;
+
+ if (cbt->alpha > 0) {
+ double p;
+ p = pow(1.0-quantile,1.0/cbt->alpha);
+ if (p > 0) {
+ ret = cbt->Xm/p;
+ }
+ }
+
+ if (ret > INT32_MAX) {
+ ret = INT32_MAX;
+ }
+ tor_assert(ret > 0);
+ return ret;
+}
+
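+/* Worked example (illustrative parameters, not measured values): with
+ * Xm = 500ms and alpha = 2.0, the 80th-percentile timeout is
+ *
+ *   Q(0.80) = Xm / (1-0.80)^(1/alpha) = 500 / 0.2^0.5 ~= 1118ms
+ *
+ * so circuits slower than roughly 1.1s would count as timed out. */
+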
+#ifdef TOR_UNIT_TESTS
+/** Pareto CDF */
+double
+circuit_build_times_cdf(circuit_build_times_t *cbt, double x)
+{
+ double ret;
+ tor_assert(cbt->Xm > 0);
+ ret = 1.0-pow(cbt->Xm/x,cbt->alpha);
+ tor_assert(0 <= ret && ret <= 1.0);
+ return ret;
+}
+#endif /* defined(TOR_UNIT_TESTS) */
+
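+/* Sanity check (same illustrative parameters as above): the CDF inverts
+ * the quantile function, so with Xm = 500ms and alpha = 2.0,
+ * CDF(1118) = 1 - (500/1118)^2 ~= 0.80, matching Q(0.80) ~= 1118ms. */
+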
+#ifdef TOR_UNIT_TESTS
+/**
+ * Generate a synthetic time using our distribution parameters.
+ *
+ * The return value will be within the [q_lo, q_hi) quantile points
+ * on the CDF.
+ */
+build_time_t
+circuit_build_times_generate_sample(circuit_build_times_t *cbt,
+ double q_lo, double q_hi)
+{
+ double randval = crypto_rand_double();
+ build_time_t ret;
+ double u;
+
+ /* Generate between [q_lo, q_hi) */
+ /*XXXX This is what nextafter is supposed to be for; we should use it on the
+ * platforms that support it. */
+ q_hi -= 1.0/(INT32_MAX);
+
+ tor_assert(q_lo >= 0);
+ tor_assert(q_hi < 1);
+ tor_assert(q_lo < q_hi);
+
+ u = q_lo + (q_hi-q_lo)*randval;
+
+ tor_assert(0 <= u && u < 1.0);
+ /* circuit_build_times_calculate_timeout returns <= INT32_MAX */
+ ret = (build_time_t)
+ tor_lround(circuit_build_times_calculate_timeout(cbt, u));
+ tor_assert(ret > 0);
+ return ret;
+}
+#endif /* defined(TOR_UNIT_TESTS) */
+
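+/* A minimal usage sketch (unit tests only; the q_lo/q_hi values are
+ * hypothetical): synthesize a build time in the censored tail of the
+ * distribution, i.e. between the cutoff quantile and 1.0:
+ *
+ *   build_time_t synth =
+ *     circuit_build_times_generate_sample(cbt, 0.80, 1.0);
+ *
+ * With Xm = 500ms and alpha = 2.0 as above, synth always lands at or
+ * above Q(0.80) ~= 1118ms. */
+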
+#ifdef TOR_UNIT_TESTS
+/**
+ * Estimate an initial alpha parameter by solving the quantile
+ * function with a quantile point and a specific timeout value.
+ */
+void
+circuit_build_times_initial_alpha(circuit_build_times_t *cbt,
+ double quantile, double timeout_ms)
+{
+ // Q(u) = Xm/((1-u)^(1/a))
+ // Q(0.8) = Xm/((1-0.8)^(1/a)) = CircBuildTimeout
+ // CircBuildTimeout = Xm/((1-0.8)^(1/a))
+ // CircBuildTimeout = Xm*((1-0.8)^(-1/a))
+ // ln(CircBuildTimeout) = ln(Xm) + ln(1-0.8)*(-1/a)
+ // a = -ln(1-0.8)/(ln(CircBuildTimeout)-ln(Xm))
+ tor_assert(quantile >= 0);
+ tor_assert(cbt->Xm > 0);
+ cbt->alpha = tor_mathlog(1.0-quantile)/
+ (tor_mathlog(cbt->Xm)-tor_mathlog(timeout_ms));
+ tor_assert(cbt->alpha > 0);
+}
+#endif /* defined(TOR_UNIT_TESTS) */
+
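+/* Round-trip check (same illustrative numbers as above): solving with
+ * quantile = 0.80, Xm = 500ms and timeout_ms = 1118ms gives
+ *
+ *   alpha = ln(1-0.80) / (ln(500) - ln(1118)) = -1.609 / -0.805 ~= 2.0
+ *
+ * recovering the alpha used in the quantile example. */
+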
+/**
+ * Returns true if we need circuits to be built
+ */
+int
+circuit_build_times_needs_circuits(const circuit_build_times_t *cbt)
+{
+ /* Return true if < MIN_CIRCUITS_TO_OBSERVE */
+ return !circuit_build_times_enough_to_compute(cbt);
+}
+
+/**
+ * Returns true if we should build a timeout test circuit
+ * right now.
+ */
+int
+circuit_build_times_needs_circuits_now(const circuit_build_times_t *cbt)
+{
+ return circuit_build_times_needs_circuits(cbt) &&
+ approx_time()-cbt->last_circ_at > circuit_build_times_test_frequency();
+}
+
+/**
+ * How long we should be unreachable before we decide we need to check
+ * whether our published IP address has changed.
+ */
+#define CIRCUIT_TIMEOUT_BEFORE_RECHECK_IP (60*3)
+
+/**
+ * Called to indicate that the network showed some signs of liveness,
+ * i.e. we received a cell.
+ *
+ * This is used by circuit_build_times_network_check_live() to decide
+ * if we should record the circuit build timeout or not.
+ *
+ * This function is called every time we receive a cell. Avoid
+ * syscalls, events, and other high-intensity work.
+ */
+void
+circuit_build_times_network_is_live(circuit_build_times_t *cbt)
+{
+ time_t now = approx_time();
+ if (cbt->liveness.nonlive_timeouts > 0) {
+ time_t time_since_live = now - cbt->liveness.network_last_live;
+ log_notice(LD_CIRC,
+ "Tor now sees network activity. Restoring circuit build "
+ "timeout recording. Network was down for %d seconds "
+ "during %d circuit attempts.",
+ (int)time_since_live,
+ cbt->liveness.nonlive_timeouts);
+ if (time_since_live > CIRCUIT_TIMEOUT_BEFORE_RECHECK_IP)
+ reschedule_descriptor_update_check();
+ }
+ cbt->liveness.network_last_live = now;
+ cbt->liveness.nonlive_timeouts = 0;
+
+ /* Tell control.c */
+ control_event_network_liveness_update(1);
+}
+
+/**
+ * Non-destructively scale all of our circuit success, timeout, and close
+ * counts down by a factor of two. Scaling in this way preserves the
+ * ratios between succeeded vs timed out vs closed circuits, so that
+ * our statistics don't change when we scale.
+ *
+ * This is used only in the rare event that we build more than
+ * INT32_MAX circuits. Since the num_circ_* variables are
+ * uint32_t, we won't even be close to overflowing them.
+ */
+void
+circuit_build_times_scale_circ_counts(circuit_build_times_t *cbt)
+{
+ cbt->num_circ_succeeded /= 2;
+ cbt->num_circ_timeouts /= 2;
+ cbt->num_circ_closed /= 2;
+}
+
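+/* For example (made-up counts): 4000 successes, 1000 timeouts and 200
+ * closes scale to 2000, 500 and 100; the timeout rate is unchanged at
+ * 1000/5000 = 500/2500 = 20%. */
+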
+/**
+ * Called to indicate that we "completed" a circuit. Because this circuit
+ * succeeded, it doesn't count as a timeout-after-the-first-hop.
+ *
+ * (For the purposes of the cbt code, we consider a circuit "completed" if
+ * it has 3 hops, regardless of its final hop count. We do this because
+ * we're trying to answer the question, "how long should a circuit take to
+ * reach the 3-hop count".)
+ *
+ * This is used by circuit_build_times_network_check_changed() to determine
+ * if we had too many recent timeouts and need to reset our learned timeout
+ * to something higher.
+ */
+void
+circuit_build_times_network_circ_success(circuit_build_times_t *cbt)
+{
+ // Count circuit success
+ cbt->num_circ_succeeded++;
+
+ // If we're going to wrap int32, scale everything
+ if (cbt->num_circ_succeeded >= INT32_MAX) {
+ circuit_build_times_scale_circ_counts(cbt);
+ }
+
+ /* Check for NULLness because we might not be using adaptive timeouts */
+ if (cbt->liveness.timeouts_after_firsthop &&
+ cbt->liveness.num_recent_circs > 0) {
+ cbt->liveness.timeouts_after_firsthop[cbt->liveness.after_firsthop_idx]
+ = 0;
+ cbt->liveness.after_firsthop_idx++;
+ cbt->liveness.after_firsthop_idx %= cbt->liveness.num_recent_circs;
+ }
+}
+
+/**
+ * A circuit just timed out. If it failed after the first hop, record it
+ * in our history for later deciding if the network speed has changed.
+ *
+ * This is used by circuit_build_times_network_check_changed() to determine
+ * if we had too many recent timeouts and need to reset our learned timeout
+ * to something higher.
+ */
+static void
+circuit_build_times_network_timeout(circuit_build_times_t *cbt,
+ int did_onehop)
+{
+ // Count circuit timeout
+ cbt->num_circ_timeouts++;
+
+ // If we're going to wrap int32, scale everything
+ if (cbt->num_circ_timeouts >= INT32_MAX) {
+ circuit_build_times_scale_circ_counts(cbt);
+ }
+
+ /* Check for NULLness because we might not be using adaptive timeouts */
+ if (cbt->liveness.timeouts_after_firsthop &&
+ cbt->liveness.num_recent_circs > 0) {
+ if (did_onehop) {
+ cbt->liveness.timeouts_after_firsthop[cbt->liveness.after_firsthop_idx]
+ = 1;
+ cbt->liveness.after_firsthop_idx++;
+ cbt->liveness.after_firsthop_idx %= cbt->liveness.num_recent_circs;
+ }
+ }
+}
+
+/**
+ * A circuit was just forcibly closed. If there has been no recent network
+ * activity at all, but this circuit was launched back when we thought the
+ * network was live, increment the number of "nonlive" circuit timeouts.
+ *
+ * This is used by circuit_build_times_network_check_live() to decide
+ * if we should record the circuit build timeout or not.
+ */
+static void
+circuit_build_times_network_close(circuit_build_times_t *cbt,
+ int did_onehop, time_t start_time)
+{
+ time_t now = time(NULL);
+
+ // Count circuit close
+ cbt->num_circ_closed++;
+
+ // If we're going to wrap int32, scale everything
+ if (cbt->num_circ_closed >= INT32_MAX) {
+ circuit_build_times_scale_circ_counts(cbt);
+ }
+
+ /*
+ * Check if this is a timeout that was for a circuit that spent its
+ * entire existence during a time where we have had no network activity.
+ */
+ if (cbt->liveness.network_last_live < start_time) {
+ if (did_onehop) {
+ char last_live_buf[ISO_TIME_LEN+1];
+ char start_time_buf[ISO_TIME_LEN+1];
+ char now_buf[ISO_TIME_LEN+1];
+ format_local_iso_time(last_live_buf, cbt->liveness.network_last_live);
+ format_local_iso_time(start_time_buf, start_time);
+ format_local_iso_time(now_buf, now);
+ log_notice(LD_CIRC,
+ "A circuit somehow completed a hop while the network was "
+ "not live. The network was last live at %s, but the circuit "
+ "launched at %s. It's now %s. This could mean your clock "
+ "changed.", last_live_buf, start_time_buf, now_buf);
+ }
+ cbt->liveness.nonlive_timeouts++;
+ if (cbt->liveness.nonlive_timeouts == 1) {
+ log_notice(LD_CIRC,
+ "Tor has not observed any network activity for the past %d "
+ "seconds. Disabling circuit build timeout recording.",
+ (int)(now - cbt->liveness.network_last_live));
+
+ /* Tell control.c */
+ control_event_network_liveness_update(0);
+ } else {
+ log_info(LD_CIRC,
+ "Got non-live timeout. Current count is: %d",
+ cbt->liveness.nonlive_timeouts);
+ }
+ }
+}
+
+/**
+ * When the network is not live, we do not record circuit build times.
+ *
+ * The network is considered not live if there has been at least one
+ * circuit build that began and ended (had its close_ms measurement
+ * period expire) since we last received a cell.
+ *
+ * Returns 1 if the network is live, and 0 otherwise. This check itself
+ * has no side effects.
+ */
+int
+circuit_build_times_network_check_live(const circuit_build_times_t *cbt)
+{
+ if (cbt->liveness.nonlive_timeouts > 0) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * Returns true if we have seen more than MAX_RECENT_TIMEOUT_COUNT of
+ * the past RECENT_CIRCUITS time out after the first hop. Used to detect
+ * if the network connection has changed significantly, and if so,
+ * resets our circuit build timeout to the default.
+ *
+ * Also resets the entire timeout history in this case and causes us
+ * to restart the process of building test circuits and estimating a
+ * new timeout.
+ */
+STATIC int
+circuit_build_times_network_check_changed(circuit_build_times_t *cbt)
+{
+ int total_build_times = cbt->total_build_times;
+ int timeout_count=0;
+ int i;
+
+ if (cbt->liveness.timeouts_after_firsthop &&
+ cbt->liveness.num_recent_circs > 0) {
+ /* how many of our recent circuits made it to the first hop but then
+ * timed out? */
+ for (i = 0; i < cbt->liveness.num_recent_circs; i++) {
+ timeout_count += cbt->liveness.timeouts_after_firsthop[i];
+ }
+ }
+
+ /* If too many of our recent circuits (by default, 90% of them) are
+ * timing out after the first hop, we need to re-estimate a new initial
+ * alpha and timeout. */
+ if (timeout_count < circuit_build_times_max_timeouts()) {
+ return 0;
+ }
+
+ circuit_build_times_reset(cbt);
+ if (cbt->liveness.timeouts_after_firsthop &&
+ cbt->liveness.num_recent_circs > 0) {
+ memset(cbt->liveness.timeouts_after_firsthop, 0,
+ sizeof(*cbt->liveness.timeouts_after_firsthop)*
+ cbt->liveness.num_recent_circs);
+ }
+ cbt->liveness.after_firsthop_idx = 0;
+
+#define MAX_TIMEOUT ((int32_t) (INT32_MAX/2))
+ /* Check to see if this has happened before. If so, double the timeout
+ * to give clients on abysmally bad network connections a shot at access */
+ if (cbt->timeout_ms >= circuit_build_times_get_initial_timeout()) {
+ if (cbt->timeout_ms > MAX_TIMEOUT || cbt->close_ms > MAX_TIMEOUT) {
+ log_warn(LD_CIRC, "Insanely large circuit build timeout value. "
+ "(timeout = %fmsec, close = %fmsec)",
+ cbt->timeout_ms, cbt->close_ms);
+ } else {
+ cbt->timeout_ms *= 2;
+ cbt->close_ms *= 2;
+ }
+ } else {
+ cbt->close_ms = cbt->timeout_ms
+ = circuit_build_times_get_initial_timeout();
+ }
+#undef MAX_TIMEOUT
+
+ cbt_control_event_buildtimeout_set(cbt, BUILDTIMEOUT_SET_EVENT_RESET);
+
+ log_notice(LD_CIRC,
+ "Your network connection speed appears to have changed. Resetting "
+ "timeout to %lds after %d timeouts and %d buildtimes.",
+ tor_lround(cbt->timeout_ms/1000), timeout_count,
+ total_build_times);
+
+ return 1;
+}
+
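+/* To illustrate the reset behavior above with the default parameters:
+ * with RECENT_CIRCUITS = 20 and a threshold of 20*9/10 = 18 first-hop
+ * timeouts, the first reset restores the initial 60-second timeout. If
+ * the threshold trips again while timeout_ms is already at or above the
+ * initial value, both timeout_ms and close_ms double (60s -> 120s ->
+ * 240s and so on) until they would exceed INT32_MAX/2 ms. */
+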
+/**
+ * Return the fraction of circuits in a set of cbt data that timed out
+ * (i.e. whose recorded build time is at least timeout_ms).
+ */
+double
+circuit_build_times_timeout_rate(const circuit_build_times_t *cbt)
+{
+ int i=0,timeouts=0;
+ for (i = 0; i < CBT_NCIRCUITS_TO_OBSERVE; i++) {
+ if (cbt->circuit_build_times[i] >= cbt->timeout_ms) {
+ timeouts++;
+ }
+ }
+
+ if (!cbt->total_build_times)
+ return 0;
+
+ return ((double)timeouts)/cbt->total_build_times;
+}
+
+/**
+ * Return the fraction of circuits in a set of cbt data that were
+ * force-closed (recorded as CBT_BUILD_ABANDONED).
+ */
+double
+circuit_build_times_close_rate(const circuit_build_times_t *cbt)
+{
+ int i=0,closed=0;
+ for (i = 0; i < CBT_NCIRCUITS_TO_OBSERVE; i++) {
+ if (cbt->circuit_build_times[i] == CBT_BUILD_ABANDONED) {
+ closed++;
+ }
+ }
+
+ if (!cbt->total_build_times)
+ return 0;
+
+ return ((double)closed)/cbt->total_build_times;
+}
+
+/**
+ * Store a timeout as a synthetic value.
+ *
+ * Returns true if the store was successful and we should possibly
+ * update our timeout estimate.
+ */
+int
+circuit_build_times_count_close(circuit_build_times_t *cbt,
+ int did_onehop,
+ time_t start_time)
+{
+ if (circuit_build_times_disabled(get_options())) {
+ cbt->close_ms = cbt->timeout_ms
+ = circuit_build_times_get_initial_timeout();
+ return 0;
+ }
+
+ /* Record this force-close to help determine if the network is dead */
+ circuit_build_times_network_close(cbt, did_onehop, start_time);
+
+ /* Only count timeouts if the network is live. */
+ if (!circuit_build_times_network_check_live(cbt)) {
+ return 0;
+ }
+
+ circuit_build_times_add_time(cbt, CBT_BUILD_ABANDONED);
+ return 1;
+}
+
+/**
+ * Update timeout counts to determine if we need to expire
+ * our build time history due to excessive timeouts.
+ *
+ * We do not record any actual time values at this stage;
+ * we are only interested in recording the fact that a timeout
+ * happened. We record the time values via
+ * circuit_build_times_count_close() and circuit_build_times_add_time().
+ */
+void
+circuit_build_times_count_timeout(circuit_build_times_t *cbt,
+ int did_onehop)
+{
+ if (circuit_build_times_disabled(get_options())) {
+ cbt->close_ms = cbt->timeout_ms
+ = circuit_build_times_get_initial_timeout();
+ return;
+ }
+
+ /* Register the fact that a timeout just occurred. */
+ circuit_build_times_network_timeout(cbt, did_onehop);
+
+ /* If there are a ton of timeouts, we should reset
+ * the circuit build timeout. */
+ circuit_build_times_network_check_changed(cbt);
+}
+
+/**
+ * Estimate a new timeout based on history and set our timeout
+ * variable accordingly.
+ */
+static int
+circuit_build_times_set_timeout_worker(circuit_build_times_t *cbt)
+{
+ build_time_t max_time;
+ if (!circuit_build_times_enough_to_compute(cbt))
+ return 0;
+
+ if (!circuit_build_times_update_alpha(cbt))
+ return 0;
+
+ cbt->timeout_ms = circuit_build_times_calculate_timeout(cbt,
+ circuit_build_times_quantile_cutoff());
+
+ cbt->close_ms = circuit_build_times_calculate_timeout(cbt,
+ circuit_build_times_close_quantile());
+
+ max_time = circuit_build_times_max(cbt);
+
+ if (cbt->timeout_ms > max_time) {
+ log_info(LD_CIRC,
+ "Circuit build timeout of %dms is beyond the maximum build "
+ "time we have ever observed. Capping it to %dms.",
+ (int)cbt->timeout_ms, max_time);
+ cbt->timeout_ms = max_time;
+ }
+
+ if (max_time < INT32_MAX/2 && cbt->close_ms > 2*max_time) {
+ log_info(LD_CIRC,
+ "Circuit build measurement period of %dms is more than twice "
+ "the maximum build time we have ever observed. Capping it to "
+ "%dms.", (int)cbt->close_ms, 2*max_time);
+ cbt->close_ms = 2*max_time;
+ }
+
+ /* Sometimes really fast guard nodes give us such a steep curve
+ * that this ends up being not that much greater than timeout_ms.
+ * Make it at least the initial timeout (60 seconds by default) to
+ * handle this case. */
+ cbt->close_ms = MAX(cbt->close_ms, circuit_build_times_initial_timeout());
+
+ cbt->have_computed_timeout = 1;
+ return 1;
+}
+
+/**
+ * Exposed function to compute a new timeout. Dispatches events and
+ * also filters out extremely high timeout values.
+ */
+void
+circuit_build_times_set_timeout(circuit_build_times_t *cbt)
+{
+ long prev_timeout = tor_lround(cbt->timeout_ms/1000);
+ double timeout_rate;
+
+ /*
+ * Just return if we aren't using adaptive timeouts
+ */
+ if (circuit_build_times_disabled(get_options()))
+ return;
+
+ if (!circuit_build_times_set_timeout_worker(cbt))
+ return;
+
+ if (cbt->timeout_ms < circuit_build_times_min_timeout()) {
+ log_info(LD_CIRC, "Set buildtimeout to low value %fms. Setting to %dms",
+ cbt->timeout_ms, circuit_build_times_min_timeout());
+ cbt->timeout_ms = circuit_build_times_min_timeout();
+ if (cbt->close_ms < cbt->timeout_ms) {
+ /* This shouldn't happen because of MAX() in timeout_worker above,
+ * but doing it just in case */
+ cbt->close_ms = circuit_build_times_initial_timeout();
+ }
+ }
+
+ cbt_control_event_buildtimeout_set(cbt, BUILDTIMEOUT_SET_EVENT_COMPUTED);
+
+ timeout_rate = circuit_build_times_timeout_rate(cbt);
+
+ if (prev_timeout > tor_lround(cbt->timeout_ms/1000)) {
+ log_info(LD_CIRC,
+ "Based on %d circuit times, it looks like we don't need to "
+ "wait so long for circuits to finish. We will now assume a "
+ "circuit is too slow to use after waiting %ld seconds.",
+ cbt->total_build_times,
+ tor_lround(cbt->timeout_ms/1000));
+ log_info(LD_CIRC,
+ "Circuit timeout data: %fms, %fms, Xm: %d, a: %f, r: %f",
+ cbt->timeout_ms, cbt->close_ms, cbt->Xm, cbt->alpha,
+ timeout_rate);
+ } else if (prev_timeout < tor_lround(cbt->timeout_ms/1000)) {
+ log_info(LD_CIRC,
+ "Based on %d circuit times, it looks like we need to wait "
+ "longer for circuits to finish. We will now assume a "
+ "circuit is too slow to use after waiting %ld seconds.",
+ cbt->total_build_times,
+ tor_lround(cbt->timeout_ms/1000));
+ log_info(LD_CIRC,
+ "Circuit timeout data: %fms, %fms, Xm: %d, a: %f, r: %f",
+ cbt->timeout_ms, cbt->close_ms, cbt->Xm, cbt->alpha,
+ timeout_rate);
+ } else {
+ log_info(LD_CIRC,
+ "Set circuit build timeout to %lds (%fms, %fms, Xm: %d, a: %f,"
+ " r: %f) based on %d circuit times",
+ tor_lround(cbt->timeout_ms/1000),
+ cbt->timeout_ms, cbt->close_ms, cbt->Xm, cbt->alpha, timeout_rate,
+ cbt->total_build_times);
+ }
+}
+
+#ifdef TOR_UNIT_TESTS
+/** Make a note that we're running unit tests (rather than running Tor
+ * itself), so we avoid clobbering our state file. */
+void
+circuitbuild_running_unit_tests(void)
+{
+ unit_tests = 1;
+}
+#endif /* defined(TOR_UNIT_TESTS) */
+
+void
+circuit_build_times_update_last_circ(circuit_build_times_t *cbt)
+{
+ cbt->last_circ_at = approx_time();
+}
+
+static void
+cbt_control_event_buildtimeout_set(const circuit_build_times_t *cbt,
+ buildtimeout_set_event_t type)
+{
+ char *args = NULL;
+ double qnt;
+ double timeout_rate = 0.0;
+ double close_rate = 0.0;
+
+ switch (type) {
+ case BUILDTIMEOUT_SET_EVENT_RESET:
+ case BUILDTIMEOUT_SET_EVENT_SUSPENDED:
+ case BUILDTIMEOUT_SET_EVENT_DISCARD:
+ qnt = 1.0;
+ break;
+ case BUILDTIMEOUT_SET_EVENT_COMPUTED:
+ case BUILDTIMEOUT_SET_EVENT_RESUME:
+ default:
+ qnt = circuit_build_times_quantile_cutoff();
+ break;
+ }
+
+ /* The timeout rate is the ratio of the timeout count over
+ * the total number of circuits attempted. The total number of
+ * circuits is (timeouts+succeeded), since every circuit
+ * either succeeds, or times out. "Closed" circuits are
+ * MEASURE_TIMEOUT circuits whose measurement period expired.
+ * All MEASURE_TIMEOUT circuits are counted in the timeouts stat
+ * before transitioning to MEASURE_TIMEOUT (in
+ * circuit_build_times_mark_circ_as_measurement_only()).
+ * MEASURE_TIMEOUT circuits that succeed are *not* counted as
+ * "succeeded". See circuit_build_times_handle_completed_hop().
+ *
+ * We cast the denominator
+ * to promote it to double before the addition, to avoid int32
+ * overflow. */
+ const double total_circuits =
+ ((double)cbt->num_circ_timeouts) + cbt->num_circ_succeeded;
+ if (total_circuits >= 1.0) {
+ timeout_rate = cbt->num_circ_timeouts / total_circuits;
+ close_rate = cbt->num_circ_closed / total_circuits;
+ }
+
+ tor_asprintf(&args, "TOTAL_TIMES=%lu "
+ "TIMEOUT_MS=%lu XM=%lu ALPHA=%f CUTOFF_QUANTILE=%f "
+ "TIMEOUT_RATE=%f CLOSE_MS=%lu CLOSE_RATE=%f",
+ (unsigned long)cbt->total_build_times,
+ (unsigned long)cbt->timeout_ms,
+ (unsigned long)cbt->Xm, cbt->alpha, qnt,
+ timeout_rate,
+ (unsigned long)cbt->close_ms,
+ close_rate);
+
+ control_event_buildtimeout_set(type, args);
+
+ tor_free(args);
+}
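+
+/* The resulting control-port line looks roughly like this (illustrative
+ * values, assuming the standard 650 asynchronous-event framing; the
+ * real event is a single unwrapped line):
+ *
+ *   650 BUILDTIMEOUT_SET COMPUTED TOTAL_TIMES=124 TIMEOUT_MS=1118
+ *        XM=500 ALPHA=2.000000 CUTOFF_QUANTILE=0.800000
+ *        TIMEOUT_RATE=0.180000 CLOSE_MS=2236 CLOSE_RATE=0.050000
+ */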
diff --git a/src/core/or/circuitstats.h b/src/core/or/circuitstats.h
new file mode 100644
index 0000000000..174730d035
--- /dev/null
+++ b/src/core/or/circuitstats.h
@@ -0,0 +1,213 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circuitstats.h
+ * \brief Header file for circuitstats.c
+ **/
+
+#ifndef TOR_CIRCUITSTATS_H
+#define TOR_CIRCUITSTATS_H
+
+const circuit_build_times_t *get_circuit_build_times(void);
+circuit_build_times_t *get_circuit_build_times_mutable(void);
+double get_circuit_build_close_time_ms(void);
+double get_circuit_build_timeout_ms(void);
+
+int circuit_build_times_disabled(const or_options_t *options);
+int circuit_build_times_disabled_(const or_options_t *options,
+ int ignore_consensus);
+
+/** A build_time_t is milliseconds */
+typedef uint32_t build_time_t;
+
+int circuit_build_times_enough_to_compute(const circuit_build_times_t *cbt);
+void circuit_build_times_update_state(const circuit_build_times_t *cbt,
+ or_state_t *state);
+int circuit_build_times_parse_state(circuit_build_times_t *cbt,
+ or_state_t *state);
+void circuit_build_times_count_timeout(circuit_build_times_t *cbt,
+ int did_onehop);
+int circuit_build_times_count_close(circuit_build_times_t *cbt,
+ int did_onehop, time_t start_time);
+void circuit_build_times_set_timeout(circuit_build_times_t *cbt);
+int circuit_build_times_add_time(circuit_build_times_t *cbt,
+ build_time_t time);
+int circuit_build_times_needs_circuits(const circuit_build_times_t *cbt);
+void circuit_build_times_handle_completed_hop(origin_circuit_t *circ);
+
+int circuit_build_times_needs_circuits_now(const circuit_build_times_t *cbt);
+void circuit_build_times_init(circuit_build_times_t *cbt);
+void circuit_build_times_free_timeouts(circuit_build_times_t *cbt);
+void circuit_build_times_new_consensus_params(circuit_build_times_t *cbt,
+ networkstatus_t *ns);
+double circuit_build_times_timeout_rate(const circuit_build_times_t *cbt);
+double circuit_build_times_close_rate(const circuit_build_times_t *cbt);
+
+void circuit_build_times_update_last_circ(circuit_build_times_t *cbt);
+void circuit_build_times_mark_circ_as_measurement_only(origin_circuit_t *circ);
+
+/** Total size of the circuit timeout history to accumulate.
+ * 1000 is approx 2.5 days' worth of continual-use circuits. */
+#define CBT_NCIRCUITS_TO_OBSERVE 1000
+
+/** Width of the histogram bins in milliseconds */
+#define CBT_BIN_WIDTH ((build_time_t)50)
+
+/** Number of modes to use in the weighted-avg computation of Xm */
+#define CBT_DEFAULT_NUM_XM_MODES 3
+#define CBT_MIN_NUM_XM_MODES 1
+#define CBT_MAX_NUM_XM_MODES 20
+
+/**
+ * CBT_BUILD_ABANDONED is our flag value to represent a force-closed
+ * circuit (a.k.a. a 'right-censored' Pareto value).
+ */
+#define CBT_BUILD_ABANDONED ((build_time_t)(INT32_MAX-1))
+#define CBT_BUILD_TIME_MAX ((build_time_t)(INT32_MAX))
+
+/** Save state every 10 circuits */
+#define CBT_SAVE_STATE_EVERY 10
+
+/* Circuit build times consensus parameters */
+
+/**
+ * How long to wait, expressed as a CDF quantile, before actually closing
+ * circuits that take too long to build.
+ */
+#define CBT_DEFAULT_CLOSE_QUANTILE 95
+#define CBT_MIN_CLOSE_QUANTILE CBT_MIN_QUANTILE_CUTOFF
+#define CBT_MAX_CLOSE_QUANTILE CBT_MAX_QUANTILE_CUTOFF
+
+/**
+ * How many circuits count as recent when considering if the
+ * connection has gone gimpy or changed.
+ */
+#define CBT_DEFAULT_RECENT_CIRCUITS 20
+#define CBT_MIN_RECENT_CIRCUITS 3
+#define CBT_MAX_RECENT_CIRCUITS 1000
+
+/**
+ * Maximum number of the past RECENT_CIRCUITS that may time out after
+ * finishing their first hop before we recalculate the timeout.
+ *
+ * This tells us whether to abandon timeout history and set
+ * the timeout back to whatever circuit_build_times_get_initial_timeout()
+ * gives us.
+ */
+#define CBT_DEFAULT_MAX_RECENT_TIMEOUT_COUNT (CBT_DEFAULT_RECENT_CIRCUITS*9/10)
+#define CBT_MIN_MAX_RECENT_TIMEOUT_COUNT 3
+#define CBT_MAX_MAX_RECENT_TIMEOUT_COUNT 10000
+
+/** Minimum circuits before estimating a timeout */
+#define CBT_DEFAULT_MIN_CIRCUITS_TO_OBSERVE 100
+#define CBT_MIN_MIN_CIRCUITS_TO_OBSERVE 1
+#define CBT_MAX_MIN_CIRCUITS_TO_OBSERVE 10000
+
+/** Cutoff percentile on the CDF for our timeout estimation. */
+#define CBT_DEFAULT_QUANTILE_CUTOFF 80
+#define CBT_MIN_QUANTILE_CUTOFF 10
+#define CBT_MAX_QUANTILE_CUTOFF 99
+double circuit_build_times_quantile_cutoff(void);
+
+/** How often in seconds should we build a test circuit */
+#define CBT_DEFAULT_TEST_FREQUENCY 10
+#define CBT_MIN_TEST_FREQUENCY 1
+#define CBT_MAX_TEST_FREQUENCY INT32_MAX
+
+/** Lowest allowable value for CircuitBuildTimeout in milliseconds */
+#define CBT_DEFAULT_TIMEOUT_MIN_VALUE (1500)
+#define CBT_MIN_TIMEOUT_MIN_VALUE 500
+#define CBT_MAX_TIMEOUT_MIN_VALUE INT32_MAX
+
+/** Initial circuit build timeout in milliseconds */
+#define CBT_DEFAULT_TIMEOUT_INITIAL_VALUE (60*1000)
+#define CBT_MIN_TIMEOUT_INITIAL_VALUE CBT_MIN_TIMEOUT_MIN_VALUE
+#define CBT_MAX_TIMEOUT_INITIAL_VALUE INT32_MAX
+int32_t circuit_build_times_initial_timeout(void);
+
+#if CBT_DEFAULT_MAX_RECENT_TIMEOUT_COUNT < CBT_MIN_MAX_RECENT_TIMEOUT_COUNT
+#error "RECENT_CIRCUITS is set too low."
+#endif
+
+#ifdef CIRCUITSTATS_PRIVATE
+STATIC double circuit_build_times_calculate_timeout(circuit_build_times_t *cbt,
+ double quantile);
+STATIC int circuit_build_times_update_alpha(circuit_build_times_t *cbt);
+STATIC void circuit_build_times_reset(circuit_build_times_t *cbt);
+
+/* Network liveness functions */
+STATIC int circuit_build_times_network_check_changed(
+ circuit_build_times_t *cbt);
+#endif /* defined(CIRCUITSTATS_PRIVATE) */
+
+#ifdef TOR_UNIT_TESTS
+build_time_t circuit_build_times_generate_sample(circuit_build_times_t *cbt,
+ double q_lo, double q_hi);
+double circuit_build_times_cdf(circuit_build_times_t *cbt, double x);
+void circuit_build_times_initial_alpha(circuit_build_times_t *cbt,
+ double quantile, double time_ms);
+void circuitbuild_running_unit_tests(void);
+#endif /* defined(TOR_UNIT_TESTS) */
+
+/* Network liveness functions */
+void circuit_build_times_network_is_live(circuit_build_times_t *cbt);
+int circuit_build_times_network_check_live(const circuit_build_times_t *cbt);
+void circuit_build_times_network_circ_success(circuit_build_times_t *cbt);
+
+#ifdef CIRCUITSTATS_PRIVATE
+/** Information about the state of our local network connection */
+typedef struct {
+ /** The timestamp we last completed a TLS handshake or received a cell */
+ time_t network_last_live;
+ /** If the network is not live, how many timeouts has this caused? */
+ int nonlive_timeouts;
+ /** Circular array of circuits that have made it to the first hop. Slot is
+ * 1 if circuit timed out, 0 if circuit succeeded */
+ int8_t *timeouts_after_firsthop;
+ /** Number of elements allocated for the above array */
+ int num_recent_circs;
+ /** Index into circular array. */
+ int after_firsthop_idx;
+} network_liveness_t;
+
+/** Structure for circuit build times history */
+struct circuit_build_times_s {
+ /** The circular array of recorded build times in milliseconds */
+ build_time_t circuit_build_times[CBT_NCIRCUITS_TO_OBSERVE];
+ /** Current index in the circuit_build_times circular array */
+ int build_times_idx;
+ /** Total number of build times accumulated. Max CBT_NCIRCUITS_TO_OBSERVE */
+ int total_build_times;
+ /** Information about the state of our local network connection */
+ network_liveness_t liveness;
+ /** Last time we built a circuit. Used to decide to build new test circs */
+ time_t last_circ_at;
+ /** "Minimum" value of our pareto distribution (actually mode) */
+ build_time_t Xm;
+ /** alpha exponent for pareto dist. */
+ double alpha;
+ /** Have we computed a timeout? */
+ int have_computed_timeout;
+ /** The exact value for that timeout in milliseconds. Stored as a double
+ * to maintain precision from calculations to and from quantile value. */
+ double timeout_ms;
+ /** How long we wait before actually closing the circuit. */
+ double close_ms;
+ /** Total succeeded counts. Old measurements may be scaled downward if
+ * we've seen a lot of circuits. */
+ uint32_t num_circ_succeeded;
+ /** Total timeout counts. Old measurements may be scaled downward if
+ * we've seen a lot of circuits. */
+ uint32_t num_circ_timeouts;
+ /** Total closed counts. Old measurements may be scaled downward if
+ * we've seen a lot of circuits.*/
+ uint32_t num_circ_closed;
+};
+#endif /* defined(CIRCUITSTATS_PRIVATE) */
+
+#endif /* !defined(TOR_CIRCUITSTATS_H) */
diff --git a/src/core/or/circuituse.c b/src/core/or/circuituse.c
new file mode 100644
index 0000000000..a3a69dc354
--- /dev/null
+++ b/src/core/or/circuituse.c
@@ -0,0 +1,3155 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circuituse.c
+ * \brief Launch the right sort of circuits and attach the right streams to
+ * them.
+ *
+ * As distinct from circuitlist.c, which manages lookups to find circuits, and
+ * circuitbuild.c, which handles the logistics of circuit construction, this
+ * module keeps track of which streams can be attached to which circuits (in
+ * circuit_get_best()), and attaches streams to circuits (with
+ * circuit_try_attaching_streams(), connection_ap_handshake_attach_circuit(),
+ * and connection_ap_handshake_attach_chosen_circuit() ).
+ *
+ * This module also makes sure that we are building circuits for all of the
+ * predicted ports, using circuit_remove_handled_ports(),
+ * circuit_stream_is_being_handled(), and circuit_build_needed_cirs(). It
+ * handles launching circuits for specific targets using
+ * circuit_launch_by_extend_info().
+ *
+ * This is also where we handle expiring circuits that have been around for
+ * too long without actually completing, along with the circuit_build_timeout
+ * logic in circuitstats.c.
+ **/
+
+#include "or/or.h"
+#include "or/addressmap.h"
+#include "or/bridges.h"
+#include "or/channel.h"
+#include "or/circpathbias.h"
+#include "or/circuitbuild.h"
+#include "or/circuitlist.h"
+#include "or/circuitstats.h"
+#include "or/circuituse.h"
+#include "or/config.h"
+#include "or/connection.h"
+#include "or/connection_edge.h"
+#include "or/control.h"
+#include "or/directory.h"
+#include "or/entrynodes.h"
+#include "or/hs_common.h"
+#include "or/hs_client.h"
+#include "or/hs_circuit.h"
+#include "or/hs_ident.h"
+#include "or/hs_stats.h"
+#include "or/nodelist.h"
+#include "or/networkstatus.h"
+#include "or/policies.h"
+#include "or/rendclient.h"
+#include "or/rendcommon.h"
+#include "or/rendservice.h"
+#include "or/rephist.h"
+#include "or/router.h"
+#include "or/routerlist.h"
+#include "lib/math/fp.h"
+#include "lib/time/tvdiff.h"
+
+#include "or/cpath_build_state_st.h"
+#include "or/dir_connection_st.h"
+#include "or/entry_connection_st.h"
+#include "or/extend_info_st.h"
+#include "or/or_circuit_st.h"
+#include "or/origin_circuit_st.h"
+#include "or/socks_request_st.h"
+
+static void circuit_expire_old_circuits_clientside(void);
+static void circuit_increment_failure_count(void);
+
+/** Check whether the hidden service destination of the stream at
+ * <b>edge_conn</b> is the same as the destination of the circuit at
+ * <b>origin_circ</b>. */
+static int
+circuit_matches_with_rend_stream(const edge_connection_t *edge_conn,
+ const origin_circuit_t *origin_circ)
+{
+ /* Check if this is a v2 rendezvous circ/stream */
+ if ((edge_conn->rend_data && !origin_circ->rend_data) ||
+ (!edge_conn->rend_data && origin_circ->rend_data) ||
+ (edge_conn->rend_data && origin_circ->rend_data &&
+ rend_cmp_service_ids(rend_data_get_address(edge_conn->rend_data),
+ rend_data_get_address(origin_circ->rend_data)))) {
+ /* this circ is not for this conn */
+ return 0;
+ }
+
+ /* Check if this is a v3 rendezvous circ/stream */
+ if ((edge_conn->hs_ident && !origin_circ->hs_ident) ||
+ (!edge_conn->hs_ident && origin_circ->hs_ident) ||
+ (edge_conn->hs_ident && origin_circ->hs_ident &&
+ !ed25519_pubkey_eq(&edge_conn->hs_ident->identity_pk,
+ &origin_circ->hs_ident->identity_pk))) {
+ /* this circ is not for this conn */
+ return 0;
+ }
+
+ return 1;
+}
+
+/** Return 1 if <b>circ</b> could be returned by circuit_get_best().
+ * Else return 0.
+ */
+static int
+circuit_is_acceptable(const origin_circuit_t *origin_circ,
+ const entry_connection_t *conn,
+ int must_be_open, uint8_t purpose,
+ int need_uptime, int need_internal,
+ time_t now)
+{
+ const circuit_t *circ = TO_CIRCUIT(origin_circ);
+ const node_t *exitnode;
+ cpath_build_state_t *build_state;
+ tor_assert(circ);
+ tor_assert(conn);
+ tor_assert(conn->socks_request);
+
+ if (must_be_open && (circ->state != CIRCUIT_STATE_OPEN || !circ->n_chan))
+ return 0; /* ignore non-open circs */
+ if (circ->marked_for_close)
+ return 0;
+
+ /* if this circ isn't our purpose, skip. */
+ if (purpose == CIRCUIT_PURPOSE_C_REND_JOINED && !must_be_open) {
+ if (circ->purpose != CIRCUIT_PURPOSE_C_ESTABLISH_REND &&
+ circ->purpose != CIRCUIT_PURPOSE_C_REND_READY &&
+ circ->purpose != CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED &&
+ circ->purpose != CIRCUIT_PURPOSE_C_REND_JOINED)
+ return 0;
+ } else if (purpose == CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT &&
+ !must_be_open) {
+ if (circ->purpose != CIRCUIT_PURPOSE_C_INTRODUCING &&
+ circ->purpose != CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT)
+ return 0;
+ } else {
+ if (purpose != circ->purpose)
+ return 0;
+ }
+
+ /* If this is a timed-out hidden service circuit, skip it. */
+ if (origin_circ->hs_circ_has_timed_out) {
+ return 0;
+ }
+
+ if (purpose == CIRCUIT_PURPOSE_C_GENERAL ||
+ purpose == CIRCUIT_PURPOSE_C_HSDIR_GET ||
+ purpose == CIRCUIT_PURPOSE_S_HSDIR_POST ||
+ purpose == CIRCUIT_PURPOSE_HS_VANGUARDS ||
+ purpose == CIRCUIT_PURPOSE_C_REND_JOINED) {
+ if (circ->timestamp_dirty &&
+ circ->timestamp_dirty+get_options()->MaxCircuitDirtiness <= now)
+ return 0;
+ }
+
+ if (origin_circ->unusable_for_new_conns)
+ return 0;
+
+ /* decide if this circ is suitable for this conn */
+
+ /* for rend circs, circ->cpath->prev is not the last router in the
+ * circuit, it's the magical extra service hop. so just check the nickname
+ * of the one we meant to finish at.
+ */
+ build_state = origin_circ->build_state;
+ exitnode = build_state_get_exit_node(build_state);
+
+ if (need_uptime && !build_state->need_uptime)
+ return 0;
+ if (need_internal != build_state->is_internal)
+ return 0;
+
+ if (purpose == CIRCUIT_PURPOSE_C_GENERAL ||
+ purpose == CIRCUIT_PURPOSE_S_HSDIR_POST ||
+ purpose == CIRCUIT_PURPOSE_C_HSDIR_GET) {
+ tor_addr_t addr;
+ const int family = tor_addr_parse(&addr, conn->socks_request->address);
+ if (!exitnode && !build_state->onehop_tunnel) {
+ log_debug(LD_CIRC,"Not considering circuit with unknown router.");
+ return 0; /* this circuit is screwed and doesn't know it yet,
+ * or is a rendezvous circuit. */
+ }
+ if (build_state->onehop_tunnel) {
+ if (!conn->want_onehop) {
+ log_debug(LD_CIRC,"Skipping one-hop circuit.");
+ return 0;
+ }
+ tor_assert(conn->chosen_exit_name);
+ if (build_state->chosen_exit) {
+ char digest[DIGEST_LEN];
+ if (hexdigest_to_digest(conn->chosen_exit_name, digest) < 0)
+ return 0; /* broken digest, we don't want it */
+ if (tor_memneq(digest, build_state->chosen_exit->identity_digest,
+ DIGEST_LEN))
+ return 0; /* this is a circuit to somewhere else */
+ if (tor_digest_is_zero(digest)) {
+ /* we don't know the digest; have to compare addr:port */
+ if (family < 0 ||
+ !tor_addr_eq(&build_state->chosen_exit->addr, &addr) ||
+ build_state->chosen_exit->port != conn->socks_request->port)
+ return 0;
+ }
+ }
+ } else {
+ if (conn->want_onehop) {
+ /* don't use three-hop circuits -- that could hurt our anonymity. */
+ return 0;
+ }
+ }
+ if (origin_circ->prepend_policy && family != -1) {
+ int r = compare_tor_addr_to_addr_policy(&addr,
+ conn->socks_request->port,
+ origin_circ->prepend_policy);
+ if (r == ADDR_POLICY_REJECTED)
+ return 0;
+ }
+ if (exitnode && !connection_ap_can_use_exit(conn, exitnode)) {
+ /* can't exit from this router */
+ return 0;
+ }
+ } else { /* not general: this might be a rend circuit */
+ const edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(conn);
+ if (!circuit_matches_with_rend_stream(edge_conn, origin_circ)) {
+ return 0;
+ }
+ }
+
+ if (!connection_edge_compatible_with_circuit(conn, origin_circ)) {
+ /* conn needs to be isolated from other conns that have already used
+ * origin_circ */
+ return 0;
+ }
+
+ return 1;
+}
+
+/** Return 1 if circuit <b>a</b> is better than circuit <b>b</b> for
+ * <b>conn</b>, and return 0 otherwise. Used by circuit_get_best.
+ */
+static int
+circuit_is_better(const origin_circuit_t *oa, const origin_circuit_t *ob,
+ const entry_connection_t *conn)
+{
+ const circuit_t *a = TO_CIRCUIT(oa);
+ const circuit_t *b = TO_CIRCUIT(ob);
+ const uint8_t purpose = ENTRY_TO_CONN(conn)->purpose;
+ int a_bits, b_bits;
+
+ /* If one of the circuits was allowed to live due to relaxing its timeout,
+ * it is definitely worse (it's probably a much slower path). */
+ if (oa->relaxed_timeout && !ob->relaxed_timeout)
+ return 0; /* ob is better. It's not relaxed. */
+ if (!oa->relaxed_timeout && ob->relaxed_timeout)
+ return 1; /* oa is better. It's not relaxed. */
+
+ switch (purpose) {
+ case CIRCUIT_PURPOSE_S_HSDIR_POST:
+ case CIRCUIT_PURPOSE_C_HSDIR_GET:
+ case CIRCUIT_PURPOSE_C_GENERAL:
+ /* if it's used but less dirty it's best;
+ * else if it's more recently created it's best
+ */
+ if (b->timestamp_dirty) {
+ if (a->timestamp_dirty &&
+ a->timestamp_dirty > b->timestamp_dirty)
+ return 1;
+ } else {
+ if (a->timestamp_dirty ||
+ timercmp(&a->timestamp_began, &b->timestamp_began, OP_GT))
+ return 1;
+ if (ob->build_state->is_internal)
+ /* XXXX++ what the heck is this internal thing doing here. I
+ * think we can get rid of it. circuit_is_acceptable() already
+ * makes sure that is_internal is exactly what we need it to
+ * be. -RD */
+ return 1;
+ }
+ break;
+ case CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT:
+ /* the closer it is to ack_wait the better it is */
+ if (a->purpose > b->purpose)
+ return 1;
+ break;
+ case CIRCUIT_PURPOSE_C_REND_JOINED:
+ /* the closer it is to rend_joined the better it is */
+ if (a->purpose > b->purpose)
+ return 1;
+ break;
+ }
+
+ /* XXXX Maybe this check should get a higher priority to avoid
+ * using up circuits too rapidly. */
+
+ a_bits = connection_edge_update_circuit_isolation(conn,
+ (origin_circuit_t*)oa, 1);
+ b_bits = connection_edge_update_circuit_isolation(conn,
+ (origin_circuit_t*)ob, 1);
+ /* if x_bits < 0, then we have not used x for anything; better not to dirty
+ * a connection if we can help it. */
+ if (a_bits < 0) {
+ return 0;
+ } else if (b_bits < 0) {
+ return 1;
+ }
+ a_bits &= ~ oa->isolation_flags_mixed;
+ a_bits &= ~ ob->isolation_flags_mixed;
+ if (n_bits_set_u8(a_bits) < n_bits_set_u8(b_bits)) {
+ /* The fewer new restrictions we need to make on a circuit for stream
+ * isolation, the better. */
+ return 1;
+ }
+
+ return 0;
+}
+
+/** Find the best circ that conn can use, preferably one which is
+ * dirty. Circ must not be too old.
+ *
+ * Conn must be defined.
+ *
+ * If must_be_open, ignore circs not in CIRCUIT_STATE_OPEN.
+ *
+ * circ_purpose specifies what sort of circuit we must have.
+ * It can be C_GENERAL, C_INTRODUCE_ACK_WAIT, or C_REND_JOINED.
+ *
+ * If it's REND_JOINED and must_be_open==0, then return the closest
+ * rendezvous-purposed circuit that you can find.
+ *
+ * If it's INTRODUCE_ACK_WAIT and must_be_open==0, then return the
+ * closest introduce-purposed circuit that you can find.
+ */
+static origin_circuit_t *
+circuit_get_best(const entry_connection_t *conn,
+ int must_be_open, uint8_t purpose,
+ int need_uptime, int need_internal)
+{
+ origin_circuit_t *best=NULL;
+ struct timeval now;
+ int intro_going_on_but_too_old = 0;
+
+ tor_assert(conn);
+
+ tor_assert(purpose == CIRCUIT_PURPOSE_C_GENERAL ||
+ purpose == CIRCUIT_PURPOSE_HS_VANGUARDS ||
+ purpose == CIRCUIT_PURPOSE_C_HSDIR_GET ||
+ purpose == CIRCUIT_PURPOSE_S_HSDIR_POST ||
+ purpose == CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT ||
+ purpose == CIRCUIT_PURPOSE_C_REND_JOINED);
+
+ tor_gettimeofday(&now);
+
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
+ origin_circuit_t *origin_circ;
+ if (!CIRCUIT_IS_ORIGIN(circ))
+ continue;
+ origin_circ = TO_ORIGIN_CIRCUIT(circ);
+
+ /* Log an info message if we're going to launch a new intro circ in
+ * parallel */
+ if (purpose == CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT &&
+ !must_be_open && origin_circ->hs_circ_has_timed_out &&
+ !circ->marked_for_close) {
+ intro_going_on_but_too_old = 1;
+ continue;
+ }
+
+ if (!circuit_is_acceptable(origin_circ,conn,must_be_open,purpose,
+ need_uptime,need_internal, (time_t)now.tv_sec))
+ continue;
+
+ /* now this is an acceptable circ to hand back. but that doesn't
+ * mean it's the *best* circ to hand back. try to decide.
+ */
+ if (!best || circuit_is_better(origin_circ,best,conn))
+ best = origin_circ;
+ }
+ SMARTLIST_FOREACH_END(circ);
+
+ if (!best && intro_going_on_but_too_old)
+ log_info(LD_REND|LD_CIRC, "There is an intro circuit being created "
+ "right now, but it has already taken quite a while. Starting "
+ "one in parallel.");
+
+ return best;
+}
+
+/** Return the number of not-yet-open general-purpose origin circuits. */
+static int
+count_pending_general_client_circuits(void)
+{
+ int count = 0;
+
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
+ if (circ->marked_for_close ||
+ circ->state == CIRCUIT_STATE_OPEN ||
+ !CIRCUIT_PURPOSE_COUNTS_TOWARDS_MAXPENDING(circ->purpose) ||
+ !CIRCUIT_IS_ORIGIN(circ))
+ continue;
+
+ ++count;
+ }
+ SMARTLIST_FOREACH_END(circ);
+
+ return count;
+}
+
+#if 0
+/** Check whether, according to the policies in <b>options</b>, the
+ * circuit <b>circ</b> makes sense. */
+/* XXXX currently only checks Exclude{Exit}Nodes; it should check more.
+ * Also, it doesn't have the right definition of an exit circuit. Also,
+ * it's never called. */
+int
+circuit_conforms_to_options(const origin_circuit_t *circ,
+ const or_options_t *options)
+{
+ const crypt_path_t *cpath, *cpath_next = NULL;
+
+ /* first check if it includes any excluded nodes */
+ for (cpath = circ->cpath; cpath_next != circ->cpath; cpath = cpath_next) {
+ cpath_next = cpath->next;
+ if (routerset_contains_extendinfo(options->ExcludeNodes,
+ cpath->extend_info))
+ return 0;
+ }
+
+ /* then consider the final hop */
+ if (routerset_contains_extendinfo(options->ExcludeExitNodes,
+ circ->cpath->prev->extend_info))
+ return 0;
+
+ return 1;
+}
+#endif /* 0 */
+
+/**
+ * Close all circuits that start at us, aren't open, and were born
+ * at least CircuitBuildTimeout seconds ago.
+ *
+ * TODO: This function is now partially redundant with
+ * circuit_build_times_handle_completed_hop(), but that function only
+ * covers circuits up to and including 3 hops that are still actually
+ * completing hops. However, circuit_expire_building() also handles longer
+ * circuits, as well as circuits that are completely stalled.
+ * In the future (after prop247/other path selection revamping), we probably
+ * want to eliminate this rat's nest in favor of a simpler approach.
+ */
+void
+circuit_expire_building(void)
+{
+ /* circ_times.timeout_ms and circ_times.close_ms are from
+ * circuit_build_times_get_initial_timeout() if we haven't computed
+ * custom timeouts yet */
+ struct timeval general_cutoff, begindir_cutoff, fourhop_cutoff,
+ close_cutoff, extremely_old_cutoff, hs_extremely_old_cutoff,
+ cannibalized_cutoff, c_intro_cutoff, s_intro_cutoff, stream_cutoff;
+ const or_options_t *options = get_options();
+ struct timeval now;
+ cpath_build_state_t *build_state;
+ int any_opened_circs = 0;
+
+ tor_gettimeofday(&now);
+
+ /* Check to see if we have any opened circuits. If we don't,
+ * we want to be more lenient with timeouts, in case the
+ * user has relocated and/or changed network connections.
+ * See bug #3443. */
+ any_opened_circs = circuit_any_opened_circuits();
+
+#define SET_CUTOFF(target, msec) do { \
+ long ms = tor_lround(msec); \
+ struct timeval diff; \
+ diff.tv_sec = ms / 1000; \
+ diff.tv_usec = (int)((ms % 1000) * 1000); \
+ timersub(&now, &diff, &target); \
+ } while (0)
+
+ /**
+ * Because circuit build timeout is calculated only based on 3 hop
+ * general purpose circuit construction, we need to scale the timeout
+ * to make it properly apply to longer circuits, and circuits of
+ * certain usage types. The following diagram illustrates how we
+ * derive the scaling below. In short, we calculate the number
+ * of times our telescoping-based circuit construction causes cells
+ * to traverse each link for the circuit purpose types in question,
+ * and then assume each link is equivalent.
+ *
+ * OP --a--> A --b--> B --c--> C
+ * OP --a--> A --b--> B --c--> C --d--> D
+ *
+ * Let h = a = b = c = d
+ *
+ * Three hops (general_cutoff)
+ * RTTs = 3a + 2b + c
+ * RTTs = 6h
+ * Cannibalized:
+ * RTTs = a+b+c+d
+ * RTTs = 4h
+ * Four hops:
+ * RTTs = 4a + 3b + 2c + d
+ * RTTs = 10h
+ * Client INTRODUCE1+ACK: // XXX: correct?
+ * RTTs = 5a + 4b + 3c + 2d
+ * RTTs = 14h
+ * Server intro:
+ * RTTs = 4a + 3b + 2c
+ * RTTs = 9h
+ */
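+ /* Plugging in a hypothetical learned timeout of 6000ms, the scaled
+ * cutoffs below come out to: four hops 6000*(10/6)+1000 = 11000ms;
+ * client intro 6000*(14/6)+1000 = 15000ms; server intro
+ * 6000*(9/6)+1000 = 10000ms. */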
+ SET_CUTOFF(general_cutoff, get_circuit_build_timeout_ms());
+ SET_CUTOFF(begindir_cutoff, get_circuit_build_timeout_ms());
+
+ // TODO: We should probably use route_len_for_purpose() here instead,
+ // except that it does not count the extra round trip for things like server
+ // intros and rends.
+
+ /* > 3hop circs seem to have a 1.0 second delay on their cannibalized
+ * 4th hop. */
+ SET_CUTOFF(fourhop_cutoff, get_circuit_build_timeout_ms() * (10/6.0) + 1000);
+
+ /* CIRCUIT_PURPOSE_C_ESTABLISH_REND behaves more like a RELAY cell.
+ * Use the stream cutoff (more or less). */
+ SET_CUTOFF(stream_cutoff, MAX(options->CircuitStreamTimeout,15)*1000 + 1000);
+
+ /* Be lenient with cannibalized circs. They already survived the official
+ * CBT, and they're usually not performance-critical. */
+ SET_CUTOFF(cannibalized_cutoff,
+ MAX(get_circuit_build_close_time_ms()*(4/6.0),
+ options->CircuitStreamTimeout * 1000) + 1000);
+
+ /* Intro circs have an extra round trip (and are also 4 hops long) */
+ SET_CUTOFF(c_intro_cutoff, get_circuit_build_timeout_ms() * (14/6.0) + 1000);
+
+ /* Server intro circs have an extra round trip */
+ SET_CUTOFF(s_intro_cutoff, get_circuit_build_timeout_ms() * (9/6.0) + 1000);
+
+ SET_CUTOFF(close_cutoff, get_circuit_build_close_time_ms());
+ SET_CUTOFF(extremely_old_cutoff, get_circuit_build_close_time_ms()*2 + 1000);
+
+ SET_CUTOFF(hs_extremely_old_cutoff,
+ MAX(get_circuit_build_close_time_ms()*2 + 1000,
+ options->SocksTimeout * 1000));
+
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *,victim) {
+ struct timeval cutoff;
+ if (!CIRCUIT_IS_ORIGIN(victim) || /* didn't originate here */
+ victim->marked_for_close) /* don't mess with marked circs */
+ continue;
+
+ /* If we haven't yet started the first hop, it means we don't have
+ * any orconns available, and thus have not started counting time yet
+ * for this circuit. See circuit_deliver_create_cell() and uses of
+ * timestamp_began.
+ *
+ * Continue to wait in this case. The ORConn should time out
+ * independently and kill us then.
+ */
+ if (TO_ORIGIN_CIRCUIT(victim)->cpath->state == CPATH_STATE_CLOSED) {
+ continue;
+ }
+
+ build_state = TO_ORIGIN_CIRCUIT(victim)->build_state;
+ if (build_state && build_state->onehop_tunnel)
+ cutoff = begindir_cutoff;
+ else if (victim->purpose == CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT)
+ cutoff = close_cutoff;
+ else if (victim->purpose == CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT)
+ cutoff = c_intro_cutoff;
+ else if (victim->purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO)
+ cutoff = s_intro_cutoff;
+ else if (victim->purpose == CIRCUIT_PURPOSE_C_ESTABLISH_REND)
+ cutoff = stream_cutoff;
+ else if (victim->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING)
+ cutoff = close_cutoff;
+ else if (TO_ORIGIN_CIRCUIT(victim)->has_opened &&
+ victim->state != CIRCUIT_STATE_OPEN)
+ cutoff = cannibalized_cutoff;
+ else if (build_state && build_state->desired_path_len >= 4)
+ cutoff = fourhop_cutoff;
+ else
+ cutoff = general_cutoff;
+
+ if (TO_ORIGIN_CIRCUIT(victim)->hs_circ_has_timed_out)
+ cutoff = hs_extremely_old_cutoff;
+
+ if (timercmp(&victim->timestamp_began, &cutoff, OP_GT))
+ continue; /* it's still young, leave it alone */
+
+ /* We need to double-check the opened state here because
+ * we don't want to consider opened 1-hop dircon circuits when
+ * deciding whether to relax the timeout, but we *do* want to relax
+ * those circuits too if nothing else is opened *and* they still
+ * aren't open either. */
+ if (!any_opened_circs && victim->state != CIRCUIT_STATE_OPEN) {
+ /* It's still young enough that we wouldn't close it, right? */
+ if (timercmp(&victim->timestamp_began, &close_cutoff, OP_GT)) {
+ if (!TO_ORIGIN_CIRCUIT(victim)->relaxed_timeout) {
+ int first_hop_succeeded = TO_ORIGIN_CIRCUIT(victim)->cpath->state
+ == CPATH_STATE_OPEN;
+ log_info(LD_CIRC,
+ "No circuits are opened. Relaxing timeout for circuit %d "
+ "(a %s %d-hop circuit in state %s with channel state %s).",
+ TO_ORIGIN_CIRCUIT(victim)->global_identifier,
+ circuit_purpose_to_string(victim->purpose),
+ TO_ORIGIN_CIRCUIT(victim)->build_state ?
+ TO_ORIGIN_CIRCUIT(victim)->build_state->desired_path_len :
+ -1,
+ circuit_state_to_string(victim->state),
+ victim->n_chan ?
+ channel_state_to_string(victim->n_chan->state) : "none");
+
+ /* We count the timeout here for CBT, because technically this
+ * was a timeout, and the timeout value needs to reset if we
+ * see enough of them. Note this means we also need to avoid
+ * double-counting below. */
+ circuit_build_times_count_timeout(get_circuit_build_times_mutable(),
+ first_hop_succeeded);
+ TO_ORIGIN_CIRCUIT(victim)->relaxed_timeout = 1;
+ }
+ continue;
+ } else {
+ static ratelim_t relax_timeout_limit = RATELIM_INIT(3600);
+ const double build_close_ms = get_circuit_build_close_time_ms();
+ log_fn_ratelim(&relax_timeout_limit, LOG_NOTICE, LD_CIRC,
+ "No circuits are opened. Relaxed timeout for circuit %d "
+ "(a %s %d-hop circuit in state %s with channel state %s) to "
+ "%ldms. However, it appears the circuit has timed out "
+ "anyway.",
+ TO_ORIGIN_CIRCUIT(victim)->global_identifier,
+ circuit_purpose_to_string(victim->purpose),
+ TO_ORIGIN_CIRCUIT(victim)->build_state ?
+ TO_ORIGIN_CIRCUIT(victim)->build_state->desired_path_len :
+ -1,
+ circuit_state_to_string(victim->state),
+ victim->n_chan ?
+ channel_state_to_string(victim->n_chan->state) : "none",
+ (long)build_close_ms);
+ }
+ }
+
+#if 0
+ /* some debug logs, to help track bugs */
+ if (victim->purpose >= CIRCUIT_PURPOSE_C_INTRODUCING &&
+ victim->purpose <= CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED) {
+ if (!victim->timestamp_dirty)
+ log_fn(LOG_DEBUG,"Considering %sopen purpose %d to %s (circid %d)."
+ "(clean).",
+ victim->state == CIRCUIT_STATE_OPEN ? "" : "non",
+ victim->purpose, victim->build_state->chosen_exit_name,
+ victim->n_circ_id);
+ else
+ log_fn(LOG_DEBUG,"Considering %sopen purpose %d to %s (circid %d). "
+ "%d secs since dirty.",
+ victim->state == CIRCUIT_STATE_OPEN ? "" : "non",
+ victim->purpose, victim->build_state->chosen_exit_name,
+ victim->n_circ_id,
+ (int)(now - victim->timestamp_dirty));
+ }
+#endif /* 0 */
+
+ /* if circ is !open, or if it's open but purpose is a non-finished
+ * intro or rend, then mark it for close */
+ if (victim->state == CIRCUIT_STATE_OPEN) {
+ switch (victim->purpose) {
+ default: /* most open circuits can be left alone. */
+ continue; /* yes, continue inside a switch refers to the nearest
+ * enclosing loop. C is smart. */
+ case CIRCUIT_PURPOSE_S_ESTABLISH_INTRO:
+ break; /* too old, need to die */
+ case CIRCUIT_PURPOSE_C_REND_READY:
+ /* it's a rend_ready circ -- has it already picked a query? */
+ /* c_rend_ready circs measure age since timestamp_dirty,
+ * because that's set when they switch purposes
+ */
+ if (TO_ORIGIN_CIRCUIT(victim)->rend_data ||
+ TO_ORIGIN_CIRCUIT(victim)->hs_ident ||
+ victim->timestamp_dirty > cutoff.tv_sec)
+ continue;
+ break;
+ case CIRCUIT_PURPOSE_PATH_BIAS_TESTING:
+ /* Open path bias testing circuits are given a long
+ * time to complete the test, but not forever */
+ TO_ORIGIN_CIRCUIT(victim)->path_state = PATH_STATE_USE_FAILED;
+ break;
+ case CIRCUIT_PURPOSE_C_INTRODUCING:
+ /* That purpose means that the intro point circuit has been opened
+ * successfully but the INTRODUCE1 cell hasn't been sent yet because
+ * the client is waiting for the rendezvous point circuit to open.
+ * Keep this circuit open while waiting for the rendezvous circuit.
+ * We let the circuit idle timeout take care of cleaning this
+ * circuit if it is never used. */
+ continue;
+ case CIRCUIT_PURPOSE_C_ESTABLISH_REND:
+ case CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED:
+ case CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT:
+ /* rend and intro circs become dirty each time they
+ * make an introduction attempt. so timestamp_dirty
+ * will reflect the time since the last attempt.
+ */
+ if (victim->timestamp_dirty > cutoff.tv_sec)
+ continue;
+ break;
+ }
+ } else { /* circuit not open, consider recording failure as timeout */
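+ /* Record whether the first hop opened: CBT treats closes where the
+ * first hop never opened differently, since those may reflect a
+ * network problem rather than a slow path. */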
+ int first_hop_succeeded = TO_ORIGIN_CIRCUIT(victim)->cpath &&
+ TO_ORIGIN_CIRCUIT(victim)->cpath->state == CPATH_STATE_OPEN;
+
+ if (TO_ORIGIN_CIRCUIT(victim)->p_streams != NULL) {
+ log_warn(LD_BUG, "Circuit %d (purpose %d, %s) has timed out, "
+ "yet has attached streams!",
+ TO_ORIGIN_CIRCUIT(victim)->global_identifier,
+ victim->purpose,
+ circuit_purpose_to_string(victim->purpose));
+ tor_fragile_assert();
+ continue;
+ }
+
+ if (circuit_timeout_want_to_count_circ(TO_ORIGIN_CIRCUIT(victim)) &&
+ circuit_build_times_enough_to_compute(get_circuit_build_times())) {
+
+ log_info(LD_CIRC,
+ "Deciding to count the timeout for circuit %"PRIu32"\n",
+ TO_ORIGIN_CIRCUIT(victim)->global_identifier);
+
+ /* Circuits are allowed to last longer for measurement.
+ * Switch their purpose and wait. */
+ if (victim->purpose != CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT) {
+ circuit_build_times_mark_circ_as_measurement_only(TO_ORIGIN_CIRCUIT(
+ victim));
+ continue;
+ }
+
+ /*
+ * If the circuit build time is much greater than we would have cut
+ * it off at, we probably had a suspend event along this codepath,
+ * and we should discard the value.
+ */
+ if (timercmp(&victim->timestamp_began, &extremely_old_cutoff, OP_LT)) {
+ log_notice(LD_CIRC,
+ "Extremely large value for circuit build timeout: %lds. "
+ "Assuming clock jump. Purpose %d (%s)",
+ (long)(now.tv_sec - victim->timestamp_began.tv_sec),
+ victim->purpose,
+ circuit_purpose_to_string(victim->purpose));
+ } else if (circuit_build_times_count_close(
+ get_circuit_build_times_mutable(),
+ first_hop_succeeded,
+ (time_t)victim->timestamp_created.tv_sec)) {
+ circuit_build_times_set_timeout(get_circuit_build_times_mutable());
+ }
+ }
+ }
+
+ /* If this is a hidden service client circuit which is far enough along in
+ * connecting to its destination, and we haven't already flagged it as
+ * 'timed out', flag it so we'll launch another intro or rend circ, but
+ * don't mark it for close yet.
+ *
+ * (Circs flagged as 'timed out' are given a much longer timeout
+ * period above, so we won't close them in the next call to
+ * circuit_expire_building.) */
+ if (!(TO_ORIGIN_CIRCUIT(victim)->hs_circ_has_timed_out)) {
+ switch (victim->purpose) {
+ case CIRCUIT_PURPOSE_C_REND_READY:
+ /* We only want to spare a rend circ if it has been specified in
+ * an INTRODUCE1 cell sent to a hidden service. A circ's
+ * pending_final_cpath field is non-NULL iff it is a rend circ
+ * and we have tried to send an INTRODUCE1 cell specifying it.
+ * Thus, if the pending_final_cpath field *is* NULL, then we
+ * don't want to spare it. */
+ if (TO_ORIGIN_CIRCUIT(victim)->build_state &&
+ TO_ORIGIN_CIRCUIT(victim)->build_state->pending_final_cpath ==
+ NULL)
+ break;
+ /* fallthrough! */
+ case CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT:
+ case CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED:
+ /* If we have reached this line, we want to spare the circ for now. */
+ log_info(LD_CIRC,"Marking circ %u (state %d:%s, purpose %d) "
+ "as timed-out HS circ",
+ (unsigned)victim->n_circ_id,
+ victim->state, circuit_state_to_string(victim->state),
+ victim->purpose);
+ TO_ORIGIN_CIRCUIT(victim)->hs_circ_has_timed_out = 1;
+ continue;
+ default:
+ break;
+ }
+ }
+
+ /* If this is a service-side rendezvous circuit which is far
+ * enough along in connecting to its destination, consider sparing
+ * it. */
+ if (!(TO_ORIGIN_CIRCUIT(victim)->hs_circ_has_timed_out) &&
+ victim->purpose == CIRCUIT_PURPOSE_S_CONNECT_REND) {
+ log_info(LD_CIRC,"Marking circ %u (state %d:%s, purpose %d) "
+ "as timed-out HS circ; relaunching rendezvous attempt.",
+ (unsigned)victim->n_circ_id,
+ victim->state, circuit_state_to_string(victim->state),
+ victim->purpose);
+ TO_ORIGIN_CIRCUIT(victim)->hs_circ_has_timed_out = 1;
+ hs_circ_retry_service_rendezvous_point(TO_ORIGIN_CIRCUIT(victim));
+ continue;
+ }
+
+ if (victim->n_chan)
+ log_info(LD_CIRC,
+ "Abandoning circ %u %s:%u (state %d,%d:%s, purpose %d, "
+ "len %d)", TO_ORIGIN_CIRCUIT(victim)->global_identifier,
+ channel_get_canonical_remote_descr(victim->n_chan),
+ (unsigned)victim->n_circ_id,
+ TO_ORIGIN_CIRCUIT(victim)->has_opened,
+ victim->state, circuit_state_to_string(victim->state),
+ victim->purpose,
+ TO_ORIGIN_CIRCUIT(victim)->build_state ?
+ TO_ORIGIN_CIRCUIT(victim)->build_state->desired_path_len :
+ -1);
+ else
+ log_info(LD_CIRC,
+ "Abandoning circ %u %u (state %d,%d:%s, purpose %d, len %d)",
+ TO_ORIGIN_CIRCUIT(victim)->global_identifier,
+ (unsigned)victim->n_circ_id,
+ TO_ORIGIN_CIRCUIT(victim)->has_opened,
+ victim->state,
+ circuit_state_to_string(victim->state), victim->purpose,
+ TO_ORIGIN_CIRCUIT(victim)->build_state ?
+ TO_ORIGIN_CIRCUIT(victim)->build_state->desired_path_len :
+ -1);
+
+ circuit_log_path(LOG_INFO,LD_CIRC,TO_ORIGIN_CIRCUIT(victim));
+ if (victim->purpose == CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT)
+ circuit_mark_for_close(victim, END_CIRC_REASON_MEASUREMENT_EXPIRED);
+ else
+ circuit_mark_for_close(victim, END_CIRC_REASON_TIMEOUT);
+
+ pathbias_count_timeout(TO_ORIGIN_CIRCUIT(victim));
+ } SMARTLIST_FOREACH_END(victim);
+}
+
+/**
+ * Mark for close all circuits that start here, that were built through a
+ * guard we weren't sure if we wanted to use, and that have been waiting
+ * around for way too long.
+ */
+void
+circuit_expire_waiting_for_better_guard(void)
+{
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_origin_circuit_list(),
+ origin_circuit_t *, circ) {
+ if (TO_CIRCUIT(circ)->marked_for_close)
+ continue;
+ if (circ->guard_state == NULL)
+ continue;
+ if (entry_guard_state_should_expire(circ->guard_state))
+ circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_NONE);
+ } SMARTLIST_FOREACH_END(circ);
+}
+
+/** For debugging #8387: track when we last called
+ * circuit_expire_old_circuits_clientside. */
+static time_t last_expired_clientside_circuits = 0;
+
+/**
+ * As a diagnostic for bug 8387, log information about how many one-hop
+ * circuits we have around that have been there for at least <b>age</b>
+ * seconds. Log a few of them.
+ * Ignores Single Onion Service intro and Tor2web rendezvous circuits,
+ * since they are expected to be long-term one-hop circuits.
+ */
+void
+circuit_log_ancient_one_hop_circuits(int age)
+{
+#define MAX_ANCIENT_ONEHOP_CIRCUITS_TO_LOG 10
+ time_t now = time(NULL);
+ time_t cutoff = now - age;
+ int n_found = 0;
+ smartlist_t *log_these = smartlist_new();
+ const or_options_t *options = get_options();
+
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
+ const origin_circuit_t *ocirc;
+ if (! CIRCUIT_IS_ORIGIN(circ))
+ continue;
+ if (circ->timestamp_created.tv_sec >= cutoff)
+ continue;
+ /* Single Onion Services deliberately make long-term one-hop intro
+ * and rendezvous connections. Don't log the established ones. */
+ if (rend_service_allow_non_anonymous_connection(options) &&
+ (circ->purpose == CIRCUIT_PURPOSE_S_INTRO ||
+ circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED))
+ continue;
+ /* Tor2web deliberately makes long-term one-hop rend connections,
+ * particularly when Tor2webRendezvousPoints is used. We only ignore
+ * active rend point connections; if we take a long time to rendezvous,
+ * that's worth logging. */
+ if (rend_client_allow_non_anonymous_connection(options) &&
+ circ->purpose == CIRCUIT_PURPOSE_C_REND_JOINED)
+ continue;
+ ocirc = CONST_TO_ORIGIN_CIRCUIT(circ);
+
+ if (ocirc->build_state && ocirc->build_state->onehop_tunnel) {
+ ++n_found;
+
+ if (smartlist_len(log_these) < MAX_ANCIENT_ONEHOP_CIRCUITS_TO_LOG)
+ smartlist_add(log_these, (origin_circuit_t*) ocirc);
+ }
+ }
+ SMARTLIST_FOREACH_END(circ);
+
+ if (n_found == 0)
+ goto done;
+
+ log_notice(LD_HEARTBEAT,
+ "Diagnostic for issue 8387: Found %d one-hop circuits more "
+ "than %d seconds old! Logging %d...",
+ n_found, age, smartlist_len(log_these));
+
+ SMARTLIST_FOREACH_BEGIN(log_these, const origin_circuit_t *, ocirc) {
+ char created[ISO_TIME_LEN+1];
+ int stream_num;
+ const edge_connection_t *conn;
+ char *dirty = NULL;
+ const circuit_t *circ = TO_CIRCUIT(ocirc);
+
+ format_local_iso_time(created,
+ (time_t)circ->timestamp_created.tv_sec);
+
+ if (circ->timestamp_dirty) {
+ char dirty_since[ISO_TIME_LEN+1];
+ format_local_iso_time(dirty_since, circ->timestamp_dirty);
+
+ tor_asprintf(&dirty, "Dirty since %s (%ld seconds vs %ld-second cutoff)",
+ dirty_since, (long)(now - circ->timestamp_dirty),
+ (long) options->MaxCircuitDirtiness);
+ } else {
+ dirty = tor_strdup("Not marked dirty");
+ }
+
+ log_notice(LD_HEARTBEAT, " #%d created at %s. %s, %s. %s for close. "
+ "Package window: %d. "
+ "%s for new conns. %s.",
+ ocirc_sl_idx,
+ created,
+ circuit_state_to_string(circ->state),
+ circuit_purpose_to_string(circ->purpose),
+ circ->marked_for_close ? "Marked" : "Not marked",
+ circ->package_window,
+ ocirc->unusable_for_new_conns ? "Not usable" : "Usable",
+ dirty);
+ tor_free(dirty);
+
+ stream_num = 0;
+ for (conn = ocirc->p_streams; conn; conn = conn->next_stream) {
+ const connection_t *c = TO_CONN(conn);
+ char stream_created[ISO_TIME_LEN+1];
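+ /* Only log details for the first few streams on this circuit. */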
+ if (++stream_num >= 5)
+ break;
+
+ format_local_iso_time(stream_created, c->timestamp_created);
+
+ log_notice(LD_HEARTBEAT, " Stream#%d created at %s. "
+ "%s conn in state %s. "
+ "It is %slinked and %sreading from a linked connection %p. "
+ "Package window %d. "
+ "%s for close (%s:%d). Hold-open is %sset. "
+ "Has %ssent RELAY_END. %s on circuit.",
+ stream_num,
+ stream_created,
+ conn_type_to_string(c->type),
+ conn_state_to_string(c->type, c->state),
+ c->linked ? "" : "not ",
+ c->reading_from_linked_conn ? "" : "not ",
+ c->linked_conn,
+ conn->package_window,
+ c->marked_for_close ? "Marked" : "Not marked",
+ c->marked_for_close_file ? c->marked_for_close_file : "--",
+ c->marked_for_close,
+ c->hold_open_until_flushed ? "" : "not ",
+ conn->edge_has_sent_end ? "" : "not ",
+ conn->edge_blocked_on_circ ? "Blocked" : "Not blocked");
+ if (! c->linked_conn)
+ continue;
+
+ c = c->linked_conn;
+
+ log_notice(LD_HEARTBEAT, " Linked to %s connection in state %s "
+ "(Purpose %d). %s for close (%s:%d). Hold-open is %sset. ",
+ conn_type_to_string(c->type),
+ conn_state_to_string(c->type, c->state),
+ c->purpose,
+ c->marked_for_close ? "Marked" : "Not marked",
+ c->marked_for_close_file ? c->marked_for_close_file : "--",
+ c->marked_for_close,
+ c->hold_open_until_flushed ? "" : "not ");
+ }
+ } SMARTLIST_FOREACH_END(ocirc);
+
+ log_notice(LD_HEARTBEAT, "It has been %ld seconds since I last called "
+ "circuit_expire_old_circuits_clientside().",
+ (long)(now - last_expired_clientside_circuits));
+
+ done:
+ smartlist_free(log_these);
+}
+
+/** Remove any elements in <b>needed_ports</b> that are handled by an
+ * open or in-progress circuit.
+ */
+void
+circuit_remove_handled_ports(smartlist_t *needed_ports)
+{
+ int i;
+ uint16_t *port;
+
+ for (i = 0; i < smartlist_len(needed_ports); ++i) {
+ port = smartlist_get(needed_ports, i);
+ tor_assert(*port);
+ if (circuit_stream_is_being_handled(NULL, *port,
+ MIN_CIRCUITS_HANDLING_STREAM)) {
+ log_debug(LD_CIRC,"Port %d is already being handled; removing.", *port);
+ smartlist_del(needed_ports, i--);
+ tor_free(port);
+ } else {
+ log_debug(LD_CIRC,"Port %d is not handled.", *port);
+ }
+ }
+}
+
+/** Return 1 if at least <b>min</b> general-purpose non-internal circuits
+ * will have an acceptable exit node for the exit stream <b>conn</b> if it
+ * is defined, or for "*:port" otherwise.
+ * Else return 0.
+ */
+int
+circuit_stream_is_being_handled(entry_connection_t *conn,
+ uint16_t port, int min)
+{
+ const node_t *exitnode;
+ int num=0;
+ time_t now = time(NULL);
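+ /* Ports listed in LongLivedPorts are expected to carry long-lived
+ * streams, so they want circuits built from high-uptime relays. */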
+ int need_uptime = smartlist_contains_int_as_string(
+ get_options()->LongLivedPorts,
+ conn ? conn->socks_request->port : port);
+
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
+ if (CIRCUIT_IS_ORIGIN(circ) &&
+ !circ->marked_for_close &&
+ circ->purpose == CIRCUIT_PURPOSE_C_GENERAL &&
+ (!circ->timestamp_dirty ||
+ circ->timestamp_dirty + get_options()->MaxCircuitDirtiness > now)) {
+ origin_circuit_t *origin_circ = TO_ORIGIN_CIRCUIT(circ);
+ cpath_build_state_t *build_state = origin_circ->build_state;
+ if (build_state->is_internal || build_state->onehop_tunnel)
+ continue;
+ if (origin_circ->unusable_for_new_conns)
+ continue;
+ if (origin_circ->isolation_values_set &&
+ (conn == NULL ||
+ !connection_edge_compatible_with_circuit(conn, origin_circ)))
+ continue;
+
+ exitnode = build_state_get_exit_node(build_state);
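+ /* A stream that needs uptime can only be handled by a circuit that
+ * was built with the uptime requirement. */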
+ if (exitnode && (!need_uptime || build_state->need_uptime)) {
+ int ok;
+ if (conn) {
+ ok = connection_ap_can_use_exit(conn, exitnode);
+ } else {
+ addr_policy_result_t r;
+ r = compare_tor_addr_to_node_policy(NULL, port, exitnode);
+ ok = r != ADDR_POLICY_REJECTED && r != ADDR_POLICY_PROBABLY_REJECTED;
+ }
+ if (ok) {
+ if (++num >= min)
+ return 1;
+ }
+ }
+ }
+ }
+ SMARTLIST_FOREACH_END(circ);
+ return 0;
+}
+
+/** Don't keep more than this many unused open circuits around. */
+#define MAX_UNUSED_OPEN_CIRCUITS 14
+
+/* Return true if a circuit is available for use, meaning that it is open,
+ * clean, usable for new multi-hop connections, and a general-purpose origin
+ * circuit.
+ * Accepts any kind of circuit as input; returns false if the above
+ * conditions are not met. */
+STATIC int
+circuit_is_available_for_use(const circuit_t *circ)
+{
+ const origin_circuit_t *origin_circ;
+ cpath_build_state_t *build_state;
+
+ if (!CIRCUIT_IS_ORIGIN(circ))
+ return 0; /* We first filter out only origin circuits before doing the
+ following checks. */
+ if (circ->marked_for_close)
+ return 0; /* Don't mess with marked circs */
+ if (circ->timestamp_dirty)
+ return 0; /* Only count clean circs */
+ if (circ->purpose != CIRCUIT_PURPOSE_C_GENERAL &&
+ circ->purpose != CIRCUIT_PURPOSE_HS_VANGUARDS)
+ return 0; /* We only pay attention to general purpose circuits.
+ General purpose circuits are always origin circuits. */
+
+ origin_circ = CONST_TO_ORIGIN_CIRCUIT(circ);
+ if (origin_circ->unusable_for_new_conns)
+ return 0;
+
+ build_state = origin_circ->build_state;
+ if (build_state->onehop_tunnel)
+ return 0;
+
+ return 1;
+}
+
+/* Return true if we need any more exit circuits.
+ * needs_uptime and needs_capacity are set only if we need more exit circuits.
+ * Check if we know of a port that's been requested recently and no circuit
+ * is currently available that can handle it. */
+STATIC int
+needs_exit_circuits(time_t now, int *needs_uptime, int *needs_capacity)
+{
+ return (!circuit_all_predicted_ports_handled(now, needs_uptime,
+ needs_capacity) &&
+ router_have_consensus_path() == CONSENSUS_PATH_EXIT);
+}
+
+/* Hidden services need at least this many internal circuits */
+#define SUFFICIENT_UPTIME_INTERNAL_HS_SERVERS 3
+
+/* Return true if we need any more hidden service server circuits.
+ * HS servers only need an internal circuit. */
+STATIC int
+needs_hs_server_circuits(time_t now, int num_uptime_internal)
+{
+ if (!rend_num_services() && !hs_service_get_num_services()) {
+ /* No services, we don't need anything. */
+ goto no_need;
+ }
+
+ if (num_uptime_internal >= SUFFICIENT_UPTIME_INTERNAL_HS_SERVERS) {
+ /* We have a sufficient number of internal circuits. */
+ goto no_need;
+ }
+
+ if (router_have_consensus_path() == CONSENSUS_PATH_UNKNOWN) {
+ /* Consensus hasn't been checked or might be invalid, so requesting
+ * internal circuits is not wise. */
+ goto no_need;
+ }
+
+ /* At this point, we need more circuits and will most likely use them
+ * for rendezvous, so note the use of internal circuits in our
+ * predictions for circuits needing uptime and capacity. */
+ rep_hist_note_used_internal(now, 1, 1);
+
+ return 1;
+ no_need:
+ return 0;
+}
+
+/* We need at least this many internal circuits for hidden service clients */
+#define SUFFICIENT_INTERNAL_HS_CLIENTS 3
+
+/* We need at least this much uptime for internal circuits for hidden service
+ * clients */
+#define SUFFICIENT_UPTIME_INTERNAL_HS_CLIENTS 2
+
+/* Return true if we need any more hidden service client circuits.
+ * HS clients only need an internal circuit. */
+STATIC int
+needs_hs_client_circuits(time_t now, int *needs_uptime, int *needs_capacity,
+ int num_internal, int num_uptime_internal)
+{
+ int used_internal_recently = rep_hist_get_predicted_internal(now,
+ needs_uptime,
+ needs_capacity);
+ int requires_uptime = num_uptime_internal <
+ SUFFICIENT_UPTIME_INTERNAL_HS_CLIENTS &&
+ *needs_uptime; /* dereference: we want the predicted flag, not the
+ * (always non-NULL) pointer */
+
+ return (used_internal_recently &&
+ (requires_uptime || num_internal < SUFFICIENT_INTERNAL_HS_CLIENTS) &&
+ router_have_consensus_path() != CONSENSUS_PATH_UNKNOWN);
+}
+
+/* This is how many circuits can be opened concurrently during the CBT
+ * (circuit build timeout) learning phase. This number cannot exceed the
+ * tor-wide MAX_UNUSED_OPEN_CIRCUITS. */
+#define DFLT_CBT_UNUSED_OPEN_CIRCS (10)
+#define MIN_CBT_UNUSED_OPEN_CIRCS 0
+#define MAX_CBT_UNUSED_OPEN_CIRCS MAX_UNUSED_OPEN_CIRCUITS
+
+/* Return true if we need more circuits for a good build timeout.
+ * XXXX make the assumption that build timeout streams should be
+ * created whenever we can build internal circuits. */
+STATIC int
+needs_circuits_for_build(int num)
+{
+ if (router_have_consensus_path() != CONSENSUS_PATH_UNKNOWN) {
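+ /* "cbtmaxopencircs" is a consensus parameter; networkstatus_get_param()
+ * falls back to the default and clamps the value to [MIN, MAX]. */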
+ if (num < networkstatus_get_param(NULL, "cbtmaxopencircs",
+ DFLT_CBT_UNUSED_OPEN_CIRCS,
+ MIN_CBT_UNUSED_OPEN_CIRCS,
+ MAX_CBT_UNUSED_OPEN_CIRCS) &&
+ !circuit_build_times_disabled(get_options()) &&
+ circuit_build_times_needs_circuits_now(get_circuit_build_times())) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Launch the appropriate type of predicted circuit for hidden
+ * services, depending on our options.
+ */
+static void
+circuit_launch_predicted_hs_circ(int flags)
+{
+ /* K.I.S.S. implementation of bug #23101: If we are using
+ * vanguards or pinned middles, pre-build circuits with a specific
+ * purpose for HS use. */
+ if (circuit_should_use_vanguards(CIRCUIT_PURPOSE_HS_VANGUARDS)) {
+ circuit_launch(CIRCUIT_PURPOSE_HS_VANGUARDS, flags);
+ } else {
+ /* If no vanguards, then no HS-specific prebuilt circuits are needed.
+ * Normal GENERAL circs are fine */
+ circuit_launch(CIRCUIT_PURPOSE_C_GENERAL, flags);
+ }
+}
+
+/** Determine how many circuits we have open that are clean, and make
+ * sure that's enough for all the upcoming behaviors we predict we'll have.
+ * But put an upper bound on the total number of circuits.
+ */
+static void
+circuit_predict_and_launch_new(void)
+{
+ int num=0, num_internal=0, num_uptime_internal=0;
+ int hidserv_needs_uptime=0, hidserv_needs_capacity=1;
+ int port_needs_uptime=0, port_needs_capacity=1;
+ time_t now = time(NULL);
+ int flags = 0;
+
+ /* Count how many of each type of circuit we currently have. */
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
+ if (!circuit_is_available_for_use(circ))
+ continue;
+
+ num++;
+
+ cpath_build_state_t *build_state = TO_ORIGIN_CIRCUIT(circ)->build_state;
+ if (build_state->is_internal)
+ num_internal++;
+ if (build_state->need_uptime && build_state->is_internal)
+ num_uptime_internal++;
+ }
+ SMARTLIST_FOREACH_END(circ);
+
+ /* If that's enough, then stop now. */
+ if (num >= MAX_UNUSED_OPEN_CIRCUITS)
+ return;
+
+ if (needs_exit_circuits(now, &port_needs_uptime, &port_needs_capacity)) {
+ if (port_needs_uptime)
+ flags |= CIRCLAUNCH_NEED_UPTIME;
+ if (port_needs_capacity)
+ flags |= CIRCLAUNCH_NEED_CAPACITY;
+
+ log_info(LD_CIRC,
+ "Have %d clean circs (%d internal), need another exit circ.",
+ num, num_internal);
+ circuit_launch(CIRCUIT_PURPOSE_C_GENERAL, flags);
+ return;
+ }
+
+ if (needs_hs_server_circuits(now, num_uptime_internal)) {
+ flags = (CIRCLAUNCH_NEED_CAPACITY | CIRCLAUNCH_NEED_UPTIME |
+ CIRCLAUNCH_IS_INTERNAL);
+
+ log_info(LD_CIRC,
+ "Have %d clean circs (%d internal), need another internal "
+ "circ for my hidden service.",
+ num, num_internal);
+ circuit_launch_predicted_hs_circ(flags);
+ return;
+ }
+
+ if (needs_hs_client_circuits(now, &hidserv_needs_uptime,
+ &hidserv_needs_capacity,
+ num_internal, num_uptime_internal))
+ {
+ if (hidserv_needs_uptime)
+ flags |= CIRCLAUNCH_NEED_UPTIME;
+ if (hidserv_needs_capacity)
+ flags |= CIRCLAUNCH_NEED_CAPACITY;
+ flags |= CIRCLAUNCH_IS_INTERNAL;
+
+ log_info(LD_CIRC,
+ "Have %d clean circs (%d uptime-internal, %d internal), need"
+ " another hidden service circ.",
+ num, num_uptime_internal, num_internal);
+
+ circuit_launch_predicted_hs_circ(flags);
+ return;
+ }
+
+ if (needs_circuits_for_build(num)) {
+ flags = CIRCLAUNCH_NEED_CAPACITY;
+ /* if there are no exits in the consensus, make timeout
+ * circuits internal */
+ if (router_have_consensus_path() == CONSENSUS_PATH_INTERNAL)
+ flags |= CIRCLAUNCH_IS_INTERNAL;
+
+ log_info(LD_CIRC,
+ "Have %d clean circs need another buildtime test circ.", num);
+ circuit_launch(CIRCUIT_PURPOSE_C_GENERAL, flags);
+ return;
+ }
+}
+
+/** Build a new test circuit every 5 minutes */
+#define TESTING_CIRCUIT_INTERVAL 300
+
+/** This function is called once a second, if router_have_minimum_dir_info()
+ * is true. Its job is to make sure all services we offer have enough circuits
+ * available. Some services just want enough circuits for current tasks,
+ * whereas others want a minimum set of idle circuits hanging around.
+ */
+void
+circuit_build_needed_circs(time_t now)
+{
+ const or_options_t *options = get_options();
+
+ /* launch a new circ for any pending streams that need one
+ * XXXX make the assumption that (some) AP streams (i.e. HS clients)
+ * don't require an exit circuit, review in #13814.
+ * This allows HSs to function in a consensus without exits. */
+ if (router_have_consensus_path() != CONSENSUS_PATH_UNKNOWN)
+ connection_ap_rescan_and_attach_pending();
+
+ circuit_expire_old_circs_as_needed(now);
+
+ if (!options->DisablePredictedCircuits)
+ circuit_predict_and_launch_new();
+}
+
+/**
+ * Called once a second either directly or from
+ * circuit_build_needed_circs(). As appropriate (once per NewCircuitPeriod)
+ * resets failure counts and expires old circuits.
+ */
+void
+circuit_expire_old_circs_as_needed(time_t now)
+{
+ static time_t time_to_expire_and_reset = 0;
+
+ if (time_to_expire_and_reset < now) {
+ circuit_reset_failure_count(1);
+ time_to_expire_and_reset = now + get_options()->NewCircuitPeriod;
+ if (proxy_mode(get_options()))
+ addressmap_clean(now);
+ circuit_expire_old_circuits_clientside();
+
+#if 0 /* disable for now, until predict-and-launch-new can cull leftovers */
+
+ /* If we ever re-enable, this has to move into
+ * circuit_build_needed_circs */
+
+ circ = circuit_get_youngest_clean_open(CIRCUIT_PURPOSE_C_GENERAL);
+ if (get_options()->RunTesting &&
+ circ &&
+ circ->timestamp_began.tv_sec + TESTING_CIRCUIT_INTERVAL < now) {
+ log_fn(LOG_INFO,"Creating a new testing circuit.");
+ circuit_launch(CIRCUIT_PURPOSE_C_GENERAL, 0);
+ }
+#endif /* 0 */
+ }
+}
+
+/** If the stream <b>conn</b> is a member of any of the linked
+ * lists of <b>circ</b>, then remove it from the list.
+ */
+void
+circuit_detach_stream(circuit_t *circ, edge_connection_t *conn)
+{
+ edge_connection_t *prevconn;
+
+ tor_assert(circ);
+ tor_assert(conn);
+
+ if (conn->base_.type == CONN_TYPE_AP) {
+ entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn);
+ entry_conn->may_use_optimistic_data = 0;
+ }
+ conn->cpath_layer = NULL; /* don't keep a stale pointer */
+ conn->on_circuit = NULL;
+
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ origin_circuit_t *origin_circ = TO_ORIGIN_CIRCUIT(circ);
+ int removed = 0;
+ if (conn == origin_circ->p_streams) {
+ origin_circ->p_streams = conn->next_stream;
+ removed = 1;
+ } else {
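+ /* Walk the singly-linked stream list to find conn's predecessor. */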
+ for (prevconn = origin_circ->p_streams;
+ prevconn && prevconn->next_stream && prevconn->next_stream != conn;
+ prevconn = prevconn->next_stream)
+ ;
+ if (prevconn && prevconn->next_stream) {
+ prevconn->next_stream = conn->next_stream;
+ removed = 1;
+ }
+ }
+ if (removed) {
+ log_debug(LD_APP, "Removing stream %d from circ %u",
+ conn->stream_id, (unsigned)circ->n_circ_id);
+
+ /* If the stream was removed, and it was a rend stream, decrement the
+ * number of streams on the circuit associated with the rend service.
+ */
+ if (circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED) {
+ hs_dec_rdv_stream_counter(origin_circ);
+ }
+ return;
+ }
+ } else {
+ or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
+ if (conn == or_circ->n_streams) {
+ or_circ->n_streams = conn->next_stream;
+ return;
+ }
+ if (conn == or_circ->resolving_streams) {
+ or_circ->resolving_streams = conn->next_stream;
+ return;
+ }
+
+ for (prevconn = or_circ->n_streams;
+ prevconn && prevconn->next_stream && prevconn->next_stream != conn;
+ prevconn = prevconn->next_stream)
+ ;
+ if (prevconn && prevconn->next_stream) {
+ prevconn->next_stream = conn->next_stream;
+ return;
+ }
+
+ for (prevconn = or_circ->resolving_streams;
+ prevconn && prevconn->next_stream && prevconn->next_stream != conn;
+ prevconn = prevconn->next_stream)
+ ;
+ if (prevconn && prevconn->next_stream) {
+ prevconn->next_stream = conn->next_stream;
+ return;
+ }
+ }
+
+ log_warn(LD_BUG,"Edge connection not in circuit's list.");
+ /* Don't make this a fatal assertion; it's harmless. */
+ tor_fragile_assert();
+}
+
+/** Find each circuit that has been unused for too long, or dirty
+ * for too long and has no streams on it: mark it for close.
+ */
+static void
+circuit_expire_old_circuits_clientside(void)
+{
+ struct timeval cutoff, now;
+
+ tor_gettimeofday(&now);
+ last_expired_clientside_circuits = now.tv_sec;
+
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
+ if (circ->marked_for_close || !CIRCUIT_IS_ORIGIN(circ))
+ continue;
+
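+ /* Each origin circuit carries its own idle timeout, so compute the
+ * cutoff per circuit. */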
+ cutoff = now;
+ cutoff.tv_sec -= TO_ORIGIN_CIRCUIT(circ)->circuit_idle_timeout;
+
+ /* If the circuit has been dirty for too long, and there are no streams
+ * on it, mark it for close.
+ */
+ if (circ->timestamp_dirty &&
+ circ->timestamp_dirty + get_options()->MaxCircuitDirtiness <
+ now.tv_sec &&
+ !TO_ORIGIN_CIRCUIT(circ)->p_streams /* nothing attached */ ) {
+ log_debug(LD_CIRC, "Closing n_circ_id %u (dirty %ld sec ago, "
+ "purpose %d)",
+ (unsigned)circ->n_circ_id,
+ (long)(now.tv_sec - circ->timestamp_dirty),
+ circ->purpose);
+ /* Don't do this magic for testing circuits. Their death is governed
+ * by circuit_expire_building */
+ if (circ->purpose != CIRCUIT_PURPOSE_PATH_BIAS_TESTING)
+ circuit_mark_for_close(circ, END_CIRC_REASON_FINISHED);
+ } else if (!circ->timestamp_dirty && circ->state == CIRCUIT_STATE_OPEN) {
+ if (timercmp(&circ->timestamp_began, &cutoff, OP_LT)) {
+ if (circ->purpose == CIRCUIT_PURPOSE_C_GENERAL ||
+ circ->purpose == CIRCUIT_PURPOSE_C_HSDIR_GET ||
+ circ->purpose == CIRCUIT_PURPOSE_S_HSDIR_POST ||
+ circ->purpose == CIRCUIT_PURPOSE_HS_VANGUARDS ||
+ circ->purpose == CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT ||
+ circ->purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO ||
+ circ->purpose == CIRCUIT_PURPOSE_TESTING ||
+ (circ->purpose >= CIRCUIT_PURPOSE_C_INTRODUCING &&
+ circ->purpose <= CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED) ||
+ circ->purpose == CIRCUIT_PURPOSE_S_CONNECT_REND) {
+ log_info(LD_CIRC,
+ "Closing circuit %"PRIu32
+ " that has been unused for %ld msec.",
+ TO_ORIGIN_CIRCUIT(circ)->global_identifier,
+ tv_mdiff(&circ->timestamp_began, &now));
+ circuit_mark_for_close(circ, END_CIRC_REASON_FINISHED);
+ } else if (!TO_ORIGIN_CIRCUIT(circ)->is_ancient) {
+ /* Server-side rend joined circuits can end up really old, because
+ * they are reused by clients for longer than normal. The client
+ * controls their lifespan. (They never become dirty, because
+ * connection_exit_begin_conn() never marks anything as dirty.)
+ * Similarly, server-side intro circuits last a long time. */
+ if (circ->purpose != CIRCUIT_PURPOSE_S_REND_JOINED &&
+ circ->purpose != CIRCUIT_PURPOSE_S_INTRO) {
+ log_notice(LD_CIRC,
+ "Ancient non-dirty circuit %d is still around after "
+ "%ld milliseconds. Purpose: %d (%s)",
+ TO_ORIGIN_CIRCUIT(circ)->global_identifier,
+ tv_mdiff(&circ->timestamp_began, &now),
+ circ->purpose,
+ circuit_purpose_to_string(circ->purpose));
+ TO_ORIGIN_CIRCUIT(circ)->is_ancient = 1;
+ }
+ }
+ }
+ }
+ } SMARTLIST_FOREACH_END(circ);
+}
+
+/** How long do we wait before killing circuits with the properties
+ * described below?
+ *
+ * Probably we could choose a number here as low as 5 to 10 seconds,
+ * since these circs are used for begindir, and a) generally you either
+ * ask another begindir question right after or you don't for a long time,
+ * b) clients at least through 0.2.1.x choose from the whole set of
+ * directory mirrors at each choice, and c) re-establishing a one-hop
+ * circuit via create-fast is a light operation assuming the TLS conn is
+ * still there.
+ *
+ * I expect "b" to go away one day when we move to using directory
+ * guards, but I think "a" and "c" are good enough reasons that a low
+ * number is safe even then.
+ */
+#define IDLE_ONE_HOP_CIRC_TIMEOUT 60
+
+/** Find each non-origin circuit that has been unused for too long,
+ * has no streams on it, came from a client, and ends here: mark it
+ * for close.
+ */
+void
+circuit_expire_old_circuits_serverside(time_t now)
+{
+ or_circuit_t *or_circ;
+ time_t cutoff = now - IDLE_ONE_HOP_CIRC_TIMEOUT;
+
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
+ if (circ->marked_for_close || CIRCUIT_IS_ORIGIN(circ))
+ continue;
+ or_circ = TO_OR_CIRCUIT(circ);
+ /* If the circuit has been idle for too long, there are no streams
+ * on it, it ends here, and its peer channel looks like a client
+ * (e.g. it used create_fast), mark it for close.
+ */
+ if (or_circ->p_chan && channel_is_client(or_circ->p_chan) &&
+ !circ->n_chan &&
+ !or_circ->n_streams && !or_circ->resolving_streams &&
+ channel_when_last_xmit(or_circ->p_chan) <= cutoff) {
+ log_info(LD_CIRC, "Closing circ_id %u (empty %d secs ago)",
+ (unsigned)or_circ->p_circ_id,
+ (int)(now - channel_when_last_xmit(or_circ->p_chan)));
+ circuit_mark_for_close(circ, END_CIRC_REASON_FINISHED);
+ }
+ }
+ SMARTLIST_FOREACH_END(circ);
+}
+
+/** Number of testing circuits we want open before testing our bandwidth. */
+#define NUM_PARALLEL_TESTING_CIRCS 4
+
+/** True iff we've ever had enough testing circuits open to test our
+ * bandwidth. */
+static int have_performed_bandwidth_test = 0;
+
+/** Reset have_performed_bandwidth_test, so we'll start building
+ * testing circuits again so we can exercise our bandwidth. */
+void
+reset_bandwidth_test(void)
+{
+ have_performed_bandwidth_test = 0;
+}
+
+/** Return 1 if we've already exercised our bandwidth, or if we
+ * have fewer than NUM_PARALLEL_TESTING_CIRCS testing circuits
+ * established or on the way. Else return 0.
+ */
+int
+circuit_enough_testing_circs(void)
+{
+ int num = 0;
+
+ if (have_performed_bandwidth_test)
+ return 1;
+
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
+ if (!circ->marked_for_close && CIRCUIT_IS_ORIGIN(circ) &&
+ circ->purpose == CIRCUIT_PURPOSE_TESTING &&
+ circ->state == CIRCUIT_STATE_OPEN)
+ num++;
+ }
+ SMARTLIST_FOREACH_END(circ);
+ return num >= NUM_PARALLEL_TESTING_CIRCS;
+}
+
+/** A testing circuit has completed. Take whatever stats we want.
+ * Noticing reachability is taken care of in onionskin_answer(),
+ * so there's no need to record anything here. But if we still want
+ * to do the bandwidth test, and we now have enough testing circuits
+ * open, do it.
+ */
+static void
+circuit_testing_opened(origin_circuit_t *circ)
+{
+ if (have_performed_bandwidth_test ||
+ !check_whether_orport_reachable(get_options())) {
+ /* either we've already done everything we want with testing circuits,
+ * or this testing circuit became open due to a fluke, e.g. we picked
+ * a last hop where we already had the connection open due to an
+ * outgoing local circuit. */
+ circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_AT_ORIGIN);
+ } else if (circuit_enough_testing_circs()) {
+ router_perform_bandwidth_test(NUM_PARALLEL_TESTING_CIRCS, time(NULL));
+ have_performed_bandwidth_test = 1;
+ } else
+ router_do_reachability_checks(1, 0);
+}
+
+/** A testing circuit has failed to build. Take whatever stats we want. */
+static void
+circuit_testing_failed(origin_circuit_t *circ, int at_last_hop)
+{
+ const or_options_t *options = get_options();
+ if (server_mode(options) && check_whether_orport_reachable(options))
+ return;
+
+ log_info(LD_GENERAL,
+ "Our testing circuit (to see if your ORPort is reachable) "
+ "has failed. I'll try again later.");
+
+ /* These aren't used yet. */
+ (void)circ;
+ (void)at_last_hop;
+}
+
+/** The circuit <b>circ</b> has just become open. Take the next
+ * step: for rendezvous circuits, we pass circ to the appropriate
+ * function in rendclient or rendservice. For general circuits, we
+ * call connection_ap_attach_pending, which looks for pending streams
+ * that could use circ.
+ */
+void
+circuit_has_opened(origin_circuit_t *circ)
+{
+ control_event_circuit_status(circ, CIRC_EVENT_BUILT, 0);
+
+ /* Remember that this circuit has finished building. Now if we start
+ * it building again later (e.g. by extending it), we will know not
+ * to consider its build time. */
+ circ->has_opened = 1;
+
+ switch (TO_CIRCUIT(circ)->purpose) {
+ case CIRCUIT_PURPOSE_C_ESTABLISH_REND:
+ hs_client_circuit_has_opened(circ);
+ /* Start building an intro circ if we don't have one yet. */
+ connection_ap_attach_pending(1);
+ /* This isn't a call to circuit_try_attaching_streams because a
+ * circuit in _C_ESTABLISH_REND state isn't connected to its
+ * hidden service yet, thus we can't attach streams to it yet,
+ * thus circuit_try_attaching_streams would always clear the
+ * circuit's isolation state. circuit_try_attaching_streams is
+ * called later, when the rend circ enters _C_REND_JOINED
+ * state. */
+ break;
+ case CIRCUIT_PURPOSE_C_INTRODUCING:
+ hs_client_circuit_has_opened(circ);
+ break;
+ case CIRCUIT_PURPOSE_C_GENERAL:
+ case CIRCUIT_PURPOSE_C_HSDIR_GET:
+ case CIRCUIT_PURPOSE_S_HSDIR_POST:
+ /* Tell any AP connections that have been waiting for a new
+ * circuit that one is ready. */
+ circuit_try_attaching_streams(circ);
+ break;
+ case CIRCUIT_PURPOSE_S_ESTABLISH_INTRO:
+ /* at the service, waiting for introductions */
+ hs_service_circuit_has_opened(circ);
+ break;
+ case CIRCUIT_PURPOSE_S_CONNECT_REND:
+ /* at the service, connecting to rend point */
+ hs_service_circuit_has_opened(circ);
+ break;
+ case CIRCUIT_PURPOSE_TESTING:
+ circuit_testing_opened(circ);
+ break;
+ /* default:
+ * This won't happen in normal operation, but might happen if the
+ * controller did it. Just let it slide. */
+ }
+}
+
+/** If the stream-isolation state of <b>circ</b> can be cleared, clear
+ * it. Return non-zero iff <b>circ</b>'s isolation state was cleared. */
+static int
+circuit_try_clearing_isolation_state(origin_circuit_t *circ)
+{
+ if (/* The circuit may have become non-open if it was cannibalized.*/
+ circ->base_.state == CIRCUIT_STATE_OPEN &&
+ /* If !isolation_values_set, there is nothing to clear. */
+ circ->isolation_values_set &&
+ /* It's not legal to clear a circuit's isolation info if it's ever had
+ * streams attached */
+ !circ->isolation_any_streams_attached) {
+ /* If we have any isolation information set on this circuit, and
+ * we didn't manage to attach any streams to it, then we can
+ * and should clear it and try again. */
+ circuit_clear_isolation(circ);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/** Called when a circuit becomes ready for streams to be attached to
+ * it. */
+void
+circuit_try_attaching_streams(origin_circuit_t *circ)
+{
+ /* Attach streams to this circuit if we can. */
+ connection_ap_attach_pending(1);
+
+ /* The call to circuit_try_clearing_isolation_state here will do
+ * nothing and return 0 if we didn't attach any streams to circ
+ * above. */
+ if (circuit_try_clearing_isolation_state(circ)) {
+ /* Maybe *now* we can attach some streams to this circuit. */
+ connection_ap_attach_pending(1);
+ }
+}
+
+/** Called whenever a circuit could not be successfully built.
+ */
+void
+circuit_build_failed(origin_circuit_t *circ)
+{
+ channel_t *n_chan = NULL;
+ /* we should examine circ and see if it failed because of
+ * the last hop or an earlier hop. then use this info below.
+ */
+ int failed_at_last_hop = 0;
+
+ /* First, check to see if this was a path failure, rather than build
+ * failure.
+ *
+ * Note that we deliberately use circuit_get_cpath_len() (and not
+ * circuit_get_cpath_opened_len()) because we only want to ensure
+ * that a full path is *chosen*. This is different than a full path
+ * being *built*. We only want to count *build* failures below.
+ *
+ * Path selection failures can happen spuriously for a number
+ * of reasons (such as aggressive/invalid user-specified path
+ * restrictions in the torrc, insufficient microdescriptors, and
+ * non-user reasons like exitpolicy issues), and so should not be
+ * counted as failures below.
+ */
+ if (circuit_get_cpath_len(circ) < circ->build_state->desired_path_len) {
+ static ratelim_t pathfail_limit = RATELIM_INIT(3600);
+ log_fn_ratelim(&pathfail_limit, LOG_NOTICE, LD_CIRC,
+ "Our circuit %u (id: %" PRIu32 ") died due to an invalid "
+ "selected path, purpose %s. This may be a torrc "
+ "configuration issue, or a bug.",
+ TO_CIRCUIT(circ)->n_circ_id, circ->global_identifier,
+ circuit_purpose_to_string(TO_CIRCUIT(circ)->purpose));
+
+ /* If the path failed on an RP, retry it. */
+ if (TO_CIRCUIT(circ)->purpose == CIRCUIT_PURPOSE_S_CONNECT_REND)
+ hs_circ_retry_service_rendezvous_point(circ);
+
+ /* In all other cases, just bail. The rest is just failure accounting
+ * that we don't want to do */
+ return;
+ }
+
+ /* If the last hop isn't open, and the second-to-last is, we failed
+ * at the last hop. */
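+ /* (cpath is a circular doubly-linked list, so cpath->prev is the
+ * final hop of the circuit.) */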
+ if (circ->cpath &&
+ circ->cpath->prev->state != CPATH_STATE_OPEN &&
+ circ->cpath->prev->prev->state == CPATH_STATE_OPEN) {
+ failed_at_last_hop = 1;
+ }
+
+ /* Check if we failed at first hop */
+ if (circ->cpath &&
+ circ->cpath->state != CPATH_STATE_OPEN &&
+ ! circ->base_.received_destroy) {
+ /* We failed at the first hop for some reason other than a DESTROY cell.
+ * If there's an OR connection to blame, blame it. Also, avoid this relay
+ * for a while, and fail any one-hop directory fetches destined for it. */
+ const char *n_chan_ident = circ->cpath->extend_info->identity_digest;
+ tor_assert(n_chan_ident);
+ int already_marked = 0;
+ if (circ->base_.n_chan) {
+ n_chan = circ->base_.n_chan;
+
+ if (n_chan->is_bad_for_new_circs) {
+ /* We only want to blame this router when a fresh healthy
+ * connection fails. So don't mark this router as newly failed,
+ * since maybe this was just an old circuit attempt that's
+ * finally timing out now. Also, there's no need to blow away
+ * circuits/streams/etc, since the failure of an unhealthy conn
+ * doesn't tell us much about whether a healthy conn would
+ * succeed. */
+ already_marked = 1;
+ }
+ log_info(LD_OR,
+ "Our circuit %u (id: %" PRIu32 ") failed to get a response "
+ "from the first hop (%s). I'm going to try to rotate to a "
+ "better connection.",
+ TO_CIRCUIT(circ)->n_circ_id, circ->global_identifier,
+ channel_get_canonical_remote_descr(n_chan));
+ n_chan->is_bad_for_new_circs = 1;
+ } else {
+ log_info(LD_OR,
+ "Our circuit %u (id: %" PRIu32 ") died before the first hop "
+ "with no connection",
+ TO_CIRCUIT(circ)->n_circ_id, circ->global_identifier);
+ }
+ if (!already_marked) {
+ /*
+ * If we have guard state (new guard API) and our path selection
+ * code actually chose a full path, then blame the failure of this
+ * circuit on the guard.
+ */
+ if (circ->guard_state)
+ entry_guard_failed(&circ->guard_state);
+ /* if there are any one-hop streams waiting on this circuit, fail
+ * them now so they can retry elsewhere. */
+ connection_ap_fail_onehop(n_chan_ident, circ->build_state);
+ }
+ }
+
+ switch (circ->base_.purpose) {
+ case CIRCUIT_PURPOSE_C_HSDIR_GET:
+ case CIRCUIT_PURPOSE_S_HSDIR_POST:
+ case CIRCUIT_PURPOSE_C_GENERAL:
+ /* If we never built the circuit, note it as a failure. */
+ circuit_increment_failure_count();
+ if (failed_at_last_hop) {
+ /* Make sure any streams that demand our last hop as their exit
+ * know that it's unlikely to happen. */
+ circuit_discard_optional_exit_enclaves(circ->cpath->prev->extend_info);
+ }
+ break;
+ case CIRCUIT_PURPOSE_TESTING:
+ circuit_testing_failed(circ, failed_at_last_hop);
+ break;
+ case CIRCUIT_PURPOSE_S_ESTABLISH_INTRO:
+ /* at the service, waiting for introductions */
+ if (circ->base_.state != CIRCUIT_STATE_OPEN) {
+ circuit_increment_failure_count();
+ }
+ /* no need to care here, because the service will rebuild intro
+ * points periodically. */
+ break;
+ case CIRCUIT_PURPOSE_C_INTRODUCING:
+ /* at the client, connecting to intro point */
+ /* Don't increment failure count, since the service may have picked
+ * the introduction point maliciously */
+ /* The client will pick a new intro point when this one dies, if
+ * the stream in question still cares. No need to act here. */
+ break;
+ case CIRCUIT_PURPOSE_C_ESTABLISH_REND:
+ /* at the client, waiting for the service */
+ circuit_increment_failure_count();
+ /* the client will pick a new rend point when this one dies, if
+ * the stream in question still cares. No need to act here. */
+ break;
+ case CIRCUIT_PURPOSE_S_CONNECT_REND:
+ /* at the service, connecting to rend point */
+ /* Don't increment failure count, since the client may have picked
+ * the rendezvous point maliciously */
+ log_info(LD_REND,
+ "Couldn't connect to the client's chosen rend point %s "
+ "(%s hop failed).",
+ escaped(build_state_get_exit_nickname(circ->build_state)),
+ failed_at_last_hop?"last":"non-last");
+ hs_circ_retry_service_rendezvous_point(circ);
+ break;
+ /* default:
+ * This won't happen in normal operation, but might happen if the
+ * controller did it. Just let it slide. */
+ }
+}
+
+/** Number of consecutive failures so far; should only be touched by
+ * circuit_launch_new and circuit_*_failure_count.
+ */
+static int n_circuit_failures = 0;
+/** Before the last time we called circuit_reset_failure_count(), were
+ * there a lot of failures? */
+static int did_circs_fail_last_period = 0;
+
+/** Don't retry launching a new circuit if we try this many times with no
+ * success. */
+#define MAX_CIRCUIT_FAILURES 5
+
+/** Launch a new circuit; see circuit_launch_by_extend_info() for
+ * details on arguments. */
+origin_circuit_t *
+circuit_launch(uint8_t purpose, int flags)
+{
+ return circuit_launch_by_extend_info(purpose, NULL, flags);
+}
+
+/* Do we have enough descriptors to build paths?
+ * If need_exit is true, return 1 if we can build exit paths.
+ * (We need at least one Exit in the consensus to build exit paths.)
+ * If need_exit is false, return 1 if we can build internal paths.
+ */
+static int
+have_enough_path_info(int need_exit)
+{
+ if (need_exit)
+ return router_have_consensus_path() == CONSENSUS_PATH_EXIT;
+ else
+ return router_have_consensus_path() != CONSENSUS_PATH_UNKNOWN;
+}
+
+/**
+ * Return true iff <b>purpose</b> is a hidden service circuit purpose.
+ */
+int
+circuit_purpose_is_hidden_service(uint8_t purpose)
+{
+ if (purpose == CIRCUIT_PURPOSE_HS_VANGUARDS) {
+ return 1;
+ }
+
+ /* Client-side purpose */
+ if (purpose >= CIRCUIT_PURPOSE_C_HS_MIN_ &&
+ purpose <= CIRCUIT_PURPOSE_C_HS_MAX_) {
+ return 1;
+ }
+
+ /* Service-side purpose */
+ if (purpose >= CIRCUIT_PURPOSE_S_HS_MIN_ &&
+ purpose <= CIRCUIT_PURPOSE_S_HS_MAX_) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Return true if this circuit purpose should use vanguards
+ * or pinned Layer2 or Layer3 guards.
+ *
+ * This function takes both the circuit purpose and the
+ * torrc options for pinned middles/vanguards into account
+ * (ie: the circuit must be a hidden service circuit and
+ * vanguards/pinned middles must be enabled for it to return
+ * true).
+ */
+int
+circuit_should_use_vanguards(uint8_t purpose)
+{
+ const or_options_t *options = get_options();
+
+ /* Only hidden service circuits use vanguards */
+ if (!circuit_purpose_is_hidden_service(purpose))
+ return 0;
+
+ /* Pinned middles are effectively vanguards */
+ if (options->HSLayer2Nodes || options->HSLayer3Nodes)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Return true if the conditions under which it is OK to use a
+ * cannibalized circuit all hold.
+ *
+ * We don't cannibalize for one-hop circuits, tor2web, or certain purposes.
+ */
+static int
+circuit_should_cannibalize_to_build(uint8_t purpose_to_build,
+ int has_extend_info,
+ int onehop_tunnel,
+ int need_specific_rp)
+{
+
+ /* Do not try to cannibalize if this is a one-hop circuit, or a
+ * tor2web/special-RP circuit. */
+ if (onehop_tunnel || need_specific_rp) {
+ return 0;
+ }
+
+ /* Don't try to cannibalize for general purpose circuits that do not
+ * specify a custom exit. */
+ if (purpose_to_build == CIRCUIT_PURPOSE_C_GENERAL && !has_extend_info) {
+ return 0;
+ }
+
+ /* Don't cannibalize for testing circuits. We want to see if they
+ * complete normally. Also don't cannibalize for vanguard-purpose
+ * circuits, since those are specially pre-built for later
+ * cannibalization by the actual specific circuit types that need
+ * vanguards.
+ */
+ if (purpose_to_build == CIRCUIT_PURPOSE_TESTING ||
+ purpose_to_build == CIRCUIT_PURPOSE_HS_VANGUARDS) {
+ return 0;
+ }
+
+ /* For vanguards, the server-side intro circ is not cannibalized
+ * because we pre-build 4 hop HS circuits, and it only needs a 3 hop
+ * circuit. It is also long-lived, so it is more important that
+ * it have lower latency than get built fast.
+ */
+ if (circuit_should_use_vanguards(purpose_to_build) &&
+ purpose_to_build == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/** Launch a new circuit with purpose <b>purpose</b> and exit node
+ * <b>extend_info</b> (or NULL to select a random exit node). If flags
+ * contains CIRCLAUNCH_NEED_UPTIME, choose among routers with high uptime. If
+ * CIRCLAUNCH_NEED_CAPACITY is set, choose among routers with high bandwidth.
+ * If CIRCLAUNCH_IS_INTERNAL is true, the last hop need not be an exit node.
+ * If CIRCLAUNCH_ONEHOP_TUNNEL is set, the circuit will have only one hop.
+ * Return the newly allocated circuit on success, or NULL on failure. */
+origin_circuit_t *
+circuit_launch_by_extend_info(uint8_t purpose,
+ extend_info_t *extend_info,
+ int flags)
+{
+ origin_circuit_t *circ;
+ int onehop_tunnel = (flags & CIRCLAUNCH_ONEHOP_TUNNEL) != 0;
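+ /* Internal circuits don't need an exit node, so only require
+ * exit-capable path info when the circuit isn't internal. */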
+ int have_path = have_enough_path_info(! (flags & CIRCLAUNCH_IS_INTERNAL) );
+ int need_specific_rp = 0;
+
+ /* Keep some stats about our attempts to launch HS rendezvous circuits */
+ if (purpose == CIRCUIT_PURPOSE_S_CONNECT_REND) {
+ hs_stats_note_service_rendezvous_launch();
+ }
+
+ if (!onehop_tunnel && (!router_have_minimum_dir_info() || !have_path)) {
+ log_debug(LD_CIRC,"Haven't %s yet; canceling "
+ "circuit launch.",
+ !router_have_minimum_dir_info() ?
+ "fetched enough directory info" :
+ "received a consensus with exits");
+ return NULL;
+ }
+
+ /* If Tor2webRendezvousPoints is enabled and we are dealing with an
+ RP circuit, we want a specific RP node, so we shouldn't cannibalize
+ an already existing circuit. */
+ if (get_options()->Tor2webRendezvousPoints &&
+ purpose == CIRCUIT_PURPOSE_C_ESTABLISH_REND) {
+ need_specific_rp = 1;
+ }
+
+ /* If we can/should cannibalize another circuit to build this one,
+ * then do so. */
+ if (circuit_should_cannibalize_to_build(purpose,
+ extend_info != NULL,
+ onehop_tunnel,
+ need_specific_rp)) {
+ /* see if there are appropriate circs available to cannibalize. */
+ /* XXX if we're planning to add a hop, perhaps we want to look for
+ * internal circs rather than exit circs? -RD */
+ circ = circuit_find_to_cannibalize(purpose, extend_info, flags);
+ if (circ) {
+ uint8_t old_purpose = circ->base_.purpose;
+ struct timeval old_timestamp_began = circ->base_.timestamp_began;
+
+ log_info(LD_CIRC, "Cannibalizing circ %u (id: %" PRIu32 ") for "
+ "purpose %d (%s)",
+ TO_CIRCUIT(circ)->n_circ_id, circ->global_identifier, purpose,
+ circuit_purpose_to_string(purpose));
+
+ if ((purpose == CIRCUIT_PURPOSE_S_CONNECT_REND ||
+ purpose == CIRCUIT_PURPOSE_C_INTRODUCING) &&
+ circ->path_state == PATH_STATE_BUILD_SUCCEEDED) {
+ /* Path bias: Cannibalized rends pre-emptively count as a
+ * successfully built but unused closed circuit. We don't
+ * wait until the extend (or the close) because the rend
+ * point could be malicious.
+ *
+ * Same deal goes for client side introductions. Clients
+ * can be manipulated to connect repeatedly to them
+ * (especially web clients).
+ *
+ * If we decide to probe the initial portion of these circs,
+ * (up to the adversary's final hop), we need to remove this,
+ * or somehow mark the circuit with a special path state.
+ */
+
+ /* This must be called before the purpose change */
+ pathbias_check_close(circ, END_CIRC_REASON_FINISHED);
+ }
+
+ circuit_change_purpose(TO_CIRCUIT(circ), purpose);
+ /* Reset the start date of this circ, else expire_building
+ * will see it and think it's been trying to build since it
+ * began.
+ *
+ * Technically, the code should reset this when the
+ * create cell is finally sent, but we're close enough
+ * here. */
+ tor_gettimeofday(&circ->base_.timestamp_began);
+
+ control_event_circuit_cannibalized(circ, old_purpose,
+ &old_timestamp_began);
+
+ switch (purpose) {
+ case CIRCUIT_PURPOSE_C_ESTABLISH_REND:
+ /* it's ready right now */
+ break;
+ case CIRCUIT_PURPOSE_C_INTRODUCING:
+ case CIRCUIT_PURPOSE_S_CONNECT_REND:
+ case CIRCUIT_PURPOSE_C_GENERAL:
+ case CIRCUIT_PURPOSE_S_HSDIR_POST:
+ case CIRCUIT_PURPOSE_C_HSDIR_GET:
+ case CIRCUIT_PURPOSE_S_ESTABLISH_INTRO:
+ /* need to add a new hop */
+ tor_assert(extend_info);
+ if (circuit_extend_to_new_exit(circ, extend_info) < 0)
+ return NULL;
+ break;
+ default:
+ log_warn(LD_BUG,
+ "unexpected purpose %d when cannibalizing a circ.",
+ purpose);
+ tor_fragile_assert();
+ return NULL;
+ }
+ return circ;
+ }
+ }
+
+ if (did_circs_fail_last_period &&
+ n_circuit_failures > MAX_CIRCUIT_FAILURES) {
+ /* too many failed circs in a row. don't try. */
+// log_fn(LOG_INFO,"%d failures so far, not trying.",n_circuit_failures);
+ return NULL;
+ }
+
+ /* try a circ. if it fails, circuit_mark_for_close will increment
+ * n_circuit_failures */
+ return circuit_establish_circuit(purpose, extend_info, flags);
+}
+
+/** Record another failure at opening a general circuit. When we have
+ * too many, we'll stop trying for the remainder of this minute.
+ */
+static void
+circuit_increment_failure_count(void)
+{
+ ++n_circuit_failures;
+ log_debug(LD_CIRC,"n_circuit_failures now %d.",n_circuit_failures);
+}
+
+/** Reset the failure count for opening general circuits. This means
+ * we will try MAX_CIRCUIT_FAILURES times more (if necessary) before
+ * stopping again.
+ */
+void
+circuit_reset_failure_count(int timeout)
+{
+ if (timeout && n_circuit_failures > MAX_CIRCUIT_FAILURES)
+ did_circs_fail_last_period = 1;
+ else
+ did_circs_fail_last_period = 0;
+ n_circuit_failures = 0;
+}
+
+/** Find an open circ that we're happy to use for <b>conn</b> and return 1. If
+ * there isn't one, and there isn't one on the way, launch one and return
+ * 0. If it will never work, return -1.
+ *
+ * Write the found or in-progress or launched circ into *circp.
+ */
+static int
+circuit_get_open_circ_or_launch(entry_connection_t *conn,
+ uint8_t desired_circuit_purpose,
+ origin_circuit_t **circp)
+{
+ origin_circuit_t *circ;
+ int check_exit_policy;
+ int need_uptime, need_internal;
+ int want_onehop;
+ const or_options_t *options = get_options();
+
+ tor_assert(conn);
+ tor_assert(circp);
+ if (ENTRY_TO_CONN(conn)->state != AP_CONN_STATE_CIRCUIT_WAIT) {
+ connection_t *c = ENTRY_TO_CONN(conn);
+ log_err(LD_BUG, "Connection state mismatch: wanted "
+ "AP_CONN_STATE_CIRCUIT_WAIT, but got %d (%s)",
+ c->state, conn_state_to_string(c->type, c->state));
+ }
+ tor_assert(ENTRY_TO_CONN(conn)->state == AP_CONN_STATE_CIRCUIT_WAIT);
+
+ /* Will the exit policy of the exit node apply to this stream? */
+ check_exit_policy =
+ conn->socks_request->command == SOCKS_COMMAND_CONNECT &&
+ !conn->use_begindir &&
+ !connection_edge_is_rendezvous_stream(ENTRY_TO_EDGE_CONN(conn));
+
+ /* Does this connection want a one-hop circuit? */
+ want_onehop = conn->want_onehop;
+
+ /* Do we need a high-uptime circuit? */
+ need_uptime = !conn->want_onehop && !conn->use_begindir &&
+ smartlist_contains_int_as_string(options->LongLivedPorts,
+ conn->socks_request->port);
+
+ /* Do we need an "internal" circuit? */
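+ /* (begindir and one-hop requests terminate at the relay itself, so
+ * they never need an exit node.) */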
+ if (desired_circuit_purpose != CIRCUIT_PURPOSE_C_GENERAL)
+ need_internal = 1;
+ else if (conn->use_begindir || conn->want_onehop)
+ need_internal = 1;
+ else
+ need_internal = 0;
+
+ /* We now know what kind of circuit we need. See if there is an
+ * open circuit that we can use for this stream */
+ circ = circuit_get_best(conn, 1 /* Insist on open circuits */,
+ desired_circuit_purpose,
+ need_uptime, need_internal);
+
+ if (circ) {
+ /* We got a circuit that will work for this stream! We can return it. */
+ *circp = circ;
+ return 1; /* we're happy */
+ }
+
+ /* Okay, there's no circuit open that will work for this stream. Let's
+ * see if there's an in-progress circuit or if we have to launch one */
+
+ /* Do we know enough directory info to build circuits at all? */
+ int have_path = have_enough_path_info(!need_internal);
+
+ if (!want_onehop && (!router_have_minimum_dir_info() || !have_path)) {
+ /* If we don't have enough directory information, we can't build
+ * multihop circuits.
+ */
+ if (!connection_get_by_type(CONN_TYPE_DIR)) {
+ int severity = LOG_NOTICE;
+ /* Retry some stuff that might help the connection work. */
+ /* If we are configured with EntryNodes or UseBridges */
+ if (entry_list_is_constrained(options)) {
+ /* Retry all our guards / bridges.
+ * guards_retry_optimistic() always returns true here. */
+ int rv = guards_retry_optimistic(options);
+ tor_assert_nonfatal_once(rv);
+ log_fn(severity, LD_APP|LD_DIR,
+ "Application request when we haven't %s. "
+ "Optimistically trying known %s again.",
+ !router_have_minimum_dir_info() ?
+ "used client functionality lately" :
+ "received a consensus with exits",
+ options->UseBridges ? "bridges" : "entrynodes");
+ } else {
+ /* Getting directory documents doesn't help much if we have a limited
+ * number of guards */
+ tor_assert_nonfatal(!options->UseBridges);
+ tor_assert_nonfatal(!options->EntryNodes);
+ /* Retry our directory fetches, so we have a fresh set of guard info */
+ log_fn(severity, LD_APP|LD_DIR,
+ "Application request when we haven't %s. "
+ "Optimistically trying directory fetches again.",
+ !router_have_minimum_dir_info() ?
+ "used client functionality lately" :
+ "received a consensus with exits");
+ routerlist_retry_directory_downloads(time(NULL));
+ }
+ }
+ /* Since we didn't have enough directory info, we can't attach now. The
+ * stream will be dealt with when router_have_minimum_dir_info becomes 1,
+ * or when all directory attempts fail and directory_all_unreachable()
+ * kills it.
+ */
+ return 0;
+ }
+
+ /* Check whether the exit policy of the chosen exit, or the exit policies
+ * of _all_ nodes, would forbid this node. */
+ if (check_exit_policy) {
+ if (!conn->chosen_exit_name) {
+ struct in_addr in;
+ tor_addr_t addr, *addrp=NULL;
+ if (tor_inet_aton(conn->socks_request->address, &in)) {
+ tor_addr_from_in(&addr, &in);
+ addrp = &addr;
+ }
+ if (router_exit_policy_all_nodes_reject(addrp,
+ conn->socks_request->port,
+ need_uptime)) {
+ log_notice(LD_APP,
+ "No Tor server allows exit to %s:%d. Rejecting.",
+ safe_str_client(conn->socks_request->address),
+ conn->socks_request->port);
+ return -1;
+ }
+ } else {
+ /* XXXX Duplicates checks in connection_ap_handshake_attach_circuit:
+ * refactor into a single function. */
+ const node_t *node = node_get_by_nickname(conn->chosen_exit_name, 0);
+ int opt = conn->chosen_exit_optional;
+ if (node && !connection_ap_can_use_exit(conn, node)) {
+ log_fn(opt ? LOG_INFO : LOG_WARN, LD_APP,
+ "Requested exit point '%s' is excluded or "
+ "would refuse request. %s.",
+ conn->chosen_exit_name, opt ? "Trying others" : "Closing");
+ if (opt) {
+ conn->chosen_exit_optional = 0;
+ tor_free(conn->chosen_exit_name);
+ /* Try again. */
+ return circuit_get_open_circ_or_launch(conn,
+ desired_circuit_purpose,
+ circp);
+ }
+ return -1;
+ }
+ }
+ }
+
+  /* Now, check whether there is already a circuit on the way that could
+   * handle this stream. This check matches the one above, but this time
+   * we do not insist that the circuit be open. */
+ circ = circuit_get_best(conn, 0 /* don't insist on open circuits */,
+ desired_circuit_purpose,
+ need_uptime, need_internal);
+ if (circ)
+ log_debug(LD_CIRC, "one on the way!");
+
+ if (!circ) {
+ /* No open or in-progress circuit could handle this stream! We
+ * will have to launch one!
+ */
+
+ /* The chosen exit node, if there is one. */
+ extend_info_t *extend_info=NULL;
+ const int n_pending = count_pending_general_client_circuits();
+
+ /* Do we have too many pending circuits? */
+ if (n_pending >= options->MaxClientCircuitsPending) {
+ static ratelim_t delay_limit = RATELIM_INIT(10*60);
+ char *m;
+ if ((m = rate_limit_log(&delay_limit, approx_time()))) {
+ log_notice(LD_APP, "We'd like to launch a circuit to handle a "
+ "connection, but we already have %d general-purpose client "
+ "circuits pending. Waiting until some finish.%s",
+ n_pending, m);
+ tor_free(m);
+ }
+ return 0;
+ }
+
+    /* If this is a client connection that needs to reach a hidden
+     * service's introduction point, handle that case. */
+ if (desired_circuit_purpose == CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT) {
+ const edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(conn);
+ /* need to pick an intro point */
+ extend_info = hs_client_get_random_intro_from_edge(edge_conn);
+ if (!extend_info) {
+ log_info(LD_REND, "No intro points: re-fetching service descriptor.");
+ if (edge_conn->rend_data) {
+ rend_client_refetch_v2_renddesc(edge_conn->rend_data);
+ } else {
+ hs_client_refetch_hsdesc(&edge_conn->hs_ident->identity_pk);
+ }
+ connection_ap_mark_as_non_pending_circuit(conn);
+ ENTRY_TO_CONN(conn)->state = AP_CONN_STATE_RENDDESC_WAIT;
+ return 0;
+ }
+ log_info(LD_REND,"Chose %s as intro point for '%s'.",
+ extend_info_describe(extend_info),
+ (edge_conn->rend_data) ?
+ safe_str_client(rend_data_get_address(edge_conn->rend_data)) :
+ "service");
+ }
+
+ /* If we have specified a particular exit node for our
+ * connection, then be sure to open a circuit to that exit node.
+ */
+ if (desired_circuit_purpose == CIRCUIT_PURPOSE_C_GENERAL ||
+ desired_circuit_purpose == CIRCUIT_PURPOSE_S_HSDIR_POST ||
+ desired_circuit_purpose == CIRCUIT_PURPOSE_C_HSDIR_GET) {
+ if (conn->chosen_exit_name) {
+ const node_t *r;
+ int opt = conn->chosen_exit_optional;
+ r = node_get_by_nickname(conn->chosen_exit_name, 0);
+ if (r && node_has_preferred_descriptor(r, conn->want_onehop ? 1 : 0)) {
+ /* We might want to connect to an IPv6 bridge for loading
+ descriptors so we use the preferred address rather than
+ the primary. */
+ extend_info = extend_info_from_node(r, conn->want_onehop ? 1 : 0);
+ if (!extend_info) {
+ log_warn(LD_CIRC,"Could not make a one-hop connection to %s. "
+ "Discarding this circuit.", conn->chosen_exit_name);
+ return -1;
+ }
+ } else { /* ! (r && node_has_preferred_descriptor(...)) */
+ log_debug(LD_DIR, "considering %d, %s",
+ want_onehop, conn->chosen_exit_name);
+ if (want_onehop && conn->chosen_exit_name[0] == '$') {
+ /* We're asking for a one-hop circuit to a router that
+ * we don't have a routerinfo about. Make up an extend_info. */
+ /* XXX prop220: we need to make chosen_exit_name able to
+ * encode both key formats. This is not absolutely critical
+ * since this is just for one-hop circuits, but we should
+ * still get it done */
+ char digest[DIGEST_LEN];
+ char *hexdigest = conn->chosen_exit_name+1;
+ tor_addr_t addr;
+ if (strlen(hexdigest) < HEX_DIGEST_LEN ||
+ base16_decode(digest,DIGEST_LEN,
+ hexdigest,HEX_DIGEST_LEN) != DIGEST_LEN) {
+ log_info(LD_DIR, "Broken exit digest on tunnel conn. Closing.");
+ return -1;
+ }
+ if (tor_addr_parse(&addr, conn->socks_request->address) < 0) {
+ log_info(LD_DIR, "Broken address %s on tunnel conn. Closing.",
+ escaped_safe_str_client(conn->socks_request->address));
+ return -1;
+ }
+ /* XXXX prop220 add a workaround for ed25519 ID below*/
+ extend_info = extend_info_new(conn->chosen_exit_name+1,
+ digest,
+ NULL, /* Ed25519 ID */
+ NULL, NULL, /* onion keys */
+ &addr, conn->socks_request->port);
+ } else { /* ! (want_onehop && conn->chosen_exit_name[0] == '$') */
+ /* We will need an onion key for the router, and we
+ * don't have one. Refuse or relax requirements. */
+ log_fn(opt ? LOG_INFO : LOG_WARN, LD_APP,
+ "Requested exit point '%s' is not known. %s.",
+ conn->chosen_exit_name, opt ? "Trying others" : "Closing");
+ if (opt) {
+ conn->chosen_exit_optional = 0;
+ tor_free(conn->chosen_exit_name);
+ /* Try again with no requested exit */
+ return circuit_get_open_circ_or_launch(conn,
+ desired_circuit_purpose,
+ circp);
+ }
+ return -1;
+ }
+ }
+ }
+    } /* Done checking for general circuits with chosen exits. */
+
+ /* What purpose do we need to launch this circuit with? */
+ uint8_t new_circ_purpose;
+ if (desired_circuit_purpose == CIRCUIT_PURPOSE_C_REND_JOINED)
+ new_circ_purpose = CIRCUIT_PURPOSE_C_ESTABLISH_REND;
+ else if (desired_circuit_purpose == CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT)
+ new_circ_purpose = CIRCUIT_PURPOSE_C_INTRODUCING;
+ else
+ new_circ_purpose = desired_circuit_purpose;
+
+#ifdef ENABLE_TOR2WEB_MODE
+  /* If Tor2web mode is on, then hidden service requests should be
+   * one-hop. */
+ if (options->Tor2webMode &&
+ (new_circ_purpose == CIRCUIT_PURPOSE_C_ESTABLISH_REND ||
+ new_circ_purpose == CIRCUIT_PURPOSE_C_INTRODUCING)) {
+ want_onehop = 1;
+ }
+#endif /* defined(ENABLE_TOR2WEB_MODE) */
+
+ /* Determine what kind of a circuit to launch, and actually launch it. */
+ {
+ int flags = CIRCLAUNCH_NEED_CAPACITY;
+ if (want_onehop) flags |= CIRCLAUNCH_ONEHOP_TUNNEL;
+ if (need_uptime) flags |= CIRCLAUNCH_NEED_UPTIME;
+ if (need_internal) flags |= CIRCLAUNCH_IS_INTERNAL;
+
+ /* If we are about to pick a v3 RP right now, make sure we pick a
+ * rendezvous point that supports the v3 protocol! */
+ if (desired_circuit_purpose == CIRCUIT_PURPOSE_C_REND_JOINED &&
+ new_circ_purpose == CIRCUIT_PURPOSE_C_ESTABLISH_REND &&
+ ENTRY_TO_EDGE_CONN(conn)->hs_ident) {
+ flags |= CIRCLAUNCH_IS_V3_RP;
+ log_info(LD_GENERAL, "Getting rendezvous circuit to v3 service!");
+ }
+
+ circ = circuit_launch_by_extend_info(new_circ_purpose, extend_info,
+ flags);
+ }
+
+ extend_info_free(extend_info);
+
+ /* Now trigger things that need to happen when we launch circuits */
+
+ if (desired_circuit_purpose == CIRCUIT_PURPOSE_C_GENERAL ||
+ desired_circuit_purpose == CIRCUIT_PURPOSE_C_HSDIR_GET ||
+ desired_circuit_purpose == CIRCUIT_PURPOSE_S_HSDIR_POST) {
+ /* We just caused a circuit to get built because of this stream.
+ * If this stream has caused a _lot_ of circuits to be built, that's
+ * a bad sign: we should tell the user. */
+ if (conn->num_circuits_launched < NUM_CIRCUITS_LAUNCHED_THRESHOLD &&
+ ++conn->num_circuits_launched == NUM_CIRCUITS_LAUNCHED_THRESHOLD)
+ log_info(LD_CIRC, "The application request to %s:%d has launched "
+ "%d circuits without finding one it likes.",
+ escaped_safe_str_client(conn->socks_request->address),
+ conn->socks_request->port,
+ conn->num_circuits_launched);
+ } else {
+ /* help predict this next time */
+ rep_hist_note_used_internal(time(NULL), need_uptime, 1);
+ if (circ) {
+ const edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(conn);
+ if (edge_conn->rend_data) {
+ /* write the service_id into circ */
+ circ->rend_data = rend_data_dup(edge_conn->rend_data);
+ } else if (edge_conn->hs_ident) {
+ circ->hs_ident =
+ hs_ident_circuit_new(&edge_conn->hs_ident->identity_pk,
+ HS_IDENT_CIRCUIT_INTRO);
+ }
+ if (circ->base_.purpose == CIRCUIT_PURPOSE_C_ESTABLISH_REND &&
+ circ->base_.state == CIRCUIT_STATE_OPEN)
+ circuit_has_opened(circ);
+ }
+ }
+ } /* endif (!circ) */
+
+  /* We either found a good circuit, launched a new circuit, or failed to
+   * do either. Report the circuit (if any); returning 0 tells the caller
+   * that the stream must keep waiting. */
+
+ if (circ) {
+ /* Mark the circuit with the isolation fields for this connection.
+ * When the circuit arrives, we'll clear these flags: this is
+ * just some internal bookkeeping to make sure that we have
+ * launched enough circuits.
+ */
+ connection_edge_update_circuit_isolation(conn, circ, 0);
+ } else {
+ log_info(LD_APP,
+ "No safe circuit (purpose %d) ready for edge "
+ "connection; delaying.",
+ desired_circuit_purpose);
+ }
+ *circp = circ;
+ return 0;
+}
+
+/** Return true iff <b>crypt_path</b> is one of the crypt_paths for
+ * <b>circ</b>. */
+static int
+cpath_is_on_circuit(origin_circuit_t *circ, crypt_path_t *crypt_path)
+{
+ crypt_path_t *cpath, *cpath_next = NULL;
+ for (cpath = circ->cpath; cpath_next != circ->cpath; cpath = cpath_next) {
+ cpath_next = cpath->next;
+ if (crypt_path == cpath)
+ return 1;
+ }
+ return 0;
+}
+
+/** Return true iff client-side optimistic data is supported. */
+static int
+optimistic_data_enabled(void)
+{
+ const or_options_t *options = get_options();
+ if (options->OptimisticData < 0) {
+ /* Note: this default was 0 before #18815 was merged. We can't take the
+ * parameter out of the consensus until versions before that are all
+ * obsolete. */
+ const int32_t enabled =
+ networkstatus_get_param(NULL, "UseOptimisticData", /*default*/ 1, 0, 1);
+ return (int)enabled;
+ }
+ return options->OptimisticData;
+}
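+
+/* Illustrative note (an editorial addition, not part of the original
+ * code): OptimisticData is a tri-state torrc option.  An explicit 0 or 1
+ * wins outright; the "auto" setting is stored as a negative value, in
+ * which case the function above defers to the consensus parameter
+ * UseOptimisticData, which defaults to 1. */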
+
+/** Attach the AP stream <b>apconn</b> to circ's linked list of
+ * p_streams. Also set apconn's cpath_layer to <b>cpath</b>, or to the last
+ * hop in circ's cpath if <b>cpath</b> is NULL.
+ */
+static void
+link_apconn_to_circ(entry_connection_t *apconn, origin_circuit_t *circ,
+ crypt_path_t *cpath)
+{
+ const node_t *exitnode = NULL;
+
+ /* add it into the linked list of streams on this circuit */
+ log_debug(LD_APP|LD_CIRC, "attaching new conn to circ. n_circ_id %u.",
+ (unsigned)circ->base_.n_circ_id);
+ /* reset it, so we can measure circ timeouts */
+ ENTRY_TO_CONN(apconn)->timestamp_last_read_allowed = time(NULL);
+ ENTRY_TO_EDGE_CONN(apconn)->next_stream = circ->p_streams;
+ ENTRY_TO_EDGE_CONN(apconn)->on_circuit = TO_CIRCUIT(circ);
+ /* assert_connection_ok(conn, time(NULL)); */
+ circ->p_streams = ENTRY_TO_EDGE_CONN(apconn);
+
+ if (connection_edge_is_rendezvous_stream(ENTRY_TO_EDGE_CONN(apconn))) {
+    /* We are attaching a stream to a rendezvous circuit. That means
+     * that an attempt to connect to a hidden service just
+     * succeeded. Tell the hidden-service client code. */
+ hs_client_note_connection_attempt_succeeded(ENTRY_TO_EDGE_CONN(apconn));
+ }
+
+ if (cpath) { /* we were given one; use it */
+ tor_assert(cpath_is_on_circuit(circ, cpath));
+ } else {
+ /* use the last hop in the circuit */
+ tor_assert(circ->cpath);
+ tor_assert(circ->cpath->prev);
+ tor_assert(circ->cpath->prev->state == CPATH_STATE_OPEN);
+ cpath = circ->cpath->prev;
+ }
+ ENTRY_TO_EDGE_CONN(apconn)->cpath_layer = cpath;
+
+ circ->isolation_any_streams_attached = 1;
+ connection_edge_update_circuit_isolation(apconn, circ, 0);
+
+ /* Compute the exitnode if possible, for logging below */
+ if (cpath->extend_info)
+ exitnode = node_get_by_id(cpath->extend_info->identity_digest);
+
+ /* See if we can use optimistic data on this circuit */
+ if (optimistic_data_enabled() &&
+ (circ->base_.purpose == CIRCUIT_PURPOSE_C_GENERAL ||
+ circ->base_.purpose == CIRCUIT_PURPOSE_C_HSDIR_GET ||
+ circ->base_.purpose == CIRCUIT_PURPOSE_S_HSDIR_POST ||
+ circ->base_.purpose == CIRCUIT_PURPOSE_C_REND_JOINED))
+ apconn->may_use_optimistic_data = 1;
+ else
+ apconn->may_use_optimistic_data = 0;
+ log_info(LD_APP, "Looks like completed circuit to %s %s allow "
+ "optimistic data for connection to %s",
+ circ->base_.purpose == CIRCUIT_PURPOSE_C_GENERAL ?
+ /* node_describe() does the right thing if exitnode is NULL */
+ safe_str_client(node_describe(exitnode)) :
+ "hidden service",
+ apconn->may_use_optimistic_data ? "does" : "doesn't",
+ safe_str_client(apconn->socks_request->address));
+}
+
+/** Return true iff <b>address</b> is matched by one of the entries in
+ * TrackHostExits. */
+int
+hostname_in_track_host_exits(const or_options_t *options, const char *address)
+{
+ if (!options->TrackHostExits)
+ return 0;
+ SMARTLIST_FOREACH_BEGIN(options->TrackHostExits, const char *, cp) {
+ if (cp[0] == '.') { /* match end */
+ if (cp[1] == '\0' ||
+ !strcasecmpend(address, cp) ||
+ !strcasecmp(address, &cp[1]))
+ return 1;
+ } else if (strcasecmp(cp, address) == 0) {
+ return 1;
+ }
+ } SMARTLIST_FOREACH_END(cp);
+ return 0;
+}
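+
+/* Worked example (an editorial addition, not part of the original code):
+ * given "TrackHostExits .example.com, torproject.org", the entry
+ * ".example.com" matches "example.com" itself and any subdomain such as
+ * "www.example.com", while "torproject.org" matches only that exact
+ * hostname.  A bare "." matches every address. */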
+
+/** If an exit wasn't explicitly specified for <b>conn</b>, consider saving
+ * the exit that we *did* choose for use by future connections to
+ * <b>conn</b>'s destination.
+ */
+static void
+consider_recording_trackhost(const entry_connection_t *conn,
+ const origin_circuit_t *circ)
+{
+ const or_options_t *options = get_options();
+ char *new_address = NULL;
+ char fp[HEX_DIGEST_LEN+1];
+
+  /* Check the addressmap for this conn's destination; if it is already
+   * mapped, or TrackHostExits is unset, there is nothing to do. */
+ if (!options->TrackHostExits ||
+ addressmap_have_mapping(conn->socks_request->address,
+ options->TrackHostExitsExpire))
+ return; /* nothing to track, or already mapped */
+
+ if (!hostname_in_track_host_exits(options, conn->socks_request->address) ||
+ !circ->build_state->chosen_exit)
+ return;
+
+ /* write down the fingerprint of the chosen exit, not the nickname,
+ * because the chosen exit might not be named. */
+ base16_encode(fp, sizeof(fp),
+ circ->build_state->chosen_exit->identity_digest, DIGEST_LEN);
+
+ /* Add this exit/hostname pair to the addressmap. */
+ tor_asprintf(&new_address, "%s.%s.exit",
+ conn->socks_request->address, fp);
+
+ addressmap_register(conn->socks_request->address, new_address,
+ time(NULL) + options->TrackHostExitsExpire,
+ ADDRMAPSRC_TRACKEXIT, 0, 0);
+}
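+
+/* Worked example (an editorial addition, not part of the original code):
+ * if the stream's destination is "www.example.com" and the chosen exit's
+ * identity digest hex-encodes to some fingerprint FP, the function above
+ * registers the mapping
+ *
+ *   www.example.com -> www.example.com.FP.exit
+ *
+ * for TrackHostExitsExpire seconds, steering future connections to that
+ * host through the same exit. */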
+
+/** Attempt to attach the connection <b>conn</b> to <b>circ</b>, and send a
+ * begin or resolve cell as appropriate. Return values are as for
+ * connection_ap_handshake_attach_circuit. The stream will exit from the hop
+ * indicated by <b>cpath</b>, or from the last hop in circ's cpath if
+ * <b>cpath</b> is NULL. */
+int
+connection_ap_handshake_attach_chosen_circuit(entry_connection_t *conn,
+ origin_circuit_t *circ,
+ crypt_path_t *cpath)
+{
+ connection_t *base_conn = ENTRY_TO_CONN(conn);
+ tor_assert(conn);
+ tor_assert(base_conn->state == AP_CONN_STATE_CIRCUIT_WAIT ||
+ base_conn->state == AP_CONN_STATE_CONTROLLER_WAIT);
+ tor_assert(conn->socks_request);
+ tor_assert(circ);
+ tor_assert(circ->base_.state == CIRCUIT_STATE_OPEN);
+
+ base_conn->state = AP_CONN_STATE_CIRCUIT_WAIT;
+
+ if (!circ->base_.timestamp_dirty ||
+ ((conn->entry_cfg.isolation_flags & ISO_SOCKSAUTH) &&
+ (conn->entry_cfg.socks_iso_keep_alive) &&
+ (conn->socks_request->usernamelen ||
+ conn->socks_request->passwordlen))) {
+    /* Refresh the dirty timestamp if the circuit is not dirty yet, or if
+     * stream isolation is keyed on SOCKS authentication and the
+     * application has asked us to keep such circuits alive. */
+ circ->base_.timestamp_dirty = approx_time();
+ }
+
+ pathbias_count_use_attempt(circ);
+
+ /* Now, actually link the connection. */
+ link_apconn_to_circ(conn, circ, cpath);
+
+ tor_assert(conn->socks_request);
+ if (conn->socks_request->command == SOCKS_COMMAND_CONNECT) {
+ if (!conn->use_begindir)
+ consider_recording_trackhost(conn, circ);
+ if (connection_ap_handshake_send_begin(conn) < 0)
+ return -1;
+ } else {
+ if (connection_ap_handshake_send_resolve(conn) < 0)
+ return -1;
+ }
+
+ return 1;
+}
+
+/**
+ * Return an appropriate circuit purpose for non-rendezvous streams.
+ * We don't handle rendezvous streams here, because a rend stream triggers
+ * two circuit builds with different purposes; they are handled elsewhere.
+ *
+ * This function only distinguishes the kind of HSDir activity (if any);
+ * every other stream gets CIRCUIT_PURPOSE_C_GENERAL.
+ */
+static int
+connection_ap_get_nonrend_circ_purpose(const entry_connection_t *conn)
+{
+ const connection_t *base_conn = ENTRY_TO_CONN(conn);
+ tor_assert_nonfatal(!connection_edge_is_rendezvous_stream(
+ ENTRY_TO_EDGE_CONN(conn)));
+
+ if (base_conn->linked_conn &&
+ base_conn->linked_conn->type == CONN_TYPE_DIR) {
+ /* Set a custom purpose for hsdir activity */
+ if (base_conn->linked_conn->purpose == DIR_PURPOSE_UPLOAD_RENDDESC_V2 ||
+ base_conn->linked_conn->purpose == DIR_PURPOSE_UPLOAD_HSDESC) {
+ return CIRCUIT_PURPOSE_S_HSDIR_POST;
+ } else if (base_conn->linked_conn->purpose
+ == DIR_PURPOSE_FETCH_RENDDESC_V2 ||
+ base_conn->linked_conn->purpose
+ == DIR_PURPOSE_FETCH_HSDESC) {
+ return CIRCUIT_PURPOSE_C_HSDIR_GET;
+ }
+ }
+
+ /* All other purposes are general for now */
+ return CIRCUIT_PURPOSE_C_GENERAL;
+}
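+
+/* Summary of the mapping above (an editorial addition, not part of the
+ * original code):
+ *
+ *   linked dir conn purpose                        resulting purpose
+ *   UPLOAD_RENDDESC_V2 or UPLOAD_HSDESC        ->  S_HSDIR_POST
+ *   FETCH_RENDDESC_V2 or FETCH_HSDESC          ->  C_HSDIR_GET
+ *   anything else, or no linked dir conn       ->  C_GENERAL */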
+
+/** Try to find a safe live circuit for stream <b>conn</b>. If we find one,
+ * attach the stream, send appropriate cells, and return 1. Otherwise,
+ * try to launch new circuit(s) for the stream. If we can launch
+ * circuits, return 0. Otherwise, if we simply can't proceed with
+ * this stream, return -1. (conn needs to die, and is maybe already marked).
+ */
+/* XXXX this function should mark for close whenever it returns -1;
+ * its callers shouldn't have to worry about that. */
+int
+connection_ap_handshake_attach_circuit(entry_connection_t *conn)
+{
+ connection_t *base_conn = ENTRY_TO_CONN(conn);
+ int retval;
+ int conn_age;
+ int want_onehop;
+
+ tor_assert(conn);
+ tor_assert(base_conn->state == AP_CONN_STATE_CIRCUIT_WAIT);
+ tor_assert(conn->socks_request);
+ want_onehop = conn->want_onehop;
+
+ conn_age = (int)(time(NULL) - base_conn->timestamp_created);
+
+ /* Is this connection so old that we should give up on it? */
+ if (conn_age >= get_options()->SocksTimeout) {
+ int severity = (tor_addr_is_null(&base_conn->addr) && !base_conn->port) ?
+ LOG_INFO : LOG_NOTICE;
+ log_fn(severity, LD_APP,
+ "Tried for %d seconds to get a connection to %s:%d. Giving up.",
+ conn_age, safe_str_client(conn->socks_request->address),
+ conn->socks_request->port);
+ return -1;
+ }
+
+ /* We handle "general" (non-onion) connections much more straightforwardly.
+ */
+ if (!connection_edge_is_rendezvous_stream(ENTRY_TO_EDGE_CONN(conn))) {
+ /* we're a general conn */
+ origin_circuit_t *circ=NULL;
+
+ /* Are we linked to a dir conn that aims to fetch a consensus?
+ * We check here because the conn might no longer be needed. */
+ if (base_conn->linked_conn &&
+ base_conn->linked_conn->type == CONN_TYPE_DIR &&
+ base_conn->linked_conn->purpose == DIR_PURPOSE_FETCH_CONSENSUS) {
+
+ /* Yes we are. Is there a consensus fetch farther along than us? */
+ if (networkstatus_consensus_is_already_downloading(
+ TO_DIR_CONN(base_conn->linked_conn)->requested_resource)) {
+ /* We're doing the "multiple consensus fetch attempts" game from
+ * proposal 210, and we're late to the party. Just close this conn.
+ * The circuit and TLS conn that we made will time out after a while
+ * if nothing else wants to use them. */
+ log_info(LD_DIR, "Closing extra consensus fetch (to %s) since one "
+ "is already downloading.", base_conn->linked_conn->address);
+ return -1;
+ }
+ }
+
+ /* If we have a chosen exit, we need to use a circuit that's
+ * open to that exit. See what exit we meant, and whether we can use it.
+ */
+ if (conn->chosen_exit_name) {
+ const node_t *node = node_get_by_nickname(conn->chosen_exit_name, 0);
+ int opt = conn->chosen_exit_optional;
+ if (!node && !want_onehop) {
+ /* We ran into this warning when trying to extend a circuit to a
+ * hidden service directory for which we didn't have a router
+ * descriptor. See flyspray task 767 for more details. We should
+ * keep this in mind when deciding to use BEGIN_DIR cells for other
+ * directory requests as well. -KL*/
+ log_fn(opt ? LOG_INFO : LOG_WARN, LD_APP,
+ "Requested exit point '%s' is not known. %s.",
+ conn->chosen_exit_name, opt ? "Trying others" : "Closing");
+ if (opt) {
+ /* If we are allowed to ignore the .exit request, do so */
+ conn->chosen_exit_optional = 0;
+ tor_free(conn->chosen_exit_name);
+ return 0;
+ }
+ return -1;
+ }
+ if (node && !connection_ap_can_use_exit(conn, node)) {
+ log_fn(opt ? LOG_INFO : LOG_WARN, LD_APP,
+ "Requested exit point '%s' is excluded or "
+ "would refuse request. %s.",
+ conn->chosen_exit_name, opt ? "Trying others" : "Closing");
+ if (opt) {
+ /* If we are allowed to ignore the .exit request, do so */
+ conn->chosen_exit_optional = 0;
+ tor_free(conn->chosen_exit_name);
+ return 0;
+ }
+ return -1;
+ }
+ }
+
+    /* Find the circuit that we should use, if there is one; otherwise,
+     * launch it. */
+ retval = circuit_get_open_circ_or_launch(conn,
+ connection_ap_get_nonrend_circ_purpose(conn),
+ &circ);
+
+ if (retval < 1) {
+ /* We were either told "-1" (complete failure) or 0 (circuit in
+ * progress); we can't attach this stream yet. */
+ return retval;
+ }
+
+ log_debug(LD_APP|LD_CIRC,
+ "Attaching apconn to circ %u (stream %d sec old).",
+ (unsigned)circ->base_.n_circ_id, conn_age);
+ /* print the circ's path, so clients can figure out which circs are
+ * sucking. */
+ circuit_log_path(LOG_INFO,LD_APP|LD_CIRC,circ);
+
+ /* We have found a suitable circuit for our conn. Hurray. Do
+ * the attachment. */
+ return connection_ap_handshake_attach_chosen_circuit(conn, circ, NULL);
+
+ } else { /* we're a rendezvous conn */
+ origin_circuit_t *rendcirc=NULL, *introcirc=NULL;
+
+ tor_assert(!ENTRY_TO_EDGE_CONN(conn)->cpath_layer);
+
+ /* start by finding a rendezvous circuit for us */
+
+ retval = circuit_get_open_circ_or_launch(
+ conn, CIRCUIT_PURPOSE_C_REND_JOINED, &rendcirc);
+ if (retval < 0) return -1; /* failed */
+
+ if (retval > 0) {
+ tor_assert(rendcirc);
+ /* one is already established, attach */
+ log_info(LD_REND,
+ "rend joined circ %u (id: %" PRIu32 ") already here. "
+ "Attaching. (stream %d sec old)",
+ (unsigned) TO_CIRCUIT(rendcirc)->n_circ_id,
+ rendcirc->global_identifier, conn_age);
+ /* Mark rendezvous circuits as 'newly dirty' every time you use
+ * them, since the process of rebuilding a rendezvous circ is so
+ * expensive. There is a tradeoff between linkability and
+ * feasibility, at this point.
+ */
+ rendcirc->base_.timestamp_dirty = time(NULL);
+
+ /* We've also attempted to use them. If they fail, we need to
+ * probe them for path bias */
+ pathbias_count_use_attempt(rendcirc);
+
+ link_apconn_to_circ(conn, rendcirc, NULL);
+ if (connection_ap_handshake_send_begin(conn) < 0)
+ return 0; /* already marked, let them fade away */
+ return 1;
+ }
+
+ /* At this point we need to re-check the state, since it's possible that
+ * our call to circuit_get_open_circ_or_launch() changed the connection's
+ * state from "CIRCUIT_WAIT" to "RENDDESC_WAIT" because we decided to
+ * re-fetch the descriptor.
+ */
+ if (ENTRY_TO_CONN(conn)->state != AP_CONN_STATE_CIRCUIT_WAIT) {
+ log_info(LD_REND, "This connection is no longer ready to attach; its "
+ "state changed."
+ "(We probably have to re-fetch its descriptor.)");
+ return 0;
+ }
+
+ if (rendcirc && (rendcirc->base_.purpose ==
+ CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED)) {
+ log_info(LD_REND,
+ "pending-join circ %u (id: %" PRIu32 ") already here, with "
+ "intro ack. Stalling. (stream %d sec old)",
+ (unsigned) TO_CIRCUIT(rendcirc)->n_circ_id,
+ rendcirc->global_identifier, conn_age);
+ return 0;
+ }
+
+ /* it's on its way. find an intro circ. */
+ retval = circuit_get_open_circ_or_launch(
+ conn, CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT, &introcirc);
+ if (retval < 0) return -1; /* failed */
+
+ if (retval > 0) {
+ /* one has already sent the intro. keep waiting. */
+ tor_assert(introcirc);
+ log_info(LD_REND, "Intro circ %u (id: %" PRIu32 ") present and "
+ "awaiting ACK. Rend circuit %u (id: %" PRIu32 "). "
+ "Stalling. (stream %d sec old)",
+ (unsigned) TO_CIRCUIT(introcirc)->n_circ_id,
+ introcirc->global_identifier,
+ rendcirc ? (unsigned) TO_CIRCUIT(rendcirc)->n_circ_id : 0,
+ rendcirc ? rendcirc->global_identifier : 0,
+ conn_age);
+ return 0;
+ }
+
+ /* now rendcirc and introcirc are each either undefined or not finished */
+
+ if (rendcirc && introcirc &&
+ rendcirc->base_.purpose == CIRCUIT_PURPOSE_C_REND_READY) {
+ log_info(LD_REND,
+ "ready rend circ %u (id: %" PRIu32 ") already here. No"
+ "intro-ack yet on intro %u (id: %" PRIu32 "). "
+ "(stream %d sec old)",
+ (unsigned) TO_CIRCUIT(rendcirc)->n_circ_id,
+ rendcirc->global_identifier,
+ (unsigned) TO_CIRCUIT(introcirc)->n_circ_id,
+ introcirc->global_identifier, conn_age);
+
+ tor_assert(introcirc->base_.purpose == CIRCUIT_PURPOSE_C_INTRODUCING);
+ if (introcirc->base_.state == CIRCUIT_STATE_OPEN) {
+ int ret;
+ log_info(LD_REND, "Found open intro circ %u (id: %" PRIu32 "). "
+ "Rend circuit %u (id: %" PRIu32 "); Sending "
+ "introduction. (stream %d sec old)",
+ (unsigned) TO_CIRCUIT(introcirc)->n_circ_id,
+ introcirc->global_identifier,
+ (unsigned) TO_CIRCUIT(rendcirc)->n_circ_id,
+ rendcirc->global_identifier, conn_age);
+ ret = hs_client_send_introduce1(introcirc, rendcirc);
+ switch (ret) {
+ case 0: /* success */
+ rendcirc->base_.timestamp_dirty = time(NULL);
+ introcirc->base_.timestamp_dirty = time(NULL);
+
+ pathbias_count_use_attempt(introcirc);
+ pathbias_count_use_attempt(rendcirc);
+
+ assert_circuit_ok(TO_CIRCUIT(rendcirc));
+ assert_circuit_ok(TO_CIRCUIT(introcirc));
+ return 0;
+ case -1: /* transient error */
+ return 0;
+ case -2: /* permanent error */
+ return -1;
+ default: /* oops */
+ tor_fragile_assert();
+ return -1;
+ }
+ }
+ }
+
+ log_info(LD_REND, "Intro %u (id: %" PRIu32 ") and rend circuit %u "
+ "(id: %" PRIu32 ") circuits are not both ready. "
+ "Stalling conn. (%d sec old)",
+ introcirc ? (unsigned) TO_CIRCUIT(introcirc)->n_circ_id : 0,
+ introcirc ? introcirc->global_identifier : 0,
+ rendcirc ? (unsigned) TO_CIRCUIT(rendcirc)->n_circ_id : 0,
+ rendcirc ? rendcirc->global_identifier : 0, conn_age);
+ return 0;
+ }
+}
+
+/** Change <b>circ</b>'s purpose to <b>new_purpose</b>. */
+void
+circuit_change_purpose(circuit_t *circ, uint8_t new_purpose)
+{
+ uint8_t old_purpose;
+ /* Don't allow an OR circ to become an origin circ or vice versa. */
+ tor_assert(!!(CIRCUIT_IS_ORIGIN(circ)) ==
+ !!(CIRCUIT_PURPOSE_IS_ORIGIN(new_purpose)));
+
+ if (circ->purpose == new_purpose) return;
+
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ char old_purpose_desc[80] = "";
+
+ strncpy(old_purpose_desc, circuit_purpose_to_string(circ->purpose), 80-1);
+ old_purpose_desc[80-1] = '\0';
+
+ log_debug(LD_CIRC,
+ "changing purpose of origin circ %d "
+ "from \"%s\" (%d) to \"%s\" (%d)",
+ TO_ORIGIN_CIRCUIT(circ)->global_identifier,
+ old_purpose_desc,
+ circ->purpose,
+ circuit_purpose_to_string(new_purpose),
+ new_purpose);
+ }
+
+ old_purpose = circ->purpose;
+ circ->purpose = new_purpose;
+
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ control_event_circuit_purpose_changed(TO_ORIGIN_CIRCUIT(circ),
+ old_purpose);
+ }
+}
+
+/** Mark <b>circ</b> so that no more connections can be attached to it. */
+void
+mark_circuit_unusable_for_new_conns(origin_circuit_t *circ)
+{
+ const or_options_t *options = get_options();
+ tor_assert(circ);
+
+ /* XXXX This is a kludge; we're only keeping it around in case there's
+ * something that doesn't check unusable_for_new_conns, and to avoid
+ * deeper refactoring of our expiration logic. */
+ if (! circ->base_.timestamp_dirty)
+ circ->base_.timestamp_dirty = approx_time();
+ if (options->MaxCircuitDirtiness >= circ->base_.timestamp_dirty)
+ circ->base_.timestamp_dirty = 1; /* prevent underflow */
+ else
+ circ->base_.timestamp_dirty -= options->MaxCircuitDirtiness;
+
+ circ->unusable_for_new_conns = 1;
+}
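+
+/* Worked example (an editorial addition, not part of the original code):
+ * with MaxCircuitDirtiness at its default of 600 seconds and
+ * timestamp_dirty == 100000, the subtraction above leaves 99400, so the
+ * circuit immediately looks MaxCircuitDirtiness seconds old to the
+ * expiration logic.  The clamp to 1 only matters when timestamp_dirty is
+ * not larger than MaxCircuitDirtiness, where the subtraction would
+ * otherwise underflow. */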
+
+/**
+ * Add <b>relay_body_len</b> to the circuit's valid-delivered-written
+ * counter, and RELAY_PAYLOAD_SIZE-relay_body_len to its write-overhead
+ * counter.
+ */
+void
+circuit_sent_valid_data(origin_circuit_t *circ, uint16_t relay_body_len)
+{
+ if (!circ) return;
+
+ tor_assert_nonfatal(relay_body_len <= RELAY_PAYLOAD_SIZE);
+
+ circ->n_delivered_written_circ_bw =
+ tor_add_u32_nowrap(circ->n_delivered_written_circ_bw, relay_body_len);
+ circ->n_overhead_written_circ_bw =
+ tor_add_u32_nowrap(circ->n_overhead_written_circ_bw,
+ RELAY_PAYLOAD_SIZE-relay_body_len);
+}
+
+/**
+ * Add <b>relay_body_len</b> to the circuit's valid-delivered-read
+ * counter, and RELAY_PAYLOAD_SIZE-relay_body_len to its read-overhead
+ * counter.
+ */
+void
+circuit_read_valid_data(origin_circuit_t *circ, uint16_t relay_body_len)
+{
+ if (!circ) return;
+
+ tor_assert_nonfatal(relay_body_len <= RELAY_PAYLOAD_SIZE);
+
+ circ->n_delivered_read_circ_bw =
+ tor_add_u32_nowrap(circ->n_delivered_read_circ_bw, relay_body_len);
+ circ->n_overhead_read_circ_bw =
+ tor_add_u32_nowrap(circ->n_overhead_read_circ_bw,
+ RELAY_PAYLOAD_SIZE-relay_body_len);
+}
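+
+/* Worked example (an editorial addition, not part of the original code):
+ * a relay cell delivering 100 payload bytes adds 100 to the
+ * valid-delivered counter and RELAY_PAYLOAD_SIZE - 100 = 398 bytes to
+ * the overhead counter, since every relay cell is padded to a fixed
+ * size on the wire (RELAY_PAYLOAD_SIZE is 498 bytes here). */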
diff --git a/src/core/or/circuituse.h b/src/core/or/circuituse.h
new file mode 100644
index 0000000000..b65e85d170
--- /dev/null
+++ b/src/core/or/circuituse.h
@@ -0,0 +1,93 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circuituse.h
+ * \brief Header file for circuituse.c.
+ **/
+
+#ifndef TOR_CIRCUITUSE_H
+#define TOR_CIRCUITUSE_H
+
+void circuit_expire_building(void);
+void circuit_expire_waiting_for_better_guard(void);
+void circuit_remove_handled_ports(smartlist_t *needed_ports);
+int circuit_stream_is_being_handled(entry_connection_t *conn, uint16_t port,
+ int min);
+void circuit_log_ancient_one_hop_circuits(int age);
+#if 0
+int circuit_conforms_to_options(const origin_circuit_t *circ,
+ const or_options_t *options);
+#endif
+void circuit_build_needed_circs(time_t now);
+void circuit_expire_old_circs_as_needed(time_t now);
+void circuit_detach_stream(circuit_t *circ, edge_connection_t *conn);
+
+void circuit_expire_old_circuits_serverside(time_t now);
+
+void reset_bandwidth_test(void);
+int circuit_enough_testing_circs(void);
+
+void circuit_has_opened(origin_circuit_t *circ);
+void circuit_try_attaching_streams(origin_circuit_t *circ);
+void circuit_build_failed(origin_circuit_t *circ);
+
+/** Flag to set when a circuit should have only a single hop. */
+#define CIRCLAUNCH_ONEHOP_TUNNEL (1<<0)
+/** Flag to set when a circuit needs to be built of high-uptime nodes */
+#define CIRCLAUNCH_NEED_UPTIME (1<<1)
+/** Flag to set when a circuit needs to be built of high-capacity nodes */
+#define CIRCLAUNCH_NEED_CAPACITY (1<<2)
+/** Flag to set when the last hop of a circuit doesn't need to be an
+ * exit node. */
+#define CIRCLAUNCH_IS_INTERNAL (1<<3)
+/** Flag to set when we are trying to launch a v3 rendezvous circuit. We need
+ * to apply some additional filters on the node picked. */
+#define CIRCLAUNCH_IS_V3_RP (1<<4)
+origin_circuit_t *circuit_launch_by_extend_info(uint8_t purpose,
+ extend_info_t *info,
+ int flags);
+origin_circuit_t *circuit_launch(uint8_t purpose, int flags);
+void circuit_reset_failure_count(int timeout);
+int connection_ap_handshake_attach_chosen_circuit(entry_connection_t *conn,
+ origin_circuit_t *circ,
+ crypt_path_t *cpath);
+int connection_ap_handshake_attach_circuit(entry_connection_t *conn);
+
+void circuit_change_purpose(circuit_t *circ, uint8_t new_purpose);
+
+int hostname_in_track_host_exits(const or_options_t *options,
+ const char *address);
+void mark_circuit_unusable_for_new_conns(origin_circuit_t *circ);
+
+int circuit_purpose_is_hidden_service(uint8_t);
+int circuit_should_use_vanguards(uint8_t);
+void circuit_sent_valid_data(origin_circuit_t *circ, uint16_t relay_body_len);
+void circuit_read_valid_data(origin_circuit_t *circ, uint16_t relay_body_len);
+
+#ifdef TOR_UNIT_TESTS
+/* Used only by circuituse.c and test_circuituse.c */
+
+STATIC int circuit_is_available_for_use(const circuit_t *circ);
+
+STATIC int needs_exit_circuits(time_t now,
+ int *port_needs_uptime,
+ int *port_needs_capacity);
+STATIC int needs_hs_server_circuits(time_t now,
+ int num_uptime_internal);
+
+STATIC int needs_hs_client_circuits(time_t now,
+ int *needs_uptime,
+ int *needs_capacity,
+ int num_internal,
+ int num_uptime_internal);
+
+STATIC int needs_circuits_for_build(int num);
+
+#endif /* defined(TOR_UNIT_TESTS) */
+
+#endif /* !defined(TOR_CIRCUITUSE_H) */
+
diff --git a/src/core/or/command.c b/src/core/or/command.c
new file mode 100644
index 0000000000..c2d6e2bb26
--- /dev/null
+++ b/src/core/or/command.c
@@ -0,0 +1,665 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file command.c
+ * \brief Functions for processing incoming cells.
+ *
+ * When we receive a cell from a client or a relay, it arrives on some
+ * channel, and tells us what to do with it. In this module, we dispatch based
+ * on the cell type using the functions command_process_cell() and
+ * command_process_var_cell(), and deal with the cell accordingly. (These
+ * handlers are installed on a channel with the command_setup_channel()
+ * function.)
+ *
+ * Channels have a chance to handle some cell types on their own before they
+ * are ever passed here --- typically, they do this for cells that are
+ * specific to a given channel type. For example, in channeltls.c, the cells
+ * for the initial connection handshake are handled before we get here. (Of
+ * course, the fact that there _is_ only one channel type for now means that
+ * we may have gotten the factoring wrong here.)
+ *
+ * Handling other cell types is mainly farmed off to other modules, after
+ * initial sanity-checking. CREATE* cells are handled ultimately in onion.c,
+ * CREATED* cells trigger circuit creation in circuitbuild.c, DESTROY cells
+ * are handled here (since they're simple), and RELAY cells, in all their
+ * complexity, are passed off to relay.c.
+ **/
+
+/* In-points to command.c:
+ *
+ * - command_process_cell(), called from
+ * incoming cell handlers of channel_t instances;
+ * callbacks registered in command_setup_channel(),
+ * called when channels are created in circuitbuild.c
+ */
+#include "or/or.h"
+#include "or/channel.h"
+#include "or/circuitbuild.h"
+#include "or/circuitlist.h"
+#include "or/command.h"
+#include "or/connection.h"
+#include "or/connection_or.h"
+#include "or/config.h"
+#include "or/control.h"
+#include "or/cpuworker.h"
+#include "lib/crypt_ops/crypto_util.h"
+#include "or/dos.h"
+#include "or/hibernate.h"
+#include "or/nodelist.h"
+#include "or/onion.h"
+#include "or/rephist.h"
+#include "or/relay.h"
+#include "or/router.h"
+#include "or/routerlist.h"
+
+#include "or/cell_st.h"
+#include "or/or_circuit_st.h"
+#include "or/origin_circuit_st.h"
+#include "or/var_cell_st.h"
+
+/** How many CELL_CREATE* cells (CREATE, CREATE_FAST, or CREATE2) have we
+ * received, ever? */
+uint64_t stats_n_create_cells_processed = 0;
+/** How many CELL_CREATED* cells (CREATED, CREATED_FAST, or CREATED2) have
+ * we received, ever? */
+uint64_t stats_n_created_cells_processed = 0;
+/** How many CELL_RELAY or CELL_RELAY_EARLY cells have we received, ever? */
+uint64_t stats_n_relay_cells_processed = 0;
+/** How many CELL_DESTROY cells have we received, ever? */
+uint64_t stats_n_destroy_cells_processed = 0;
+
+/* Handle an incoming channel */
+static void command_handle_incoming_channel(channel_listener_t *listener,
+ channel_t *chan);
+
+/* These are the main functions for processing cells */
+static void command_process_create_cell(cell_t *cell, channel_t *chan);
+static void command_process_created_cell(cell_t *cell, channel_t *chan);
+static void command_process_relay_cell(cell_t *cell, channel_t *chan);
+static void command_process_destroy_cell(cell_t *cell, channel_t *chan);
+
+/** Convert the cell <b>command</b> into a lower-case, human-readable
+ * string. */
+const char *
+cell_command_to_string(uint8_t command)
+{
+ switch (command) {
+ case CELL_PADDING: return "padding";
+ case CELL_CREATE: return "create";
+ case CELL_CREATED: return "created";
+ case CELL_RELAY: return "relay";
+ case CELL_DESTROY: return "destroy";
+ case CELL_CREATE_FAST: return "create_fast";
+ case CELL_CREATED_FAST: return "created_fast";
+ case CELL_VERSIONS: return "versions";
+ case CELL_NETINFO: return "netinfo";
+ case CELL_RELAY_EARLY: return "relay_early";
+ case CELL_CREATE2: return "create2";
+ case CELL_CREATED2: return "created2";
+ case CELL_VPADDING: return "vpadding";
+ case CELL_CERTS: return "certs";
+ case CELL_AUTH_CHALLENGE: return "auth_challenge";
+ case CELL_AUTHENTICATE: return "authenticate";
+ case CELL_AUTHORIZE: return "authorize";
+ default: return "unrecognized";
+ }
+}
+
+#ifdef KEEP_TIMING_STATS
+/** This is a wrapper function around the actual function that processes the
+ * <b>cell</b> that just arrived on <b>conn</b>. Increment <b>*time</b>
+ * by the number of microseconds used by the call to <b>*func(cell, conn)</b>.
+ */
+static void
+command_time_process_cell(cell_t *cell, channel_t *chan, int *time,
+ void (*func)(cell_t *, channel_t *))
+{
+ struct timeval start, end;
+ long time_passed;
+
+ tor_gettimeofday(&start);
+
+ (*func)(cell, chan);
+
+ tor_gettimeofday(&end);
+  time_passed = tv_udiff(&start, &end);
+
+ if (time_passed > 10000) { /* more than 10ms */
+ log_debug(LD_OR,"That call just took %ld ms.",time_passed/1000);
+ }
+ if (time_passed < 0) {
+ log_info(LD_GENERAL,"That call took us back in time!");
+ time_passed = 0;
+ }
+ *time += time_passed;
+}
+#endif /* defined(KEEP_TIMING_STATS) */
+
+/** Process a <b>cell</b> that was just received on <b>chan</b>. When
+ * KEEP_TIMING_STATS is defined, also keep internal statistics about how
+ * many of each cell we've processed so far this second, and the total
+ * number of microseconds it took to process each type of cell.
+ */
+void
+command_process_cell(channel_t *chan, cell_t *cell)
+{
+#ifdef KEEP_TIMING_STATS
+ /* how many of each cell have we seen so far this second? needs better
+ * name. */
+ static int num_create=0, num_created=0, num_relay=0, num_destroy=0;
+ /* how long has it taken to process each type of cell? */
+ static int create_time=0, created_time=0, relay_time=0, destroy_time=0;
+ static time_t current_second = 0; /* from previous calls to time */
+
+ time_t now = time(NULL);
+
+ if (now > current_second) { /* the second has rolled over */
+ /* print stats */
+ log_info(LD_OR,
+ "At end of second: %d creates (%d ms), %d createds (%d ms), "
+ "%d relays (%d ms), %d destroys (%d ms)",
+ num_create, create_time/1000,
+ num_created, created_time/1000,
+ num_relay, relay_time/1000,
+ num_destroy, destroy_time/1000);
+
+ /* zero out stats */
+ num_create = num_created = num_relay = num_destroy = 0;
+ create_time = created_time = relay_time = destroy_time = 0;
+
+ /* remember which second it is, for next time */
+ current_second = now;
+ }
+#endif /* defined(KEEP_TIMING_STATS) */
+
+#ifdef KEEP_TIMING_STATS
+#define PROCESS_CELL(tp, cl, cn) STMT_BEGIN {                   \
+    ++num_ ## tp;                                               \
+    command_time_process_cell(cl, cn, & tp ## _time ,           \
+                              command_process_ ## tp ## _cell); \
+  } STMT_END
+#else /* !(defined(KEEP_TIMING_STATS)) */
+#define PROCESS_CELL(tp, cl, cn) command_process_ ## tp ## _cell(cl, cn)
+#endif /* defined(KEEP_TIMING_STATS) */
+
+ switch (cell->command) {
+ case CELL_CREATE:
+ case CELL_CREATE_FAST:
+ case CELL_CREATE2:
+ ++stats_n_create_cells_processed;
+ PROCESS_CELL(create, cell, chan);
+ break;
+ case CELL_CREATED:
+ case CELL_CREATED_FAST:
+ case CELL_CREATED2:
+ ++stats_n_created_cells_processed;
+ PROCESS_CELL(created, cell, chan);
+ break;
+ case CELL_RELAY:
+ case CELL_RELAY_EARLY:
+ ++stats_n_relay_cells_processed;
+ PROCESS_CELL(relay, cell, chan);
+ break;
+ case CELL_DESTROY:
+ ++stats_n_destroy_cells_processed;
+ PROCESS_CELL(destroy, cell, chan);
+ break;
+ default:
+ log_fn(LOG_INFO, LD_PROTOCOL,
+ "Cell of unknown or unexpected type (%d) received. "
+ "Dropping.",
+ cell->command);
+ break;
+ }
+}
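+
+/* Illustrative expansion (an editorial addition, not part of the
+ * original code): without KEEP_TIMING_STATS,
+ *
+ *   PROCESS_CELL(create, cell, chan);
+ *
+ * is simply command_process_create_cell(cell, chan).  With
+ * KEEP_TIMING_STATS it also increments num_create and accumulates the
+ * call's duration into create_time via command_time_process_cell(). */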
+
+/** Process an incoming var_cell from a channel; in the current protocol all
+ * the var_cells are handshake-related and handled below the channel layer,
+ * so this just logs the cell at info level and drops it.
+ */
+void
+command_process_var_cell(channel_t *chan, var_cell_t *var_cell)
+{
+ tor_assert(chan);
+ tor_assert(var_cell);
+
+ log_info(LD_PROTOCOL,
+ "Received unexpected var_cell above the channel layer of type %d"
+ "; dropping it.",
+ var_cell->command);
+}
+
+/** Process a 'create' <b>cell</b> that just arrived from <b>chan</b>. Make a
+ * new circuit with the p_circ_id specified in cell. Put the circuit in state
+ * onionskin_pending, and pass the onionskin to the cpuworker. Circ will get
+ * picked up again when the cpuworker finishes decrypting it.
+ */
+static void
+command_process_create_cell(cell_t *cell, channel_t *chan)
+{
+ or_circuit_t *circ;
+ const or_options_t *options = get_options();
+ int id_is_high;
+ create_cell_t *create_cell;
+
+ tor_assert(cell);
+ tor_assert(chan);
+
+ log_debug(LD_OR,
+ "Got a CREATE cell for circ_id %u on channel %"PRIu64
+ " (%p)",
+ (unsigned)cell->circ_id,
+ (chan->global_identifier), chan);
+
+  /* The first thing we do, even though the cell might be invalid, is inform
+   * the DoS mitigation subsystem of this event; dos_cc_new_create_cell()
+   * does its own validation. */
+ dos_cc_new_create_cell(chan);
+
+ /* We check for the conditions that would make us drop the cell before
+ * we check for the conditions that would make us send a DESTROY back,
+ * since those conditions would make a DESTROY nonsensical. */
+ if (cell->circ_id == 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Received a create cell (type %d) from %s with zero circID; "
+ " ignoring.", (int)cell->command,
+ channel_get_actual_remote_descr(chan));
+ return;
+ }
+
+ if (circuit_id_in_use_on_channel(cell->circ_id, chan)) {
+ const node_t *node = node_get_by_id(chan->identity_digest);
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Received CREATE cell (circID %u) for known circ. "
+ "Dropping (age %d).",
+ (unsigned)cell->circ_id,
+ (int)(time(NULL) - channel_when_created(chan)));
+ if (node) {
+ char *p = esc_for_log(node_get_platform(node));
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Details: router %s, platform %s.",
+ node_describe(node), p);
+ tor_free(p);
+ }
+ return;
+ }
+
+ if (we_are_hibernating()) {
+ log_info(LD_OR,
+ "Received create cell but we're shutting down. Sending back "
+ "destroy.");
+ channel_send_destroy(cell->circ_id, chan,
+ END_CIRC_REASON_HIBERNATING);
+ return;
+ }
+
+ /* Check if we should apply a defense for this channel. */
+ if (dos_cc_get_defense_type(chan) == DOS_CC_DEFENSE_REFUSE_CELL) {
+ channel_send_destroy(cell->circ_id, chan,
+ END_CIRC_REASON_RESOURCELIMIT);
+ return;
+ }
+
+ if (!server_mode(options) ||
+ (!public_server_mode(options) && channel_is_outgoing(chan))) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Received create cell (type %d) from %s, but we're connected "
+ "to it as a client. "
+ "Sending back a destroy.",
+ (int)cell->command, channel_get_canonical_remote_descr(chan));
+ channel_send_destroy(cell->circ_id, chan,
+ END_CIRC_REASON_TORPROTOCOL);
+ return;
+ }
+
+ /* If the high bit of the circuit ID is not as expected, close the
+ * circ. */
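+  /* Illustrative note (an editorial addition, not part of the original
+   * code): the two sides of a channel allocate circuit IDs from opposite
+   * halves of the ID space, so a CREATE cell must carry an ID from the
+   * peer's half.  With wide (4-byte) circ IDs the halves split on bit 31;
+   * with legacy 2-byte IDs, on bit 15. */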
+ if (chan->wide_circ_ids)
+ id_is_high = cell->circ_id & (1u<<31);
+ else
+ id_is_high = cell->circ_id & (1u<<15);
+ if ((id_is_high &&
+ chan->circ_id_type == CIRC_ID_TYPE_HIGHER) ||
+ (!id_is_high &&
+ chan->circ_id_type == CIRC_ID_TYPE_LOWER)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Received create cell with unexpected circ_id %u. Closing.",
+ (unsigned)cell->circ_id);
+ channel_send_destroy(cell->circ_id, chan,
+ END_CIRC_REASON_TORPROTOCOL);
+ return;
+ }
+
+ circ = or_circuit_new(cell->circ_id, chan);
+ circ->base_.purpose = CIRCUIT_PURPOSE_OR;
+ circuit_set_state(TO_CIRCUIT(circ), CIRCUIT_STATE_ONIONSKIN_PENDING);
+ create_cell = tor_malloc_zero(sizeof(create_cell_t));
+ if (create_cell_parse(create_cell, cell) < 0) {
+ tor_free(create_cell);
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Bogus/unrecognized create cell; closing.");
+ circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_TORPROTOCOL);
+ return;
+ }
+
+ if (!channel_is_client(chan)) {
+ /* remember create types we've seen, but don't remember them from
+ * clients, to be extra conservative about client statistics. */
+ rep_hist_note_circuit_handshake_requested(create_cell->handshake_type);
+ }
+
+ if (create_cell->handshake_type != ONION_HANDSHAKE_TYPE_FAST) {
+ /* hand it off to the cpuworkers, and then return. */
+
+ if (assign_onionskin_to_cpuworker(circ, create_cell) < 0) {
+ log_debug(LD_GENERAL,"Failed to hand off onionskin. Closing.");
+ circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_RESOURCELIMIT);
+ return;
+ }
+ log_debug(LD_OR,"success: handed off onionskin.");
+ } else {
+ /* This is a CREATE_FAST cell; we can handle it immediately without using
+ * a CPU worker. */
+ uint8_t keys[CPATH_KEY_MATERIAL_LEN];
+ uint8_t rend_circ_nonce[DIGEST_LEN];
+ int len;
+ created_cell_t created_cell;
+
+ memset(&created_cell, 0, sizeof(created_cell));
+ len = onion_skin_server_handshake(ONION_HANDSHAKE_TYPE_FAST,
+ create_cell->onionskin,
+ create_cell->handshake_len,
+ NULL,
+ created_cell.reply,
+ keys, CPATH_KEY_MATERIAL_LEN,
+ rend_circ_nonce);
+ tor_free(create_cell);
+ if (len < 0) {
+ log_warn(LD_OR,"Failed to generate key material. Closing.");
+ circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_INTERNAL);
+ return;
+ }
+ created_cell.cell_type = CELL_CREATED_FAST;
+ created_cell.handshake_len = len;
+
+ if (onionskin_answer(circ, &created_cell,
+ (const char *)keys, sizeof(keys),
+ rend_circ_nonce)<0) {
+ log_warn(LD_OR,"Failed to reply to CREATE_FAST cell. Closing.");
+ circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_INTERNAL);
+ return;
+ }
+ memwipe(keys, 0, sizeof(keys));
+ }
+}
+
+/** Process a 'created' <b>cell</b> that just arrived from <b>chan</b>.
+ * Find the circuit
+ * that it's intended for. If we're not the origin of the circuit, package
+ * the 'created' cell in an 'extended' relay cell and pass it back. If we
+ * are the origin of the circuit, send it to circuit_finish_handshake() to
+ * finish processing keys, and then call circuit_send_next_onion_skin() to
+ * extend to the next hop in the circuit if necessary.
+ */
+static void
+command_process_created_cell(cell_t *cell, channel_t *chan)
+{
+ circuit_t *circ;
+ extended_cell_t extended_cell;
+
+ circ = circuit_get_by_circid_channel(cell->circ_id, chan);
+
+ if (!circ) {
+ log_info(LD_OR,
+ "(circID %u) unknown circ (probably got a destroy earlier). "
+ "Dropping.", (unsigned)cell->circ_id);
+ return;
+ }
+
+ if (circ->n_circ_id != cell->circ_id || circ->n_chan != chan) {
+ log_fn(LOG_PROTOCOL_WARN,LD_PROTOCOL,
+ "got created cell from Tor client? Closing.");
+ circuit_mark_for_close(circ, END_CIRC_REASON_TORPROTOCOL);
+ return;
+ }
+
+ if (created_cell_parse(&extended_cell.created_cell, cell) < 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR, "Unparseable created cell.");
+ circuit_mark_for_close(circ, END_CIRC_REASON_TORPROTOCOL);
+ return;
+ }
+
+ if (CIRCUIT_IS_ORIGIN(circ)) { /* we're the OP. Handshake this. */
+ origin_circuit_t *origin_circ = TO_ORIGIN_CIRCUIT(circ);
+ int err_reason = 0;
+ log_debug(LD_OR,"at OP. Finishing handshake.");
+ if ((err_reason = circuit_finish_handshake(origin_circ,
+ &extended_cell.created_cell)) < 0) {
+ circuit_mark_for_close(circ, -err_reason);
+ return;
+ }
+ log_debug(LD_OR,"Moving to next skin.");
+ if ((err_reason = circuit_send_next_onion_skin(origin_circ)) < 0) {
+ log_info(LD_OR,"circuit_send_next_onion_skin failed.");
+ /* XXX push this circuit_close lower */
+ circuit_mark_for_close(circ, -err_reason);
+ return;
+ }
+ } else { /* pack it into an extended relay cell, and send it. */
+ uint8_t command=0;
+ uint16_t len=0;
+ uint8_t payload[RELAY_PAYLOAD_SIZE];
+ log_debug(LD_OR,
+ "Converting created cell to extended relay cell, sending.");
+ memset(payload, 0, sizeof(payload));
+ if (extended_cell.created_cell.cell_type == CELL_CREATED2)
+ extended_cell.cell_type = RELAY_COMMAND_EXTENDED2;
+ else
+ extended_cell.cell_type = RELAY_COMMAND_EXTENDED;
+ if (extended_cell_format(&command, &len, payload, &extended_cell) < 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR, "Can't format extended cell.");
+ circuit_mark_for_close(circ, END_CIRC_REASON_TORPROTOCOL);
+ return;
+ }
+
+ relay_send_command_from_edge(0, circ, command,
+ (const char*)payload, len, NULL);
+ }
+}
+
+/** Process a 'relay' or 'relay_early' <b>cell</b> that just arrived from
+ * <b>chan</b>. Make sure it came in with a recognized circ_id. Pass it on to
+ * circuit_receive_relay_cell() for actual processing.
+ */
+static void
+command_process_relay_cell(cell_t *cell, channel_t *chan)
+{
+ const or_options_t *options = get_options();
+ circuit_t *circ;
+ int reason, direction;
+
+ circ = circuit_get_by_circid_channel(cell->circ_id, chan);
+
+ if (!circ) {
+ log_debug(LD_OR,
+ "unknown circuit %u on connection from %s. Dropping.",
+ (unsigned)cell->circ_id,
+ channel_get_canonical_remote_descr(chan));
+ return;
+ }
+
+ if (circ->state == CIRCUIT_STATE_ONIONSKIN_PENDING) {
+ log_fn(LOG_PROTOCOL_WARN,LD_PROTOCOL,"circuit in create_wait. Closing.");
+ circuit_mark_for_close(circ, END_CIRC_REASON_TORPROTOCOL);
+ return;
+ }
+
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+    /* If we're a relay that treats connections with recent local
+     * traffic better, then this connection is one of them. */
+ channel_timestamp_client(chan);
+
+ /* Count all circuit bytes here for control port accuracy. We want
+ * to count even invalid/dropped relay cells, hence counting
+     * before the "recognized" check and before the checks in
+     * connection_edge_process_relay_cell().
+     */
+ origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
+
+ /* Count the payload bytes only. We don't care about cell headers */
+ ocirc->n_read_circ_bw = tor_add_u32_nowrap(ocirc->n_read_circ_bw,
+ CELL_PAYLOAD_SIZE);
+ }
+
+ if (!CIRCUIT_IS_ORIGIN(circ) &&
+ chan == TO_OR_CIRCUIT(circ)->p_chan &&
+ cell->circ_id == TO_OR_CIRCUIT(circ)->p_circ_id)
+ direction = CELL_DIRECTION_OUT;
+ else
+ direction = CELL_DIRECTION_IN;
+
+ /* If we have a relay_early cell, make sure that it's outbound, and we've
+ * gotten no more than MAX_RELAY_EARLY_CELLS_PER_CIRCUIT of them. */
+ if (cell->command == CELL_RELAY_EARLY) {
+ if (direction == CELL_DIRECTION_IN) {
+ /* Inbound early cells could once be encountered as a result of
+ * bug 1038; but relays running versions before 0.2.1.19 are long
+ * gone from the network, so any such cells now are surprising. */
+ log_warn(LD_OR,
+ "Received an inbound RELAY_EARLY cell on circuit %u."
+ " Closing circuit. Please report this event,"
+ " along with the following message.",
+ (unsigned)cell->circ_id);
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ circuit_log_path(LOG_WARN, LD_OR, TO_ORIGIN_CIRCUIT(circ));
+ } else if (circ->n_chan) {
+ log_warn(LD_OR, " upstream=%s",
+ channel_get_actual_remote_descr(circ->n_chan));
+ }
+ circuit_mark_for_close(circ, END_CIRC_REASON_TORPROTOCOL);
+ return;
+ } else {
+ or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
+ if (or_circ->remaining_relay_early_cells == 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Received too many RELAY_EARLY cells on circ %u from %s."
+ " Closing circuit.",
+ (unsigned)cell->circ_id,
+ safe_str(channel_get_canonical_remote_descr(chan)));
+ circuit_mark_for_close(circ, END_CIRC_REASON_TORPROTOCOL);
+ return;
+ }
+ --or_circ->remaining_relay_early_cells;
+ }
+ }
+
+ if ((reason = circuit_receive_relay_cell(cell, circ, direction)) < 0) {
+ log_fn(LOG_PROTOCOL_WARN,LD_PROTOCOL,"circuit_receive_relay_cell "
+ "(%s) failed. Closing.",
+ direction==CELL_DIRECTION_OUT?"forward":"backward");
+ circuit_mark_for_close(circ, -reason);
+ }
+
+ /* If this is a cell in an RP circuit, count it as part of the
+ hidden service stats */
+ if (options->HiddenServiceStatistics &&
+ !CIRCUIT_IS_ORIGIN(circ) &&
+ TO_OR_CIRCUIT(circ)->circuit_carries_hs_traffic_stats) {
+ rep_hist_seen_new_rp_cell();
+ }
+}
+
+/** Process a 'destroy' <b>cell</b> that just arrived from
+ * <b>chan</b>. Find the circ that it refers to (if any).
+ *
+ * If the circ is in state
+ * onionskin_pending, then call onion_pending_remove() to remove it
+ * from the pending onion list (note that if it's already being
+ * processed by the cpuworker, it won't be in the list anymore; but
+ * when the cpuworker returns it, the circuit will be gone, and the
+ * cpuworker response will be dropped).
+ *
+ * Then mark the circuit for close (which marks all edges for close,
+ * and passes the destroy cell onward if necessary).
+ */
+static void
+command_process_destroy_cell(cell_t *cell, channel_t *chan)
+{
+ circuit_t *circ;
+ int reason;
+
+ circ = circuit_get_by_circid_channel(cell->circ_id, chan);
+ if (!circ) {
+ log_info(LD_OR,"unknown circuit %u on connection from %s. Dropping.",
+ (unsigned)cell->circ_id,
+ channel_get_canonical_remote_descr(chan));
+ return;
+ }
+ log_debug(LD_OR,"Received for circID %u.",(unsigned)cell->circ_id);
+
+ reason = (uint8_t)cell->payload[0];
+ circ->received_destroy = 1;
+
+ if (!CIRCUIT_IS_ORIGIN(circ) &&
+ chan == TO_OR_CIRCUIT(circ)->p_chan &&
+ cell->circ_id == TO_OR_CIRCUIT(circ)->p_circ_id) {
+ /* the destroy came from behind */
+ circuit_set_p_circid_chan(TO_OR_CIRCUIT(circ), 0, NULL);
+ circuit_mark_for_close(circ, reason|END_CIRC_REASON_FLAG_REMOTE);
+ } else { /* the destroy came from ahead */
+ circuit_set_n_circid_chan(circ, 0, NULL);
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ circuit_mark_for_close(circ, reason|END_CIRC_REASON_FLAG_REMOTE);
+ } else {
+ char payload[1];
+ log_debug(LD_OR, "Delivering 'truncated' back.");
+ payload[0] = (char)reason;
+ relay_send_command_from_edge(0, circ, RELAY_COMMAND_TRUNCATED,
+ payload, sizeof(payload), NULL);
+ }
+ }
+}
+
+/** Callback to handle a new channel; call command_setup_channel() to give
+ * it the right cell handlers.
+ */
+static void
+command_handle_incoming_channel(channel_listener_t *listener, channel_t *chan)
+{
+ tor_assert(listener);
+ tor_assert(chan);
+
+ command_setup_channel(chan);
+}
+
+/** Given a channel, install the right handlers to process incoming
+ * cells on it.
+ */
+void
+command_setup_channel(channel_t *chan)
+{
+ tor_assert(chan);
+
+ channel_set_cell_handlers(chan,
+ command_process_cell,
+ command_process_var_cell);
+}
+
+/** Given a listener, install the right handler to process incoming
+ * channels on it.
+ */
+void
+command_setup_listener(channel_listener_t *listener)
+{
+ tor_assert(listener);
+ tor_assert(listener->state == CHANNEL_LISTENER_STATE_LISTENING);
+
+ channel_listener_set_listener_fn(listener, command_handle_incoming_channel);
+}
+
diff --git a/src/core/or/command.h b/src/core/or/command.h
new file mode 100644
index 0000000000..864a5b2fd0
--- /dev/null
+++ b/src/core/or/command.h
@@ -0,0 +1,31 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file command.h
+ * \brief Header file for command.c.
+ **/
+
+#ifndef TOR_COMMAND_H
+#define TOR_COMMAND_H
+
+#include "or/channel.h"
+
+void command_process_cell(channel_t *chan, cell_t *cell);
+void command_process_var_cell(channel_t *chan, var_cell_t *cell);
+void command_setup_channel(channel_t *chan);
+void command_setup_listener(channel_listener_t *chan_l);
+
+const char *cell_command_to_string(uint8_t command);
+
+extern uint64_t stats_n_padding_cells_processed;
+extern uint64_t stats_n_create_cells_processed;
+extern uint64_t stats_n_created_cells_processed;
+extern uint64_t stats_n_relay_cells_processed;
+extern uint64_t stats_n_destroy_cells_processed;
+
+#endif /* !defined(TOR_COMMAND_H) */
+
diff --git a/src/core/or/connection_edge.c b/src/core/or/connection_edge.c
new file mode 100644
index 0000000000..13d957a937
--- /dev/null
+++ b/src/core/or/connection_edge.c
@@ -0,0 +1,4224 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file connection_edge.c
+ * \brief Handle edge streams.
+ *
+ * An edge_connection_t is a subtype of a connection_t, and represents two
+ * critical concepts in Tor: a stream, and an edge connection. From the Tor
+ * protocol's point of view, a stream is a bi-directional channel that is
+ * multiplexed on a single circuit. Each stream on a circuit is identified
+ * with a separate 16-bit stream ID, local to the (circuit,exit) pair.
+ * Streams are created in response to client requests.
+ *
+ * An edge connection is one thing that can implement a stream: it is either a
+ * TCP application socket that has arrived via (e.g.) a SOCKS request, or an
+ * exit connection.
+ *
+ * Not every instance of edge_connection_t truly represents an edge connection,
+ * however. (Sorry!) We also create edge_connection_t objects for streams that
+ * we will not be handling with TCP. The types of these streams are:
+ * <ul>
+ * <li>DNS lookup streams, created on the client side in response to
+ * a UDP DNS request received on a DNSPort, or a RESOLVE command
+ * on a controller.
+ * <li>DNS lookup streams, created on the exit side in response to
+ * a RELAY_RESOLVE cell from a client.
+ * <li>Tunneled directory streams, created on the directory cache side
+ * in response to a RELAY_BEGIN_DIR cell. These streams attach directly
+ * to a dir_connection_t object without ever using TCP.
+ * </ul>
+ *
+ * This module handles general-purpose functionality having to do with
+ * edge_connection_t. On the client side, it accepts various types of
+ * application requests on SocksPorts, TransPorts, and NATDPorts, and
+ * creates streams appropriately.
+ *
+ * This module is also responsible for implementing stream isolation:
+ * ensuring that streams that should not be linkable to one another are
+ * kept to different circuits.
+ *
+ * On the exit side, this module handles the various stream-creating
+ * types of RELAY cells by launching appropriate outgoing connections,
+ * DNS requests, or directory connection objects.
+ *
+ * And for all edge connections, this module is responsible for handling
+ * incoming and outgoing data as it arrives or leaves in the relay.c
+ * module. (Outgoing data will be packaged in
+ * connection_edge_process_inbuf() as it calls
+ * connection_edge_package_raw_inbuf(); incoming data from RELAY_DATA
+ * cells is applied in connection_edge_process_relay_cell().)
+ **/
+#define CONNECTION_EDGE_PRIVATE
+
+#include "or/or.h"
+
+#include "lib/err/backtrace.h"
+
+#include "or/addressmap.h"
+#include "lib/container/buffers.h"
+#include "or/channel.h"
+#include "or/circpathbias.h"
+#include "or/circuitlist.h"
+#include "or/circuituse.h"
+#include "or/config.h"
+#include "or/connection.h"
+#include "or/connection_edge.h"
+#include "or/connection_or.h"
+#include "or/control.h"
+#include "lib/crypt_ops/crypto_util.h"
+#include "or/dns.h"
+#include "or/dnsserv.h"
+#include "or/directory.h"
+#include "or/dirserv.h"
+#include "or/hibernate.h"
+#include "or/hs_common.h"
+#include "or/hs_cache.h"
+#include "or/hs_client.h"
+#include "or/hs_circuit.h"
+#include "or/main.h"
+#include "or/networkstatus.h"
+#include "or/nodelist.h"
+#include "or/policies.h"
+#include "or/proto_http.h"
+#include "or/proto_socks.h"
+#include "or/reasons.h"
+#include "or/relay.h"
+#include "or/rendclient.h"
+#include "or/rendcommon.h"
+#include "or/rendservice.h"
+#include "or/rephist.h"
+#include "or/router.h"
+#include "or/routerlist.h"
+#include "or/routerset.h"
+#include "or/circuitbuild.h"
+
+#include "or/cell_st.h"
+#include "or/cpath_build_state_st.h"
+#include "or/dir_connection_st.h"
+#include "or/entry_connection_st.h"
+#include "or/extend_info_st.h"
+#include "or/node_st.h"
+#include "or/or_circuit_st.h"
+#include "or/origin_circuit_st.h"
+#include "or/socks_request_st.h"
+#include "lib/evloop/compat_libevent.h"
+
+#ifdef HAVE_LINUX_TYPES_H
+#include <linux/types.h>
+#endif
+#ifdef HAVE_LINUX_NETFILTER_IPV4_H
+#include <linux/netfilter_ipv4.h>
+#define TRANS_NETFILTER
+#define TRANS_NETFILTER_IPV4
+#endif
+
+#ifdef HAVE_LINUX_IF_H
+#include <linux/if.h>
+#endif
+
+#ifdef HAVE_LINUX_NETFILTER_IPV6_IP6_TABLES_H
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#if defined(IP6T_SO_ORIGINAL_DST)
+#define TRANS_NETFILTER
+#define TRANS_NETFILTER_IPV6
+#endif
+#endif /* defined(HAVE_LINUX_NETFILTER_IPV6_IP6_TABLES_H) */
+
+#if defined(HAVE_NET_IF_H) && defined(HAVE_NET_PFVAR_H)
+#include <net/if.h>
+#include <net/pfvar.h>
+#define TRANS_PF
+#endif
+
+#ifdef IP_TRANSPARENT
+#define TRANS_TPROXY
+#endif
+
+#define SOCKS4_GRANTED 90
+#define SOCKS4_REJECT 91
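+/* (These are the standard SOCKS4 reply codes: 90 means "request granted",
+ * 91 means "request rejected or failed".) */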
+
+static int connection_ap_handshake_process_socks(entry_connection_t *conn);
+static int connection_ap_process_natd(entry_connection_t *conn);
+static int connection_exit_connect_dir(edge_connection_t *exitconn);
+static int consider_plaintext_ports(entry_connection_t *conn, uint16_t port);
+static int connection_ap_supports_optimistic_data(const entry_connection_t *);
+
+/** Convert a connection_t* to an edge_connection_t*; assert if the cast is
+ * invalid. */
+edge_connection_t *
+TO_EDGE_CONN(connection_t *c)
+{
+ tor_assert(c->magic == EDGE_CONNECTION_MAGIC ||
+ c->magic == ENTRY_CONNECTION_MAGIC);
+ return DOWNCAST(edge_connection_t, c);
+}
+
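+/** Convert a connection_t* to an entry_connection_t*; assert if the cast is
+ * invalid. */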
+entry_connection_t *
+TO_ENTRY_CONN(connection_t *c)
+{
+ tor_assert(c->magic == ENTRY_CONNECTION_MAGIC);
+ return (entry_connection_t*) SUBTYPE_P(c, entry_connection_t, edge_.base_);
+}
+
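+/** Convert an edge_connection_t* to an entry_connection_t*; assert if the
+ * cast is invalid. */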
+entry_connection_t *
+EDGE_TO_ENTRY_CONN(edge_connection_t *c)
+{
+ tor_assert(c->base_.magic == ENTRY_CONNECTION_MAGIC);
+ return (entry_connection_t*) SUBTYPE_P(c, entry_connection_t, edge_);
+}
+
+/** An AP stream has failed/finished. If it hasn't already sent back
+ * a socks reply, send one now (based on endreason). Also set
+ * has_sent_end to 1, and mark the conn.
+ */
+MOCK_IMPL(void,
+connection_mark_unattached_ap_,(entry_connection_t *conn, int endreason,
+ int line, const char *file))
+{
+ connection_t *base_conn = ENTRY_TO_CONN(conn);
+ edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(conn);
+ tor_assert(base_conn->type == CONN_TYPE_AP);
+ ENTRY_TO_EDGE_CONN(conn)->edge_has_sent_end = 1; /* no circ yet */
+
+ /* If this is a rendezvous stream and it is failing without ever
+ * being attached to a circuit, assume that an attempt to connect to
+ * the destination hidden service has just ended.
+ *
+ * XXXX This condition doesn't limit to only streams failing
+ * without ever being attached. That sloppiness should be harmless,
+ * but we should fix it someday anyway. */
+ if ((edge_conn->on_circuit != NULL || edge_conn->edge_has_sent_end) &&
+ connection_edge_is_rendezvous_stream(edge_conn)) {
+ if (edge_conn->rend_data) {
+ rend_client_note_connection_attempt_ended(edge_conn->rend_data);
+ }
+ }
+
+ if (base_conn->marked_for_close) {
+ /* This call will warn as appropriate. */
+ connection_mark_for_close_(base_conn, line, file);
+ return;
+ }
+
+ if (!conn->socks_request->has_finished) {
+ if (endreason & END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED)
+ log_warn(LD_BUG,
+ "stream (marked at %s:%d) sending two socks replies?",
+ file, line);
+
+ if (SOCKS_COMMAND_IS_CONNECT(conn->socks_request->command))
+ connection_ap_handshake_socks_reply(conn, NULL, 0, endreason);
+ else if (SOCKS_COMMAND_IS_RESOLVE(conn->socks_request->command))
+ connection_ap_handshake_socks_resolved(conn,
+ RESOLVED_TYPE_ERROR_TRANSIENT,
+ 0, NULL, -1, -1);
+ else /* unknown or no handshake at all. send no response. */
+ conn->socks_request->has_finished = 1;
+ }
+
+ connection_mark_and_flush_(base_conn, line, file);
+
+ ENTRY_TO_EDGE_CONN(conn)->end_reason = endreason;
+}
+
+/** There was an EOF. Send an end and mark the connection for close.
+ */
+int
+connection_edge_reached_eof(edge_connection_t *conn)
+{
+ if (connection_get_inbuf_len(TO_CONN(conn)) &&
+ connection_state_is_open(TO_CONN(conn))) {
+ /* it still has stuff to process. don't let it die yet. */
+ return 0;
+ }
+ log_info(LD_EDGE,"conn (fd "TOR_SOCKET_T_FORMAT") reached eof. Closing.",
+ conn->base_.s);
+ if (!conn->base_.marked_for_close) {
+ /* only mark it if not already marked. it's possible to
+ * get the 'end' right around when the client hangs up on us. */
+ connection_edge_end(conn, END_STREAM_REASON_DONE);
+ if (conn->base_.type == CONN_TYPE_AP) {
+ /* eof, so don't send a socks reply back */
+ if (EDGE_TO_ENTRY_CONN(conn)->socks_request)
+ EDGE_TO_ENTRY_CONN(conn)->socks_request->has_finished = 1;
+ }
+ connection_mark_for_close(TO_CONN(conn));
+ }
+ return 0;
+}
+
+/** Handle new bytes on conn->inbuf based on state:
+ * - If it's waiting for socks info, try to read another step of the
+ * socks handshake out of conn->inbuf.
+ * - If it's waiting for the original destination, fetch it.
+ * - If it's open, then package more relay cells from the stream.
+ * - Else, leave the bytes on inbuf alone for now.
+ *
+ * Mark and return -1 if there was an unexpected error with the conn,
+ * else return 0.
+ */
+int
+connection_edge_process_inbuf(edge_connection_t *conn, int package_partial)
+{
+ tor_assert(conn);
+
+ switch (conn->base_.state) {
+ case AP_CONN_STATE_SOCKS_WAIT:
+ if (connection_ap_handshake_process_socks(EDGE_TO_ENTRY_CONN(conn)) <0) {
+ /* already marked */
+ return -1;
+ }
+ return 0;
+ case AP_CONN_STATE_NATD_WAIT:
+ if (connection_ap_process_natd(EDGE_TO_ENTRY_CONN(conn)) < 0) {
+ /* already marked */
+ return -1;
+ }
+ return 0;
+ case AP_CONN_STATE_HTTP_CONNECT_WAIT:
+ if (connection_ap_process_http_connect(EDGE_TO_ENTRY_CONN(conn)) < 0) {
+ return -1;
+ }
+ return 0;
+ case AP_CONN_STATE_OPEN:
+ case EXIT_CONN_STATE_OPEN:
+ if (connection_edge_package_raw_inbuf(conn, package_partial, NULL) < 0) {
+ /* (We already sent an end cell if possible) */
+ connection_mark_for_close(TO_CONN(conn));
+ return -1;
+ }
+ return 0;
+ case AP_CONN_STATE_CONNECT_WAIT:
+ if (connection_ap_supports_optimistic_data(EDGE_TO_ENTRY_CONN(conn))) {
+ log_info(LD_EDGE,
+ "data from edge while in '%s' state. Sending it anyway. "
+ "package_partial=%d, buflen=%ld",
+ conn_state_to_string(conn->base_.type, conn->base_.state),
+ package_partial,
+ (long)connection_get_inbuf_len(TO_CONN(conn)));
+ if (connection_edge_package_raw_inbuf(conn, package_partial, NULL)<0) {
+ /* (We already sent an end cell if possible) */
+ connection_mark_for_close(TO_CONN(conn));
+ return -1;
+ }
+ return 0;
+ }
+ /* Fall through if the connection is on a circuit without optimistic
+ * data support. */
+ /* Falls through. */
+ case EXIT_CONN_STATE_CONNECTING:
+ case AP_CONN_STATE_RENDDESC_WAIT:
+ case AP_CONN_STATE_CIRCUIT_WAIT:
+ case AP_CONN_STATE_RESOLVE_WAIT:
+ case AP_CONN_STATE_CONTROLLER_WAIT:
+ log_info(LD_EDGE,
+ "data from edge while in '%s' state. Leaving it on buffer.",
+ conn_state_to_string(conn->base_.type, conn->base_.state));
+ return 0;
+ }
+ log_warn(LD_BUG,"Got unexpected state %d. Closing.",conn->base_.state);
+ tor_fragile_assert();
+ connection_edge_end(conn, END_STREAM_REASON_INTERNAL);
+ connection_mark_for_close(TO_CONN(conn));
+ return -1;
+}
+
+/** This edge needs to be closed, because its circuit has closed.
+ * Mark it for close and return 0.
+ */
+int
+connection_edge_destroy(circid_t circ_id, edge_connection_t *conn)
+{
+ if (!conn->base_.marked_for_close) {
+ log_info(LD_EDGE, "CircID %u: At an edge. Marking connection for close.",
+ (unsigned) circ_id);
+ if (conn->base_.type == CONN_TYPE_AP) {
+ entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn);
+ connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_DESTROY);
+ control_event_stream_bandwidth(conn);
+ control_event_stream_status(entry_conn, STREAM_EVENT_CLOSED,
+ END_STREAM_REASON_DESTROY);
+ conn->end_reason |= END_STREAM_REASON_FLAG_ALREADY_SENT_CLOSED;
+ } else {
+ /* closing the circuit, nothing to send an END to */
+ conn->edge_has_sent_end = 1;
+ conn->end_reason = END_STREAM_REASON_DESTROY;
+ conn->end_reason |= END_STREAM_REASON_FLAG_ALREADY_SENT_CLOSED;
+ connection_mark_and_flush(TO_CONN(conn));
+ }
+ }
+ conn->cpath_layer = NULL;
+ conn->on_circuit = NULL;
+ return 0;
+}
+
+/** Send a raw end cell to the stream with ID <b>stream_id</b> out over the
+ * <b>circ</b> towards the hop identified with <b>cpath_layer</b>. If this
+ * is not a client connection, set the relay end cell's reason for closing
+ * as <b>reason</b> */
+static int
+relay_send_end_cell_from_edge(streamid_t stream_id, circuit_t *circ,
+ uint8_t reason, crypt_path_t *cpath_layer)
+{
+ char payload[1];
+
+ if (CIRCUIT_PURPOSE_IS_CLIENT(circ->purpose)) {
+ /* Never send the server an informative reason code; it doesn't need to
+ * know why the client stream is failing. */
+ reason = END_STREAM_REASON_MISC;
+ }
+
+ payload[0] = (char) reason;
+
+ /* Note: we have to use relay_send_command_from_edge here, not
+ * connection_edge_end or connection_edge_send_command, since those require
+ * that we have a stream connected to a circuit, and we don't connect to a
+ * circuit until we have a pending/successful resolve. */
+ return relay_send_command_from_edge(stream_id, circ, RELAY_COMMAND_END,
+ payload, 1, cpath_layer);
+}
+
+/** If the connection <b>conn</b> is attempting to connect to an external
+ * destination that is a hidden service, and the reason is a connection
+ * refused or timeout, log it so the operator can take appropriate action.
+ * The log statement is a rate-limited warning. */
+static void
+warn_if_hs_unreachable(const edge_connection_t *conn, uint8_t reason)
+{
+ tor_assert(conn);
+
+ if (conn->base_.type == CONN_TYPE_EXIT &&
+ connection_edge_is_rendezvous_stream(conn) &&
+ (reason == END_STREAM_REASON_CONNECTREFUSED ||
+ reason == END_STREAM_REASON_TIMEOUT)) {
+#define WARN_FAILED_HS_CONNECTION 300
+ static ratelim_t warn_limit = RATELIM_INIT(WARN_FAILED_HS_CONNECTION);
+ char *m;
+ if ((m = rate_limit_log(&warn_limit, approx_time()))) {
+ log_warn(LD_EDGE, "Onion service connection to %s failed (%s)",
+ (conn->base_.socket_family == AF_UNIX) ?
+ safe_str(conn->base_.address) :
+ safe_str(fmt_addrport(&conn->base_.addr, conn->base_.port)),
+ stream_end_reason_to_string(reason));
+ tor_free(m);
+ }
+ }
+}
+
+/** Send a relay end cell from stream <b>conn</b> down conn's circuit, and
+ * remember that we've done so. If this is not a client connection, set the
+ * relay end cell's reason for closing as <b>reason</b>.
+ *
+ * Return -1 if this function has already been called on this conn,
+ * else return 0.
+ */
+int
+connection_edge_end(edge_connection_t *conn, uint8_t reason)
+{
+ char payload[RELAY_PAYLOAD_SIZE];
+ size_t payload_len=1;
+ circuit_t *circ;
+ uint8_t control_reason = reason;
+
+ if (conn->edge_has_sent_end) {
+ log_warn(LD_BUG,"(Harmless.) Calling connection_edge_end (reason %d) "
+ "on an already ended stream?", reason);
+ tor_fragile_assert();
+ return -1;
+ }
+
+ if (conn->base_.marked_for_close) {
+ log_warn(LD_BUG,
+ "called on conn that's already marked for close at %s:%d.",
+ conn->base_.marked_for_close_file, conn->base_.marked_for_close);
+ return 0;
+ }
+
+ circ = circuit_get_by_edge_conn(conn);
+ if (circ && CIRCUIT_PURPOSE_IS_CLIENT(circ->purpose)) {
+ /* If this is a client circuit, don't send the server an informative
+ * reason code; it doesn't need to know why the client stream is
+ * failing. */
+ reason = END_STREAM_REASON_MISC;
+ }
+
+ payload[0] = (char)reason;
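+ /* For an exit-policy end on a non-rendezvous stream, the payload built
+ * below also tells the client which address we resolved and how long that
+ * answer is valid: reason (1 byte), then a 4-byte IPv4 or 16-byte IPv6
+ * address and a 4-byte TTL, both in network byte order. */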
+ if (reason == END_STREAM_REASON_EXITPOLICY &&
+ !connection_edge_is_rendezvous_stream(conn)) {
+ int addrlen;
+ if (tor_addr_family(&conn->base_.addr) == AF_INET) {
+ set_uint32(payload+1, tor_addr_to_ipv4n(&conn->base_.addr));
+ addrlen = 4;
+ } else {
+ memcpy(payload+1, tor_addr_to_in6_addr8(&conn->base_.addr), 16);
+ addrlen = 16;
+ }
+ set_uint32(payload+1+addrlen, htonl(dns_clip_ttl(conn->address_ttl)));
+ payload_len += 4+addrlen;
+ }
+
+ if (circ && !circ->marked_for_close) {
+ log_debug(LD_EDGE,"Sending end on conn (fd "TOR_SOCKET_T_FORMAT").",
+ conn->base_.s);
+ connection_edge_send_command(conn, RELAY_COMMAND_END,
+ payload, payload_len);
+ /* Log a warning if the connection was to a hidden service and couldn't be
+ * made because the service wasn't available. */
+ warn_if_hs_unreachable(conn, control_reason);
+ } else {
+ log_debug(LD_EDGE,"No circ to send end on conn "
+ "(fd "TOR_SOCKET_T_FORMAT").",
+ conn->base_.s);
+ }
+
+ conn->edge_has_sent_end = 1;
+ conn->end_reason = control_reason;
+ return 0;
+}
+
+/** An error has just occurred on an operation on an edge connection
+ * <b>conn</b>. Extract the errno; convert it to an end reason, and send an
+ * appropriate relay end cell to the other end of the connection's circuit.
+ **/
+int
+connection_edge_end_errno(edge_connection_t *conn)
+{
+ uint8_t reason;
+ tor_assert(conn);
+ reason = errno_to_stream_end_reason(tor_socket_errno(conn->base_.s));
+ return connection_edge_end(conn, reason);
+}
+
+/** We just wrote some data to <b>conn</b>; act appropriately.
+ *
+ * (That is, if it's open, consider sending a stream-level sendme cell if we
+ * have just flushed enough.)
+ */
+int
+connection_edge_flushed_some(edge_connection_t *conn)
+{
+ switch (conn->base_.state) {
+ case AP_CONN_STATE_OPEN:
+ case EXIT_CONN_STATE_OPEN:
+ connection_edge_consider_sending_sendme(conn);
+ break;
+ }
+ return 0;
+}
+
+/** Connection <b>conn</b> has finished writing and has no bytes left on
+ * its outbuf.
+ *
+ * If it's in state 'open', stop writing, consider responding with a
+ * sendme, and return.
+ * Otherwise, stop writing and return.
+ *
+ * If <b>conn</b> is broken, mark it for close and return -1, else
+ * return 0.
+ */
+int
+connection_edge_finished_flushing(edge_connection_t *conn)
+{
+ tor_assert(conn);
+
+ switch (conn->base_.state) {
+ case AP_CONN_STATE_OPEN:
+ case EXIT_CONN_STATE_OPEN:
+ connection_edge_consider_sending_sendme(conn);
+ return 0;
+ case AP_CONN_STATE_SOCKS_WAIT:
+ case AP_CONN_STATE_NATD_WAIT:
+ case AP_CONN_STATE_RENDDESC_WAIT:
+ case AP_CONN_STATE_CIRCUIT_WAIT:
+ case AP_CONN_STATE_CONNECT_WAIT:
+ case AP_CONN_STATE_CONTROLLER_WAIT:
+ case AP_CONN_STATE_RESOLVE_WAIT:
+ case AP_CONN_STATE_HTTP_CONNECT_WAIT:
+ return 0;
+ default:
+ log_warn(LD_BUG, "Called in unexpected state %d.",conn->base_.state);
+ tor_fragile_assert();
+ return -1;
+ }
+ return 0;
+}
+
+/** Longest size for the relay payload of a RELAY_CONNECTED cell that we're
+ * able to generate. */
+/* 4 zero bytes; 1 type byte; 16 byte IPv6 address; 4 byte TTL. */
+#define MAX_CONNECTED_CELL_PAYLOAD_LEN 25
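+
+/* Sketch of the two payload layouts that connected_cell_format_payload()
+ * below can produce (addresses and TTL in network byte order):
+ *
+ * IPv4 (8 bytes): [4-byte IPv4 address][4-byte TTL]
+ * IPv6 (25 bytes): [4 zero bytes][type byte = 6][16-byte IPv6 addr][4-byte TTL]
+ */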
+
+/** Set the buffer at <b>payload_out</b> -- which must have at least
+ * MAX_CONNECTED_CELL_PAYLOAD_LEN bytes available -- to the body of a
+ * RELAY_CONNECTED cell indicating that we have connected to <b>addr</b>, and
+ * that the name resolution that led us to <b>addr</b> will be valid for
+ * <b>ttl</b> seconds. Return -1 on error, or the number of bytes used on
+ * success. */
+STATIC int
+connected_cell_format_payload(uint8_t *payload_out,
+ const tor_addr_t *addr,
+ uint32_t ttl)
+{
+ const sa_family_t family = tor_addr_family(addr);
+ int connected_payload_len;
+
+ /* should be needless */
+ memset(payload_out, 0, MAX_CONNECTED_CELL_PAYLOAD_LEN);
+
+ if (family == AF_INET) {
+ set_uint32(payload_out, tor_addr_to_ipv4n(addr));
+ connected_payload_len = 4;
+ } else if (family == AF_INET6) {
+ set_uint32(payload_out, 0);
+ set_uint8(payload_out + 4, 6);
+ memcpy(payload_out + 5, tor_addr_to_in6_addr8(addr), 16);
+ connected_payload_len = 21;
+ } else {
+ return -1;
+ }
+
+ set_uint32(payload_out + connected_payload_len, htonl(dns_clip_ttl(ttl)));
+ connected_payload_len += 4;
+
+ tor_assert(connected_payload_len <= MAX_CONNECTED_CELL_PAYLOAD_LEN);
+
+ return connected_payload_len;
+}
+
+/** Connected handler for exit connections: start writing pending
+ * data, deliver 'CONNECTED' relay cells as appropriate, and check
+ * any pending data that may have been received. */
+int
+connection_edge_finished_connecting(edge_connection_t *edge_conn)
+{
+ connection_t *conn;
+
+ tor_assert(edge_conn);
+ tor_assert(edge_conn->base_.type == CONN_TYPE_EXIT);
+ conn = TO_CONN(edge_conn);
+ tor_assert(conn->state == EXIT_CONN_STATE_CONNECTING);
+
+ log_info(LD_EXIT,"Exit connection to %s:%u (%s) established.",
+ escaped_safe_str(conn->address), conn->port,
+ safe_str(fmt_and_decorate_addr(&conn->addr)));
+
+ rep_hist_note_exit_stream_opened(conn->port);
+
+ conn->state = EXIT_CONN_STATE_OPEN;
+ connection_watch_events(conn, READ_EVENT); /* stop writing, keep reading */
+ if (connection_get_outbuf_len(conn)) /* in case there are any queued relay
+ * cells */
+ connection_start_writing(conn);
+ /* deliver a 'connected' relay cell back through the circuit. */
+ if (connection_edge_is_rendezvous_stream(edge_conn)) {
+ if (connection_edge_send_command(edge_conn,
+ RELAY_COMMAND_CONNECTED, NULL, 0) < 0)
+ return 0; /* circuit is closed, don't continue */
+ } else {
+ uint8_t connected_payload[MAX_CONNECTED_CELL_PAYLOAD_LEN];
+ int connected_payload_len =
+ connected_cell_format_payload(connected_payload, &conn->addr,
+ edge_conn->address_ttl);
+ if (connected_payload_len < 0)
+ return -1;
+
+ if (connection_edge_send_command(edge_conn,
+ RELAY_COMMAND_CONNECTED,
+ (char*)connected_payload, connected_payload_len) < 0)
+ return 0; /* circuit is closed, don't continue */
+ }
+ tor_assert(edge_conn->package_window > 0);
+ /* in case the server has written anything */
+ return connection_edge_process_inbuf(edge_conn, 1);
+}
+
+/** A list of all the entry_connection_t * objects that are not marked
+ * for close, and are in AP_CONN_STATE_CIRCUIT_WAIT.
+ *
+ * (Right now, we check in several places to make sure that this list is
+ * correct. When it's incorrect, we'll fix it, and log a BUG message.)
+ */
+static smartlist_t *pending_entry_connections = NULL;
+
+static int untried_pending_connections = 0;
+
+/**
+ * Mainloop event to tell us to scan for pending connections that can
+ * be attached.
+ */
+static mainloop_event_t *attach_pending_entry_connections_ev = NULL;
+
+/** Common code to connection_(ap|exit)_about_to_close. */
+static void
+connection_edge_about_to_close(edge_connection_t *edge_conn)
+{
+ if (!edge_conn->edge_has_sent_end) {
+ connection_t *conn = TO_CONN(edge_conn);
+ log_warn(LD_BUG, "(Harmless.) Edge connection (marked at %s:%d) "
+ "hasn't sent end yet?",
+ conn->marked_for_close_file, conn->marked_for_close);
+ tor_fragile_assert();
+ }
+}
+
+/** Called when we're about to finally unlink and free an AP (client)
+ * connection: perform necessary accounting and cleanup */
+void
+connection_ap_about_to_close(entry_connection_t *entry_conn)
+{
+ circuit_t *circ;
+ edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(entry_conn);
+ connection_t *conn = ENTRY_TO_CONN(entry_conn);
+
+ connection_edge_about_to_close(edge_conn);
+
+ if (entry_conn->socks_request->has_finished == 0) {
+ /* since conn gets removed right after this function finishes,
+ * there's no point trying to send back a reply at this point. */
+ log_warn(LD_BUG,"Closing stream (marked at %s:%d) without sending"
+ " back a socks reply.",
+ conn->marked_for_close_file, conn->marked_for_close);
+ }
+ if (!edge_conn->end_reason) {
+ log_warn(LD_BUG,"Closing stream (marked at %s:%d) without having"
+ " set end_reason.",
+ conn->marked_for_close_file, conn->marked_for_close);
+ }
+ if (entry_conn->dns_server_request) {
+ log_warn(LD_BUG,"Closing stream (marked at %s:%d) without having"
+ " replied to DNS request.",
+ conn->marked_for_close_file, conn->marked_for_close);
+ dnsserv_reject_request(entry_conn);
+ }
+
+ if (TO_CONN(edge_conn)->state == AP_CONN_STATE_CIRCUIT_WAIT) {
+ smartlist_remove(pending_entry_connections, entry_conn);
+ }
+
+#if 1
+ /* Check to make sure that this isn't in pending_entry_connections if it
+ * didn't actually belong there. */
+ if (TO_CONN(edge_conn)->type == CONN_TYPE_AP) {
+ connection_ap_warn_and_unmark_if_pending_circ(entry_conn,
+ "about_to_close");
+ }
+#endif /* 1 */
+
+ control_event_stream_bandwidth(edge_conn);
+ control_event_stream_status(entry_conn, STREAM_EVENT_CLOSED,
+ edge_conn->end_reason);
+ circ = circuit_get_by_edge_conn(edge_conn);
+ if (circ)
+ circuit_detach_stream(circ, edge_conn);
+}
+
+/** Called when we're about to finally unlink and free an exit
+ * connection: perform necessary accounting and cleanup */
+void
+connection_exit_about_to_close(edge_connection_t *edge_conn)
+{
+ circuit_t *circ;
+ connection_t *conn = TO_CONN(edge_conn);
+
+ connection_edge_about_to_close(edge_conn);
+
+ circ = circuit_get_by_edge_conn(edge_conn);
+ if (circ)
+ circuit_detach_stream(circ, edge_conn);
+ if (conn->state == EXIT_CONN_STATE_RESOLVING) {
+ connection_dns_remove(edge_conn);
+ }
+}
+
+/** Define a schedule for how long to wait between retrying
+ * application connections. Rather than waiting a fixed amount of
+ * time between each retry, we wait 10 seconds each for the first
+ * two tries, and 15 seconds for each retry after
+ * that. Hopefully this will improve the expected user experience. */
+static int
+compute_retry_timeout(entry_connection_t *conn)
+{
+ int timeout = get_options()->CircuitStreamTimeout;
+ if (timeout) /* if our config options override the default, use them */
+ return timeout;
+ if (conn->num_socks_retries < 2) /* try 0 and try 1 */
+ return 10;
+ return 15;
+}
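+
+/* Example, assuming CircuitStreamTimeout is unset (its default): a stream
+ * that keeps timing out is retried after 10 seconds, again 10 seconds after
+ * that, and then every 15 seconds, per compute_retry_timeout() above. */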
+
+/** Find all general-purpose AP streams waiting for a response that sent their
+ * begin/resolve cell too long ago. Detach from their current circuit, and
+ * mark their current circuit as unsuitable for new streams. Then call
+ * connection_ap_handshake_attach_circuit() to attach to a new circuit (if
+ * available) or launch a new one.
+ *
+ * For rendezvous streams, simply give up after SocksTimeout seconds (with no
+ * retry attempt).
+ */
+void
+connection_ap_expire_beginning(void)
+{
+ edge_connection_t *conn;
+ entry_connection_t *entry_conn;
+ circuit_t *circ;
+ time_t now = time(NULL);
+ const or_options_t *options = get_options();
+ int severity;
+ int cutoff;
+ int seconds_idle, seconds_since_born;
+ smartlist_t *conns = get_connection_array();
+
+ SMARTLIST_FOREACH_BEGIN(conns, connection_t *, base_conn) {
+ if (base_conn->type != CONN_TYPE_AP || base_conn->marked_for_close)
+ continue;
+ entry_conn = TO_ENTRY_CONN(base_conn);
+ conn = ENTRY_TO_EDGE_CONN(entry_conn);
+ /* if it's an internal linked connection, don't yell its status. */
+ severity = (tor_addr_is_null(&base_conn->addr) && !base_conn->port)
+ ? LOG_INFO : LOG_NOTICE;
+ seconds_idle = (int)( now - base_conn->timestamp_last_read_allowed );
+ seconds_since_born = (int)( now - base_conn->timestamp_created );
+
+ if (base_conn->state == AP_CONN_STATE_OPEN)
+ continue;
+
+ /* We already consider SocksTimeout in
+ * connection_ap_handshake_attach_circuit(), but we need to consider
+ * it here too because controllers that put streams in controller_wait
+ * state never ask Tor to attach the circuit. */
+ if (AP_CONN_STATE_IS_UNATTACHED(base_conn->state)) {
+ if (seconds_since_born >= options->SocksTimeout) {
+ log_fn(severity, LD_APP,
+ "Tried for %d seconds to get a connection to %s:%d. "
+ "Giving up. (%s)",
+ seconds_since_born,
+ safe_str_client(entry_conn->socks_request->address),
+ entry_conn->socks_request->port,
+ conn_state_to_string(CONN_TYPE_AP, base_conn->state));
+ connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TIMEOUT);
+ }
+ continue;
+ }
+
+ /* We're in state connect_wait or resolve_wait now -- waiting for a
+ * reply to our relay cell. See if we want to retry/give up. */
+
+ cutoff = compute_retry_timeout(entry_conn);
+ if (seconds_idle < cutoff)
+ continue;
+ circ = circuit_get_by_edge_conn(conn);
+ if (!circ) { /* it's vanished? */
+ log_info(LD_APP,"Conn is waiting (address %s), but lost its circ.",
+ safe_str_client(entry_conn->socks_request->address));
+ connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TIMEOUT);
+ continue;
+ }
+ if (circ->purpose == CIRCUIT_PURPOSE_C_REND_JOINED) {
+ if (seconds_idle >= options->SocksTimeout) {
+ log_fn(severity, LD_REND,
+ "Rend stream is %d seconds late. Giving up on address"
+ " '%s.onion'.",
+ seconds_idle,
+ safe_str_client(entry_conn->socks_request->address));
+ /* Roll back path bias use state so that we probe the circuit
+ * if nothing else succeeds on it */
+ pathbias_mark_use_rollback(TO_ORIGIN_CIRCUIT(circ));
+
+ connection_edge_end(conn, END_STREAM_REASON_TIMEOUT);
+ connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TIMEOUT);
+ }
+ continue;
+ }
+
+ if (circ->purpose != CIRCUIT_PURPOSE_C_GENERAL &&
+ circ->purpose != CIRCUIT_PURPOSE_C_HSDIR_GET &&
+ circ->purpose != CIRCUIT_PURPOSE_S_HSDIR_POST &&
+ circ->purpose != CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT &&
+ circ->purpose != CIRCUIT_PURPOSE_PATH_BIAS_TESTING) {
+ log_warn(LD_BUG, "circuit->purpose == CIRCUIT_PURPOSE_C_GENERAL failed. "
+ "The purpose on the circuit was %s; it was in state %s, "
+ "path_state %s.",
+ circuit_purpose_to_string(circ->purpose),
+ circuit_state_to_string(circ->state),
+ CIRCUIT_IS_ORIGIN(circ) ?
+ pathbias_state_to_string(TO_ORIGIN_CIRCUIT(circ)->path_state) :
+ "none");
+ }
+ log_fn(cutoff < 15 ? LOG_INFO : severity, LD_APP,
+ "We tried for %d seconds to connect to '%s' using exit %s."
+ " Retrying on a new circuit.",
+ seconds_idle,
+ safe_str_client(entry_conn->socks_request->address),
+ conn->cpath_layer ?
+ extend_info_describe(conn->cpath_layer->extend_info):
+ "*unnamed*");
+ /* send an end down the circuit */
+ connection_edge_end(conn, END_STREAM_REASON_TIMEOUT);
+ /* un-mark it as ending, since we're going to reuse it */
+ conn->edge_has_sent_end = 0;
+ conn->end_reason = 0;
+ /* make us not try this circuit again, but allow
+ * current streams on it to survive if they can */
+ mark_circuit_unusable_for_new_conns(TO_ORIGIN_CIRCUIT(circ));
+
+ /* give our stream another 'cutoff' seconds to try */
+ conn->base_.timestamp_last_read_allowed += cutoff;
+ if (entry_conn->num_socks_retries < 250) /* avoid overflow */
+ entry_conn->num_socks_retries++;
+ /* move it back into 'pending' state, and try to attach. */
+ if (connection_ap_detach_retriable(entry_conn, TO_ORIGIN_CIRCUIT(circ),
+ END_STREAM_REASON_TIMEOUT)<0) {
+ if (!base_conn->marked_for_close)
+ connection_mark_unattached_ap(entry_conn,
+ END_STREAM_REASON_CANT_ATTACH);
+ }
+ } SMARTLIST_FOREACH_END(base_conn);
+}
+
+/**
+ * As connection_ap_attach_pending, but first scans the entire connection
+ * array to see if any elements are missing.
+ */
+void
+connection_ap_rescan_and_attach_pending(void)
+{
+ entry_connection_t *entry_conn;
+ smartlist_t *conns = get_connection_array();
+
+ if (PREDICT_UNLIKELY(NULL == pending_entry_connections))
+ pending_entry_connections = smartlist_new();
+
+ SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
+ if (conn->marked_for_close ||
+ conn->type != CONN_TYPE_AP ||
+ conn->state != AP_CONN_STATE_CIRCUIT_WAIT)
+ continue;
+
+ entry_conn = TO_ENTRY_CONN(conn);
+ tor_assert(entry_conn);
+ if (! smartlist_contains(pending_entry_connections, entry_conn)) {
+ log_warn(LD_BUG, "Found a connection %p that was supposed to be "
+ "in pending_entry_connections, but wasn't. No worries; "
+ "adding it.",
+ entry_conn);
+ untried_pending_connections = 1;
+ connection_ap_mark_as_pending_circuit(entry_conn);
+ }
+
+ } SMARTLIST_FOREACH_END(conn);
+
+ connection_ap_attach_pending(1);
+}
+
+#ifdef DEBUGGING_17659
+#define UNMARK() do { \
+ entry_conn->marked_pending_circ_line = 0; \
+ entry_conn->marked_pending_circ_file = 0; \
+ } while (0)
+#else /* !(defined(DEBUGGING_17659)) */
+#define UNMARK() do { } while (0)
+#endif /* defined(DEBUGGING_17659) */
+
+/** Tell any AP streams that are listed as waiting for a new circuit to try
+ * again. If there is an available circuit for a stream, attach it. Otherwise,
+ * launch a new circuit.
+ *
+ * If <b>retry</b> is false, only check the list if it contains at least one
+ * stream that we have not yet tried to attach to a circuit.
+ */
+void
+connection_ap_attach_pending(int retry)
+{
+ if (PREDICT_UNLIKELY(!pending_entry_connections)) {
+ return;
+ }
+
+ if (untried_pending_connections == 0 && !retry)
+ return;
+
+ /* Don't allow any modifications to list while we are iterating over
+ * it. We'll put streams back on this list if we can't attach them
+ * immediately. */
+ smartlist_t *pending = pending_entry_connections;
+ pending_entry_connections = smartlist_new();
+
+ SMARTLIST_FOREACH_BEGIN(pending,
+ entry_connection_t *, entry_conn) {
+ connection_t *conn = ENTRY_TO_CONN(entry_conn);
+ tor_assert(conn && entry_conn);
+ if (conn->marked_for_close) {
+ UNMARK();
+ continue;
+ }
+ if (conn->magic != ENTRY_CONNECTION_MAGIC) {
+ log_warn(LD_BUG, "%p has impossible magic value %u.",
+ entry_conn, (unsigned)conn->magic);
+ UNMARK();
+ continue;
+ }
+ if (conn->state != AP_CONN_STATE_CIRCUIT_WAIT) {
+ log_warn(LD_BUG, "%p is no longer in circuit_wait. Its current state "
+ "is %s. Why is it on pending_entry_connections?",
+ entry_conn,
+ conn_state_to_string(conn->type, conn->state));
+ UNMARK();
+ continue;
+ }
+
+ /* Okay, we're through the sanity checks. Try to handle this stream. */
+ if (connection_ap_handshake_attach_circuit(entry_conn) < 0) {
+ if (!conn->marked_for_close)
+ connection_mark_unattached_ap(entry_conn,
+ END_STREAM_REASON_CANT_ATTACH);
+ }
+
+ if (! conn->marked_for_close &&
+ conn->type == CONN_TYPE_AP &&
+ conn->state == AP_CONN_STATE_CIRCUIT_WAIT) {
+ /* Is it still waiting for a circuit? If so, we didn't attach it,
+ * so it's still pending. Put it back on the list.
+ */
+ if (!smartlist_contains(pending_entry_connections, entry_conn)) {
+ smartlist_add(pending_entry_connections, entry_conn);
+ continue;
+ }
+ }
+
+ /* If we got here, then we either closed the connection, or
+ * we attached it. */
+ UNMARK();
+ } SMARTLIST_FOREACH_END(entry_conn);
+
+ smartlist_free(pending);
+ untried_pending_connections = 0;
+}
+
+static void
+attach_pending_entry_connections_cb(mainloop_event_t *ev, void *arg)
+{
+ (void)ev;
+ (void)arg;
+ connection_ap_attach_pending(0);
+}
+
+/** Mark <b>entry_conn</b> as needing to get attached to a circuit.
+ *
+ * <b>entry_conn</b> must be in AP_CONN_STATE_CIRCUIT_WAIT, and
+ * should not already be pending a circuit. The circuit will get
+ * launched or the connection will get attached the next time we
+ * call connection_ap_attach_pending().
+ */
+void
+connection_ap_mark_as_pending_circuit_(entry_connection_t *entry_conn,
+ const char *fname, int lineno)
+{
+ connection_t *conn = ENTRY_TO_CONN(entry_conn);
+ tor_assert(conn->state == AP_CONN_STATE_CIRCUIT_WAIT);
+ tor_assert(conn->magic == ENTRY_CONNECTION_MAGIC);
+ if (conn->marked_for_close)
+ return;
+
+ if (PREDICT_UNLIKELY(NULL == pending_entry_connections)) {
+ pending_entry_connections = smartlist_new();
+ }
+ if (PREDICT_UNLIKELY(NULL == attach_pending_entry_connections_ev)) {
+ attach_pending_entry_connections_ev = mainloop_event_postloop_new(
+ attach_pending_entry_connections_cb, NULL);
+ }
+ if (PREDICT_UNLIKELY(smartlist_contains(pending_entry_connections,
+ entry_conn))) {
+ log_warn(LD_BUG, "What?? pending_entry_connections already contains %p! "
+ "(Called from %s:%d.)",
+ entry_conn, fname, lineno);
+#ifdef DEBUGGING_17659
+ const char *f2 = entry_conn->marked_pending_circ_file;
+ log_warn(LD_BUG, "(Previously called from %s:%d.)\n",
+ f2 ? f2 : "<NULL>",
+ entry_conn->marked_pending_circ_line);
+#endif /* defined(DEBUGGING_17659) */
+ log_backtrace(LOG_WARN, LD_BUG, "To debug, this may help");
+ return;
+ }
+
+#ifdef DEBUGGING_17659
+ entry_conn->marked_pending_circ_line = (uint16_t) lineno;
+ entry_conn->marked_pending_circ_file = fname;
+#endif
+
+ untried_pending_connections = 1;
+ smartlist_add(pending_entry_connections, entry_conn);
+
+ mainloop_event_activate(attach_pending_entry_connections_ev);
+}
+
+/** Mark <b>entry_conn</b> as no longer waiting for a circuit. */
+void
+connection_ap_mark_as_non_pending_circuit(entry_connection_t *entry_conn)
+{
+ if (PREDICT_UNLIKELY(NULL == pending_entry_connections))
+ return;
+ UNMARK();
+ smartlist_remove(pending_entry_connections, entry_conn);
+}
+
+/** If <b>entry_conn</b> is in pending_entry_connections when it shouldn't
+ * be, log a warning naming the calling code site <b>where</b>, and remove it
+ * from the list. */
+void
+connection_ap_warn_and_unmark_if_pending_circ(entry_connection_t *entry_conn,
+ const char *where)
+{
+ if (pending_entry_connections &&
+ smartlist_contains(pending_entry_connections, entry_conn)) {
+ log_warn(LD_BUG, "What was %p doing in pending_entry_connections in %s?",
+ entry_conn, where);
+ connection_ap_mark_as_non_pending_circuit(entry_conn);
+ }
+}
+
+/** Tell any AP streams that are waiting for a one-hop tunnel to
+ * <b>failed_digest</b> that they are going to fail. */
+/* XXXX We should get rid of this function, and instead attach
+ * one-hop streams to circ->p_streams so they get marked in
+ * circuit_mark_for_close like normal p_streams. */
+void
+connection_ap_fail_onehop(const char *failed_digest,
+ cpath_build_state_t *build_state)
+{
+ entry_connection_t *entry_conn;
+ char digest[DIGEST_LEN];
+ smartlist_t *conns = get_connection_array();
+ SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
+ if (conn->marked_for_close ||
+ conn->type != CONN_TYPE_AP ||
+ conn->state != AP_CONN_STATE_CIRCUIT_WAIT)
+ continue;
+ entry_conn = TO_ENTRY_CONN(conn);
+ if (!entry_conn->want_onehop)
+ continue;
+ if (hexdigest_to_digest(entry_conn->chosen_exit_name, digest) < 0 ||
+ tor_memneq(digest, failed_digest, DIGEST_LEN))
+ continue;
+ if (tor_digest_is_zero(digest)) {
+ /* we don't know the digest; have to compare addr:port */
+ tor_addr_t addr;
+ if (!build_state || !build_state->chosen_exit ||
+ !entry_conn->socks_request) {
+ continue;
+ }
+ if (tor_addr_parse(&addr, entry_conn->socks_request->address)<0 ||
+ !tor_addr_eq(&build_state->chosen_exit->addr, &addr) ||
+ build_state->chosen_exit->port != entry_conn->socks_request->port)
+ continue;
+ }
+ log_info(LD_APP, "Closing one-hop stream to '%s/%s' because the OR conn "
+ "just failed.", entry_conn->chosen_exit_name,
+ entry_conn->socks_request->address);
+ connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TIMEOUT);
+ } SMARTLIST_FOREACH_END(conn);
+}
+
+/** A circuit failed to finish on its last hop <b>info</b>. If there
+ * are any streams waiting with this exit node in mind, but they
+ * don't absolutely require it, make them give up on it.
+ */
+void
+circuit_discard_optional_exit_enclaves(extend_info_t *info)
+{
+ entry_connection_t *entry_conn;
+ const node_t *r1, *r2;
+
+ smartlist_t *conns = get_connection_array();
+ SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
+ if (conn->marked_for_close ||
+ conn->type != CONN_TYPE_AP ||
+ conn->state != AP_CONN_STATE_CIRCUIT_WAIT)
+ continue;
+ entry_conn = TO_ENTRY_CONN(conn);
+ if (!entry_conn->chosen_exit_optional &&
+ !entry_conn->chosen_exit_retries)
+ continue;
+ r1 = node_get_by_nickname(entry_conn->chosen_exit_name,
+ NNF_NO_WARN_UNNAMED);
+ r2 = node_get_by_id(info->identity_digest);
+ if (!r1 || !r2 || r1 != r2)
+ continue;
+ tor_assert(entry_conn->socks_request);
+ if (entry_conn->chosen_exit_optional) {
+ log_info(LD_APP, "Giving up on enclave exit '%s' for destination %s.",
+ safe_str_client(entry_conn->chosen_exit_name),
+ escaped_safe_str_client(entry_conn->socks_request->address));
+ entry_conn->chosen_exit_optional = 0;
+ tor_free(entry_conn->chosen_exit_name); /* clears it */
+ /* if this port is dangerous, warn or reject it now that we don't
+ * think it'll be using an enclave. */
+ consider_plaintext_ports(entry_conn, entry_conn->socks_request->port);
+ }
+ if (entry_conn->chosen_exit_retries) {
+ if (--entry_conn->chosen_exit_retries == 0) { /* give up! */
+ clear_trackexithost_mappings(entry_conn->chosen_exit_name);
+ tor_free(entry_conn->chosen_exit_name); /* clears it */
+ /* if this port is dangerous, warn or reject it now that we don't
+ * think it'll be using an enclave. */
+ consider_plaintext_ports(entry_conn, entry_conn->socks_request->port);
+ }
+ }
+ } SMARTLIST_FOREACH_END(conn);
+}
+
+/** The AP connection <b>conn</b> has just failed while attaching or
+ * sending a BEGIN or resolving on <b>circ</b>, but another circuit
+ * might work. Detach the circuit, and either reattach it, launch a
+ * new circuit, tell the controller, or give up as appropriate.
+ *
+ * Returns -1 on error, 1 on success, 0 on not-yet-sure.
+ */
+int
+connection_ap_detach_retriable(entry_connection_t *conn,
+ origin_circuit_t *circ,
+ int reason)
+{
+ control_event_stream_status(conn, STREAM_EVENT_FAILED_RETRIABLE, reason);
+ ENTRY_TO_CONN(conn)->timestamp_last_read_allowed = time(NULL);
+
+ /* Roll back path bias use state so that we probe the circuit
+ * if nothing else succeeds on it */
+ pathbias_mark_use_rollback(circ);
+
+ if (conn->pending_optimistic_data) {
+ buf_set_to_copy(&conn->sending_optimistic_data,
+ conn->pending_optimistic_data);
+ }
+
+ if (!get_options()->LeaveStreamsUnattached || conn->use_begindir) {
+ /* If we're attaching streams ourself, or if this connection is
+ * a tunneled directory connection, then just attach it. */
+ ENTRY_TO_CONN(conn)->state = AP_CONN_STATE_CIRCUIT_WAIT;
+ circuit_detach_stream(TO_CIRCUIT(circ),ENTRY_TO_EDGE_CONN(conn));
+ connection_ap_mark_as_pending_circuit(conn);
+ } else {
+ CONNECTION_AP_EXPECT_NONPENDING(conn);
+ ENTRY_TO_CONN(conn)->state = AP_CONN_STATE_CONTROLLER_WAIT;
+ circuit_detach_stream(TO_CIRCUIT(circ),ENTRY_TO_EDGE_CONN(conn));
+ }
+ return 0;
+}
+
+/** Check if <b>conn</b> is using a dangerous port. Then warn and/or
+ * reject depending on our config options. */
+static int
+consider_plaintext_ports(entry_connection_t *conn, uint16_t port)
+{
+ const or_options_t *options = get_options();
+ int reject = smartlist_contains_int_as_string(
+ options->RejectPlaintextPorts, port);
+
+ if (smartlist_contains_int_as_string(options->WarnPlaintextPorts, port)) {
+ log_warn(LD_APP, "Application request to port %d: this port is "
+ "commonly used for unencrypted protocols. Please make sure "
+ "you don't send anything you would mind the rest of the "
+ "Internet reading!%s", port, reject ? " Closing." : "");
+ control_event_client_status(LOG_WARN, "DANGEROUS_PORT PORT=%d RESULT=%s",
+ port, reject ? "REJECT" : "WARN");
+ }
+
+ if (reject) {
+ log_info(LD_APP, "Port %d listed in RejectPlaintextPorts. Closing.", port);
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
+ return -1;
+ }
+
+ return 0;
+}
+
+/** How many times do we try connecting with an exit configured via
+ * TrackHostExits before concluding that it won't work any more and trying a
+ * different one? */
+#define TRACKHOSTEXITS_RETRIES 5
+
+/** Call connection_ap_handshake_rewrite_and_attach() unless a controller
+ * asked us to leave streams unattached. Return 0 in that case.
+ *
+ * See connection_ap_handshake_rewrite_and_attach()'s
+ * documentation for arguments and return value.
+ */
+MOCK_IMPL(int,
+connection_ap_rewrite_and_attach_if_allowed,(entry_connection_t *conn,
+ origin_circuit_t *circ,
+ crypt_path_t *cpath))
+{
+ const or_options_t *options = get_options();
+
+ if (options->LeaveStreamsUnattached) {
+ CONNECTION_AP_EXPECT_NONPENDING(conn);
+ ENTRY_TO_CONN(conn)->state = AP_CONN_STATE_CONTROLLER_WAIT;
+ return 0;
+ }
+ return connection_ap_handshake_rewrite_and_attach(conn, circ, cpath);
+}
+
+/** Try to perform any map-based rewriting of the target address in
+ * <b>conn</b>, filling in the fields of <b>out</b> as we go, and modifying
+ * conn->socks_request.address as appropriate.
+ */
+STATIC void
+connection_ap_handshake_rewrite(entry_connection_t *conn,
+ rewrite_result_t *out)
+{
+ socks_request_t *socks = conn->socks_request;
+ const or_options_t *options = get_options();
+ tor_addr_t addr_tmp;
+
+ /* Initialize all the fields of 'out' to reasonable defaults */
+ out->automap = 0;
+ out->exit_source = ADDRMAPSRC_NONE;
+ out->map_expires = TIME_MAX;
+ out->end_reason = 0;
+ out->should_close = 0;
+ out->orig_address[0] = 0;
+
+ /* We convert all incoming addresses to lowercase. */
+ tor_strlower(socks->address);
+ /* Remember the original address. */
+ strlcpy(out->orig_address, socks->address, sizeof(out->orig_address));
+ log_debug(LD_APP,"Client asked for %s:%d",
+ safe_str_client(socks->address),
+ socks->port);
+
+ /* Check for whether this is a .exit address. By default, those are
+ * disallowed when they're coming straight from the client, but you're
+ * allowed to have them in MapAddress commands and so forth. */
+ if (!strcmpend(socks->address, ".exit")) {
+ log_warn(LD_APP, "The \".exit\" notation is disabled in Tor due to "
+ "security risks.");
+ control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s",
+ escaped(socks->address));
+ out->end_reason = END_STREAM_REASON_TORPROTOCOL;
+ out->should_close = 1;
+ return;
+ }
+
+ /* Remember the original address so we can tell the user about what
+ * they actually said, not just what it turned into. */
+ /* XXX yes, this is the same as out->orig_address above. One is
+ * in the output, and one is in the connection. */
+ if (! conn->original_dest_address) {
+ /* Is the 'if' necessary here? XXXX */
+ conn->original_dest_address = tor_strdup(conn->socks_request->address);
+ }
+
+ /* First, apply MapAddress and MAPADDRESS mappings. We need to do
+ * these only for non-reverse lookups, since they don't exist for those.
+ * We also need to do this before we consider automapping, since we might
+ * e.g. resolve irc.oftc.net into irconionaddress.onion, at which point
+ * we'd need to automap it. */
+ if (socks->command != SOCKS_COMMAND_RESOLVE_PTR) {
+ const unsigned rewrite_flags = AMR_FLAG_USE_MAPADDRESS;
+ if (addressmap_rewrite(socks->address, sizeof(socks->address),
+ rewrite_flags, &out->map_expires, &out->exit_source)) {
+ control_event_stream_status(conn, STREAM_EVENT_REMAP,
+ REMAP_STREAM_SOURCE_CACHE);
+ }
+ }
+
+ /* Now see if we need to create or return an existing Hostname->IP
+ * automapping. Automapping happens when we're asked to resolve a
+ * hostname, and AutomapHostsOnResolve is set, and the hostname has a
+ * suffix listed in AutomapHostsSuffixes. It's a handy feature
+ * that lets you have Tor assign e.g. IPv6 addresses for .onion
+ * names, and return them safely from DNSPort.
+ */
+ if (socks->command == SOCKS_COMMAND_RESOLVE &&
+ tor_addr_parse(&addr_tmp, socks->address)<0 &&
+ options->AutomapHostsOnResolve) {
+ /* Check the suffix... */
+ out->automap = addressmap_address_should_automap(socks->address, options);
+ if (out->automap) {
+ /* If we get here, then we should apply an automapping for this. */
+ const char *new_addr;
+ /* We return an IPv4 address by default, or an IPv6 address if we
+ * are allowed to do so. */
+ int addr_type = RESOLVED_TYPE_IPV4;
+ if (conn->socks_request->socks_version != 4) {
+ if (!conn->entry_cfg.ipv4_traffic ||
+ (conn->entry_cfg.ipv6_traffic && conn->entry_cfg.prefer_ipv6) ||
+ conn->entry_cfg.prefer_ipv6_virtaddr)
+ addr_type = RESOLVED_TYPE_IPV6;
+ }
+ /* Okay, register the target address as automapped, and find the new
+ * address we're supposed to give as a resolve answer. (Return a cached
+ * value if we've looked up this address before.)
+ */
+ new_addr = addressmap_register_virtual_address(
+ addr_type, tor_strdup(socks->address));
+ if (! new_addr) {
+ log_warn(LD_APP, "Unable to automap address %s",
+ escaped_safe_str(socks->address));
+ out->end_reason = END_STREAM_REASON_INTERNAL;
+ out->should_close = 1;
+ return;
+ }
+ log_info(LD_APP, "Automapping %s to %s",
+ escaped_safe_str_client(socks->address),
+ safe_str_client(new_addr));
+ strlcpy(socks->address, new_addr, sizeof(socks->address));
+ }
+ }
+
+ /* Now handle reverse lookups, if they're in the cache. This doesn't
+ * happen too often, since client-side DNS caching is off by default,
+ * and very deprecated. */
+ if (socks->command == SOCKS_COMMAND_RESOLVE_PTR) {
+ unsigned rewrite_flags = 0;
+ if (conn->entry_cfg.use_cached_ipv4_answers)
+ rewrite_flags |= AMR_FLAG_USE_IPV4_DNS;
+ if (conn->entry_cfg.use_cached_ipv6_answers)
+ rewrite_flags |= AMR_FLAG_USE_IPV6_DNS;
+
+ if (addressmap_rewrite_reverse(socks->address, sizeof(socks->address),
+ rewrite_flags, &out->map_expires)) {
+ char *result = tor_strdup(socks->address);
+ /* remember _what_ is supposed to have been resolved. */
+ tor_snprintf(socks->address, sizeof(socks->address), "REVERSE[%s]",
+ out->orig_address);
+ connection_ap_handshake_socks_resolved(conn, RESOLVED_TYPE_HOSTNAME,
+ strlen(result), (uint8_t*)result,
+ -1,
+ out->map_expires);
+ tor_free(result);
+ out->end_reason = END_STREAM_REASON_DONE |
+ END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED;
+ out->should_close = 1;
+ return;
+ }
+
+ /* Hang on, did we find an answer saying that this is a reverse lookup for
+ * an internal address? If so, we should reject it if we're configured to
+ * do so. */
+ if (options->ClientDNSRejectInternalAddresses) {
+ /* Don't let clients try to do a reverse lookup on 10.0.0.1. */
+ tor_addr_t addr;
+ int ok;
+ ok = tor_addr_parse_PTR_name(
+ &addr, socks->address, AF_UNSPEC, 1);
+ if (ok == 1 && tor_addr_is_internal(&addr, 0)) {
+ connection_ap_handshake_socks_resolved(conn, RESOLVED_TYPE_ERROR,
+ 0, NULL, -1, TIME_MAX);
+ out->end_reason = END_STREAM_REASON_SOCKSPROTOCOL |
+ END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED;
+ out->should_close = 1;
+ return;
+ }
+ }
+ }
+
+ /* If we didn't automap it before, then this is still the address that
+ * came straight from the user, mapped according to any
+ * MapAddress/MAPADDRESS commands. Now apply other mappings,
+ * including previously registered Automap entries (IP back to
+ * hostname), TrackHostExits entries, and client-side DNS cache
+ * entries (if they're turned on).
+ */
+ if (socks->command != SOCKS_COMMAND_RESOLVE_PTR &&
+ !out->automap) {
+ unsigned rewrite_flags = AMR_FLAG_USE_AUTOMAP | AMR_FLAG_USE_TRACKEXIT;
+ addressmap_entry_source_t exit_source2;
+ if (conn->entry_cfg.use_cached_ipv4_answers)
+ rewrite_flags |= AMR_FLAG_USE_IPV4_DNS;
+ if (conn->entry_cfg.use_cached_ipv6_answers)
+ rewrite_flags |= AMR_FLAG_USE_IPV6_DNS;
+ if (addressmap_rewrite(socks->address, sizeof(socks->address),
+ rewrite_flags, &out->map_expires, &exit_source2)) {
+ control_event_stream_status(conn, STREAM_EVENT_REMAP,
+ REMAP_STREAM_SOURCE_CACHE);
+ }
+ if (out->exit_source == ADDRMAPSRC_NONE) {
+ /* If it wasn't a .exit before, maybe it turned into a .exit. Remember
+ * the original source of a .exit. */
+ out->exit_source = exit_source2;
+ }
+ }
+
+ /* Check to see whether we're about to use an address in the virtual
+ * range without actually having gotten it from an Automap. */
+ if (!out->automap && address_is_in_virtual_range(socks->address)) {
+ /* This address was probably handed out by
+ * client_dns_get_unmapped_address, but the mapping was discarded for some
+ * reason. Or the user typed in a virtual address range manually. We
+ * *don't* want to send the address through Tor; that's likely to fail,
+ * and may leak information.
+ */
+ log_warn(LD_APP,"Missing mapping for virtual address '%s'. Refusing.",
+ safe_str_client(socks->address));
+ out->end_reason = END_STREAM_REASON_INTERNAL;
+ out->should_close = 1;
+ return;
+ }
+}
+
+/** We just received a SOCKS request in <b>conn</b> to an onion address of type
+ * <b>addresstype</b>. Start connecting to the onion service. */
+static int
+connection_ap_handle_onion(entry_connection_t *conn,
+ socks_request_t *socks,
+ origin_circuit_t *circ,
+ hostname_type_t addresstype)
+{
+ time_t now = approx_time();
+ connection_t *base_conn = ENTRY_TO_CONN(conn);
+
+ /* If .onion address requests are disabled, refuse the request */
+ if (!conn->entry_cfg.onion_traffic) {
+ log_warn(LD_APP, "Onion address %s requested from a port with .onion "
+ "disabled", safe_str_client(socks->address));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
+ return -1;
+ }
+
+ /* Check whether it's RESOLVE or RESOLVE_PTR. We don't handle those
+ * for hidden service addresses. */
+ if (SOCKS_COMMAND_IS_RESOLVE(socks->command)) {
+ /* if it's a resolve request, fail it right now, rather than
+ * building all the circuits and then realizing it won't work. */
+ log_warn(LD_APP,
+ "Resolve requests to hidden services not allowed. Failing.");
+ connection_ap_handshake_socks_resolved(conn,RESOLVED_TYPE_ERROR,
+ 0,NULL,-1,TIME_MAX);
+ connection_mark_unattached_ap(conn,
+ END_STREAM_REASON_SOCKSPROTOCOL |
+ END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED);
+ return -1;
+ }
+
+ /* If we were passed a circuit, then we need to fail. .onion addresses
+ * only work when we launch our own circuits for now. */
+ if (circ) {
+ log_warn(LD_CONTROL, "Attachstream to a circuit is not "
+ "supported for .onion addresses currently. Failing.");
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
+ return -1;
+ }
+
+ /* Interface: regardless of HS version, after the block below we should have
+ set onion_address, rend_cache_lookup_result, and descriptor_is_usable. */
+ const char *onion_address = NULL;
+ int rend_cache_lookup_result = -ENOENT;
+ int descriptor_is_usable = 0;
+
+ if (addresstype == ONION_V2_HOSTNAME) { /* it's a v2 hidden service */
+ rend_cache_entry_t *entry = NULL;
+ /* Look up if we have client authorization configured for this hidden
+ * service. If we do, associate it with the rend_data. */
+ rend_service_authorization_t *client_auth =
+ rend_client_lookup_service_authorization(socks->address);
+
+ const uint8_t *cookie = NULL;
+ rend_auth_type_t auth_type = REND_NO_AUTH;
+ if (client_auth) {
+ log_info(LD_REND, "Using previously configured client authorization "
+ "for hidden service request.");
+ auth_type = client_auth->auth_type;
+ cookie = client_auth->descriptor_cookie;
+ }
+
+ /* Fill in the rend_data field so we can start doing a connection to
+ * a hidden service. */
+ rend_data_t *rend_data = ENTRY_TO_EDGE_CONN(conn)->rend_data =
+ rend_data_client_create(socks->address, NULL, (char *) cookie,
+ auth_type);
+ if (rend_data == NULL) {
+ return -1;
+ }
+ onion_address = rend_data_get_address(rend_data);
+ log_info(LD_REND,"Got a hidden service request for ID '%s'",
+ safe_str_client(onion_address));
+
+ rend_cache_lookup_result = rend_cache_lookup_entry(onion_address,-1,
+ &entry);
+ if (!rend_cache_lookup_result && entry) {
+ descriptor_is_usable = rend_client_any_intro_points_usable(entry);
+ }
+ } else { /* it's a v3 hidden service */
+ tor_assert(addresstype == ONION_V3_HOSTNAME);
+ const hs_descriptor_t *cached_desc = NULL;
+ int retval;
+ /* Create HS conn identifier with HS pubkey */
+ hs_ident_edge_conn_t *hs_conn_ident =
+ tor_malloc_zero(sizeof(hs_ident_edge_conn_t));
+
+ retval = hs_parse_address(socks->address, &hs_conn_ident->identity_pk,
+ NULL, NULL);
+ if (retval < 0) {
+ log_warn(LD_GENERAL, "failed to parse hs address");
+ tor_free(hs_conn_ident);
+ return -1;
+ }
+ ENTRY_TO_EDGE_CONN(conn)->hs_ident = hs_conn_ident;
+
+ onion_address = socks->address;
+
+ /* Check the v3 desc cache */
+ cached_desc = hs_cache_lookup_as_client(&hs_conn_ident->identity_pk);
+ if (cached_desc) {
+ rend_cache_lookup_result = 0;
+ descriptor_is_usable =
+ hs_client_any_intro_points_usable(&hs_conn_ident->identity_pk,
+ cached_desc);
+ log_info(LD_GENERAL, "Found %s descriptor in cache for %s. %s.",
+ (descriptor_is_usable) ? "usable" : "unusable",
+ safe_str_client(onion_address),
+ (descriptor_is_usable) ? "Not fetching." : "Refetching.");
+ } else {
+ rend_cache_lookup_result = -ENOENT;
+ }
+ }
+
+ /* Lookup the given onion address. If invalid, stop right now.
+ * Otherwise, we might have it in the cache or not. */
+ unsigned int refetch_desc = 0;
+ if (rend_cache_lookup_result < 0) {
+ switch (-rend_cache_lookup_result) {
+ case EINVAL:
+ /* We should already have rejected this address! */
+ log_warn(LD_BUG,"Invalid service name '%s'",
+ safe_str_client(onion_address));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
+ return -1;
+ case ENOENT:
+ /* We didn't have this; we should look it up. */
+ log_info(LD_REND, "No descriptor found in our cache for %s. Fetching.",
+ safe_str_client(onion_address));
+ refetch_desc = 1;
+ break;
+ default:
+ log_warn(LD_BUG, "Unknown cache lookup error %d",
+ rend_cache_lookup_result);
+ return -1;
+ }
+ }
+
+ /* Help predict that we'll want to do hidden service circuits in the
+ * future. We're not sure if it will need a stable circuit yet, but
+ * we know we'll need *something*. */
+ rep_hist_note_used_internal(now, 0, 1);
+
+ /* Now we have a descriptor but is it usable or not? If not, refetch.
+ * Also, a fetch could have been requested if the onion address was not
+ * found in the cache previously. */
+ if (refetch_desc || !descriptor_is_usable) {
+ edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(conn);
+ connection_ap_mark_as_non_pending_circuit(conn);
+ base_conn->state = AP_CONN_STATE_RENDDESC_WAIT;
+ if (addresstype == ONION_V2_HOSTNAME) {
+ tor_assert(edge_conn->rend_data);
+ rend_client_refetch_v2_renddesc(edge_conn->rend_data);
+ /* Whatever the result of the refetch, we don't go further. */
+ return 0;
+ } else {
+ tor_assert(addresstype == ONION_V3_HOSTNAME);
+ tor_assert(edge_conn->hs_ident);
+ /* Attempt to fetch the hsv3 descriptor. Check the retval to see how it
+ * went and act accordingly. */
+ int ret = hs_client_refetch_hsdesc(&edge_conn->hs_ident->identity_pk);
+ switch (ret) {
+ case HS_CLIENT_FETCH_MISSING_INFO:
+ /* Keeping the connection in descriptor wait state is fine because
+ * once we get enough dirinfo or a new live consensus, the HS client
+ * subsystem is notified and every connection in that state will
+ * trigger a fetch for the service key. */
+ case HS_CLIENT_FETCH_LAUNCHED:
+ case HS_CLIENT_FETCH_PENDING:
+ case HS_CLIENT_FETCH_HAVE_DESC:
+ return 0;
+ case HS_CLIENT_FETCH_ERROR:
+ case HS_CLIENT_FETCH_NO_HSDIRS:
+ case HS_CLIENT_FETCH_NOT_ALLOWED:
+ /* Can't proceed further and better close the SOCKS request. */
+ return -1;
+ }
+ }
+ }
+
+ /* We have the descriptor! So launch a connection to the HS. */
+ log_info(LD_REND, "Descriptor is here. Great.");
+
+ base_conn->state = AP_CONN_STATE_CIRCUIT_WAIT;
+ /* We'll try to attach it at the next event loop, or whenever
+ * we call connection_ap_attach_pending() */
+ connection_ap_mark_as_pending_circuit(conn);
+ return 0;
+}
+
+/** Connection <b>conn</b> just finished its socks handshake, or the
+ * controller asked us to take care of it. If <b>circ</b> is defined,
+ * then that's where we'll want to attach it. Otherwise we have to
+ * figure it out ourselves.
+ *
+ * First, parse whether it's a .exit address, remap it, and so on. Then
+ * if it's for a general circuit, try to attach it to a circuit (or launch
+ * one as needed), else if it's for a rendezvous circuit, fetch a
+ * rendezvous descriptor first (or attach/launch a circuit if the
+ * rendezvous descriptor is already here and fresh enough).
+ *
+ * The stream will exit from the hop
+ * indicated by <b>cpath</b>, or from the last hop in circ's cpath if
+ * <b>cpath</b> is NULL.
+ */
+int
+connection_ap_handshake_rewrite_and_attach(entry_connection_t *conn,
+ origin_circuit_t *circ,
+ crypt_path_t *cpath)
+{
+ socks_request_t *socks = conn->socks_request;
+ const or_options_t *options = get_options();
+ connection_t *base_conn = ENTRY_TO_CONN(conn);
+ time_t now = time(NULL);
+ rewrite_result_t rr;
+
+ /* First we'll do the rewrite part. Let's see if we get a reasonable
+ * answer.
+ */
+ memset(&rr, 0, sizeof(rr));
+ connection_ap_handshake_rewrite(conn,&rr);
+
+ if (rr.should_close) {
+ /* connection_ap_handshake_rewrite told us to close the connection:
+ * either because it sent back an answer, or because it sent back an
+ * error */
+ connection_mark_unattached_ap(conn, rr.end_reason);
+ if (END_STREAM_REASON_DONE == (rr.end_reason & END_STREAM_REASON_MASK))
+ return 0;
+ else
+ return -1;
+ }
+
+ const time_t map_expires = rr.map_expires;
+ const int automap = rr.automap;
+ const addressmap_entry_source_t exit_source = rr.exit_source;
+
+ /* Now, we parse the address to see if it's an .onion or .exit or
+ * other special address.
+ */
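+  /* For example (names illustrative): "foo.exit" parses as EXIT_HOSTNAME,
+   * a 16-character base32 label ending in ".onion" as ONION_V2_HOSTNAME,
+   * a 56-character one as ONION_V3_HOSTNAME, and an unrecognized ".onion"
+   * name as BAD_HOSTNAME. */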
+ const hostname_type_t addresstype = parse_extended_hostname(socks->address);
+
+ /* Now see whether the hostname is bogus. This could happen because of an
+ * onion hostname whose format we don't recognize. */
+ if (addresstype == BAD_HOSTNAME) {
+ control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s",
+ escaped(socks->address));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
+ return -1;
+ }
+
+ /* If this is a .exit hostname, strip off the .name.exit part, and
+   * see whether we're willing to connect there, and otherwise handle the
+ * .exit address.
+ *
+ * We'll set chosen_exit_name and/or close the connection as appropriate.
+ */
+ if (addresstype == EXIT_HOSTNAME) {
+ /* If StrictNodes is not set, then .exit overrides ExcludeNodes but
+ * not ExcludeExitNodes. */
+ routerset_t *excludeset = options->StrictNodes ?
+ options->ExcludeExitNodesUnion_ : options->ExcludeExitNodes;
+ const node_t *node = NULL;
+
+ /* If this .exit was added by an AUTOMAP, then it came straight from
+ * a user. That's not safe. */
+ if (exit_source == ADDRMAPSRC_AUTOMAP) {
+ /* Whoops; this one is stale. It must have gotten added earlier?
+ * (Probably this is not possible, since AllowDotExit no longer
+ * exists.) */
+ log_warn(LD_APP,"Stale automapped address for '%s.exit'. Refusing.",
+ safe_str_client(socks->address));
+ control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s",
+ escaped(socks->address));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
+ tor_assert_nonfatal_unreached();
+ return -1;
+ }
+
+ /* Double-check to make sure there are no .exits coming from
+ * impossible/weird sources. */
+ if (exit_source == ADDRMAPSRC_DNS || exit_source == ADDRMAPSRC_NONE) {
+ /* It shouldn't be possible to get a .exit address from any of these
+ * sources. */
+ log_warn(LD_BUG,"Address '%s.exit', with impossible source for the "
+ ".exit part. Refusing.",
+ safe_str_client(socks->address));
+ control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s",
+ escaped(socks->address));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
+ return -1;
+ }
+
+ tor_assert(!automap);
+
+ /* Now, find the character before the .(name) part.
+ * (The ".exit" part got stripped off by "parse_extended_hostname").
+ *
+ * We're going to put the exit name into conn->chosen_exit_name, and
+ * look up a node correspondingly. */
+ char *s = strrchr(socks->address,'.');
+ if (s) {
+      /* The address was of the form "(stuff).(name).exit". */
+ if (s[1] != '\0') {
+ /* Looks like a real .exit one. */
+ conn->chosen_exit_name = tor_strdup(s+1);
+ node = node_get_by_nickname(conn->chosen_exit_name, 0);
+
+ if (exit_source == ADDRMAPSRC_TRACKEXIT) {
+          /* We get TRACKHOSTEXITS_RETRIES tries with this exit before we
+           * expire the addressmap entry. */
+ conn->chosen_exit_retries = TRACKHOSTEXITS_RETRIES;
+ }
+ *s = 0;
+ } else {
+ /* Oops, the address was (stuff)..exit. That's not okay. */
+ log_warn(LD_APP,"Malformed exit address '%s.exit'. Refusing.",
+ safe_str_client(socks->address));
+ control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s",
+ escaped(socks->address));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
+ return -1;
+ }
+ } else {
+ /* It looks like they just asked for "foo.exit". That's a special
+ * form that means (foo's address).foo.exit. */
+
+ conn->chosen_exit_name = tor_strdup(socks->address);
+ node = node_get_by_nickname(conn->chosen_exit_name, 0);
+ if (node) {
+ *socks->address = 0;
+ node_get_address_string(node, socks->address, sizeof(socks->address));
+ }
+ }
+
+ /* Now make sure that the chosen exit exists... */
+ if (!node) {
+ log_warn(LD_APP,
+ "Unrecognized relay in exit address '%s.exit'. Refusing.",
+ safe_str_client(socks->address));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
+ return -1;
+ }
+ /* ...and make sure that it isn't excluded. */
+ if (routerset_contains_node(excludeset, node)) {
+ log_warn(LD_APP,
+ "Excluded relay in exit address '%s.exit'. Refusing.",
+ safe_str_client(socks->address));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
+ return -1;
+ }
+ /* XXXX-1090 Should we also allow foo.bar.exit if ExitNodes is set and
+ Bar is not listed in it? I say yes, but our revised manpage branch
+ implies no. */
+ }
+
+ /* Now, we handle everything that isn't a .onion address. */
+ if (addresstype != ONION_V2_HOSTNAME && addresstype != ONION_V3_HOSTNAME) {
+ /* Not a hidden-service request. It's either a hostname or an IP,
+ * possibly with a .exit that we stripped off. We're going to check
+ * if we're allowed to connect/resolve there, and then launch the
+ * appropriate request. */
+
+ /* Check for funny characters in the address. */
+ if (address_is_invalid_destination(socks->address, 1)) {
+ control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s",
+ escaped(socks->address));
+ log_warn(LD_APP,
+ "Destination '%s' seems to be an invalid hostname. Failing.",
+ safe_str_client(socks->address));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
+ return -1;
+ }
+
+#ifdef ENABLE_TOR2WEB_MODE
+ /* If we're running in Tor2webMode, we don't allow anything BUT .onion
+ * addresses. */
+ if (options->Tor2webMode) {
+ log_warn(LD_APP, "Refusing to connect to non-hidden-service hostname "
+ "or IP address %s because tor2web mode is enabled.",
+ safe_str_client(socks->address));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
+ return -1;
+ }
+#endif /* defined(ENABLE_TOR2WEB_MODE) */
+
+ /* socks->address is a non-onion hostname or IP address.
+ * If we can't do any non-onion requests, refuse the connection.
+ * If we have a hostname but can't do DNS, refuse the connection.
+ * If we have an IP address, but we can't use that address family,
+ * refuse the connection.
+ *
+ * If we can do DNS requests, and we can use at least one address family,
+ * then we have to resolve the address first. Then we'll know if it
+ * resolves to a usable address family. */
+
+ /* First, check if all non-onion traffic is disabled */
+ if (!conn->entry_cfg.dns_request && !conn->entry_cfg.ipv4_traffic
+ && !conn->entry_cfg.ipv6_traffic) {
+ log_warn(LD_APP, "Refusing to connect to non-hidden-service hostname "
+ "or IP address %s because Port has OnionTrafficOnly set (or "
+ "NoDNSRequest, NoIPv4Traffic, and NoIPv6Traffic).",
+ safe_str_client(socks->address));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
+ return -1;
+ }
+
+ /* Then check if we have a hostname or IP address, and whether DNS or
+ * the IP address family are permitted. Reject if not. */
+ tor_addr_t dummy_addr;
+ int socks_family = tor_addr_parse(&dummy_addr, socks->address);
+ /* family will be -1 for a non-onion hostname that's not an IP */
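+    /* E.g. (addresses illustrative): "203.0.113.7" parses as AF_INET,
+     * "2001:db8::1" as AF_INET6, and "www.example.com" doesn't parse,
+     * so socks_family stays -1. */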
+ if (socks_family == -1) {
+ if (!conn->entry_cfg.dns_request) {
+ log_warn(LD_APP, "Refusing to connect to hostname %s "
+ "because Port has NoDNSRequest set.",
+ safe_str_client(socks->address));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
+ return -1;
+ }
+ } else if (socks_family == AF_INET) {
+ if (!conn->entry_cfg.ipv4_traffic) {
+ log_warn(LD_APP, "Refusing to connect to IPv4 address %s because "
+ "Port has NoIPv4Traffic set.",
+ safe_str_client(socks->address));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
+ return -1;
+ }
+ } else if (socks_family == AF_INET6) {
+ if (!conn->entry_cfg.ipv6_traffic) {
+ log_warn(LD_APP, "Refusing to connect to IPv6 address %s because "
+ "Port has NoIPv6Traffic set.",
+ safe_str_client(socks->address));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
+ return -1;
+ }
+ } else {
+ tor_assert_nonfatal_unreached_once();
+ }
+
+ /* See if this is a hostname lookup that we can answer immediately.
+ * (For example, an attempt to look up the IP address for an IP address.)
+ */
+ if (socks->command == SOCKS_COMMAND_RESOLVE) {
+ tor_addr_t answer;
+ /* Reply to resolves immediately if we can. */
+ if (tor_addr_parse(&answer, socks->address) >= 0) {/* is it an IP? */
+ /* remember _what_ is supposed to have been resolved. */
+ strlcpy(socks->address, rr.orig_address, sizeof(socks->address));
+ connection_ap_handshake_socks_resolved_addr(conn, &answer, -1,
+ map_expires);
+ connection_mark_unattached_ap(conn,
+ END_STREAM_REASON_DONE |
+ END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED);
+ return 0;
+ }
+ tor_assert(!automap);
+ rep_hist_note_used_resolve(now); /* help predict this next time */
+ } else if (socks->command == SOCKS_COMMAND_CONNECT) {
+ /* Now see if this is a connect request that we can reject immediately */
+
+ tor_assert(!automap);
+ /* Don't allow connections to port 0. */
+ if (socks->port == 0) {
+ log_notice(LD_APP,"Application asked to connect to port 0. Refusing.");
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
+ return -1;
+ }
+ /* You can't make connections to internal addresses, by default.
+ * Exceptions are begindir requests (where the address is meaningless),
+ * or cases where you've hand-configured a particular exit, thereby
+ * making the local address meaningful. */
+ if (options->ClientRejectInternalAddresses &&
+ !conn->use_begindir && !conn->chosen_exit_name && !circ) {
+ /* If we reach this point then we don't want to allow internal
+ * addresses. Check if we got one. */
+ tor_addr_t addr;
+ if (tor_addr_hostname_is_local(socks->address) ||
+ (tor_addr_parse(&addr, socks->address) >= 0 &&
+ tor_addr_is_internal(&addr, 0))) {
+ /* If this is an explicit private address with no chosen exit node,
+ * then we really don't want to try to connect to it. That's
+ * probably an error. */
+ if (conn->is_transparent_ap) {
+#define WARN_INTRVL_LOOP 300
+ static ratelim_t loop_warn_limit = RATELIM_INIT(WARN_INTRVL_LOOP);
+ char *m;
+ if ((m = rate_limit_log(&loop_warn_limit, approx_time()))) {
+ log_warn(LD_NET,
+ "Rejecting request for anonymous connection to private "
+ "address %s on a TransPort or NATDPort. Possible loop "
+ "in your NAT rules?%s", safe_str_client(socks->address),
+ m);
+ tor_free(m);
+ }
+ } else {
+#define WARN_INTRVL_PRIV 300
+ static ratelim_t priv_warn_limit = RATELIM_INIT(WARN_INTRVL_PRIV);
+ char *m;
+ if ((m = rate_limit_log(&priv_warn_limit, approx_time()))) {
+ log_warn(LD_NET,
+ "Rejecting SOCKS request for anonymous connection to "
+ "private address %s.%s",
+ safe_str_client(socks->address),m);
+ tor_free(m);
+ }
+ }
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_PRIVATE_ADDR);
+ return -1;
+ }
+ } /* end "if we should check for internal addresses" */
+
+ /* Okay. We're still doing a CONNECT, and it wasn't a private
+ * address. Here we do special handling for literal IP addresses,
+ * to see if we should reject this preemptively, and to set up
+ * fields in conn->entry_cfg to tell the exit what AF we want. */
+ {
+ tor_addr_t addr;
+ /* XXX Duplicate call to tor_addr_parse. */
+ if (tor_addr_parse(&addr, socks->address) >= 0) {
+ /* If we reach this point, it's an IPv4 or an IPv6 address. */
+ sa_family_t family = tor_addr_family(&addr);
+
+ if ((family == AF_INET && ! conn->entry_cfg.ipv4_traffic) ||
+ (family == AF_INET6 && ! conn->entry_cfg.ipv6_traffic)) {
+ /* You can't do an IPv4 address on a v6-only socks listener,
+ * or vice versa. */
+ log_warn(LD_NET, "Rejecting SOCKS request for an IP address "
+ "family that this listener does not support.");
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
+ return -1;
+ } else if (family == AF_INET6 && socks->socks_version == 4) {
+ /* You can't make a socks4 request to an IPv6 address. Socks4
+ * doesn't support that. */
+ log_warn(LD_NET, "Rejecting SOCKS4 request for an IPv6 address.");
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
+ return -1;
+ } else if (socks->socks_version == 4 &&
+ !conn->entry_cfg.ipv4_traffic) {
+ /* You can't do any kind of Socks4 request when IPv4 is forbidden.
+ *
+ * XXX raise this check outside the enclosing block? */
+ log_warn(LD_NET, "Rejecting SOCKS4 request on a listener with "
+ "no IPv4 traffic supported.");
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
+ return -1;
+ } else if (family == AF_INET6) {
+ /* Tell the exit: we won't accept any ipv4 connection to an IPv6
+ * address. */
+ conn->entry_cfg.ipv4_traffic = 0;
+ } else if (family == AF_INET) {
+ /* Tell the exit: we won't accept any ipv6 connection to an IPv4
+ * address. */
+ conn->entry_cfg.ipv6_traffic = 0;
+ }
+ }
+ }
+
+ /* we never allow IPv6 answers on socks4. (TODO: Is this smart?) */
+ if (socks->socks_version == 4)
+ conn->entry_cfg.ipv6_traffic = 0;
+
+ /* Still handling CONNECT. Now, check for exit enclaves. (Which we
+ * don't do on BEGIN_DIR, or when there is a chosen exit.)
+ *
+ * TODO: Should we remove this? Exit enclaves are nutty and don't
+ * work very well
+ */
+ if (!conn->use_begindir && !conn->chosen_exit_name && !circ) {
+ /* see if we can find a suitable enclave exit */
+ const node_t *r =
+ router_find_exact_exit_enclave(socks->address, socks->port);
+ if (r) {
+ log_info(LD_APP,
+ "Redirecting address %s to exit at enclave router %s",
+ safe_str_client(socks->address), node_describe(r));
+ /* use the hex digest, not nickname, in case there are two
+ routers with this nickname */
+ conn->chosen_exit_name =
+ tor_strdup(hex_str(r->identity, DIGEST_LEN));
+ conn->chosen_exit_optional = 1;
+ }
+ }
+
+ /* Still handling CONNECT: warn or reject if it's using a dangerous
+ * port. */
+ if (!conn->use_begindir && !conn->chosen_exit_name && !circ)
+ if (consider_plaintext_ports(conn, socks->port) < 0)
+ return -1;
+
+ /* Remember the port so that we will predict that more requests
+ there will happen in the future. */
+ if (!conn->use_begindir) {
+ /* help predict this next time */
+ rep_hist_note_used_port(now, socks->port);
+ }
+ } else if (socks->command == SOCKS_COMMAND_RESOLVE_PTR) {
+ rep_hist_note_used_resolve(now); /* help predict this next time */
+ /* no extra processing needed */
+ } else {
+ /* We should only be doing CONNECT, RESOLVE, or RESOLVE_PTR! */
+ tor_fragile_assert();
+ }
+
+ /* Okay. At this point we've set chosen_exit_name if needed, rewritten the
+ * address, and decided not to reject it for any number of reasons. Now
+ * mark the connection as waiting for a circuit, and try to attach it!
+ */
+ base_conn->state = AP_CONN_STATE_CIRCUIT_WAIT;
+
+ /* If we were given a circuit to attach to, try to attach. Otherwise,
+ * try to find a good one and attach to that. */
+ int rv;
+ if (circ) {
+ rv = connection_ap_handshake_attach_chosen_circuit(conn, circ, cpath);
+ } else {
+ /* We'll try to attach it at the next event loop, or whenever
+ * we call connection_ap_attach_pending() */
+ connection_ap_mark_as_pending_circuit(conn);
+ rv = 0;
+ }
+
+ /* If the above function returned 0 then we're waiting for a circuit.
+   * If it returned 1, we're attached. Both are okay. But if it returned
+ * -1, there was an error, so make sure the connection is marked, and
+ * return -1. */
+ if (rv < 0) {
+ if (!base_conn->marked_for_close)
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_CANT_ATTACH);
+ return -1;
+ }
+
+ return 0;
+ } else {
+ /* If we get here, it's a request for a .onion address! */
+ tor_assert(addresstype == ONION_V2_HOSTNAME ||
+ addresstype == ONION_V3_HOSTNAME);
+ tor_assert(!automap);
+ return connection_ap_handle_onion(conn, socks, circ, addresstype);
+ }
+
+ return 0; /* unreached but keeps the compiler happy */
+}
+
+#ifdef TRANS_PF
+static int pf_socket = -1;
+int
+get_pf_socket(void)
+{
+ int pf;
+ /* This should be opened before dropping privileges. */
+ if (pf_socket >= 0)
+ return pf_socket;
+
+#if defined(OpenBSD)
+ /* only works on OpenBSD */
+ pf = tor_open_cloexec("/dev/pf", O_RDONLY, 0);
+#else
+ /* works on NetBSD and FreeBSD */
+ pf = tor_open_cloexec("/dev/pf", O_RDWR, 0);
+#endif /* defined(OpenBSD) */
+
+ if (pf < 0) {
+ log_warn(LD_NET, "open(\"/dev/pf\") failed: %s", strerror(errno));
+ return -1;
+ }
+
+ pf_socket = pf;
+ return pf_socket;
+}
+#endif /* defined(TRANS_PF) */
+
+#if defined(TRANS_NETFILTER) || defined(TRANS_PF) || \
+ defined(TRANS_TPROXY)
+/** Try to fill in the address of <b>req</b> from the socket configured
+ * with <b>conn</b>. */
+static int
+destination_from_socket(entry_connection_t *conn, socks_request_t *req)
+{
+ struct sockaddr_storage orig_dst;
+ socklen_t orig_dst_len = sizeof(orig_dst);
+ tor_addr_t addr;
+
+#ifdef TRANS_TPROXY
+ if (get_options()->TransProxyType_parsed == TPT_TPROXY) {
+ if (getsockname(ENTRY_TO_CONN(conn)->s, (struct sockaddr*)&orig_dst,
+ &orig_dst_len) < 0) {
+ int e = tor_socket_errno(ENTRY_TO_CONN(conn)->s);
+ log_warn(LD_NET, "getsockname() failed: %s", tor_socket_strerror(e));
+ return -1;
+ }
+ goto done;
+ }
+#endif /* defined(TRANS_TPROXY) */
+
+#ifdef TRANS_NETFILTER
+ int rv = -1;
+ switch (ENTRY_TO_CONN(conn)->socket_family) {
+#ifdef TRANS_NETFILTER_IPV4
+ case AF_INET:
+ rv = getsockopt(ENTRY_TO_CONN(conn)->s, SOL_IP, SO_ORIGINAL_DST,
+ (struct sockaddr*)&orig_dst, &orig_dst_len);
+ break;
+#endif /* defined(TRANS_NETFILTER_IPV4) */
+#ifdef TRANS_NETFILTER_IPV6
+ case AF_INET6:
+ rv = getsockopt(ENTRY_TO_CONN(conn)->s, SOL_IPV6, IP6T_SO_ORIGINAL_DST,
+ (struct sockaddr*)&orig_dst, &orig_dst_len);
+ break;
+#endif /* defined(TRANS_NETFILTER_IPV6) */
+ default:
+ log_warn(LD_BUG,
+ "Received transparent data from an unsuported socket family %d",
+ ENTRY_TO_CONN(conn)->socket_family);
+ return -1;
+ }
+ if (rv < 0) {
+ int e = tor_socket_errno(ENTRY_TO_CONN(conn)->s);
+ log_warn(LD_NET, "getsockopt() failed: %s", tor_socket_strerror(e));
+ return -1;
+ }
+ goto done;
+#elif defined(TRANS_PF)
+ if (getsockname(ENTRY_TO_CONN(conn)->s, (struct sockaddr*)&orig_dst,
+ &orig_dst_len) < 0) {
+ int e = tor_socket_errno(ENTRY_TO_CONN(conn)->s);
+ log_warn(LD_NET, "getsockname() failed: %s", tor_socket_strerror(e));
+ return -1;
+ }
+ goto done;
+#else
+ (void)conn;
+ (void)req;
+ log_warn(LD_BUG, "Unable to determine destination from socket.");
+ return -1;
+#endif /* defined(TRANS_NETFILTER) || ... */
+
+ done:
+ tor_addr_from_sockaddr(&addr, (struct sockaddr*)&orig_dst, &req->port);
+ tor_addr_to_str(req->address, &addr, sizeof(req->address), 1);
+
+ return 0;
+}
+#endif /* defined(TRANS_NETFILTER) || defined(TRANS_PF) || ... */
+
+#ifdef TRANS_PF
+static int
+destination_from_pf(entry_connection_t *conn, socks_request_t *req)
+{
+ struct sockaddr_storage proxy_addr;
+ socklen_t proxy_addr_len = sizeof(proxy_addr);
+ struct sockaddr *proxy_sa = (struct sockaddr*) &proxy_addr;
+ struct pfioc_natlook pnl;
+ tor_addr_t addr;
+ int pf = -1;
+
+ if (getsockname(ENTRY_TO_CONN(conn)->s, (struct sockaddr*)&proxy_addr,
+ &proxy_addr_len) < 0) {
+ int e = tor_socket_errno(ENTRY_TO_CONN(conn)->s);
+ log_warn(LD_NET, "getsockname() to determine transocks destination "
+ "failed: %s", tor_socket_strerror(e));
+ return -1;
+ }
+
+#ifdef __FreeBSD__
+ if (get_options()->TransProxyType_parsed == TPT_IPFW) {
+ /* ipfw(8) is used and in this case getsockname returned the original
+ destination */
+ if (tor_addr_from_sockaddr(&addr, proxy_sa, &req->port) < 0) {
+ tor_fragile_assert();
+ return -1;
+ }
+
+ tor_addr_to_str(req->address, &addr, sizeof(req->address), 0);
+
+ return 0;
+ }
+#endif /* defined(__FreeBSD__) */
+
+ memset(&pnl, 0, sizeof(pnl));
+ pnl.proto = IPPROTO_TCP;
+ pnl.direction = PF_OUT;
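+  /* Describe the client-to-proxy TCP flow to pf and ask what the pre-NAT
+   * destination was; the answer comes back in pnl.rdaddr/pnl.rdport, which
+   * we read below. */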
+ if (proxy_sa->sa_family == AF_INET) {
+ struct sockaddr_in *sin = (struct sockaddr_in *)proxy_sa;
+ pnl.af = AF_INET;
+ pnl.saddr.v4.s_addr = tor_addr_to_ipv4n(&ENTRY_TO_CONN(conn)->addr);
+ pnl.sport = htons(ENTRY_TO_CONN(conn)->port);
+ pnl.daddr.v4.s_addr = sin->sin_addr.s_addr;
+ pnl.dport = sin->sin_port;
+ } else if (proxy_sa->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)proxy_sa;
+ pnl.af = AF_INET6;
+ memcpy(&pnl.saddr.v6, tor_addr_to_in6(&ENTRY_TO_CONN(conn)->addr),
+ sizeof(struct in6_addr));
+ pnl.sport = htons(ENTRY_TO_CONN(conn)->port);
+ memcpy(&pnl.daddr.v6, &sin6->sin6_addr, sizeof(struct in6_addr));
+ pnl.dport = sin6->sin6_port;
+ } else {
+ log_warn(LD_NET, "getsockname() gave an unexpected address family (%d)",
+ (int)proxy_sa->sa_family);
+ return -1;
+ }
+
+ pf = get_pf_socket();
+ if (pf<0)
+ return -1;
+
+ if (ioctl(pf, DIOCNATLOOK, &pnl) < 0) {
+ log_warn(LD_NET, "ioctl(DIOCNATLOOK) failed: %s", strerror(errno));
+ return -1;
+ }
+
+ if (pnl.af == AF_INET) {
+ tor_addr_from_ipv4n(&addr, pnl.rdaddr.v4.s_addr);
+ } else if (pnl.af == AF_INET6) {
+ tor_addr_from_in6(&addr, &pnl.rdaddr.v6);
+ } else {
+ tor_fragile_assert();
+ return -1;
+ }
+
+ tor_addr_to_str(req->address, &addr, sizeof(req->address), 1);
+ req->port = ntohs(pnl.rdport);
+
+ return 0;
+}
+#endif /* defined(TRANS_PF) */
+
+/** Fetch the original destination address and port from a
+ * system-specific interface and put them into a
+ * socks_request_t as if they came from a socks request.
+ *
+ * Return -1 if an error prevents fetching the destination,
+ * else return 0.
+ */
+static int
+connection_ap_get_original_destination(entry_connection_t *conn,
+ socks_request_t *req)
+{
+#ifdef TRANS_NETFILTER
+ return destination_from_socket(conn, req);
+#elif defined(TRANS_PF)
+ const or_options_t *options = get_options();
+
+ if (options->TransProxyType_parsed == TPT_PF_DIVERT)
+ return destination_from_socket(conn, req);
+
+ if (options->TransProxyType_parsed == TPT_DEFAULT ||
+ options->TransProxyType_parsed == TPT_IPFW)
+ return destination_from_pf(conn, req);
+
+ (void)conn;
+ (void)req;
+ log_warn(LD_BUG, "Proxy destination determination mechanism %s unknown.",
+ options->TransProxyType);
+ return -1;
+#else
+ (void)conn;
+ (void)req;
+ log_warn(LD_BUG, "Called connection_ap_get_original_destination, but no "
+ "transparent proxy method was configured.");
+ return -1;
+#endif /* defined(TRANS_NETFILTER) || ... */
+}
+
+/** connection_edge_process_inbuf() found a conn in state
+ * socks_wait. See if conn->inbuf has the right bytes to proceed with
+ * the socks handshake.
+ *
+ * If the handshake is complete, send it to
+ * connection_ap_handshake_rewrite_and_attach().
+ *
+ * Return -1 if an unexpected error with conn occurs (and mark it for close),
+ * else return 0.
+ */
+static int
+connection_ap_handshake_process_socks(entry_connection_t *conn)
+{
+ socks_request_t *socks;
+ int sockshere;
+ const or_options_t *options = get_options();
+ int had_reply = 0;
+ connection_t *base_conn = ENTRY_TO_CONN(conn);
+
+ tor_assert(conn);
+ tor_assert(base_conn->type == CONN_TYPE_AP);
+ tor_assert(base_conn->state == AP_CONN_STATE_SOCKS_WAIT);
+ tor_assert(conn->socks_request);
+ socks = conn->socks_request;
+
+ log_debug(LD_APP,"entered.");
+
+ sockshere = fetch_from_buf_socks(base_conn->inbuf, socks,
+ options->TestSocks, options->SafeSocks);
+
+ if (socks->replylen) {
+ had_reply = 1;
+ connection_buf_add((const char*)socks->reply, socks->replylen,
+ base_conn);
+ socks->replylen = 0;
+ if (sockshere == -1) {
+ /* An invalid request just got a reply, no additional
+ * one is necessary. */
+ socks->has_finished = 1;
+ }
+ }
+
+ if (sockshere == 0) {
+ log_debug(LD_APP,"socks handshake not all here yet.");
+ return 0;
+ } else if (sockshere == -1) {
+ if (!had_reply) {
+ log_warn(LD_APP,"Fetching socks handshake failed. Closing.");
+ connection_ap_handshake_socks_reply(conn, NULL, 0,
+ END_STREAM_REASON_SOCKSPROTOCOL);
+ }
+ connection_mark_unattached_ap(conn,
+ END_STREAM_REASON_SOCKSPROTOCOL |
+ END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED);
+ return -1;
+ } /* else socks handshake is done, continue processing */
+
+ if (SOCKS_COMMAND_IS_CONNECT(socks->command))
+ control_event_stream_status(conn, STREAM_EVENT_NEW, 0);
+ else
+ control_event_stream_status(conn, STREAM_EVENT_NEW_RESOLVE, 0);
+
+ return connection_ap_rewrite_and_attach_if_allowed(conn, NULL, NULL);
+}
+
+/** connection_init_accepted_conn() found a new trans AP conn.
+ * Get the original destination and send it to
+ * connection_ap_handshake_rewrite_and_attach().
+ *
+ * Return -1 if an unexpected error with conn occurs (and mark it for
+ * close), else return 0.
+ */
+int
+connection_ap_process_transparent(entry_connection_t *conn)
+{
+ socks_request_t *socks;
+
+ tor_assert(conn);
+ tor_assert(conn->socks_request);
+ socks = conn->socks_request;
+
+ /* pretend that a socks handshake completed so we don't try to
+ * send a socks reply down a transparent conn */
+ socks->command = SOCKS_COMMAND_CONNECT;
+ socks->has_finished = 1;
+
+ log_debug(LD_APP,"entered.");
+
+ if (connection_ap_get_original_destination(conn, socks) < 0) {
+ log_warn(LD_APP,"Fetching original destination failed. Closing.");
+ connection_mark_unattached_ap(conn,
+ END_STREAM_REASON_CANT_FETCH_ORIG_DEST);
+ return -1;
+ }
+ /* we have the original destination */
+
+ control_event_stream_status(conn, STREAM_EVENT_NEW, 0);
+
+ return connection_ap_rewrite_and_attach_if_allowed(conn, NULL, NULL);
+}
+
+/** connection_edge_process_inbuf() found a conn in state natd_wait. See if
+ * conn-\>inbuf has the right bytes to proceed. See FreeBSD's libalias(3) and
+ * ProxyEncodeTcpStream() in src/lib/libalias/alias_proxy.c for the encoding
+ * form of the original destination.
+ *
+ * If the original destination is complete, send it to
+ * connection_ap_handshake_rewrite_and_attach().
+ *
+ * Return -1 if an unexpected error with conn occurs (and mark it for
+ * close), else return 0.
+ */
+static int
+connection_ap_process_natd(entry_connection_t *conn)
+{
+ char tmp_buf[36], *tbuf, *daddr;
+ size_t tlen = 30;
+ int err, port_ok;
+ socks_request_t *socks;
+
+ tor_assert(conn);
+ tor_assert(ENTRY_TO_CONN(conn)->state == AP_CONN_STATE_NATD_WAIT);
+ tor_assert(conn->socks_request);
+ socks = conn->socks_request;
+
+ log_debug(LD_APP,"entered.");
+
+ /* look for LF-terminated "[DEST ip_addr port]"
+ * where ip_addr is a dotted-quad and port is in string form */
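+  /* A well-formed line looks like, e.g. (destination illustrative):
+   *   "[DEST 192.0.2.5 443]\n" */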
+ err = connection_buf_get_line(ENTRY_TO_CONN(conn), tmp_buf, &tlen);
+ if (err == 0)
+ return 0;
+ if (err < 0) {
+ log_warn(LD_APP,"NATD handshake failed (DEST too long). Closing");
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_INVALID_NATD_DEST);
+ return -1;
+ }
+
+ if (strcmpstart(tmp_buf, "[DEST ")) {
+ log_warn(LD_APP,"NATD handshake was ill-formed; closing. The client "
+ "said: %s",
+ escaped(tmp_buf));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_INVALID_NATD_DEST);
+ return -1;
+ }
+
+ daddr = tbuf = &tmp_buf[0] + 6; /* after end of "[DEST " */
+ if (!(tbuf = strchr(tbuf, ' '))) {
+ log_warn(LD_APP,"NATD handshake was ill-formed; closing. The client "
+ "said: %s",
+ escaped(tmp_buf));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_INVALID_NATD_DEST);
+ return -1;
+ }
+ *tbuf++ = '\0';
+
+ /* pretend that a socks handshake completed so we don't try to
+ * send a socks reply down a natd conn */
+ strlcpy(socks->address, daddr, sizeof(socks->address));
+ socks->port = (uint16_t)
+ tor_parse_long(tbuf, 10, 1, 65535, &port_ok, &daddr);
+ if (!port_ok) {
+ log_warn(LD_APP,"NATD handshake failed; port %s is ill-formed or out "
+ "of range.", escaped(tbuf));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_INVALID_NATD_DEST);
+ return -1;
+ }
+
+ socks->command = SOCKS_COMMAND_CONNECT;
+ socks->has_finished = 1;
+
+ control_event_stream_status(conn, STREAM_EVENT_NEW, 0);
+
+ ENTRY_TO_CONN(conn)->state = AP_CONN_STATE_CIRCUIT_WAIT;
+
+ return connection_ap_rewrite_and_attach_if_allowed(conn, NULL, NULL);
+}
+
+/** Called on an HTTP CONNECT entry connection when some bytes have arrived,
+ * but we have not yet received a full HTTP CONNECT request. Try to parse an
+ * HTTP CONNECT request from the connection's inbuf. On success, set up the
+ * connection's socks_request field and try to attach the connection. On
+ * failure, send an HTTP reply, and mark the connection.
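+ *
+ * A well-formed request looks like, e.g. (hostname illustrative):
+ *   "CONNECT example.com:443 HTTP/1.0\r\n\r\n"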
+ */
+STATIC int
+connection_ap_process_http_connect(entry_connection_t *conn)
+{
+ if (BUG(ENTRY_TO_CONN(conn)->state != AP_CONN_STATE_HTTP_CONNECT_WAIT))
+ return -1;
+
+ char *headers = NULL, *body = NULL;
+ char *command = NULL, *addrport = NULL;
+ char *addr = NULL;
+ size_t bodylen = 0;
+
+ const char *errmsg = NULL;
+ int rv = 0;
+
+ const int http_status =
+ fetch_from_buf_http(ENTRY_TO_CONN(conn)->inbuf, &headers, 8192,
+ &body, &bodylen, 1024, 0);
+ if (http_status < 0) {
+ /* Bad http status */
+ errmsg = "HTTP/1.0 400 Bad Request\r\n\r\n";
+ goto err;
+ } else if (http_status == 0) {
+ /* no HTTP request yet. */
+ goto done;
+ }
+
+ const int cmd_status = parse_http_command(headers, &command, &addrport);
+ if (cmd_status < 0) {
+ errmsg = "HTTP/1.0 400 Bad Request\r\n\r\n";
+ goto err;
+ }
+ tor_assert(command);
+ tor_assert(addrport);
+ if (strcasecmp(command, "connect")) {
+ errmsg = "HTTP/1.0 405 Method Not Allowed\r\n\r\n";
+ goto err;
+ }
+
+ tor_assert(conn->socks_request);
+ socks_request_t *socks = conn->socks_request;
+ uint16_t port;
+ if (tor_addr_port_split(LOG_WARN, addrport, &addr, &port) < 0) {
+ errmsg = "HTTP/1.0 400 Bad Request\r\n\r\n";
+ goto err;
+ }
+ if (strlen(addr) >= MAX_SOCKS_ADDR_LEN) {
+ errmsg = "HTTP/1.0 414 Request-URI Too Long\r\n\r\n";
+ goto err;
+ }
+
+ /* Abuse the 'username' and 'password' fields here. They are already an
+ * abuse. */
+ {
+ char *authorization = http_get_header(headers, "Proxy-Authorization: ");
+ if (authorization) {
+ socks->username = authorization; // steal reference
+ socks->usernamelen = strlen(authorization);
+ }
+ char *isolation = http_get_header(headers, "X-Tor-Stream-Isolation: ");
+ if (isolation) {
+ socks->password = isolation; // steal reference
+ socks->passwordlen = strlen(isolation);
+ }
+ }
+
+ socks->command = SOCKS_COMMAND_CONNECT;
+ socks->listener_type = CONN_TYPE_AP_HTTP_CONNECT_LISTENER;
+ strlcpy(socks->address, addr, sizeof(socks->address));
+ socks->port = port;
+
+ control_event_stream_status(conn, STREAM_EVENT_NEW, 0);
+
+ rv = connection_ap_rewrite_and_attach_if_allowed(conn, NULL, NULL);
+
+ // XXXX send a "100 Continue" message?
+
+ goto done;
+
+ err:
+ if (BUG(errmsg == NULL))
+ errmsg = "HTTP/1.0 400 Bad Request\r\n\r\n";
+ log_warn(LD_EDGE, "Saying %s", escaped(errmsg));
+ connection_buf_add(errmsg, strlen(errmsg), ENTRY_TO_CONN(conn));
+ connection_mark_unattached_ap(conn,
+ END_STREAM_REASON_HTTPPROTOCOL|
+ END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED);
+
+ done:
+ tor_free(headers);
+ tor_free(body);
+ tor_free(command);
+ tor_free(addrport);
+ tor_free(addr);
+ return rv;
+}
+
+/** Iterate over the two bytes of stream_id until we get one that is not
+ * already in use; return it. Return 0 if we can't get a unique stream_id.
+ */
+streamid_t
+get_unique_stream_id_by_circ(origin_circuit_t *circ)
+{
+ edge_connection_t *tmpconn;
+ streamid_t test_stream_id;
+ uint32_t attempts=0;
+
+ again:
+ test_stream_id = circ->next_stream_id++;
+ if (++attempts > 1<<16) {
+ /* Make sure we don't loop forever if all stream_id's are used. */
+ log_warn(LD_APP,"No unused stream IDs. Failing.");
+ return 0;
+ }
+ if (test_stream_id == 0)
+ goto again;
+ for (tmpconn = circ->p_streams; tmpconn; tmpconn=tmpconn->next_stream)
+ if (tmpconn->stream_id == test_stream_id)
+ goto again;
+ return test_stream_id;
+}
+
+/** Return true iff <b>conn</b> is linked to a circuit and configured to use
+ * an exit that supports optimistic data. */
+static int
+connection_ap_supports_optimistic_data(const entry_connection_t *conn)
+{
+ const edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(conn);
+ /* We can only send optimistic data if we're connected to an open
+ general circuit. */
+ if (edge_conn->on_circuit == NULL ||
+ edge_conn->on_circuit->state != CIRCUIT_STATE_OPEN ||
+ (edge_conn->on_circuit->purpose != CIRCUIT_PURPOSE_C_GENERAL &&
+ edge_conn->on_circuit->purpose != CIRCUIT_PURPOSE_C_HSDIR_GET &&
+ edge_conn->on_circuit->purpose != CIRCUIT_PURPOSE_S_HSDIR_POST &&
+ edge_conn->on_circuit->purpose != CIRCUIT_PURPOSE_C_REND_JOINED))
+ return 0;
+
+ return conn->may_use_optimistic_data;
+}
+
+/** Return a bitmask of BEGIN_FLAG_* flags that we should transmit in the
+ * RELAY_BEGIN cell for <b>ap_conn</b>. */
+static uint32_t
+connection_ap_get_begincell_flags(entry_connection_t *ap_conn)
+{
+ edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(ap_conn);
+ const node_t *exitnode = NULL;
+ const crypt_path_t *cpath_layer = edge_conn->cpath_layer;
+ uint32_t flags = 0;
+
+ /* No flags for begindir */
+ if (ap_conn->use_begindir)
+ return 0;
+
+ /* No flags for hidden services. */
+ if (edge_conn->on_circuit->purpose != CIRCUIT_PURPOSE_C_GENERAL)
+ return 0;
+
+ /* If only IPv4 is supported, no flags */
+ if (ap_conn->entry_cfg.ipv4_traffic && !ap_conn->entry_cfg.ipv6_traffic)
+ return 0;
+
+ if (! cpath_layer ||
+ ! cpath_layer->extend_info)
+ return 0;
+
+ if (!ap_conn->entry_cfg.ipv4_traffic)
+ flags |= BEGIN_FLAG_IPV4_NOT_OK;
+
+ exitnode = node_get_by_id(cpath_layer->extend_info->identity_digest);
+
+ if (ap_conn->entry_cfg.ipv6_traffic && exitnode) {
+ tor_addr_t a;
+ tor_addr_make_null(&a, AF_INET6);
+ if (compare_tor_addr_to_node_policy(&a, ap_conn->socks_request->port,
+ exitnode)
+ != ADDR_POLICY_REJECTED) {
+ /* Only say "IPv6 OK" if the exit node supports IPv6. Otherwise there's
+ * no point. */
+ flags |= BEGIN_FLAG_IPV6_OK;
+ }
+ }
+
+ if (flags == BEGIN_FLAG_IPV6_OK) {
+ /* When IPv4 and IPv6 are both allowed, consider whether to say we
+ * prefer IPv6. Otherwise there's no point in declaring a preference */
+ if (ap_conn->entry_cfg.prefer_ipv6)
+ flags |= BEGIN_FLAG_IPV6_PREFERRED;
+ }
+
+ if (flags == BEGIN_FLAG_IPV4_NOT_OK) {
+ log_warn(LD_EDGE, "I'm about to ask a node for a connection that I "
+ "am telling it to fulfil with neither IPv4 nor IPv6. That's "
+ "not going to work. Did you perhaps ask for an IPv6 address "
+ "on an IPv4Only port, or vice versa?");
+ }
+
+ return flags;
+}
+
+/** Write a relay begin cell, using destaddr and destport from ap_conn's
+ * socks_request field, and send it down circ.
+ *
+ * If ap_conn is broken, mark it for close and return -1. Else return 0.
+ */
+MOCK_IMPL(int,
+connection_ap_handshake_send_begin,(entry_connection_t *ap_conn))
+{
+ char payload[CELL_PAYLOAD_SIZE];
+ int payload_len;
+ int begin_type;
+ const or_options_t *options = get_options();
+ origin_circuit_t *circ;
+ edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(ap_conn);
+ connection_t *base_conn = TO_CONN(edge_conn);
+ tor_assert(edge_conn->on_circuit);
+ circ = TO_ORIGIN_CIRCUIT(edge_conn->on_circuit);
+
+ tor_assert(base_conn->type == CONN_TYPE_AP);
+ tor_assert(base_conn->state == AP_CONN_STATE_CIRCUIT_WAIT);
+ tor_assert(ap_conn->socks_request);
+ tor_assert(SOCKS_COMMAND_IS_CONNECT(ap_conn->socks_request->command));
+
+ edge_conn->stream_id = get_unique_stream_id_by_circ(circ);
+ if (edge_conn->stream_id==0) {
+ /* XXXX+ Instead of closing this stream, we should make it get
+ * retried on another circuit. */
+ connection_mark_unattached_ap(ap_conn, END_STREAM_REASON_INTERNAL);
+
+ /* Mark this circuit "unusable for new streams". */
+ mark_circuit_unusable_for_new_conns(circ);
+ return -1;
+ }
+
+ /* Set up begin cell flags. */
+ edge_conn->begincell_flags = connection_ap_get_begincell_flags(ap_conn);
+
+ tor_snprintf(payload,RELAY_PAYLOAD_SIZE, "%s:%d",
+ (circ->base_.purpose == CIRCUIT_PURPOSE_C_GENERAL) ?
+ ap_conn->socks_request->address : "",
+ ap_conn->socks_request->port);
+ payload_len = (int)strlen(payload)+1;
+ if (payload_len <= RELAY_PAYLOAD_SIZE - 4 && edge_conn->begincell_flags) {
+ set_uint32(payload + payload_len, htonl(edge_conn->begincell_flags));
+ payload_len += 4;
+ }
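+  /* The payload now holds, e.g. (destination illustrative),
+   * "www.example.com:80\0", optionally followed by a 4-byte network-order
+   * flags field. */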
+
+ log_info(LD_APP,
+ "Sending relay cell %d on circ %u to begin stream %d.",
+ (int)ap_conn->use_begindir,
+ (unsigned)circ->base_.n_circ_id,
+ edge_conn->stream_id);
+
+ begin_type = ap_conn->use_begindir ?
+ RELAY_COMMAND_BEGIN_DIR : RELAY_COMMAND_BEGIN;
+
+ /* Check that circuits are anonymised, based on their type. */
+ if (begin_type == RELAY_COMMAND_BEGIN) {
+ /* This connection is a standard OR connection.
+ * Make sure its path length is anonymous, or that we're in a
+ * non-anonymous mode. */
+ assert_circ_anonymity_ok(circ, options);
+ } else if (begin_type == RELAY_COMMAND_BEGIN_DIR) {
+ /* This connection is a begindir directory connection.
+ * Look at the linked directory connection to access the directory purpose.
+ * If a BEGINDIR connection is ever not linked, that's a bug. */
+ if (BUG(!base_conn->linked)) {
+ return -1;
+ }
+ connection_t *linked_dir_conn_base = base_conn->linked_conn;
+ /* If the linked connection has been unlinked by other code, we can't send
+ * a begin cell on it. */
+ if (!linked_dir_conn_base) {
+ return -1;
+ }
+ /* Sensitive directory connections must have an anonymous path length.
+ * Otherwise, directory connections are typically one-hop.
+ * This matches the earlier check for directory connection path anonymity
+ * in directory_initiate_request(). */
+ if (purpose_needs_anonymity(linked_dir_conn_base->purpose,
+ TO_DIR_CONN(linked_dir_conn_base)->router_purpose,
+ TO_DIR_CONN(linked_dir_conn_base)->requested_resource)) {
+ assert_circ_anonymity_ok(circ, options);
+ }
+ } else {
+ /* This code was written for the two connection types BEGIN and BEGIN_DIR
+ */
+ tor_assert_unreached();
+ }
+
+ if (connection_edge_send_command(edge_conn, begin_type,
+ begin_type == RELAY_COMMAND_BEGIN ? payload : NULL,
+ begin_type == RELAY_COMMAND_BEGIN ? payload_len : 0) < 0)
+ return -1; /* circuit is closed, don't continue */
+
+ edge_conn->package_window = STREAMWINDOW_START;
+ edge_conn->deliver_window = STREAMWINDOW_START;
+ base_conn->state = AP_CONN_STATE_CONNECT_WAIT;
+ log_info(LD_APP,"Address/port sent, ap socket "TOR_SOCKET_T_FORMAT
+ ", n_circ_id %u",
+ base_conn->s, (unsigned)circ->base_.n_circ_id);
+ control_event_stream_status(ap_conn, STREAM_EVENT_SENT_CONNECT, 0);
+
+ /* If there's queued-up data, send it now */
+ if ((connection_get_inbuf_len(base_conn) ||
+ ap_conn->sending_optimistic_data) &&
+ connection_ap_supports_optimistic_data(ap_conn)) {
+ log_info(LD_APP, "Sending up to %ld + %ld bytes of queued-up data",
+ (long)connection_get_inbuf_len(base_conn),
+ ap_conn->sending_optimistic_data ?
+ (long)buf_datalen(ap_conn->sending_optimistic_data) : 0);
+ if (connection_edge_package_raw_inbuf(edge_conn, 1, NULL) < 0) {
+ connection_mark_for_close(base_conn);
+ }
+ }
+
+ return 0;
+}
+
+/** Write a relay resolve cell, using destaddr and destport from ap_conn's
+ * socks_request field, and send it down circ.
+ *
+ * If ap_conn is broken, mark it for close and return -1. Else return 0.
+ */
+int
+connection_ap_handshake_send_resolve(entry_connection_t *ap_conn)
+{
+ int payload_len, command;
+ const char *string_addr;
+ char inaddr_buf[REVERSE_LOOKUP_NAME_BUF_LEN];
+ origin_circuit_t *circ;
+ edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(ap_conn);
+ connection_t *base_conn = TO_CONN(edge_conn);
+ tor_assert(edge_conn->on_circuit);
+ circ = TO_ORIGIN_CIRCUIT(edge_conn->on_circuit);
+
+ tor_assert(base_conn->type == CONN_TYPE_AP);
+ tor_assert(base_conn->state == AP_CONN_STATE_CIRCUIT_WAIT);
+ tor_assert(ap_conn->socks_request);
+ tor_assert(circ->base_.purpose == CIRCUIT_PURPOSE_C_GENERAL);
+
+ command = ap_conn->socks_request->command;
+ tor_assert(SOCKS_COMMAND_IS_RESOLVE(command));
+
+ edge_conn->stream_id = get_unique_stream_id_by_circ(circ);
+ if (edge_conn->stream_id==0) {
+ /* XXXX+ Instead of closing this stream, we should make it get
+ * retried on another circuit. */
+ connection_mark_unattached_ap(ap_conn, END_STREAM_REASON_INTERNAL);
+
+ /* Mark this circuit "unusable for new streams". */
+ mark_circuit_unusable_for_new_conns(circ);
+ return -1;
+ }
+
+ if (command == SOCKS_COMMAND_RESOLVE) {
+ string_addr = ap_conn->socks_request->address;
+ payload_len = (int)strlen(string_addr)+1;
+ } else {
+ /* command == SOCKS_COMMAND_RESOLVE_PTR */
+ const char *a = ap_conn->socks_request->address;
+ tor_addr_t addr;
+ int r;
+
+ /* We're doing a reverse lookup. The input could be an IP address, or
+ * could be an .in-addr.arpa or .ip6.arpa address */
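+    /* E.g. (addresses illustrative), "192.0.2.5" and
+     * "5.2.0.192.in-addr.arpa" both parse to the same tor_addr_t here. */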
+ r = tor_addr_parse_PTR_name(&addr, a, AF_UNSPEC, 1);
+ if (r <= 0) {
+ log_warn(LD_APP, "Rejecting ill-formed reverse lookup of %s",
+ safe_str_client(a));
+ connection_mark_unattached_ap(ap_conn, END_STREAM_REASON_INTERNAL);
+ return -1;
+ }
+
+ r = tor_addr_to_PTR_name(inaddr_buf, sizeof(inaddr_buf), &addr);
+ if (r < 0) {
+ log_warn(LD_BUG, "Couldn't generate reverse lookup hostname of %s",
+ safe_str_client(a));
+ connection_mark_unattached_ap(ap_conn, END_STREAM_REASON_INTERNAL);
+ return -1;
+ }
+
+ string_addr = inaddr_buf;
+ payload_len = (int)strlen(inaddr_buf)+1;
+ tor_assert(payload_len <= (int)sizeof(inaddr_buf));
+ }
+
+ log_debug(LD_APP,
+ "Sending relay cell to begin stream %d.", edge_conn->stream_id);
+
+ if (connection_edge_send_command(edge_conn,
+ RELAY_COMMAND_RESOLVE,
+ string_addr, payload_len) < 0)
+ return -1; /* circuit is closed, don't continue */
+
+ if (!base_conn->address) {
+ /* This might be unnecessary. XXXX */
+ base_conn->address = tor_addr_to_str_dup(&base_conn->addr);
+ }
+ base_conn->state = AP_CONN_STATE_RESOLVE_WAIT;
+ log_info(LD_APP,"Address sent for resolve, ap socket "TOR_SOCKET_T_FORMAT
+ ", n_circ_id %u",
+ base_conn->s, (unsigned)circ->base_.n_circ_id);
+ control_event_stream_status(ap_conn, STREAM_EVENT_SENT_RESOLVE, 0);
+ return 0;
+}
+
+/** Make an AP connection_t linked to the connection_t <b>partner</b>: create
+ * a new linked connection pair, attach one side to <b>partner</b>,
+ * connection_add it, initialize it to circuit_wait, and mark it as pending
+ * a circuit.
+ *
+ * Return the newly created end of the linked connection pair, or NULL on
+ * error.
+ */
+entry_connection_t *
+connection_ap_make_link(connection_t *partner,
+ char *address, uint16_t port,
+ const char *digest,
+ int session_group, int isolation_flags,
+ int use_begindir, int want_onehop)
+{
+ entry_connection_t *conn;
+ connection_t *base_conn;
+
+ log_info(LD_APP,"Making internal %s tunnel to %s:%d ...",
+ want_onehop ? "direct" : "anonymized",
+ safe_str_client(address), port);
+
+ conn = entry_connection_new(CONN_TYPE_AP, tor_addr_family(&partner->addr));
+ base_conn = ENTRY_TO_CONN(conn);
+ base_conn->linked = 1; /* so that we can add it safely below. */
+
+ /* populate conn->socks_request */
+
+ /* leave version at zero, so the socks_reply is empty */
+ conn->socks_request->socks_version = 0;
+ conn->socks_request->has_finished = 0; /* waiting for 'connected' */
+ strlcpy(conn->socks_request->address, address,
+ sizeof(conn->socks_request->address));
+ conn->socks_request->port = port;
+ conn->socks_request->command = SOCKS_COMMAND_CONNECT;
+ conn->want_onehop = want_onehop;
+ conn->use_begindir = use_begindir;
+ if (use_begindir) {
+ conn->chosen_exit_name = tor_malloc(HEX_DIGEST_LEN+2);
+ conn->chosen_exit_name[0] = '$';
+ tor_assert(digest);
+ base16_encode(conn->chosen_exit_name+1,HEX_DIGEST_LEN+1,
+ digest, DIGEST_LEN);
+ }
+
+ /* Populate isolation fields. */
+ conn->socks_request->listener_type = CONN_TYPE_DIR_LISTENER;
+ conn->original_dest_address = tor_strdup(address);
+ conn->entry_cfg.session_group = session_group;
+ conn->entry_cfg.isolation_flags = isolation_flags;
+
+ base_conn->address = tor_strdup("(Tor_internal)");
+ tor_addr_make_unspec(&base_conn->addr);
+ base_conn->port = 0;
+
+ connection_link_connections(partner, base_conn);
+
+ if (connection_add(base_conn) < 0) { /* no space, forget it */
+ connection_free(base_conn);
+ return NULL;
+ }
+
+ base_conn->state = AP_CONN_STATE_CIRCUIT_WAIT;
+
+ control_event_stream_status(conn, STREAM_EVENT_NEW, 0);
+
+ /* attaching to a dirty circuit is fine */
+ connection_ap_mark_as_pending_circuit(conn);
+ log_info(LD_APP,"... application connection created and linked.");
+ return conn;
+}
+
+/** Notify any interested controller connections about a new hostname resolve
+ * or resolve error. Takes the same arguments as does
+ * connection_ap_handshake_socks_resolved(). */
+static void
+tell_controller_about_resolved_result(entry_connection_t *conn,
+ int answer_type,
+ size_t answer_len,
+ const char *answer,
+ int ttl,
+ time_t expires)
+{
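+  /* Note that we recompute the expiry from the TTL; the expires value
+   * passed by the caller is overwritten here. */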
+ expires = time(NULL) + ttl;
+ if (answer_type == RESOLVED_TYPE_IPV4 && answer_len >= 4) {
+ char *cp = tor_dup_ip(ntohl(get_uint32(answer)));
+ control_event_address_mapped(conn->socks_request->address,
+ cp, expires, NULL, 0);
+ tor_free(cp);
+ } else if (answer_type == RESOLVED_TYPE_HOSTNAME && answer_len < 256) {
+ char *cp = tor_strndup(answer, answer_len);
+ control_event_address_mapped(conn->socks_request->address,
+ cp, expires, NULL, 0);
+ tor_free(cp);
+ } else {
+ control_event_address_mapped(conn->socks_request->address,
+ "<error>", time(NULL)+ttl,
+ "error=yes", 0);
+ }
+}
+
+/**
+ * As connection_ap_handshake_socks_resolved, but take a tor_addr_t to send
+ * as the answer.
+ */
+void
+connection_ap_handshake_socks_resolved_addr(entry_connection_t *conn,
+ const tor_addr_t *answer,
+ int ttl,
+ time_t expires)
+{
+ if (tor_addr_family(answer) == AF_INET) {
+ uint32_t a = tor_addr_to_ipv4n(answer); /* network order */
+ connection_ap_handshake_socks_resolved(conn,RESOLVED_TYPE_IPV4,4,
+ (uint8_t*)&a,
+ ttl, expires);
+ } else if (tor_addr_family(answer) == AF_INET6) {
+ const uint8_t *a = tor_addr_to_in6_addr8(answer);
+ connection_ap_handshake_socks_resolved(conn,RESOLVED_TYPE_IPV6,16,
+ a,
+ ttl, expires);
+ } else {
+ log_warn(LD_BUG, "Got called with address of unexpected family %d",
+ tor_addr_family(answer));
+ connection_ap_handshake_socks_resolved(conn,
+ RESOLVED_TYPE_ERROR,0,NULL,-1,-1);
+ }
+}
+
+/** Send an answer to an AP connection that has requested a DNS lookup via
+ * SOCKS. The type should be one of RESOLVED_TYPE_(IPV4|IPV6|HOSTNAME) or -1
+ * for unreachable; the answer should be in the format specified in the socks
+ * extensions document. <b>ttl</b> is the ttl for the answer, or -1 on
+ * certain errors or for values that didn't come via DNS. <b>expires</b> is
+ * a time when the answer expires, or -1 or TIME_MAX if there's a good TTL.
+ **/
+/* XXXX the use of the ttl and expires fields is nutty. Let's make this
+ * interface and those that use it less ugly. */
+MOCK_IMPL(void,
+connection_ap_handshake_socks_resolved,(entry_connection_t *conn,
+ int answer_type,
+ size_t answer_len,
+ const uint8_t *answer,
+ int ttl,
+ time_t expires))
+{
+ char buf[384];
+ size_t replylen;
+
+ if (ttl >= 0) {
+ if (answer_type == RESOLVED_TYPE_IPV4 && answer_len == 4) {
+ tor_addr_t a;
+ tor_addr_from_ipv4n(&a, get_uint32(answer));
+ if (! tor_addr_is_null(&a)) {
+ client_dns_set_addressmap(conn,
+ conn->socks_request->address, &a,
+ conn->chosen_exit_name, ttl);
+ }
+ } else if (answer_type == RESOLVED_TYPE_IPV6 && answer_len == 16) {
+ tor_addr_t a;
+ tor_addr_from_ipv6_bytes(&a, (char*)answer);
+ if (! tor_addr_is_null(&a)) {
+ client_dns_set_addressmap(conn,
+ conn->socks_request->address, &a,
+ conn->chosen_exit_name, ttl);
+ }
+ } else if (answer_type == RESOLVED_TYPE_HOSTNAME && answer_len < 256) {
+ char *cp = tor_strndup((char*)answer, answer_len);
+ client_dns_set_reverse_addressmap(conn,
+ conn->socks_request->address,
+ cp,
+ conn->chosen_exit_name, ttl);
+ tor_free(cp);
+ }
+ }
+
+ if (ENTRY_TO_EDGE_CONN(conn)->is_dns_request) {
+ if (conn->dns_server_request) {
+ /* We had a request on our DNS port: answer it. */
+ dnsserv_resolved(conn, answer_type, answer_len, (char*)answer, ttl);
+ conn->socks_request->has_finished = 1;
+ return;
+ } else {
+ /* This must be a request from the controller. Since answers to those
+ * requests are not cached, they do not generate an ADDRMAP event on
+ * their own. */
+ tell_controller_about_resolved_result(conn, answer_type, answer_len,
+ (char*)answer, ttl, expires);
+ conn->socks_request->has_finished = 1;
+ return;
+ }
+ /* We shouldn't need to free conn here; it gets marked by the caller. */
+ }
+
+ if (conn->socks_request->socks_version == 4) {
+ buf[0] = 0x00; /* version */
+ if (answer_type == RESOLVED_TYPE_IPV4 && answer_len == 4) {
+ buf[1] = SOCKS4_GRANTED;
+ set_uint16(buf+2, 0);
+ memcpy(buf+4, answer, 4); /* address */
+ replylen = SOCKS4_NETWORK_LEN;
+ } else { /* "error" */
+ buf[1] = SOCKS4_REJECT;
+ memset(buf+2, 0, 6);
+ replylen = SOCKS4_NETWORK_LEN;
+ }
+ } else if (conn->socks_request->socks_version == 5) {
+ /* SOCKS5 */
+ buf[0] = 0x05; /* version */
+ if (answer_type == RESOLVED_TYPE_IPV4 && answer_len == 4) {
+ buf[1] = SOCKS5_SUCCEEDED;
+ buf[2] = 0; /* reserved */
+ buf[3] = 0x01; /* IPv4 address type */
+ memcpy(buf+4, answer, 4); /* address */
+ set_uint16(buf+8, 0); /* port == 0. */
+ replylen = 10;
+ } else if (answer_type == RESOLVED_TYPE_IPV6 && answer_len == 16) {
+ buf[1] = SOCKS5_SUCCEEDED;
+ buf[2] = 0; /* reserved */
+ buf[3] = 0x04; /* IPv6 address type */
+ memcpy(buf+4, answer, 16); /* address */
+ set_uint16(buf+20, 0); /* port == 0. */
+ replylen = 22;
+ } else if (answer_type == RESOLVED_TYPE_HOSTNAME && answer_len < 256) {
+ buf[1] = SOCKS5_SUCCEEDED;
+ buf[2] = 0; /* reserved */
+ buf[3] = 0x03; /* Domainname address type */
+ buf[4] = (char)answer_len;
+ memcpy(buf+5, answer, answer_len); /* address */
+ set_uint16(buf+5+answer_len, 0); /* port == 0. */
+ replylen = 5+answer_len+2;
+ } else {
+ buf[1] = SOCKS5_HOST_UNREACHABLE;
+ memset(buf+2, 0, 8);
+ replylen = 10;
+ }
+ } else {
+ /* no socks version info; don't send anything back */
+ return;
+ }
+ connection_ap_handshake_socks_reply(conn, buf, replylen,
+ (answer_type == RESOLVED_TYPE_IPV4 ||
+ answer_type == RESOLVED_TYPE_IPV6 ||
+ answer_type == RESOLVED_TYPE_HOSTNAME) ?
+ 0 : END_STREAM_REASON_RESOLVEFAILED);
+}
+
+/** Send a socks reply to stream <b>conn</b>, using the appropriate
+ * socks version, etc, and mark <b>conn</b> as completed with SOCKS
+ * handshaking.
+ *
+ * If <b>reply</b> is defined, then write <b>replylen</b> bytes of it to conn
+ * and return, else reply based on <b>endreason</b> (one of
+ * END_STREAM_REASON_*). If <b>reply</b> is undefined, <b>endreason</b> can't
+ * be 0 or REASON_DONE. Send endreason to the controller, if appropriate.
+ */
+void
+connection_ap_handshake_socks_reply(entry_connection_t *conn, char *reply,
+ size_t replylen, int endreason)
+{
+ char buf[256];
+ socks5_reply_status_t status =
+ stream_end_reason_to_socks5_response(endreason);
+
+ tor_assert(conn->socks_request); /* make sure it's an AP stream */
+
+ if (!SOCKS_COMMAND_IS_RESOLVE(conn->socks_request->command)) {
+ control_event_stream_status(conn, status==SOCKS5_SUCCEEDED ?
+ STREAM_EVENT_SUCCEEDED : STREAM_EVENT_FAILED,
+ endreason);
+ }
+
+ /* Flag this stream's circuit as having completed a stream successfully
+ * (for path bias) */
+ if (status == SOCKS5_SUCCEEDED ||
+ endreason == END_STREAM_REASON_RESOLVEFAILED ||
+ endreason == END_STREAM_REASON_CONNECTREFUSED ||
+ endreason == END_STREAM_REASON_CONNRESET ||
+ endreason == END_STREAM_REASON_NOROUTE ||
+ endreason == END_STREAM_REASON_RESOURCELIMIT) {
+ if (!conn->edge_.on_circuit ||
+ !CIRCUIT_IS_ORIGIN(conn->edge_.on_circuit)) {
+ if (endreason != END_STREAM_REASON_RESOLVEFAILED) {
+ log_info(LD_BUG,
+ "No origin circuit for successful SOCKS stream %"PRIu64
+ ". Reason: %d",
+ (ENTRY_TO_CONN(conn)->global_identifier),
+ endreason);
+ }
+ /*
+ * Else DNS remaps and failed hidden service lookups can send us
+ * here with END_STREAM_REASON_RESOLVEFAILED; ignore it
+ *
+ * Perhaps we could make the test more precise; we can tell hidden
+ * services by conn->edge_.renddata != NULL; anything analogous for
+ * the DNS remap case?
+ */
+ } else {
+ // XXX: Hrmm. It looks like optimistic data can't go through this
+ // codepath, but someone should probably test it and make sure.
+ // We don't want to mark optimistically opened streams as successful.
+ pathbias_mark_use_success(TO_ORIGIN_CIRCUIT(conn->edge_.on_circuit));
+ }
+ }
+
+ if (conn->socks_request->has_finished) {
+ log_warn(LD_BUG, "(Harmless.) duplicate calls to "
+ "connection_ap_handshake_socks_reply.");
+ return;
+ }
+ if (replylen) { /* we already have a reply in mind */
+ connection_buf_add(reply, replylen, ENTRY_TO_CONN(conn));
+ conn->socks_request->has_finished = 1;
+ return;
+ }
+ if (conn->socks_request->listener_type ==
+ CONN_TYPE_AP_HTTP_CONNECT_LISTENER) {
+ const char *response = end_reason_to_http_connect_response_line(endreason);
+ if (!response) {
+ response = "HTTP/1.0 400 Bad Request\r\n\r\n";
+ }
+ connection_buf_add(response, strlen(response), ENTRY_TO_CONN(conn));
+ } else if (conn->socks_request->socks_version == 4) {
+ memset(buf,0,SOCKS4_NETWORK_LEN);
+ buf[1] = (status==SOCKS5_SUCCEEDED ? SOCKS4_GRANTED : SOCKS4_REJECT);
+ /* leave version, destport, destip zero */
+ connection_buf_add(buf, SOCKS4_NETWORK_LEN, ENTRY_TO_CONN(conn));
+ } else if (conn->socks_request->socks_version == 5) {
+ size_t buf_len;
+ memset(buf,0,sizeof(buf));
+ if (tor_addr_family(&conn->edge_.base_.addr) == AF_INET) {
+ buf[0] = 5; /* version 5 */
+ buf[1] = (char)status;
+ buf[2] = 0;
+ buf[3] = 1; /* ipv4 addr */
+ /* 4 bytes for the header, 2 bytes for the port, 4 for the address. */
+ buf_len = 10;
+ } else { /* AF_INET6. */
+ buf[0] = 5; /* version 5 */
+ buf[1] = (char)status;
+ buf[2] = 0;
+ buf[3] = 4; /* ipv6 addr */
+ /* 4 bytes for the header, 2 bytes for the port, 16 for the address. */
+ buf_len = 22;
+ }
+ connection_buf_add(buf,buf_len,ENTRY_TO_CONN(conn));
+ }
+ /* If socks_version isn't 4 or 5, don't send anything.
+ * This can happen in the case of AP bridges. */
+ conn->socks_request->has_finished = 1;
+ return;
+}
+
+/** Read a RELAY_BEGIN or RELAY_BEGIN_DIR cell from <b>cell</b>, decode it, and
+ * place the result in <b>bcell</b>. On success return 0; on failure return
+ * <0 and set *<b>end_reason_out</b> to the end reason we should send back to
+ * the client.
+ *
+ * Return -1 in the case where we want to send a RELAY_END cell, and < -1 when
+ * we don't.
+ **/
+STATIC int
+begin_cell_parse(const cell_t *cell, begin_cell_t *bcell,
+ uint8_t *end_reason_out)
+{
+ relay_header_t rh;
+ const uint8_t *body, *nul;
+
+ memset(bcell, 0, sizeof(*bcell));
+ *end_reason_out = END_STREAM_REASON_MISC;
+
+ relay_header_unpack(&rh, cell->payload);
+ if (rh.length > RELAY_PAYLOAD_SIZE) {
+ return -2; /*XXXX why not TORPROTOCOL? */
+ }
+
+ bcell->stream_id = rh.stream_id;
+
+ if (rh.command == RELAY_COMMAND_BEGIN_DIR) {
+ bcell->is_begindir = 1;
+ return 0;
+ } else if (rh.command != RELAY_COMMAND_BEGIN) {
+ log_warn(LD_BUG, "Got an unexpected command %d", (int)rh.command);
+ *end_reason_out = END_STREAM_REASON_INTERNAL;
+ return -1;
+ }
+
+ body = cell->payload + RELAY_HEADER_SIZE;
+ nul = memchr(body, 0, rh.length);
+ if (! nul) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Relay begin cell has no \\0. Closing.");
+ *end_reason_out = END_STREAM_REASON_TORPROTOCOL;
+ return -1;
+ }
+
+ if (tor_addr_port_split(LOG_PROTOCOL_WARN,
+ (char*)(body),
+ &bcell->address,&bcell->port)<0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Unable to parse addr:port in relay begin cell. Closing.");
+ *end_reason_out = END_STREAM_REASON_TORPROTOCOL;
+ return -1;
+ }
+ if (bcell->port == 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Missing port in relay begin cell. Closing.");
+ tor_free(bcell->address);
+ *end_reason_out = END_STREAM_REASON_TORPROTOCOL;
+ return -1;
+ }
+ if (body + rh.length >= nul + 4)
+ bcell->flags = ntohl(get_uint32(nul+1));
+
+ return 0;
+}
+
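+/* Illustrative sketch (editor's addition): the body that begin_cell_parse()
+ * accepts is "address:port" followed by a NUL and, optionally, a 32-bit
+ * flags field in network order.  A client-side encoder might look roughly
+ * like this; the function is hypothetical and not part of Tor. */
+#if 0
+static ssize_t
+example_encode_begin_body(uint8_t *out, size_t outlen,
+                          const char *address, uint16_t port, uint32_t flags)
+{
+  int n = tor_snprintf((char *)out, outlen, "%s:%u", address, port);
+  if (n < 0 || (size_t)n + 5 > outlen)
+    return -1;
+  /* tor_snprintf NUL-terminates, so the flags begin right after the NUL. */
+  set_uint32(out + n + 1, htonl(flags));
+  return n + 1 + 4;
+}
+#endif /* illustrative only */
+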
+/** For the given <b>circ</b> and the edge connection <b>conn</b>, set up the
+ * connection, attach it to the circuit, and connect it. Return 0 on success,
+ * or END_CIRC_AT_ORIGIN if we can't find the requested hidden service port,
+ * in which case the caller should close the circuit. */
+static int
+handle_hs_exit_conn(circuit_t *circ, edge_connection_t *conn)
+{
+ int ret;
+ origin_circuit_t *origin_circ;
+
+ assert_circuit_ok(circ);
+ tor_assert(circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED);
+ tor_assert(conn);
+
+ log_debug(LD_REND, "Connecting the hidden service rendezvous circuit "
+ "to the service destination.");
+
+ origin_circ = TO_ORIGIN_CIRCUIT(circ);
+ conn->base_.address = tor_strdup("(rendezvous)");
+ conn->base_.state = EXIT_CONN_STATE_CONNECTING;
+
+ /* The circuit either has an hs identifier for v3+ or a rend_data for legacy
+ * service. */
+ if (origin_circ->rend_data) {
+ conn->rend_data = rend_data_dup(origin_circ->rend_data);
+ tor_assert(connection_edge_is_rendezvous_stream(conn));
+ ret = rend_service_set_connection_addr_port(conn, origin_circ);
+ } else if (origin_circ->hs_ident) {
+ /* Setup the identifier to be the one for the circuit service. */
+ conn->hs_ident =
+ hs_ident_edge_conn_new(&origin_circ->hs_ident->identity_pk);
+ tor_assert(connection_edge_is_rendezvous_stream(conn));
+ ret = hs_service_set_conn_addr_port(origin_circ, conn);
+ } else {
+ /* We should never get here if the circuit's purpose is rendezvous. */
+ tor_assert_nonfatal_unreached();
+ return -1;
+ }
+ if (ret < 0) {
+ log_info(LD_REND, "Didn't find rendezvous service (addr%s, port %d)",
+ fmt_addr(&TO_CONN(conn)->addr), TO_CONN(conn)->port);
+    /* Send back reason DONE because we want to make hidden service port
+     * scanning harder. If we returned that the exit policy didn't match,
+     * it would be obvious that the port is closed, so instead we return
+     * DONE and kill the circuit. That way, a user (malicious or not)
+     * needs one circuit per bad port unless the port matches the policy
+     * of the hidden service. */
+ relay_send_end_cell_from_edge(conn->stream_id, circ,
+ END_STREAM_REASON_DONE,
+ origin_circ->cpath->prev);
+ connection_free_(TO_CONN(conn));
+
+ /* Drop the circuit here since it might be someone deliberately
+ * scanning the hidden service ports. Note that this mitigates port
+ * scanning by adding more work on the attacker side to successfully
+ * scan but does not fully solve it. */
+ if (ret < -1) {
+ return END_CIRC_AT_ORIGIN;
+ } else {
+ return 0;
+ }
+ }
+
+ /* Link the circuit and the connection crypt path. */
+ conn->cpath_layer = origin_circ->cpath->prev;
+
+ /* Add it into the linked list of p_streams on this circuit */
+ conn->next_stream = origin_circ->p_streams;
+ origin_circ->p_streams = conn;
+ conn->on_circuit = circ;
+ assert_circuit_ok(circ);
+
+ hs_inc_rdv_stream_counter(origin_circ);
+
+ /* Connect tor to the hidden service destination. */
+ connection_exit_connect(conn);
+
+ /* For path bias: This circuit was used successfully */
+ pathbias_mark_use_success(origin_circ);
+ return 0;
+}
+
+/** A relay 'begin' or 'begin_dir' cell has arrived, and either we are
+ * an exit hop for the circuit, or we are the origin and it is a
+ * rendezvous begin.
+ *
+ * Launch a new exit connection and initialize things appropriately.
+ *
+ * If it's a rendezvous stream, call connection_exit_connect() on
+ * it.
+ *
+ * For general streams, call dns_resolve() on it first, and only call
+ * connection_exit_connect() if the dns answer is already known.
+ *
+ * Note that we don't call connection_add() on the new stream! We wait
+ * for connection_exit_connect() to do that.
+ *
+ * Return -(some circuit end reason) if we want to tear down <b>circ</b>.
+ * Else return 0.
+ */
+int
+connection_exit_begin_conn(cell_t *cell, circuit_t *circ)
+{
+ edge_connection_t *n_stream;
+ relay_header_t rh;
+ char *address = NULL;
+ uint16_t port = 0;
+ or_circuit_t *or_circ = NULL;
+ origin_circuit_t *origin_circ = NULL;
+ crypt_path_t *layer_hint = NULL;
+ const or_options_t *options = get_options();
+ begin_cell_t bcell;
+ int rv;
+ uint8_t end_reason=0;
+
+ assert_circuit_ok(circ);
+ if (!CIRCUIT_IS_ORIGIN(circ)) {
+ or_circ = TO_OR_CIRCUIT(circ);
+ } else {
+ tor_assert(circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED);
+ origin_circ = TO_ORIGIN_CIRCUIT(circ);
+ layer_hint = origin_circ->cpath->prev;
+ }
+
+ relay_header_unpack(&rh, cell->payload);
+ if (rh.length > RELAY_PAYLOAD_SIZE)
+ return -END_CIRC_REASON_TORPROTOCOL;
+
+ if (!server_mode(options) &&
+ circ->purpose != CIRCUIT_PURPOSE_S_REND_JOINED) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Relay begin cell at non-server. Closing.");
+ relay_send_end_cell_from_edge(rh.stream_id, circ,
+ END_STREAM_REASON_EXITPOLICY, NULL);
+ return 0;
+ }
+
+ rv = begin_cell_parse(cell, &bcell, &end_reason);
+ if (rv < -1) {
+ return -END_CIRC_REASON_TORPROTOCOL;
+ } else if (rv == -1) {
+ tor_free(bcell.address);
+ relay_send_end_cell_from_edge(rh.stream_id, circ, end_reason, layer_hint);
+ return 0;
+ }
+
+ if (! bcell.is_begindir) {
+ /* Steal reference */
+ address = bcell.address;
+ port = bcell.port;
+
+ if (or_circ && or_circ->p_chan) {
+ const int client_chan = channel_is_client(or_circ->p_chan);
+ if ((client_chan ||
+ (!connection_or_digest_is_known_relay(
+ or_circ->p_chan->identity_digest) &&
+ should_refuse_unknown_exits(options)))) {
+ /* Don't let clients use us as a single-hop proxy. It attracts
+ * attackers and users who'd be better off with, well, single-hop
+ * proxies. */
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Attempt by %s to open a stream %s. Closing.",
+ safe_str(channel_get_canonical_remote_descr(or_circ->p_chan)),
+ client_chan ? "on first hop of circuit" :
+ "from unknown relay");
+ relay_send_end_cell_from_edge(rh.stream_id, circ,
+ client_chan ?
+ END_STREAM_REASON_TORPROTOCOL :
+ END_STREAM_REASON_MISC,
+ NULL);
+ tor_free(address);
+ return 0;
+ }
+ }
+ } else if (rh.command == RELAY_COMMAND_BEGIN_DIR) {
+ if (!directory_permits_begindir_requests(options) ||
+ circ->purpose != CIRCUIT_PURPOSE_OR) {
+ relay_send_end_cell_from_edge(rh.stream_id, circ,
+ END_STREAM_REASON_NOTDIRECTORY, layer_hint);
+ return 0;
+ }
+ /* Make sure to get the 'real' address of the previous hop: the
+ * caller might want to know whether the remote IP address has changed,
+ * and we might already have corrected base_.addr[ess] for the relay's
+ * canonical IP address. */
+ if (or_circ && or_circ->p_chan)
+ address = tor_strdup(channel_get_actual_remote_address(or_circ->p_chan));
+ else
+ address = tor_strdup("127.0.0.1");
+ port = 1; /* XXXX This value is never actually used anywhere, and there
+ * isn't "really" a connection here. But we
+ * need to set it to something nonzero. */
+ } else {
+ log_warn(LD_BUG, "Got an unexpected command %d", (int)rh.command);
+ relay_send_end_cell_from_edge(rh.stream_id, circ,
+ END_STREAM_REASON_INTERNAL, layer_hint);
+ return 0;
+ }
+
+ if (! options->IPv6Exit) {
+ /* I don't care if you prefer IPv6; I can't give you any. */
+ bcell.flags &= ~BEGIN_FLAG_IPV6_PREFERRED;
+ /* If you don't want IPv4, I can't help. */
+ if (bcell.flags & BEGIN_FLAG_IPV4_NOT_OK) {
+ tor_free(address);
+ relay_send_end_cell_from_edge(rh.stream_id, circ,
+ END_STREAM_REASON_EXITPOLICY, layer_hint);
+ return 0;
+ }
+ }
+
+ log_debug(LD_EXIT,"Creating new exit connection.");
+ /* The 'AF_INET' here is temporary; we might need to change it later in
+ * connection_exit_connect(). */
+ n_stream = edge_connection_new(CONN_TYPE_EXIT, AF_INET);
+
+ /* Remember the tunneled request ID in the new edge connection, so that
+ * we can measure download times. */
+ n_stream->dirreq_id = circ->dirreq_id;
+
+ n_stream->base_.purpose = EXIT_PURPOSE_CONNECT;
+ n_stream->begincell_flags = bcell.flags;
+ n_stream->stream_id = rh.stream_id;
+ n_stream->base_.port = port;
+ /* leave n_stream->s at -1, because it's not yet valid */
+ n_stream->package_window = STREAMWINDOW_START;
+ n_stream->deliver_window = STREAMWINDOW_START;
+
+ if (circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED) {
+ int ret;
+ tor_free(address);
+    /* We handle this circuit and stream in this function for all supported
+     * hidden service versions. */
+ ret = handle_hs_exit_conn(circ, n_stream);
+
+ if (ret == 0) {
+ /* This was a valid cell. Count it as delivered + overhead. */
+ circuit_read_valid_data(origin_circ, rh.length);
+ }
+ return ret;
+ }
+ tor_strlower(address);
+ n_stream->base_.address = address;
+ n_stream->base_.state = EXIT_CONN_STATE_RESOLVEFAILED;
+ /* default to failed, change in dns_resolve if it turns out not to fail */
+
+ /* If we're hibernating or shutting down, we refuse to open new streams. */
+ if (we_are_hibernating()) {
+ relay_send_end_cell_from_edge(rh.stream_id, circ,
+ END_STREAM_REASON_HIBERNATING, NULL);
+ connection_free_(TO_CONN(n_stream));
+ return 0;
+ }
+
+ n_stream->on_circuit = circ;
+
+ if (rh.command == RELAY_COMMAND_BEGIN_DIR) {
+ tor_addr_t tmp_addr;
+ tor_assert(or_circ);
+ if (or_circ->p_chan &&
+ channel_get_addr_if_possible(or_circ->p_chan, &tmp_addr)) {
+ tor_addr_copy(&n_stream->base_.addr, &tmp_addr);
+ }
+ return connection_exit_connect_dir(n_stream);
+ }
+
+ log_debug(LD_EXIT,"about to start the dns_resolve().");
+
+ /* send it off to the gethostbyname farm */
+ switch (dns_resolve(n_stream)) {
+ case 1: /* resolve worked; now n_stream is attached to circ. */
+ assert_circuit_ok(circ);
+ log_debug(LD_EXIT,"about to call connection_exit_connect().");
+ connection_exit_connect(n_stream);
+ return 0;
+ case -1: /* resolve failed */
+ relay_send_end_cell_from_edge(rh.stream_id, circ,
+ END_STREAM_REASON_RESOLVEFAILED, NULL);
+ /* n_stream got freed. don't touch it. */
+ break;
+ case 0: /* resolve added to pending list */
+ assert_circuit_ok(circ);
+ break;
+ }
+ return 0;
+}
+
+/**
+ * Called when we receive a RELAY_COMMAND_RESOLVE cell <b>cell</b> along the
+ * circuit <b>circ</b>: begin resolving the hostname, and (eventually) reply
+ * with a RESOLVED cell.
+ */
+int
+connection_exit_begin_resolve(cell_t *cell, or_circuit_t *circ)
+{
+ edge_connection_t *dummy_conn;
+ relay_header_t rh;
+
+ assert_circuit_ok(TO_CIRCUIT(circ));
+ relay_header_unpack(&rh, cell->payload);
+ if (rh.length > RELAY_PAYLOAD_SIZE)
+ return -1;
+
+ /* This 'dummy_conn' only exists to remember the stream ID
+ * associated with the resolve request; and to make the
+ * implementation of dns.c more uniform. (We really only need to
+ * remember the circuit, the stream ID, and the hostname to be
+ * resolved; but if we didn't store them in a connection like this,
+ * the housekeeping in dns.c would get way more complicated.)
+ */
+ dummy_conn = edge_connection_new(CONN_TYPE_EXIT, AF_INET);
+ dummy_conn->stream_id = rh.stream_id;
+ dummy_conn->base_.address = tor_strndup(
+ (char*)cell->payload+RELAY_HEADER_SIZE,
+ rh.length);
+ dummy_conn->base_.port = 0;
+ dummy_conn->base_.state = EXIT_CONN_STATE_RESOLVEFAILED;
+ dummy_conn->base_.purpose = EXIT_PURPOSE_RESOLVE;
+
+ dummy_conn->on_circuit = TO_CIRCUIT(circ);
+
+ /* send it off to the gethostbyname farm */
+ switch (dns_resolve(dummy_conn)) {
+ case -1: /* Impossible to resolve; a resolved cell was sent. */
+ /* Connection freed; don't touch it. */
+ return 0;
+ case 1: /* The result was cached; a resolved cell was sent. */
+ if (!dummy_conn->base_.marked_for_close)
+ connection_free_(TO_CONN(dummy_conn));
+ return 0;
+ case 0: /* resolve added to pending list */
+ assert_circuit_ok(TO_CIRCUIT(circ));
+ break;
+ }
+ return 0;
+}
+
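+/* Editor's note: both switch statements above rely on the same dns_resolve()
+ * contract: 1 means the answer was available and handled synchronously, 0
+ * means the request was queued on the pending list, and -1 means resolution
+ * failed and the connection has already been dealt with -- hence the "don't
+ * touch it" caveats. */
+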
+/** Helper: Return true and set *<b>why_rejected</b> to an optional clarifying
+ * message iff we do not allow connections to <b>addr</b>:<b>port</b>.
+ */
+static int
+my_exit_policy_rejects(const tor_addr_t *addr,
+ uint16_t port,
+ const char **why_rejected)
+{
+ if (router_compare_to_my_exit_policy(addr, port)) {
+ *why_rejected = "";
+ return 1;
+ } else if (tor_addr_family(addr) == AF_INET6 && !get_options()->IPv6Exit) {
+ *why_rejected = " (IPv6 address without IPv6Exit configured)";
+ return 1;
+ }
+ return 0;
+}
+
+/** Connect to conn's specified addr and port. If it worked, conn
+ * has now been added to the connection_array.
+ *
+ * Send back a connected cell. Include the resolved IP of the destination
+ * address, but <em>only</em> if it's a general exit stream. (Rendezvous
+ * streams must not reveal what IP they connected to.)
+ */
+void
+connection_exit_connect(edge_connection_t *edge_conn)
+{
+ const tor_addr_t *addr;
+ uint16_t port;
+ connection_t *conn = TO_CONN(edge_conn);
+ int socket_error = 0, result;
+ const char *why_failed_exit_policy = NULL;
+
+ /* Apply exit policy to non-rendezvous connections. */
+ if (! connection_edge_is_rendezvous_stream(edge_conn) &&
+ my_exit_policy_rejects(&edge_conn->base_.addr,
+ edge_conn->base_.port,
+ &why_failed_exit_policy)) {
+ if (BUG(!why_failed_exit_policy))
+ why_failed_exit_policy = "";
+ log_info(LD_EXIT,"%s:%d failed exit policy%s. Closing.",
+ escaped_safe_str_client(conn->address), conn->port,
+ why_failed_exit_policy);
+ connection_edge_end(edge_conn, END_STREAM_REASON_EXITPOLICY);
+ circuit_detach_stream(circuit_get_by_edge_conn(edge_conn), edge_conn);
+ connection_free(conn);
+ return;
+ }
+
+#ifdef HAVE_SYS_UN_H
+ if (conn->socket_family != AF_UNIX) {
+#else
+ {
+#endif /* defined(HAVE_SYS_UN_H) */
+ addr = &conn->addr;
+ port = conn->port;
+
+ if (tor_addr_family(addr) == AF_INET6)
+ conn->socket_family = AF_INET6;
+
+ log_debug(LD_EXIT, "about to try connecting");
+ result = connection_connect(conn, conn->address,
+ addr, port, &socket_error);
+#ifdef HAVE_SYS_UN_H
+ } else {
+ /*
+ * In the AF_UNIX case, we expect to have already had conn->port = 1,
+ * tor_addr_make_unspec(conn->addr) (cf. the way we mark in the incoming
+ * case in connection_handle_listener_read()), and conn->address should
+ * have the socket path to connect to.
+ */
+ tor_assert(conn->address && strlen(conn->address) > 0);
+
+ log_debug(LD_EXIT, "about to try connecting");
+ result = connection_connect_unix(conn, conn->address, &socket_error);
+#endif /* defined(HAVE_SYS_UN_H) */
+ }
+
+ switch (result) {
+ case -1: {
+ int reason = errno_to_stream_end_reason(socket_error);
+ connection_edge_end(edge_conn, reason);
+ circuit_detach_stream(circuit_get_by_edge_conn(edge_conn), edge_conn);
+ connection_free(conn);
+ return;
+ }
+ case 0:
+ conn->state = EXIT_CONN_STATE_CONNECTING;
+
+ connection_watch_events(conn, READ_EVENT | WRITE_EVENT);
+ /* writable indicates finish;
+ * readable/error indicates broken link in windows-land. */
+ return;
+ /* case 1: fall through */
+ }
+
+ conn->state = EXIT_CONN_STATE_OPEN;
+ if (connection_get_outbuf_len(conn)) {
+ /* in case there are any queued data cells, from e.g. optimistic data */
+ connection_watch_events(conn, READ_EVENT|WRITE_EVENT);
+ } else {
+ connection_watch_events(conn, READ_EVENT);
+ }
+
+ /* also, deliver a 'connected' cell back through the circuit. */
+ if (connection_edge_is_rendezvous_stream(edge_conn)) {
+ /* don't send an address back! */
+ connection_edge_send_command(edge_conn,
+ RELAY_COMMAND_CONNECTED,
+ NULL, 0);
+ } else { /* normal stream */
+ uint8_t connected_payload[MAX_CONNECTED_CELL_PAYLOAD_LEN];
+ int connected_payload_len =
+ connected_cell_format_payload(connected_payload, &conn->addr,
+ edge_conn->address_ttl);
+ if (connected_payload_len < 0) {
+ connection_edge_end(edge_conn, END_STREAM_REASON_INTERNAL);
+ circuit_detach_stream(circuit_get_by_edge_conn(edge_conn), edge_conn);
+ connection_free(conn);
+ return;
+ }
+
+ connection_edge_send_command(edge_conn,
+ RELAY_COMMAND_CONNECTED,
+ (char*)connected_payload,
+ connected_payload_len);
+ }
+}
+
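+/* For reference (editor's addition): the CONNECTED payload built by
+ * connected_cell_format_payload() is either
+ *   IPv4:  [4-byte address][4-byte TTL]                         (8 bytes)
+ *   IPv6:  [4 zero bytes][type=6][16-byte address][4-byte TTL]  (25 bytes)
+ * A rough decoder, with hypothetical names, might read: */
+#if 0
+static int
+example_parse_connected_payload(const uint8_t *p, size_t len,
+                                tor_addr_t *addr_out, uint32_t *ttl_out)
+{
+  if (len == 8) {                       /* IPv4 form. */
+    tor_addr_from_ipv4n(addr_out, get_uint32(p));
+    *ttl_out = ntohl(get_uint32(p + 4));
+    return 0;
+  } else if (len == 25 && get_uint32(p) == 0 && p[4] == 6) { /* IPv6 form. */
+    tor_addr_from_ipv6_bytes(addr_out, (const char *)(p + 5));
+    *ttl_out = ntohl(get_uint32(p + 21));
+    return 0;
+  }
+  return -1;
+}
+#endif /* illustrative only */
+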
+/** Given an exit conn that should attach to us as a directory server, open a
+ * bridge connection with a linked connection pair, create a new directory
+ * conn, and join them together. Return 0 on success (or if the error was
+ * one we could send back an end cell for). Return -(some circuit end
+ * reason) if the circuit needs to be torn down. Either connects
+ * <b>exitconn</b>, frees it, or marks it, as appropriate.
+ */
+static int
+connection_exit_connect_dir(edge_connection_t *exitconn)
+{
+ dir_connection_t *dirconn = NULL;
+ or_circuit_t *circ = TO_OR_CIRCUIT(exitconn->on_circuit);
+
+ log_info(LD_EXIT, "Opening local connection for anonymized directory exit");
+
+ exitconn->base_.state = EXIT_CONN_STATE_OPEN;
+
+ dirconn = dir_connection_new(tor_addr_family(&exitconn->base_.addr));
+
+ tor_addr_copy(&dirconn->base_.addr, &exitconn->base_.addr);
+ dirconn->base_.port = 0;
+ dirconn->base_.address = tor_strdup(exitconn->base_.address);
+ dirconn->base_.type = CONN_TYPE_DIR;
+ dirconn->base_.purpose = DIR_PURPOSE_SERVER;
+ dirconn->base_.state = DIR_CONN_STATE_SERVER_COMMAND_WAIT;
+
+ /* Note that the new dir conn belongs to the same tunneled request as
+ * the edge conn, so that we can measure download times. */
+ dirconn->dirreq_id = exitconn->dirreq_id;
+
+ connection_link_connections(TO_CONN(dirconn), TO_CONN(exitconn));
+
+ if (connection_add(TO_CONN(exitconn))<0) {
+ connection_edge_end(exitconn, END_STREAM_REASON_RESOURCELIMIT);
+ connection_free_(TO_CONN(exitconn));
+ connection_free_(TO_CONN(dirconn));
+ return 0;
+ }
+
+ /* link exitconn to circ, now that we know we can use it. */
+ exitconn->next_stream = circ->n_streams;
+ circ->n_streams = exitconn;
+
+ if (connection_add(TO_CONN(dirconn))<0) {
+ connection_edge_end(exitconn, END_STREAM_REASON_RESOURCELIMIT);
+ connection_close_immediate(TO_CONN(exitconn));
+ connection_mark_for_close(TO_CONN(exitconn));
+ connection_free_(TO_CONN(dirconn));
+ return 0;
+ }
+
+ connection_start_reading(TO_CONN(dirconn));
+ connection_start_reading(TO_CONN(exitconn));
+
+ if (connection_edge_send_command(exitconn,
+ RELAY_COMMAND_CONNECTED, NULL, 0) < 0) {
+ connection_mark_for_close(TO_CONN(exitconn));
+ connection_mark_for_close(TO_CONN(dirconn));
+ return 0;
+ }
+
+ return 0;
+}
+
+/** Return 1 if <b>conn</b> is a rendezvous stream, or 0 if
+ * it is a general stream.
+ */
+int
+connection_edge_is_rendezvous_stream(const edge_connection_t *conn)
+{
+ tor_assert(conn);
+ /* It should not be possible to set both of these structs */
+ tor_assert_nonfatal(!(conn->rend_data && conn->hs_ident));
+
+ if (conn->rend_data || conn->hs_ident) {
+ return 1;
+ }
+ return 0;
+}
+
+/** Return 1 if router <b>exit_node</b> is likely to allow stream <b>conn</b>
+ * to exit from it, or 0 if it probably will not allow it.
+ * (We might be uncertain if conn's destination address has not yet been
+ * resolved.)
+ */
+int
+connection_ap_can_use_exit(const entry_connection_t *conn,
+ const node_t *exit_node)
+{
+ const or_options_t *options = get_options();
+
+ tor_assert(conn);
+ tor_assert(conn->socks_request);
+ tor_assert(exit_node);
+
+ /* If a particular exit node has been requested for the new connection,
+ * make sure the exit node of the existing circuit matches exactly.
+ */
+ if (conn->chosen_exit_name) {
+ const node_t *chosen_exit =
+ node_get_by_nickname(conn->chosen_exit_name, 0);
+ if (!chosen_exit || tor_memneq(chosen_exit->identity,
+ exit_node->identity, DIGEST_LEN)) {
+ /* doesn't match */
+// log_debug(LD_APP,"Requested node '%s', considering node '%s'. No.",
+// conn->chosen_exit_name, exit->nickname);
+ return 0;
+ }
+ }
+
+ if (conn->use_begindir) {
+ /* Internal directory fetches do not count as exiting. */
+ return 1;
+ }
+
+ if (conn->socks_request->command == SOCKS_COMMAND_CONNECT) {
+ tor_addr_t addr, *addrp = NULL;
+ addr_policy_result_t r;
+ if (0 == tor_addr_parse(&addr, conn->socks_request->address)) {
+ addrp = &addr;
+ } else if (!conn->entry_cfg.ipv4_traffic && conn->entry_cfg.ipv6_traffic) {
+ tor_addr_make_null(&addr, AF_INET6);
+ addrp = &addr;
+ } else if (conn->entry_cfg.ipv4_traffic && !conn->entry_cfg.ipv6_traffic) {
+ tor_addr_make_null(&addr, AF_INET);
+ addrp = &addr;
+ }
+ r = compare_tor_addr_to_node_policy(addrp, conn->socks_request->port,
+ exit_node);
+ if (r == ADDR_POLICY_REJECTED)
+ return 0; /* We know the address, and the exit policy rejects it. */
+ if (r == ADDR_POLICY_PROBABLY_REJECTED && !conn->chosen_exit_name)
+ return 0; /* We don't know the addr, but the exit policy rejects most
+ * addresses with this port. Since the user didn't ask for
+ * this node, err on the side of caution. */
+ } else if (SOCKS_COMMAND_IS_RESOLVE(conn->socks_request->command)) {
+ /* Don't send DNS requests to non-exit servers by default. */
+ if (!conn->chosen_exit_name && node_exit_policy_rejects_all(exit_node))
+ return 0;
+ }
+ if (routerset_contains_node(options->ExcludeExitNodesUnion_, exit_node)) {
+ /* Not a suitable exit. Refuse it. */
+ return 0;
+ }
+
+ return 1;
+}
+
+/** If address is of the form "y.onion" with a well-formed handle y:
+ * Put a NUL after y, lower-case it, and return ONION_V2_HOSTNAME or
+ * ONION_V3_HOSTNAME depending on the HS version.
+ *
+ * If address is of the form "x.y.onion" with a well-formed handle x:
+ * Drop "x.", put a NUL after y, lower-case it, and return
+ * ONION_V2_HOSTNAME or ONION_V3_HOSTNAME depending on the HS version.
+ *
+ * If address is of the form "y.onion" with a badly-formed handle y:
+ * Return BAD_HOSTNAME and log a message.
+ *
+ * If address is of the form "y.exit":
+ * Put a NUL after y and return EXIT_HOSTNAME.
+ *
+ * Otherwise:
+ * Return NORMAL_HOSTNAME and change nothing.
+ */
+hostname_type_t
+parse_extended_hostname(char *address)
+{
+ char *s;
+ char *q;
+ char query[HS_SERVICE_ADDR_LEN_BASE32+1];
+
+ s = strrchr(address,'.');
+ if (!s)
+ return NORMAL_HOSTNAME; /* no dot, thus normal */
+ if (!strcmp(s+1,"exit")) {
+ *s = 0; /* NUL-terminate it */
+ return EXIT_HOSTNAME; /* .exit */
+ }
+ if (strcmp(s+1,"onion"))
+ return NORMAL_HOSTNAME; /* neither .exit nor .onion, thus normal */
+
+ /* so it is .onion */
+ *s = 0; /* NUL-terminate it */
+ /* locate a 'sub-domain' component, in order to remove it */
+ q = strrchr(address, '.');
+ if (q == address) {
+ goto failed; /* reject sub-domain, as DNS does */
+ }
+ q = (NULL == q) ? address : q + 1;
+ if (strlcpy(query, q, HS_SERVICE_ADDR_LEN_BASE32+1) >=
+ HS_SERVICE_ADDR_LEN_BASE32+1)
+ goto failed;
+ if (q != address) {
+ memmove(address, q, strlen(q) + 1 /* also get \0 */);
+ }
+ if (rend_valid_v2_service_id(query)) {
+ return ONION_V2_HOSTNAME; /* success */
+ }
+ if (hs_address_is_valid(query)) {
+ return ONION_V3_HOSTNAME;
+ }
+ failed:
+  /* otherwise, restore the '.' and report the hostname as bad */
+ *s = '.';
+ log_warn(LD_APP, "Invalid onion hostname %s; rejecting",
+ safe_str_client(address));
+ return BAD_HOSTNAME;
+}
+
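+/* Usage sketch (editor's addition): parse_extended_hostname() edits its
+ * argument in place, so callers hand it a writable buffer.  All names below
+ * are illustrative. */
+#if 0
+static void
+example_classify_address(void)
+{
+  char addr[MAX_SOCKS_ADDR_LEN];
+  strlcpy(addr, "abcdefghijklmnop.onion", sizeof(addr));
+  switch (parse_extended_hostname(addr)) {
+    case ONION_V2_HOSTNAME:  /* addr is now "abcdefghijklmnop". */
+    case ONION_V3_HOSTNAME:  /* addr would hold the 56-char base32 handle. */
+      break;
+    case EXIT_HOSTNAME:      /* "foo.exit" would leave addr == "foo". */
+    case NORMAL_HOSTNAME:    /* addr unchanged; resolve it normally. */
+      break;
+    case BAD_HOSTNAME:       /* Malformed .onion address; reject it. */
+      break;
+  }
+}
+#endif /* illustrative only */
+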
+/** Return true iff the (possibly NULL) <b>alen</b>-byte chunk of memory at
+ * <b>a</b> is equal to the (possibly NULL) <b>blen</b>-byte chunk of memory
+ * at <b>b</b>. */
+static int
+memeq_opt(const char *a, size_t alen, const char *b, size_t blen)
+{
+ if (a == NULL) {
+ return (b == NULL);
+ } else if (b == NULL) {
+ return 0;
+ } else if (alen != blen) {
+ return 0;
+ } else {
+ return tor_memeq(a, b, alen);
+ }
+}
+
+/**
+ * Return true iff none of the isolation flags and fields in <b>conn</b>
+ * should prevent it from being attached to <b>circ</b>.
+ */
+int
+connection_edge_compatible_with_circuit(const entry_connection_t *conn,
+ const origin_circuit_t *circ)
+{
+ const uint8_t iso = conn->entry_cfg.isolation_flags;
+ const socks_request_t *sr = conn->socks_request;
+
+ /* If circ has never been used for an isolated connection, we can
+ * totally use it for this one. */
+ if (!circ->isolation_values_set)
+ return 1;
+
+ /* If circ has been used for connections having more than one value
+ * for some field f, it will have the corresponding bit set in
+ * isolation_flags_mixed. If isolation_flags_mixed has any bits
+ * in common with iso, then conn must be isolated from at least
+ * one stream that has been attached to circ. */
+ if ((iso & circ->isolation_flags_mixed) != 0) {
+ /* For at least one field where conn is isolated, the circuit
+ * already has mixed streams. */
+ return 0;
+ }
+
+ if (! conn->original_dest_address) {
+ log_warn(LD_BUG, "Reached connection_edge_compatible_with_circuit without "
+ "having set conn->original_dest_address");
+ ((entry_connection_t*)conn)->original_dest_address =
+ tor_strdup(conn->socks_request->address);
+ }
+
+ if ((iso & ISO_STREAM) &&
+ (circ->associated_isolated_stream_global_id !=
+ ENTRY_TO_CONN(conn)->global_identifier))
+ return 0;
+
+ if ((iso & ISO_DESTPORT) && conn->socks_request->port != circ->dest_port)
+ return 0;
+ if ((iso & ISO_DESTADDR) &&
+ strcasecmp(conn->original_dest_address, circ->dest_address))
+ return 0;
+ if ((iso & ISO_SOCKSAUTH) &&
+ (! memeq_opt(sr->username, sr->usernamelen,
+ circ->socks_username, circ->socks_username_len) ||
+ ! memeq_opt(sr->password, sr->passwordlen,
+ circ->socks_password, circ->socks_password_len)))
+ return 0;
+ if ((iso & ISO_CLIENTPROTO) &&
+ (conn->socks_request->listener_type != circ->client_proto_type ||
+ conn->socks_request->socks_version != circ->client_proto_socksver))
+ return 0;
+ if ((iso & ISO_CLIENTADDR) &&
+ !tor_addr_eq(&ENTRY_TO_CONN(conn)->addr, &circ->client_addr))
+ return 0;
+ if ((iso & ISO_SESSIONGRP) &&
+ conn->entry_cfg.session_group != circ->session_group)
+ return 0;
+ if ((iso & ISO_NYM_EPOCH) && conn->nym_epoch != circ->nym_epoch)
+ return 0;
+
+ return 1;
+}
+
+/**
+ * If <b>dry_run</b> is false, update <b>circ</b>'s isolation flags and fields
+ * to reflect having had <b>conn</b> attached to it, and return 0. Otherwise,
+ * if <b>dry_run</b> is true, then make no changes to <b>circ</b>, and return
+ * a bitfield of isolation flags that we would have to set in
+ * isolation_flags_mixed to add <b>conn</b> to <b>circ</b>, or -1 if
+ * <b>circ</b> has had no streams attached to it.
+ */
+int
+connection_edge_update_circuit_isolation(const entry_connection_t *conn,
+ origin_circuit_t *circ,
+ int dry_run)
+{
+ const socks_request_t *sr = conn->socks_request;
+ if (! conn->original_dest_address) {
+ log_warn(LD_BUG, "Reached connection_update_circuit_isolation without "
+ "having set conn->original_dest_address");
+ ((entry_connection_t*)conn)->original_dest_address =
+ tor_strdup(conn->socks_request->address);
+ }
+
+ if (!circ->isolation_values_set) {
+ if (dry_run)
+ return -1;
+ circ->associated_isolated_stream_global_id =
+ ENTRY_TO_CONN(conn)->global_identifier;
+ circ->dest_port = conn->socks_request->port;
+ circ->dest_address = tor_strdup(conn->original_dest_address);
+ circ->client_proto_type = conn->socks_request->listener_type;
+ circ->client_proto_socksver = conn->socks_request->socks_version;
+ tor_addr_copy(&circ->client_addr, &ENTRY_TO_CONN(conn)->addr);
+ circ->session_group = conn->entry_cfg.session_group;
+ circ->nym_epoch = conn->nym_epoch;
+ circ->socks_username = sr->username ?
+ tor_memdup(sr->username, sr->usernamelen) : NULL;
+ circ->socks_password = sr->password ?
+ tor_memdup(sr->password, sr->passwordlen) : NULL;
+ circ->socks_username_len = sr->usernamelen;
+ circ->socks_password_len = sr->passwordlen;
+
+ circ->isolation_values_set = 1;
+ return 0;
+ } else {
+ uint8_t mixed = 0;
+ if (conn->socks_request->port != circ->dest_port)
+ mixed |= ISO_DESTPORT;
+ if (strcasecmp(conn->original_dest_address, circ->dest_address))
+ mixed |= ISO_DESTADDR;
+ if (!memeq_opt(sr->username, sr->usernamelen,
+ circ->socks_username, circ->socks_username_len) ||
+ !memeq_opt(sr->password, sr->passwordlen,
+ circ->socks_password, circ->socks_password_len))
+ mixed |= ISO_SOCKSAUTH;
+ if ((conn->socks_request->listener_type != circ->client_proto_type ||
+ conn->socks_request->socks_version != circ->client_proto_socksver))
+ mixed |= ISO_CLIENTPROTO;
+ if (!tor_addr_eq(&ENTRY_TO_CONN(conn)->addr, &circ->client_addr))
+ mixed |= ISO_CLIENTADDR;
+ if (conn->entry_cfg.session_group != circ->session_group)
+ mixed |= ISO_SESSIONGRP;
+ if (conn->nym_epoch != circ->nym_epoch)
+ mixed |= ISO_NYM_EPOCH;
+
+ if (dry_run)
+ return mixed;
+
+ if ((mixed & conn->entry_cfg.isolation_flags) != 0) {
+ log_warn(LD_BUG, "Updating a circuit with seemingly incompatible "
+ "isolation flags.");
+ }
+ circ->isolation_flags_mixed |= mixed;
+ return 0;
+ }
+}
+
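+/* Usage sketch (editor's addition): the dry_run form lets a caller ask what
+ * attaching a stream would cost in isolation terms before committing.
+ * Hypothetical helper, not part of Tor: */
+#if 0
+static void
+example_try_attach(const entry_connection_t *conn, origin_circuit_t *circ)
+{
+  int mixed = connection_edge_update_circuit_isolation(conn, circ, 1);
+  if (mixed == -1) {
+    /* circ has never carried a stream; it can take conn as-is. */
+  } else if ((mixed & conn->entry_cfg.isolation_flags) == 0) {
+    /* Compatible: commit the update for real this time. */
+    connection_edge_update_circuit_isolation(conn, circ, 0);
+  } /* else: conn must be attached to some other circuit. */
+}
+#endif /* illustrative only */
+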
+/**
+ * Clear the isolation settings on <b>circ</b>.
+ *
+ * This only works on an open circuit that has never had a stream attached to
+ * it, and whose isolation settings are hypothetical. (We set hypothetical
+ * isolation settings on circuits as we're launching them, so that we
+ * know whether they can handle more streams or whether we need to launch
+ * even more circuits. Once the circuit is open, if it turns out that
+ * we no longer have any streams to attach to it, we clear the isolation flags
+ * and data so that other streams can have a chance.)
+ */
+void
+circuit_clear_isolation(origin_circuit_t *circ)
+{
+ if (circ->isolation_any_streams_attached) {
+ log_warn(LD_BUG, "Tried to clear the isolation status of a dirty circuit");
+ return;
+ }
+ if (TO_CIRCUIT(circ)->state != CIRCUIT_STATE_OPEN) {
+ log_warn(LD_BUG, "Tried to clear the isolation status of a non-open "
+ "circuit");
+ return;
+ }
+
+ circ->isolation_values_set = 0;
+ circ->isolation_flags_mixed = 0;
+ circ->associated_isolated_stream_global_id = 0;
+ circ->client_proto_type = 0;
+ circ->client_proto_socksver = 0;
+ circ->dest_port = 0;
+ tor_addr_make_unspec(&circ->client_addr);
+ tor_free(circ->dest_address);
+ circ->session_group = -1;
+ circ->nym_epoch = 0;
+ if (circ->socks_username) {
+ memwipe(circ->socks_username, 0x11, circ->socks_username_len);
+ tor_free(circ->socks_username);
+ }
+ if (circ->socks_password) {
+ memwipe(circ->socks_password, 0x05, circ->socks_password_len);
+ tor_free(circ->socks_password);
+ }
+ circ->socks_username_len = circ->socks_password_len = 0;
+}
+
+/** Free all storage held in module-scoped variables for connection_edge.c */
+void
+connection_edge_free_all(void)
+{
+ untried_pending_connections = 0;
+ smartlist_free(pending_entry_connections);
+ pending_entry_connections = NULL;
+ mainloop_event_free(attach_pending_entry_connections_ev);
+}
diff --git a/src/core/or/connection_edge.h b/src/core/or/connection_edge.h
new file mode 100644
index 0000000000..24968b2778
--- /dev/null
+++ b/src/core/or/connection_edge.h
@@ -0,0 +1,248 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file connection_edge.h
+ * \brief Header file for connection_edge.c.
+ **/
+
+#ifndef TOR_CONNECTION_EDGE_H
+#define TOR_CONNECTION_EDGE_H
+
+#include "lib/testsupport/testsupport.h"
+
+edge_connection_t *TO_EDGE_CONN(connection_t *);
+entry_connection_t *TO_ENTRY_CONN(connection_t *);
+entry_connection_t *EDGE_TO_ENTRY_CONN(edge_connection_t *);
+
+#define EXIT_CONN_STATE_MIN_ 1
+/** State for an exit connection: waiting for response from DNS farm. */
+#define EXIT_CONN_STATE_RESOLVING 1
+/** State for an exit connection: waiting for connect() to finish. */
+#define EXIT_CONN_STATE_CONNECTING 2
+/** State for an exit connection: open and ready to transmit data. */
+#define EXIT_CONN_STATE_OPEN 3
+/** State for an exit connection: waiting to be removed. */
+#define EXIT_CONN_STATE_RESOLVEFAILED 4
+#define EXIT_CONN_STATE_MAX_ 4
+
+/* The AP state values must be disjoint from the EXIT state values. */
+#define AP_CONN_STATE_MIN_ 5
+/** State for a SOCKS connection: waiting for SOCKS request. */
+#define AP_CONN_STATE_SOCKS_WAIT 5
+/** State for a SOCKS connection: got a y.onion URL; waiting to receive
+ * rendezvous descriptor. */
+#define AP_CONN_STATE_RENDDESC_WAIT 6
+/** The controller will attach this connection to a circuit; it isn't our
+ * job to do so. */
+#define AP_CONN_STATE_CONTROLLER_WAIT 7
+/** State for a SOCKS connection: waiting for a completed circuit. */
+#define AP_CONN_STATE_CIRCUIT_WAIT 8
+/** State for a SOCKS connection: sent BEGIN, waiting for CONNECTED. */
+#define AP_CONN_STATE_CONNECT_WAIT 9
+/** State for a SOCKS connection: sent RESOLVE, waiting for RESOLVED. */
+#define AP_CONN_STATE_RESOLVE_WAIT 10
+/** State for a SOCKS connection: ready to send and receive. */
+#define AP_CONN_STATE_OPEN 11
+/** State for a transparent natd connection: waiting for original
+ * destination. */
+#define AP_CONN_STATE_NATD_WAIT 12
+/** State for an HTTP tunnel: waiting for an HTTP CONNECT command. */
+#define AP_CONN_STATE_HTTP_CONNECT_WAIT 13
+#define AP_CONN_STATE_MAX_ 13
+
+#define EXIT_PURPOSE_MIN_ 1
+/** This exit stream wants to do an ordinary connect. */
+#define EXIT_PURPOSE_CONNECT 1
+/** This exit stream wants to do a resolve (either normal or reverse). */
+#define EXIT_PURPOSE_RESOLVE 2
+#define EXIT_PURPOSE_MAX_ 2
+
+/** True iff the AP_CONN_STATE_* value <b>s</b> means that the corresponding
+ * edge connection is not attached to any circuit. */
+#define AP_CONN_STATE_IS_UNATTACHED(s) \
+ ((s) <= AP_CONN_STATE_CIRCUIT_WAIT || (s) == AP_CONN_STATE_NATD_WAIT)
+
+#define connection_mark_unattached_ap(conn, endreason) \
+ connection_mark_unattached_ap_((conn), (endreason), __LINE__, SHORT_FILE__)
+
+MOCK_DECL(void,connection_mark_unattached_ap_,
+ (entry_connection_t *conn, int endreason,
+ int line, const char *file));
+int connection_edge_reached_eof(edge_connection_t *conn);
+int connection_edge_process_inbuf(edge_connection_t *conn,
+ int package_partial);
+int connection_edge_destroy(circid_t circ_id, edge_connection_t *conn);
+int connection_edge_end(edge_connection_t *conn, uint8_t reason);
+int connection_edge_end_errno(edge_connection_t *conn);
+int connection_edge_flushed_some(edge_connection_t *conn);
+int connection_edge_finished_flushing(edge_connection_t *conn);
+int connection_edge_finished_connecting(edge_connection_t *conn);
+
+void connection_ap_about_to_close(entry_connection_t *edge_conn);
+void connection_exit_about_to_close(edge_connection_t *edge_conn);
+
+MOCK_DECL(int,
+ connection_ap_handshake_send_begin,(entry_connection_t *ap_conn));
+int connection_ap_handshake_send_resolve(entry_connection_t *ap_conn);
+
+entry_connection_t *connection_ap_make_link(connection_t *partner,
+ char *address, uint16_t port,
+ const char *digest,
+ int session_group,
+ int isolation_flags,
+ int use_begindir, int want_onehop);
+void connection_ap_handshake_socks_reply(entry_connection_t *conn, char *reply,
+ size_t replylen,
+ int endreason);
+MOCK_DECL(void,connection_ap_handshake_socks_resolved,
+ (entry_connection_t *conn,
+ int answer_type,
+ size_t answer_len,
+ const uint8_t *answer,
+ int ttl,
+ time_t expires));
+void connection_ap_handshake_socks_resolved_addr(entry_connection_t *conn,
+ const tor_addr_t *answer,
+ int ttl,
+ time_t expires);
+
+int connection_exit_begin_conn(cell_t *cell, circuit_t *circ);
+int connection_exit_begin_resolve(cell_t *cell, or_circuit_t *circ);
+void connection_exit_connect(edge_connection_t *conn);
+int connection_edge_is_rendezvous_stream(const edge_connection_t *conn);
+int connection_ap_can_use_exit(const entry_connection_t *conn,
+ const node_t *exit);
+void connection_ap_expire_beginning(void);
+void connection_ap_rescan_and_attach_pending(void);
+void connection_ap_attach_pending(int retry);
+void connection_ap_mark_as_pending_circuit_(entry_connection_t *entry_conn,
+ const char *file, int line);
+#define connection_ap_mark_as_pending_circuit(c) \
+ connection_ap_mark_as_pending_circuit_((c), __FILE__, __LINE__)
+void connection_ap_mark_as_non_pending_circuit(entry_connection_t *entry_conn);
+#define CONNECTION_AP_EXPECT_NONPENDING(c) do { \
+ if (ENTRY_TO_CONN(c)->state == AP_CONN_STATE_CIRCUIT_WAIT) { \
+ log_warn(LD_BUG, "At %s:%d: %p was unexpectedly in circuit_wait.", \
+ __FILE__, __LINE__, (c)); \
+ connection_ap_mark_as_non_pending_circuit(c); \
+ } \
+ } while (0)
+void connection_ap_fail_onehop(const char *failed_digest,
+ cpath_build_state_t *build_state);
+void circuit_discard_optional_exit_enclaves(extend_info_t *info);
+int connection_ap_detach_retriable(entry_connection_t *conn,
+ origin_circuit_t *circ,
+ int reason);
+int connection_ap_process_transparent(entry_connection_t *conn);
+
+int address_is_invalid_destination(const char *address, int client);
+
+MOCK_DECL(int, connection_ap_rewrite_and_attach_if_allowed,
+ (entry_connection_t *conn,
+ origin_circuit_t *circ,
+ crypt_path_t *cpath));
+int connection_ap_handshake_rewrite_and_attach(entry_connection_t *conn,
+ origin_circuit_t *circ,
+ crypt_path_t *cpath);
+
+/** Possible return values for parse_extended_hostname. */
+typedef enum hostname_type_t {
+ NORMAL_HOSTNAME, ONION_V2_HOSTNAME, ONION_V3_HOSTNAME,
+ EXIT_HOSTNAME, BAD_HOSTNAME
+} hostname_type_t;
+hostname_type_t parse_extended_hostname(char *address);
+
+#if defined(HAVE_NET_IF_H) && defined(HAVE_NET_PFVAR_H)
+int get_pf_socket(void);
+#endif
+
+int connection_edge_compatible_with_circuit(const entry_connection_t *conn,
+ const origin_circuit_t *circ);
+int connection_edge_update_circuit_isolation(const entry_connection_t *conn,
+ origin_circuit_t *circ,
+ int dry_run);
+void circuit_clear_isolation(origin_circuit_t *circ);
+streamid_t get_unique_stream_id_by_circ(origin_circuit_t *circ);
+
+void connection_edge_free_all(void);
+
+void connection_ap_warn_and_unmark_if_pending_circ(
+ entry_connection_t *entry_conn,
+ const char *where);
+
+/** @name Begin-cell flags
+ *
+ * These flags are used in RELAY_BEGIN cells to change the default behavior
+ * of the cell.
+ *
+ * @{
+ **/
+/** When this flag is set, the client is willing to get connected to IPv6
+ * addresses */
+#define BEGIN_FLAG_IPV6_OK (1u<<0)
+/** When this flag is set, the client DOES NOT support connecting to IPv4
+ * addresses. (The sense of this flag is inverted from IPV6_OK, so that the
+ * old default behavior of Tor is equivalent to having all flags set to 0.)
+ **/
+#define BEGIN_FLAG_IPV4_NOT_OK (1u<<1)
+/** When this flag is set, if we find both an IPv4 and an IPv6 address,
+ * we use the IPv6 address. Otherwise we use the IPv4 address. */
+#define BEGIN_FLAG_IPV6_PREFERRED (1u<<2)
+/**@}*/
+
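+/* Example (editor's note): a client that accepts either address family but
+ * prefers IPv6 sends BEGIN_FLAG_IPV6_OK | BEGIN_FLAG_IPV6_PREFERRED (== 5);
+ * an IPv6-only client sends BEGIN_FLAG_IPV6_OK | BEGIN_FLAG_IPV4_NOT_OK
+ * (== 3); flags of 0 reproduce the historical IPv4-only behavior. */
+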
+#ifdef CONNECTION_EDGE_PRIVATE
+
+/** A parsed BEGIN or BEGIN_DIR cell */
+typedef struct begin_cell_t {
+ /** The address the client has asked us to connect to, or NULL if this is
+   * a BEGIN_DIR cell. */
+ char *address;
+ /** The flags specified in the BEGIN cell's body. One or more of
+ * BEGIN_FLAG_*. */
+ uint32_t flags;
+ /** The client's requested port. */
+ uint16_t port;
+ /** The client's requested Stream ID */
+ uint16_t stream_id;
+ /** True iff this is a BEGIN_DIR cell. */
+ unsigned is_begindir : 1;
+} begin_cell_t;
+
+STATIC int begin_cell_parse(const cell_t *cell, begin_cell_t *bcell,
+ uint8_t *end_reason_out);
+STATIC int connected_cell_format_payload(uint8_t *payload_out,
+ const tor_addr_t *addr,
+ uint32_t ttl);
+
+typedef struct {
+ /** Original address, after we lowercased it but before we started
+ * mapping it.
+ */
+ char orig_address[MAX_SOCKS_ADDR_LEN];
+ /** True iff the address has been automatically remapped to a local
+ * address in VirtualAddrNetwork. (Only set true when we do a resolve
+ * and get a virtual address; not when we connect to the address.) */
+ int automap;
+ /** If this connection has a .exit address, who put it there? */
+ addressmap_entry_source_t exit_source;
+ /** If we've rewritten the address, when does this map expire? */
+ time_t map_expires;
+ /** If we should close the connection, this is the end_reason to pass
+ * to connection_mark_unattached_ap */
+ int end_reason;
+ /** True iff we should close the connection, either because of error or
+ * because of successful early RESOLVED reply. */
+ int should_close;
+} rewrite_result_t;
+
+STATIC void connection_ap_handshake_rewrite(entry_connection_t *conn,
+ rewrite_result_t *out);
+
+STATIC int connection_ap_process_http_connect(entry_connection_t *conn);
+#endif /* defined(CONNECTION_EDGE_PRIVATE) */
+
+#endif /* !defined(TOR_CONNECTION_EDGE_H) */
diff --git a/src/core/or/connection_or.c b/src/core/or/connection_or.c
new file mode 100644
index 0000000000..71ab905942
--- /dev/null
+++ b/src/core/or/connection_or.c
@@ -0,0 +1,2996 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file connection_or.c
+ * \brief Functions to handle OR connections, TLS handshaking, and
+ * cells on the network.
+ *
+ * An or_connection_t is a subtype of connection_t (as implemented in
+ * connection.c) that uses a TLS connection to send and receive cells on the
+ * Tor network. (By sending and receiving cells, connection_or.c cooperates
+ * with channeltls.c to implement the channel interface of channel.c.)
+ *
+ * Every OR connection has an underlying tortls_t object (as implemented in
+ * tortls.c) which it uses as its TLS stream. It is responsible for
+ * sending and receiving cells over that TLS.
+ *
+ * This module also implements the client side of the v3 Tor link handshake.
+ **/
+#include "or/or.h"
+#include "or/bridges.h"
+#include "lib/container/buffers.h"
+/*
+ * Define this so we get channel internal functions, since we're implementing
+ * part of a subclass (channel_tls_t).
+ */
+#define TOR_CHANNEL_INTERNAL_
+#define CONNECTION_OR_PRIVATE
+#include "or/channel.h"
+#include "or/channeltls.h"
+#include "or/circuitbuild.h"
+#include "or/circuitlist.h"
+#include "or/circuitstats.h"
+#include "or/command.h"
+#include "or/config.h"
+#include "or/connection.h"
+#include "or/connection_or.h"
+#include "or/control.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "lib/crypt_ops/crypto_util.h"
+#include "or/dirserv.h"
+#include "or/entrynodes.h"
+#include "or/geoip.h"
+#include "or/main.h"
+#include "trunnel/link_handshake.h"
+#include "or/microdesc.h"
+#include "or/networkstatus.h"
+#include "or/nodelist.h"
+#include "or/proto_cell.h"
+#include "or/reasons.h"
+#include "or/relay.h"
+#include "or/rephist.h"
+#include "or/router.h"
+#include "or/routerkeys.h"
+#include "or/routerlist.h"
+#include "or/ext_orport.h"
+#include "or/scheduler.h"
+#include "or/torcert.h"
+#include "or/channelpadding.h"
+
+#include "or/cell_st.h"
+#include "or/cell_queue_st.h"
+#include "or/or_connection_st.h"
+#include "or/or_handshake_certs_st.h"
+#include "or/or_handshake_state_st.h"
+#include "or/or_state_st.h"
+#include "or/routerinfo_st.h"
+#include "or/var_cell_st.h"
+#include "lib/crypt_ops/crypto_format.h"
+
+#include "lib/tls/tortls.h"
+
+static int connection_tls_finish_handshake(or_connection_t *conn);
+static int connection_or_launch_v3_or_handshake(or_connection_t *conn);
+static int connection_or_process_cells_from_inbuf(or_connection_t *conn);
+static int connection_or_check_valid_tls_handshake(or_connection_t *conn,
+ int started_here,
+ char *digest_rcvd_out);
+
+static void connection_or_tls_renegotiated_cb(tor_tls_t *tls, void *_conn);
+
+static unsigned int
+connection_or_is_bad_for_new_circs(or_connection_t *or_conn);
+static void connection_or_mark_bad_for_new_circs(or_connection_t *or_conn);
+
+/*
+ * Call this when changing connection state, so notifications to the owning
+ * channel can be handled.
+ */
+
+static void connection_or_change_state(or_connection_t *conn, uint8_t state);
+
+static void connection_or_check_canonicity(or_connection_t *conn,
+ int started_here);
+
+/**************************************************************/
+
+/** Convert a connection_t* to an or_connection_t*; assert if the cast is
+ * invalid. */
+or_connection_t *
+TO_OR_CONN(connection_t *c)
+{
+ tor_assert(c->magic == OR_CONNECTION_MAGIC);
+ return DOWNCAST(or_connection_t, c);
+}
+
+/** Global map between Extended ORPort identifiers and OR
+ * connections. */
+static digestmap_t *orconn_ext_or_id_map = NULL;
+
+/** Clear conn->identity_digest and update other data
+ * structures as appropriate. */
+void
+connection_or_clear_identity(or_connection_t *conn)
+{
+ tor_assert(conn);
+ memset(conn->identity_digest, 0, DIGEST_LEN);
+}
+
+/** Clear all identities in OR conns.*/
+void
+connection_or_clear_identity_map(void)
+{
+ smartlist_t *conns = get_connection_array();
+ SMARTLIST_FOREACH(conns, connection_t *, conn,
+ {
+ if (conn->type == CONN_TYPE_OR) {
+ connection_or_clear_identity(TO_OR_CONN(conn));
+ }
+ });
+}
+
+/** Change conn->identity_digest to digest, and add conn into
+ * the appropriate digest maps.
+ *
+ * NOTE that this function only allows two kinds of transitions: from an
+ * unset identity to a set identity, and idempotent re-settings of the
+ * same identity. It is not allowed to clear an identity or to change an
+ * identity; we assert if a caller tries.
+ **/
+static void
+connection_or_set_identity_digest(or_connection_t *conn,
+ const char *rsa_digest,
+ const ed25519_public_key_t *ed_id)
+{
+ channel_t *chan = NULL;
+ tor_assert(conn);
+ tor_assert(rsa_digest);
+
+ if (conn->chan)
+ chan = TLS_CHAN_TO_BASE(conn->chan);
+
+ log_info(LD_HANDSHAKE, "Set identity digest for %p (%s): %s %s.",
+ conn,
+ escaped_safe_str(conn->base_.address),
+ hex_str(rsa_digest, DIGEST_LEN),
+ ed25519_fmt(ed_id));
+ log_info(LD_HANDSHAKE, " (Previously: %s %s)",
+ hex_str(conn->identity_digest, DIGEST_LEN),
+ chan ? ed25519_fmt(&chan->ed25519_identity) : "<null>");
+
+ const int rsa_id_was_set = ! tor_digest_is_zero(conn->identity_digest);
+ const int ed_id_was_set =
+ chan && !ed25519_public_key_is_zero(&chan->ed25519_identity);
+ const int rsa_changed =
+ tor_memneq(conn->identity_digest, rsa_digest, DIGEST_LEN);
+ const int ed_changed = ed_id_was_set &&
+ (!ed_id || !ed25519_pubkey_eq(ed_id, &chan->ed25519_identity));
+
+ tor_assert(!rsa_changed || !rsa_id_was_set);
+ tor_assert(!ed_changed || !ed_id_was_set);
+
+ if (!rsa_changed && !ed_changed)
+ return;
+
+ /* If the identity was set previously, remove the old mapping. */
+ if (rsa_id_was_set) {
+ connection_or_clear_identity(conn);
+ if (chan)
+ channel_clear_identity_digest(chan);
+ }
+
+ memcpy(conn->identity_digest, rsa_digest, DIGEST_LEN);
+
+ /* If we're initializing the IDs to zero, don't add a mapping yet. */
+ if (tor_digest_is_zero(rsa_digest) &&
+ (!ed_id || ed25519_public_key_is_zero(ed_id)))
+ return;
+
+ /* Deal with channels */
+ if (chan)
+ channel_set_identity_digest(chan, rsa_digest, ed_id);
+}
+
+/** Remove the Extended ORPort identifier of <b>conn</b> from the
+ * global identifier list. Also, clear the identifier from the
+ * connection itself. */
+void
+connection_or_remove_from_ext_or_id_map(or_connection_t *conn)
+{
+ or_connection_t *tmp;
+ if (!orconn_ext_or_id_map)
+ return;
+ if (!conn->ext_or_conn_id)
+ return;
+
+ tmp = digestmap_remove(orconn_ext_or_id_map, conn->ext_or_conn_id);
+ if (!tor_digest_is_zero(conn->ext_or_conn_id))
+ tor_assert(tmp == conn);
+
+ memset(conn->ext_or_conn_id, 0, EXT_OR_CONN_ID_LEN);
+}
+
+/** Return the connection whose ext_or_id is <b>id</b>. Return NULL if no such
+ * connection is found. */
+or_connection_t *
+connection_or_get_by_ext_or_id(const char *id)
+{
+ if (!orconn_ext_or_id_map)
+ return NULL;
+ return digestmap_get(orconn_ext_or_id_map, id);
+}
+
+/** Deallocate the global Extended ORPort identifier list */
+void
+connection_or_clear_ext_or_id_map(void)
+{
+ digestmap_free(orconn_ext_or_id_map, NULL);
+ orconn_ext_or_id_map = NULL;
+}
+
+/** Creates an Extended ORPort identifier for <b>conn</b> and deposits
+ * it into the global list of identifiers. */
+void
+connection_or_set_ext_or_identifier(or_connection_t *conn)
+{
+ char random_id[EXT_OR_CONN_ID_LEN];
+ or_connection_t *tmp;
+
+ if (!orconn_ext_or_id_map)
+ orconn_ext_or_id_map = digestmap_new();
+
+ /* Remove any previous identifiers: */
+ if (conn->ext_or_conn_id && !tor_digest_is_zero(conn->ext_or_conn_id))
+ connection_or_remove_from_ext_or_id_map(conn);
+
+ do {
+ crypto_rand(random_id, sizeof(random_id));
+ } while (digestmap_get(orconn_ext_or_id_map, random_id));
+
+ if (!conn->ext_or_conn_id)
+ conn->ext_or_conn_id = tor_malloc_zero(EXT_OR_CONN_ID_LEN);
+
+ memcpy(conn->ext_or_conn_id, random_id, EXT_OR_CONN_ID_LEN);
+
+ tmp = digestmap_set(orconn_ext_or_id_map, random_id, conn);
+ tor_assert(!tmp);
+}
+
+/**************************************************************/
+
+/** Map from a string describing what a non-open OR connection was doing when
+ * it failed, to an intptr_t describing the count of connections that failed
+ * that way. Note that the count is stored _as_ the pointer.
+ */
+static strmap_t *broken_connection_counts;
+
+/** If true, do not record information in <b>broken_connection_counts</b>. */
+static int disable_broken_connection_counts = 0;
+
+/** Record that an OR connection failed in <b>state</b>. */
+static void
+note_broken_connection(const char *state)
+{
+ void *ptr;
+ intptr_t val;
+ if (disable_broken_connection_counts)
+ return;
+
+ if (!broken_connection_counts)
+ broken_connection_counts = strmap_new();
+
+ ptr = strmap_get(broken_connection_counts, state);
+ val = (intptr_t)ptr;
+ val++;
+ ptr = (void*)val;
+ strmap_set(broken_connection_counts, state, ptr);
+}
+
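+/* Editor's note: the count lives *in* the pointer value rather than in
+ * allocated storage; the update above is equivalent to
+ *   strmap_set(map, state, (void*)((intptr_t)strmap_get(map, state) + 1));
+ * and reading a count back is just the reverse cast, as
+ * connection_or_report_broken_states() does below. */
+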
+/** Forget all recorded states for failed connections. If
+ * <b>stop_recording</b> is true, don't record any more. */
+void
+clear_broken_connection_map(int stop_recording)
+{
+ if (broken_connection_counts)
+ strmap_free(broken_connection_counts, NULL);
+ broken_connection_counts = NULL;
+ if (stop_recording)
+ disable_broken_connection_counts = 1;
+}
+
+/** Write a detailed description of the state of <b>orconn</b> into the
+ * <b>buflen</b>-byte buffer at <b>buf</b>. This description includes not
+ * only the OR-conn level state but also the TLS state. It's useful for
+ * diagnosing broken handshakes. */
+static void
+connection_or_get_state_description(or_connection_t *orconn,
+ char *buf, size_t buflen)
+{
+ connection_t *conn = TO_CONN(orconn);
+ const char *conn_state;
+ char tls_state[256];
+
+ tor_assert(conn->type == CONN_TYPE_OR || conn->type == CONN_TYPE_EXT_OR);
+
+ conn_state = conn_state_to_string(conn->type, conn->state);
+ tor_tls_get_state_description(orconn->tls, tls_state, sizeof(tls_state));
+
+ tor_snprintf(buf, buflen, "%s with SSL state %s", conn_state, tls_state);
+}
+
+/** Record the current state of <b>orconn</b> as the state of a broken
+ * connection. */
+static void
+connection_or_note_state_when_broken(or_connection_t *orconn)
+{
+ char buf[256];
+ if (disable_broken_connection_counts)
+ return;
+ connection_or_get_state_description(orconn, buf, sizeof(buf));
+ log_info(LD_HANDSHAKE,"Connection died in state '%s'", buf);
+ note_broken_connection(buf);
+}
+
+/** Helper type used to sort connection states and find the most frequent. */
+typedef struct broken_state_count_t {
+ intptr_t count;
+ const char *state;
+} broken_state_count_t;
+
+/** Helper function used to sort broken_state_count_t by frequency. */
+static int
+broken_state_count_compare(const void **a_ptr, const void **b_ptr)
+{
+ const broken_state_count_t *a = *a_ptr, *b = *b_ptr;
+ if (b->count < a->count)
+ return -1;
+ else if (b->count == a->count)
+ return 0;
+ else
+ return 1;
+}
+
+/** Upper limit on the number of different states to report for connection
+ * failure. */
+#define MAX_REASONS_TO_REPORT 10
+
+/** Report a list of the top states for failed OR connections at log level
+ * <b>severity</b>, in log domain <b>domain</b>. */
+void
+connection_or_report_broken_states(int severity, int domain)
+{
+ int total = 0;
+ smartlist_t *items;
+
+ if (!broken_connection_counts || disable_broken_connection_counts)
+ return;
+
+ items = smartlist_new();
+ STRMAP_FOREACH(broken_connection_counts, state, void *, countptr) {
+ broken_state_count_t *c = tor_malloc(sizeof(broken_state_count_t));
+ c->count = (intptr_t)countptr;
+ total += (int)c->count;
+ c->state = state;
+ smartlist_add(items, c);
+ } STRMAP_FOREACH_END;
+
+ smartlist_sort(items, broken_state_count_compare);
+
+ tor_log(severity, domain, "%d connections have failed%s", total,
+ smartlist_len(items) > MAX_REASONS_TO_REPORT ? ". Top reasons:" : ":");
+
+ SMARTLIST_FOREACH_BEGIN(items, const broken_state_count_t *, c) {
+ if (c_sl_idx > MAX_REASONS_TO_REPORT)
+ break;
+ tor_log(severity, domain,
+ " %d connections died in state %s", (int)c->count, c->state);
+ } SMARTLIST_FOREACH_END(c);
+
+ SMARTLIST_FOREACH(items, broken_state_count_t *, c, tor_free(c));
+ smartlist_free(items);
+}
+
+/** Call this to change or_connection_t states, so the owning channel_tls_t can
+ * be notified.
+ */
+
+static void
+connection_or_change_state(or_connection_t *conn, uint8_t state)
+{
+ uint8_t old_state;
+
+ tor_assert(conn);
+
+ old_state = conn->base_.state;
+ conn->base_.state = state;
+
+ if (conn->chan)
+ channel_tls_handle_state_change_on_orconn(conn->chan, conn,
+ old_state, state);
+}
+
+/** Return the number of circuits using an or_connection_t; this used to
+ * be an or_connection_t field, but it got moved to channel_t and we
+ * shouldn't maintain two copies. */
+
+MOCK_IMPL(int,
+connection_or_get_num_circuits, (or_connection_t *conn))
+{
+ tor_assert(conn);
+
+ if (conn->chan) {
+ return channel_num_circuits(TLS_CHAN_TO_BASE(conn->chan));
+ } else return 0;
+}
+
+/**************************************************************/
+
+/** Pack the cell_t host-order structure <b>src</b> into network-order
+ * in the buffer of <b>dst</b>. See tor-spec.txt for details about the
+ * wire format.
+ *
+ * Note that this function doesn't touch <b>dst</b>-\>next: the caller
+ * should set it or clear it as appropriate.
+ */
+void
+cell_pack(packed_cell_t *dst, const cell_t *src, int wide_circ_ids)
+{
+ char *dest = dst->body;
+ if (wide_circ_ids) {
+ set_uint32(dest, htonl(src->circ_id));
+ dest += 4;
+ } else {
+    /* Clear the last two bytes of dest, in case we accidentally
+     * send them to the network somehow. */
+ memset(dest+CELL_MAX_NETWORK_SIZE-2, 0, 2);
+ set_uint16(dest, htons(src->circ_id));
+ dest += 2;
+ }
+ set_uint8(dest, src->command);
+ memcpy(dest+1, src->payload, CELL_PAYLOAD_SIZE);
+}
+
+/** Unpack the network-order buffer <b>src</b> into a host-order
+ * cell_t structure <b>dest</b>.
+ */
+static void
+cell_unpack(cell_t *dest, const char *src, int wide_circ_ids)
+{
+ if (wide_circ_ids) {
+ dest->circ_id = ntohl(get_uint32(src));
+ src += 4;
+ } else {
+ dest->circ_id = ntohs(get_uint16(src));
+ src += 2;
+ }
+ dest->command = get_uint8(src);
+ memcpy(dest->payload, src+1, CELL_PAYLOAD_SIZE);
+}
+
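+/* Round-trip sketch (editor's addition): cell_pack() and cell_unpack() are
+ * inverses for a fixed wide_circ_ids setting; with wide (4-byte) circuit IDs
+ * the wire image is [4-byte CircID][1-byte command][509-byte payload].
+ * Hypothetical check: */
+#if 0
+static void
+example_cell_roundtrip(void)
+{
+  cell_t in, out;
+  packed_cell_t packed;
+  memset(&in, 0, sizeof(in));
+  in.circ_id = 0x2a;
+  in.command = CELL_RELAY;
+  cell_pack(&packed, &in, 1 /* wide_circ_ids */);
+  cell_unpack(&out, packed.body, 1);
+  tor_assert(out.circ_id == in.circ_id && out.command == in.command);
+}
+#endif /* illustrative only */
+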
+/** Write the header of <b>cell</b> into the first VAR_CELL_MAX_HEADER_SIZE
+ * bytes of <b>hdr_out</b>. Returns number of bytes used. */
+int
+var_cell_pack_header(const var_cell_t *cell, char *hdr_out, int wide_circ_ids)
+{
+ int r;
+ if (wide_circ_ids) {
+ set_uint32(hdr_out, htonl(cell->circ_id));
+ hdr_out += 4;
+ r = VAR_CELL_MAX_HEADER_SIZE;
+ } else {
+ set_uint16(hdr_out, htons(cell->circ_id));
+ hdr_out += 2;
+ r = VAR_CELL_MAX_HEADER_SIZE - 2;
+ }
+ set_uint8(hdr_out, cell->command);
+ set_uint16(hdr_out+1, htons(cell->payload_len));
+ return r;
+}
+
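+/* For reference (editor's note): the header written above is
+ *   [CircID (4 or 2 bytes)][1-byte command][2-byte payload length],
+ * so the return value is VAR_CELL_MAX_HEADER_SIZE (7) with wide circuit IDs
+ * and VAR_CELL_MAX_HEADER_SIZE - 2 (5) without. */
+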
+/** Allocate and return a new var_cell_t with <b>payload_len</b> bytes of
+ * payload space. */
+var_cell_t *
+var_cell_new(uint16_t payload_len)
+{
+ size_t size = offsetof(var_cell_t, payload) + payload_len;
+ var_cell_t *cell = tor_malloc_zero(size);
+ cell->payload_len = payload_len;
+ cell->command = 0;
+ cell->circ_id = 0;
+ return cell;
+}
+
+/**
+ * Copy a var_cell_t: return a newly allocated copy of <b>src</b>, or NULL
+ * if <b>src</b> is NULL.
+ */
+
+var_cell_t *
+var_cell_copy(const var_cell_t *src)
+{
+ var_cell_t *copy = NULL;
+ size_t size = 0;
+
+ if (src != NULL) {
+ size = offsetof(var_cell_t, payload) + src->payload_len;
+ copy = tor_malloc_zero(size);
+ copy->payload_len = src->payload_len;
+ copy->command = src->command;
+ copy->circ_id = src->circ_id;
+ memcpy(copy->payload, src->payload, copy->payload_len);
+ }
+
+ return copy;
+}
+
+/** Release all space held by <b>cell</b>. */
+void
+var_cell_free_(var_cell_t *cell)
+{
+ tor_free(cell);
+}
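+
+/* A minimal usage sketch for the var_cell_t helpers above (illustrative
+ * only; var_cell_free() is assumed to be the usual free-and-NULL wrapper
+ * around var_cell_free_()):
+ *
+ *    var_cell_t *vc = var_cell_new(len);
+ *    vc->command = CELL_VERSIONS;
+ *    vc->circ_id = 0;
+ *    memcpy(vc->payload, data, len);
+ *    connection_or_write_var_cell_to_buf(vc, conn);
+ *    var_cell_free(vc);
+ */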
+
+/** We've received an EOF from <b>conn</b>. Mark it for close and return. */
+int
+connection_or_reached_eof(or_connection_t *conn)
+{
+ tor_assert(conn);
+
+ log_info(LD_OR,"OR connection reached EOF. Closing.");
+ connection_or_close_normally(conn, 1);
+
+ return 0;
+}
+
+/** Handle any new bytes that have come in on connection <b>conn</b>.
+ * If conn is mid proxy handshake, advance the proxy handshake; if it is
+ * open, renegotiating, or mid OR handshake, hand the bytes to
+ * connection_or_process_cells_from_inbuf(); else do nothing.
+ */
+int
+connection_or_process_inbuf(or_connection_t *conn)
+{
+  /** Don't let the inbuf of a nonopen OR connection grow beyond this many
+   * bytes: it's either a broken client, a non-Tor client, or a DOS
+   * attempt. Since the limit is zero, any data that accumulates while we
+   * are in a non-open, non-handshaking state closes the connection below. */
+#define MAX_OR_INBUF_WHEN_NONOPEN 0
+
+ int ret = 0;
+ tor_assert(conn);
+
+ switch (conn->base_.state) {
+ case OR_CONN_STATE_PROXY_HANDSHAKING:
+ ret = connection_read_proxy_handshake(TO_CONN(conn));
+
+ /* start TLS after handshake completion, or deal with error */
+ if (ret == 1) {
+ tor_assert(TO_CONN(conn)->proxy_state == PROXY_CONNECTED);
+ if (connection_tls_start_handshake(conn, 0) < 0)
+ ret = -1;
+ /* Touch the channel's active timestamp if there is one */
+ if (conn->chan)
+ channel_timestamp_active(TLS_CHAN_TO_BASE(conn->chan));
+ }
+ if (ret < 0) {
+ connection_or_close_for_error(conn, 0);
+ }
+
+ return ret;
+ case OR_CONN_STATE_TLS_SERVER_RENEGOTIATING:
+ case OR_CONN_STATE_OPEN:
+ case OR_CONN_STATE_OR_HANDSHAKING_V2:
+ case OR_CONN_STATE_OR_HANDSHAKING_V3:
+ return connection_or_process_cells_from_inbuf(conn);
+ default:
+ break; /* don't do anything */
+ }
+
+ /* This check was necessary with 0.2.2, when the TLS_SERVER_RENEGOTIATING
+ * check would otherwise just let data accumulate. It serves no purpose
+ * in 0.2.3.
+ *
+ * XXXX Remove this check once we verify that the above paragraph is
+ * 100% true. */
+ if (buf_datalen(conn->base_.inbuf) > MAX_OR_INBUF_WHEN_NONOPEN) {
+ log_fn(LOG_PROTOCOL_WARN, LD_NET, "Accumulated too much data (%d bytes) "
+ "on nonopen OR connection %s %s:%u in state %s; closing.",
+ (int)buf_datalen(conn->base_.inbuf),
+ connection_or_nonopen_was_started_here(conn) ? "to" : "from",
+ conn->base_.address, conn->base_.port,
+ conn_state_to_string(conn->base_.type, conn->base_.state));
+ connection_or_close_for_error(conn, 0);
+ ret = -1;
+ }
+
+ return ret;
+}
+
+/** Called whenever we have flushed some data on an or_conn: if we are
+ * under the low-water mark, tell the scheduler to add more data from
+ * active circuits. */
+int
+connection_or_flushed_some(or_connection_t *conn)
+{
+ size_t datalen;
+
+ /* Update the channel's active timestamp if there is one */
+ if (conn->chan)
+ channel_timestamp_active(TLS_CHAN_TO_BASE(conn->chan));
+
+  /* If we're under the low water mark, let the scheduler know so it can
+   * add cells until we're just over the high water mark. */
+ datalen = connection_get_outbuf_len(TO_CONN(conn));
+ if (datalen < OR_CONN_LOWWATER) {
+ /* Let the scheduler know */
+ scheduler_channel_wants_writes(TLS_CHAN_TO_BASE(conn->chan));
+ }
+
+ return 0;
+}
+
+/** This is for channeltls.c to ask how many cells we could accept if
+ * they were available. */
+ssize_t
+connection_or_num_cells_writeable(or_connection_t *conn)
+{
+ size_t datalen, cell_network_size;
+ ssize_t n = 0;
+
+ tor_assert(conn);
+
+ /*
+ * If we're under the high water mark, we're potentially
+ * writeable; note this is different from the calculation above
+ * used to trigger when to start writing after we've stopped.
+ */
+ datalen = connection_get_outbuf_len(TO_CONN(conn));
+ if (datalen < OR_CONN_HIGHWATER) {
+ cell_network_size = get_cell_network_size(conn->wide_circ_ids);
+ n = CEIL_DIV(OR_CONN_HIGHWATER - datalen, cell_network_size);
+ }
+
+ return n;
+}
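+
+/* Worked example (assuming a 32 KiB OR_CONN_HIGHWATER and 514-byte wide
+ * cells): with an empty outbuf we report CEIL_DIV(32768, 514) = 64 cells
+ * writeable; once the outbuf reaches the high-water mark we report 0. */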
+
+/** Connection <b>conn</b> has finished writing and has no bytes left on
+ * its outbuf.
+ *
+ * If <b>conn</b> is in an unexpected state, log a bug and return -1;
+ * otherwise note channel activity and return 0.
+ */
+int
+connection_or_finished_flushing(or_connection_t *conn)
+{
+ tor_assert(conn);
+ assert_connection_ok(TO_CONN(conn),0);
+
+ switch (conn->base_.state) {
+ case OR_CONN_STATE_PROXY_HANDSHAKING:
+ case OR_CONN_STATE_OPEN:
+ case OR_CONN_STATE_OR_HANDSHAKING_V2:
+ case OR_CONN_STATE_OR_HANDSHAKING_V3:
+ break;
+ default:
+ log_err(LD_BUG,"Called in unexpected state %d.", conn->base_.state);
+ tor_fragile_assert();
+ return -1;
+ }
+
+ /* Update the channel's active timestamp if there is one */
+ if (conn->chan)
+ channel_timestamp_active(TLS_CHAN_TO_BASE(conn->chan));
+
+ return 0;
+}
+
+/** Connected handler for OR connections: begin the TLS handshake.
+ */
+int
+connection_or_finished_connecting(or_connection_t *or_conn)
+{
+ const int proxy_type = or_conn->proxy_type;
+ connection_t *conn;
+
+ tor_assert(or_conn);
+ conn = TO_CONN(or_conn);
+ tor_assert(conn->state == OR_CONN_STATE_CONNECTING);
+
+ log_debug(LD_HANDSHAKE,"OR connect() to router at %s:%u finished.",
+ conn->address,conn->port);
+ control_event_bootstrap(BOOTSTRAP_STATUS_HANDSHAKE, 0);
+
+ if (proxy_type != PROXY_NONE) {
+ /* start proxy handshake */
+ if (connection_proxy_connect(conn, proxy_type) < 0) {
+ connection_or_close_for_error(or_conn, 0);
+ return -1;
+ }
+
+ connection_start_reading(conn);
+ connection_or_change_state(or_conn, OR_CONN_STATE_PROXY_HANDSHAKING);
+ return 0;
+ }
+
+ if (connection_tls_start_handshake(or_conn, 0) < 0) {
+ /* TLS handshaking error of some kind. */
+ connection_or_close_for_error(or_conn, 0);
+ return -1;
+ }
+ return 0;
+}
+
+/** Called when we're about to finally unlink and free an OR connection:
+ * perform necessary accounting and cleanup */
+void
+connection_or_about_to_close(or_connection_t *or_conn)
+{
+ connection_t *conn = TO_CONN(or_conn);
+
+ /* Tell the controlling channel we're closed */
+ if (or_conn->chan) {
+ channel_closed(TLS_CHAN_TO_BASE(or_conn->chan));
+ /*
+ * NULL this out because the channel might hang around a little
+ * longer before channel_run_cleanup() gets it.
+ */
+ or_conn->chan->conn = NULL;
+ or_conn->chan = NULL;
+ }
+
+ /* Remember why we're closing this connection. */
+ if (conn->state != OR_CONN_STATE_OPEN) {
+ /* now mark things down as needed */
+ if (connection_or_nonopen_was_started_here(or_conn)) {
+ const or_options_t *options = get_options();
+ connection_or_note_state_when_broken(or_conn);
+ /* Tell the new guard API about the channel failure */
+ entry_guard_chan_failed(TLS_CHAN_TO_BASE(or_conn->chan));
+ if (conn->state >= OR_CONN_STATE_TLS_HANDSHAKING) {
+ int reason = tls_error_to_orconn_end_reason(or_conn->tls_error);
+ control_event_or_conn_status(or_conn, OR_CONN_EVENT_FAILED,
+ reason);
+ if (!authdir_mode_tests_reachability(options))
+ control_event_bootstrap_prob_or(
+ orconn_end_reason_to_control_string(reason),
+ reason, or_conn);
+ }
+ }
+ } else if (conn->hold_open_until_flushed) {
+ /* We only set hold_open_until_flushed when we're intentionally
+ * closing a connection. */
+ control_event_or_conn_status(or_conn, OR_CONN_EVENT_CLOSED,
+ tls_error_to_orconn_end_reason(or_conn->tls_error));
+ } else if (!tor_digest_is_zero(or_conn->identity_digest)) {
+ control_event_or_conn_status(or_conn, OR_CONN_EVENT_CLOSED,
+ tls_error_to_orconn_end_reason(or_conn->tls_error));
+ }
+}
+
+/** Return 1 if identity digest <b>id_digest</b> is known to be a
+ * currently or recently running relay. Otherwise return 0. */
+int
+connection_or_digest_is_known_relay(const char *id_digest)
+{
+ if (router_get_consensus_status_by_id(id_digest))
+ return 1; /* It's in the consensus: "yes" */
+ if (router_get_by_id_digest(id_digest))
+ return 1; /* Not in the consensus, but we have a descriptor for
+ * it. Probably it was in a recent consensus. "Yes". */
+ return 0;
+}
+
+/** Set the per-conn read and write limits for <b>conn</b>. If it's a known
+ * relay, we will rely on the global read and write buckets, so give it
+ * per-conn limits that are big enough they'll never matter. But if it's
+ * not a known relay, first check if we set PerConnBWRate/PerConnBWBurst,
+ * then check if the consensus sets them, else default to 'big enough'.
+ *
+ * If <b>reset</b> is true, set the bucket to be full. Otherwise, just
+ * clip the bucket if it happens to be <em>too</em> full.
+ */
+static void
+connection_or_update_token_buckets_helper(or_connection_t *conn, int reset,
+ const or_options_t *options)
+{
+ int rate, burst; /* per-connection rate limiting params */
+ if (connection_or_digest_is_known_relay(conn->identity_digest)) {
+ /* It's in the consensus, or we have a descriptor for it meaning it
+ * was probably in a recent consensus. It's a recognized relay:
+ * give it full bandwidth. */
+ rate = (int)options->BandwidthRate;
+ burst = (int)options->BandwidthBurst;
+ } else {
+ /* Not a recognized relay. Squeeze it down based on the suggested
+ * bandwidth parameters in the consensus, but allow local config
+ * options to override. */
+ rate = options->PerConnBWRate ? (int)options->PerConnBWRate :
+ networkstatus_get_param(NULL, "perconnbwrate",
+ (int)options->BandwidthRate, 1, INT32_MAX);
+ burst = options->PerConnBWBurst ? (int)options->PerConnBWBurst :
+ networkstatus_get_param(NULL, "perconnbwburst",
+ (int)options->BandwidthBurst, 1, INT32_MAX);
+ }
+
+ token_bucket_rw_adjust(&conn->bucket, rate, burst);
+ if (reset) {
+ token_bucket_rw_reset(&conn->bucket, monotime_coarse_get_stamp());
+ }
+}
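+
+/* For example (an illustrative torrc fragment, not taken from this file):
+ *
+ *    PerConnBWRate 100 KBytes
+ *    PerConnBWBurst 200 KBytes
+ *
+ * would override the consensus "perconnbwrate"/"perconnbwburst" parameters
+ * for connections from unrecognized peers; known relays still get the full
+ * BandwidthRate/BandwidthBurst. */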
+
+/** Either our set of relays or our per-conn rate limits have changed.
+ * Go through all the OR connections and update their token buckets to make
+ * sure they don't exceed their maximum values. */
+void
+connection_or_update_token_buckets(smartlist_t *conns,
+ const or_options_t *options)
+{
+ SMARTLIST_FOREACH(conns, connection_t *, conn,
+ {
+ if (connection_speaks_cells(conn))
+ connection_or_update_token_buckets_helper(TO_OR_CONN(conn), 0, options);
+ });
+}
+
+/** Mark <b>or_conn</b> as canonical if <b>is_canonical</b> is set, and
+ * non-canonical otherwise. Adjust idle_timeout accordingly.
+ */
+void
+connection_or_set_canonical(or_connection_t *or_conn,
+ int is_canonical)
+{
+ if (bool_eq(is_canonical, or_conn->is_canonical) &&
+ or_conn->idle_timeout != 0) {
+ /* Don't recalculate an existing idle_timeout unless the canonical
+ * status changed. */
+ return;
+ }
+
+ or_conn->is_canonical = !! is_canonical; /* force to a 1-bit boolean */
+ or_conn->idle_timeout = channelpadding_get_channel_idle_timeout(
+ TLS_CHAN_TO_BASE(or_conn->chan), is_canonical);
+
+ log_info(LD_CIRC,
+ "Channel %"PRIu64 " chose an idle timeout of %d.",
+ or_conn->chan ?
+ (TLS_CHAN_TO_BASE(or_conn->chan)->global_identifier):0,
+ or_conn->idle_timeout);
+}
+
+/** If we don't necessarily know the router we're connecting to, but we
+ * have an addr/port/id_digest, then fill in as much as we can. Start
+ * by checking to see if this describes a router we know.
+ * <b>started_here</b> is 1 if we are the initiator of <b>conn</b> and
+ * 0 if it's an incoming connection. */
+void
+connection_or_init_conn_from_address(or_connection_t *conn,
+ const tor_addr_t *addr, uint16_t port,
+ const char *id_digest,
+ const ed25519_public_key_t *ed_id,
+ int started_here)
+{
+ log_debug(LD_HANDSHAKE, "init conn from address %s: %s, %s (%d)",
+ fmt_addr(addr),
+ hex_str((const char*)id_digest, DIGEST_LEN),
+ ed25519_fmt(ed_id),
+ started_here);
+
+ connection_or_set_identity_digest(conn, id_digest, ed_id);
+ connection_or_update_token_buckets_helper(conn, 1, get_options());
+
+ conn->base_.port = port;
+ tor_addr_copy(&conn->base_.addr, addr);
+ tor_addr_copy(&conn->real_addr, addr);
+
+ connection_or_check_canonicity(conn, started_here);
+}
+
+/** Check whether the identity of <b>conn</b> matches a known node. If it
+ * does, check whether the address of conn matches the expected address, and
+ * update the connection's is_canonical flag, nickname, and address fields as
+ * appropriate. */
+static void
+connection_or_check_canonicity(or_connection_t *conn, int started_here)
+{
+ const char *id_digest = conn->identity_digest;
+ const ed25519_public_key_t *ed_id = NULL;
+ const tor_addr_t *addr = &conn->real_addr;
+ if (conn->chan)
+ ed_id = & TLS_CHAN_TO_BASE(conn->chan)->ed25519_identity;
+
+ const node_t *r = node_get_by_id(id_digest);
+ if (r &&
+ node_supports_ed25519_link_authentication(r, 1) &&
+ ! node_ed25519_id_matches(r, ed_id)) {
+ /* If this node is capable of proving an ed25519 ID,
+ * we can't call this a canonical connection unless both IDs match. */
+ r = NULL;
+ }
+
+ if (r) {
+ tor_addr_port_t node_ap;
+ node_get_pref_orport(r, &node_ap);
+ /* XXXX proposal 186 is making this more complex. For now, a conn
+ is canonical when it uses the _preferred_ address. */
+ if (tor_addr_eq(&conn->base_.addr, &node_ap.addr))
+ connection_or_set_canonical(conn, 1);
+ if (!started_here) {
+ /* Override the addr/port, so our log messages will make sense.
+ * This is dangerous, since if we ever try looking up a conn by
+ * its actual addr/port, we won't remember. Careful! */
+ /* XXXX arma: this is stupid, and it's the reason we need real_addr
+ * to track is_canonical properly. What requires it? */
+ /* XXXX <arma> i believe the reason we did this, originally, is because
+ * we wanted to log what OR a connection was to, and if we logged the
+ * right IP address and port 56244, that wouldn't be as helpful. now we
+ * log the "right" port too, so we know if it's moria1 or moria2.
+ */
+ tor_addr_copy(&conn->base_.addr, &node_ap.addr);
+ conn->base_.port = node_ap.port;
+ }
+ tor_free(conn->nickname);
+ conn->nickname = tor_strdup(node_get_nickname(r));
+ tor_free(conn->base_.address);
+ conn->base_.address = tor_addr_to_str_dup(&node_ap.addr);
+ } else {
+ tor_free(conn->nickname);
+ conn->nickname = tor_malloc(HEX_DIGEST_LEN+2);
+ conn->nickname[0] = '$';
+ base16_encode(conn->nickname+1, HEX_DIGEST_LEN+1,
+ conn->identity_digest, DIGEST_LEN);
+
+ tor_free(conn->base_.address);
+ conn->base_.address = tor_addr_to_str_dup(addr);
+ }
+
+ /*
+ * We have to tell channeltls.c to update the channel marks (local, in
+ * particular), since we may have changed the address.
+ */
+
+ if (conn->chan) {
+ channel_tls_update_marks(conn);
+ }
+}
+
+/** These just pass all the is_bad_for_new_circs manipulation on to
+ * channel_t */
+
+static unsigned int
+connection_or_is_bad_for_new_circs(or_connection_t *or_conn)
+{
+ tor_assert(or_conn);
+
+ if (or_conn->chan)
+ return channel_is_bad_for_new_circs(TLS_CHAN_TO_BASE(or_conn->chan));
+ else return 0;
+}
+
+static void
+connection_or_mark_bad_for_new_circs(or_connection_t *or_conn)
+{
+ tor_assert(or_conn);
+
+ if (or_conn->chan)
+ channel_mark_bad_for_new_circs(TLS_CHAN_TO_BASE(or_conn->chan));
+}
+
+/** How old do we let a connection to an OR get before deciding it's
+ * too old for new circuits? */
+#define TIME_BEFORE_OR_CONN_IS_TOO_OLD (60*60*24*7)
+
+/** Expire an or_connection if it is too old. Helper for
+ * connection_or_group_set_badness_ and fast path for
+ * channel_rsa_id_group_set_badness.
+ *
+ * Returns 1 if the connection was already expired, else 0.
+ */
+int
+connection_or_single_set_badness_(time_t now,
+ or_connection_t *or_conn,
+ int force)
+{
+ /* XXXX this function should also be about channels? */
+ if (or_conn->base_.marked_for_close ||
+ connection_or_is_bad_for_new_circs(or_conn))
+ return 1;
+
+ if (force ||
+ or_conn->base_.timestamp_created + TIME_BEFORE_OR_CONN_IS_TOO_OLD
+ < now) {
+ log_info(LD_OR,
+ "Marking OR conn to %s:%d as too old for new circuits "
+ "(fd "TOR_SOCKET_T_FORMAT", %d secs old).",
+ or_conn->base_.address, or_conn->base_.port, or_conn->base_.s,
+ (int)(now - or_conn->base_.timestamp_created));
+ connection_or_mark_bad_for_new_circs(or_conn);
+ }
+
+ return 0;
+}
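+
+/* Concretely: TIME_BEFORE_OR_CONN_IS_TOO_OLD is 60*60*24*7 seconds, i.e.
+ * one week, so absent <b>force</b> a connection is marked bad for new
+ * circuits once it has been around for more than a week. */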
+
+/** Given a list of all the or_connections with a given
+ * identity, set elements of that list as is_bad_for_new_circs as
+ * appropriate. Helper for connection_or_set_bad_connections().
+ *
+ * Specifically, we set the is_bad_for_new_circs flag on:
+ * - all connections if <b>force</b> is true.
+ * - all connections that are too old.
+ * - all open non-canonical connections for which a canonical connection
+ * exists to the same router.
+ * - all open canonical connections for which a 'better' canonical
+ * connection exists to the same router.
+ * - all open non-canonical connections for which a 'better' non-canonical
+ * connection exists to the same router at the same address.
+ *
+ * See channel_is_better() in channel.c for our idea of what makes one OR
+ * connection better than another.
+ */
+void
+connection_or_group_set_badness_(smartlist_t *group, int force)
+{
+ /* XXXX this function should be entirely about channels, not OR
+ * XXXX connections. */
+
+ or_connection_t *best = NULL;
+ int n_old = 0, n_inprogress = 0, n_canonical = 0, n_other = 0;
+ time_t now = time(NULL);
+
+ /* Pass 1: expire everything that's old, and see what the status of
+ * everything else is. */
+ SMARTLIST_FOREACH_BEGIN(group, or_connection_t *, or_conn) {
+ if (connection_or_single_set_badness_(now, or_conn, force))
+ continue;
+
+ if (connection_or_is_bad_for_new_circs(or_conn)) {
+ ++n_old;
+ } else if (or_conn->base_.state != OR_CONN_STATE_OPEN) {
+ ++n_inprogress;
+ } else if (or_conn->is_canonical) {
+ ++n_canonical;
+ } else {
+ ++n_other;
+ }
+ } SMARTLIST_FOREACH_END(or_conn);
+
+  /* Pass 2: We know roughly how good the best connection is.
+   * Expire everything that's worse, and find the very best if we can. */
+ SMARTLIST_FOREACH_BEGIN(group, or_connection_t *, or_conn) {
+ if (or_conn->base_.marked_for_close ||
+ connection_or_is_bad_for_new_circs(or_conn))
+ continue; /* This one doesn't need to be marked bad. */
+ if (or_conn->base_.state != OR_CONN_STATE_OPEN)
+ continue; /* Don't mark anything bad until we have seen what happens
+ * when the connection finishes. */
+ if (n_canonical && !or_conn->is_canonical) {
+ /* We have at least one open canonical connection to this router,
+ * and this one is open but not canonical. Mark it bad. */
+ log_info(LD_OR,
+ "Marking OR conn to %s:%d as unsuitable for new circuits: "
+ "(fd "TOR_SOCKET_T_FORMAT", %d secs old). It is not "
+ "canonical, and we have another connection to that OR that is.",
+ or_conn->base_.address, or_conn->base_.port, or_conn->base_.s,
+ (int)(now - or_conn->base_.timestamp_created));
+ connection_or_mark_bad_for_new_circs(or_conn);
+ continue;
+ }
+
+ if (!best ||
+ channel_is_better(TLS_CHAN_TO_BASE(or_conn->chan),
+ TLS_CHAN_TO_BASE(best->chan))) {
+ best = or_conn;
+ }
+ } SMARTLIST_FOREACH_END(or_conn);
+
+ if (!best)
+ return;
+
+ /* Pass 3: One connection to OR is best. If it's canonical, mark as bad
+ * every other open connection. If it's non-canonical, mark as bad
+ * every other open connection to the same address.
+ *
+ * XXXX This isn't optimal; if we have connections to an OR at multiple
+ * addresses, we'd like to pick the best _for each address_, and mark as
+ * bad every open connection that isn't best for its address. But this
+ * can only occur in cases where the other OR is old (so we have no
+ * canonical connection to it), or where all the connections to the OR are
+ * at noncanonical addresses and we have no good direct connection (which
+ * means we aren't at risk of attaching circuits to it anyway). As
+ * 0.1.2.x dies out, the first case will go away, and the second one is
+ * "mostly harmless", so a fix can wait until somebody is bored.
+ */
+ SMARTLIST_FOREACH_BEGIN(group, or_connection_t *, or_conn) {
+ if (or_conn->base_.marked_for_close ||
+ connection_or_is_bad_for_new_circs(or_conn) ||
+ or_conn->base_.state != OR_CONN_STATE_OPEN)
+ continue;
+ if (or_conn != best &&
+ channel_is_better(TLS_CHAN_TO_BASE(best->chan),
+ TLS_CHAN_TO_BASE(or_conn->chan))) {
+ /* This isn't the best conn, _and_ the best conn is better than it */
+ if (best->is_canonical) {
+ log_info(LD_OR,
+ "Marking OR conn to %s:%d as unsuitable for new circuits: "
+ "(fd "TOR_SOCKET_T_FORMAT", %d secs old). "
+ "We have a better canonical one "
+ "(fd "TOR_SOCKET_T_FORMAT"; %d secs old).",
+ or_conn->base_.address, or_conn->base_.port, or_conn->base_.s,
+ (int)(now - or_conn->base_.timestamp_created),
+ best->base_.s, (int)(now - best->base_.timestamp_created));
+ connection_or_mark_bad_for_new_circs(or_conn);
+ } else if (!tor_addr_compare(&or_conn->real_addr,
+ &best->real_addr, CMP_EXACT)) {
+ log_info(LD_OR,
+ "Marking OR conn to %s:%d as unsuitable for new circuits: "
+ "(fd "TOR_SOCKET_T_FORMAT", %d secs old). We have a better "
+ "one with the "
+ "same address (fd "TOR_SOCKET_T_FORMAT"; %d secs old).",
+ or_conn->base_.address, or_conn->base_.port, or_conn->base_.s,
+ (int)(now - or_conn->base_.timestamp_created),
+ best->base_.s, (int)(now - best->base_.timestamp_created));
+ connection_or_mark_bad_for_new_circs(or_conn);
+ }
+ }
+ } SMARTLIST_FOREACH_END(or_conn);
+}
+
+/* Lifetime of a connection failure. After that, we'll retry. This is in
+ * seconds. */
+#define OR_CONNECT_FAILURE_LIFETIME 60
+/* The interval at which we clean up the failure cache. */
+#define OR_CONNECT_FAILURE_CLEANUP_INTERVAL 60
+
+/* The next time we have to clean up the failure map. We keep this because
+ * we clean the map opportunistically rather than on a timer. */
+static time_t or_connect_failure_map_next_cleanup_ts = 0;
+
+/* OR connection failure entry data structure. It is kept in the connection
+ * failure map defined below and indexed by OR identity digest, address and
+ * port.
+ *
+ * We need to identify a connection failure with these three values because
+ * we want to avoid wrongfully blacklisting a relay if someone is trying to
+ * extend to a known identity digest but with the wrong IP/port. For
+ * instance, this can happen if a relay changed its port but the client
+ * still has an old descriptor with the old port. We want to stop connecting
+ * to that IP/port/identity triplet, not to the relay identity in general. */
+typedef struct or_connect_failure_entry_t {
+ HT_ENTRY(or_connect_failure_entry_t) node;
+  /* Identity digest of the relay this connection is connecting to. */
+ uint8_t identity_digest[DIGEST_LEN];
+  /* This is the connection address from the base connection_t. After the
+   * connection is checked for canonicity, the base address should represent
+   * what we know about the relay rather than merely where we connected to.
+   * That is what lets us correlate entries with known relays in the
+   * consensus. */
+ tor_addr_t addr;
+ uint16_t port;
+ /* Last time we were unable to connect. */
+ time_t last_failed_connect_ts;
+} or_connect_failure_entry_t;
+
+/* Map where we keep connection failure entries. They are indexed by addr,
+ * port and identity digest. */
+static HT_HEAD(or_connect_failure_ht, or_connect_failure_entry_t)
+ or_connect_failures_map = HT_INITIALIZER();
+
+/* Helper: Hashtable equal function. Return 1 if equal else 0. */
+static int
+or_connect_failure_ht_eq(const or_connect_failure_entry_t *a,
+ const or_connect_failure_entry_t *b)
+{
+ return fast_memeq(a->identity_digest, b->identity_digest, DIGEST_LEN) &&
+ tor_addr_eq(&a->addr, &b->addr) &&
+ a->port == b->port;
+}
+
+/* Helper: Return the hash for the hashtable of the given entry. For this
+ * table, it is a combination of address, port and identity digest. */
+static unsigned int
+or_connect_failure_ht_hash(const or_connect_failure_entry_t *entry)
+{
+ size_t offset = 0, addr_size;
+ const void *addr_ptr;
+  /* The buffer is sized for IPv6; IPv4 is smaller, so either fits. */
+ uint8_t data[16 + sizeof(uint16_t) + DIGEST_LEN];
+
+ /* Get the right address bytes depending on the family. */
+ switch (tor_addr_family(&entry->addr)) {
+ case AF_INET:
+ addr_size = 4;
+ addr_ptr = &entry->addr.addr.in_addr.s_addr;
+ break;
+ case AF_INET6:
+ addr_size = 16;
+ addr_ptr = &entry->addr.addr.in6_addr.s6_addr;
+ break;
+ default:
+ tor_assert_nonfatal_unreached();
+ return 0;
+ }
+
+ memcpy(data, addr_ptr, addr_size);
+ offset += addr_size;
+ memcpy(data + offset, entry->identity_digest, DIGEST_LEN);
+ offset += DIGEST_LEN;
+ set_uint16(data + offset, entry->port);
+ offset += sizeof(uint16_t);
+
+ return (unsigned int) siphash24g(data, offset);
+}
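+
+/* Illustration of the tuple hashed above:
+ *
+ *    ADDR [4 or 16 bytes] | RSA DIGEST [DIGEST_LEN bytes] | PORT [2 bytes]
+ *
+ * The concatenation is run through siphash24g(), so equal
+ * addr/digest/port triplets always hash to the same bucket. */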
+
+HT_PROTOTYPE(or_connect_failure_ht, or_connect_failure_entry_t, node,
+ or_connect_failure_ht_hash, or_connect_failure_ht_eq)
+
+HT_GENERATE2(or_connect_failure_ht, or_connect_failure_entry_t, node,
+ or_connect_failure_ht_hash, or_connect_failure_ht_eq,
+ 0.6, tor_reallocarray_, tor_free_)
+
+/* Initialize a given connect failure entry with the given identity_digest,
+ * addr and port. All fields are optional except ocf. */
+static void
+or_connect_failure_init(const char *identity_digest, const tor_addr_t *addr,
+ uint16_t port, or_connect_failure_entry_t *ocf)
+{
+ tor_assert(ocf);
+ if (identity_digest) {
+ memcpy(ocf->identity_digest, identity_digest,
+ sizeof(ocf->identity_digest));
+ }
+ if (addr) {
+ tor_addr_copy(&ocf->addr, addr);
+ }
+ ocf->port = port;
+}
+
+/* Return a newly allocated connection failure entry. It is initialized with
+ * the given or_conn data. This can't fail. */
+static or_connect_failure_entry_t *
+or_connect_failure_new(const or_connection_t *or_conn)
+{
+ or_connect_failure_entry_t *ocf = tor_malloc_zero(sizeof(*ocf));
+ or_connect_failure_init(or_conn->identity_digest, &or_conn->real_addr,
+ TO_CONN(or_conn)->port, ocf);
+ return ocf;
+}
+
+/* Return a connection failure entry matching the given or_conn. NULL is
+ * returned if not found. */
+static or_connect_failure_entry_t *
+or_connect_failure_find(const or_connection_t *or_conn)
+{
+ or_connect_failure_entry_t lookup;
+ tor_assert(or_conn);
+ or_connect_failure_init(or_conn->identity_digest, &TO_CONN(or_conn)->addr,
+ TO_CONN(or_conn)->port, &lookup);
+ return HT_FIND(or_connect_failure_ht, &or_connect_failures_map, &lookup);
+}
+
+/* Note down in the connection failure cache that a failure occurred on the
+ * given or_conn. */
+STATIC void
+note_or_connect_failed(const or_connection_t *or_conn)
+{
+ or_connect_failure_entry_t *ocf = NULL;
+
+ tor_assert(or_conn);
+
+ ocf = or_connect_failure_find(or_conn);
+ if (ocf == NULL) {
+ ocf = or_connect_failure_new(or_conn);
+ HT_INSERT(or_connect_failure_ht, &or_connect_failures_map, ocf);
+ }
+ ocf->last_failed_connect_ts = approx_time();
+}
+
+/* Clean up the connection failure cache, removing all entries whose last
+ * failure is at or below the given cutoff. */
+static void
+or_connect_failure_map_cleanup(time_t cutoff)
+{
+ or_connect_failure_entry_t **ptr, **next, *entry;
+
+ for (ptr = HT_START(or_connect_failure_ht, &or_connect_failures_map);
+ ptr != NULL; ptr = next) {
+ entry = *ptr;
+ if (entry->last_failed_connect_ts <= cutoff) {
+ next = HT_NEXT_RMV(or_connect_failure_ht, &or_connect_failures_map, ptr);
+ tor_free(entry);
+ } else {
+ next = HT_NEXT(or_connect_failure_ht, &or_connect_failures_map, ptr);
+ }
+ }
+}
+
+/* Return true iff the given OR connection may connect to its destination,
+ * identified by the triplet of identity digest, address and port.
+ *
+ * The or_conn MUST have gone through connection_or_check_canonicity() so
+ * that the base address is properly set to what we know about the relay. */
+STATIC int
+should_connect_to_relay(const or_connection_t *or_conn)
+{
+ time_t now, cutoff;
+ time_t connect_failed_since_ts = 0;
+ or_connect_failure_entry_t *ocf;
+
+ tor_assert(or_conn);
+
+ now = approx_time();
+ cutoff = now - OR_CONNECT_FAILURE_LIFETIME;
+
+  /* Opportunistically try to clean up the failure cache. We do that at
+   * regular intervals so it doesn't grow too big. */
+ if (or_connect_failure_map_next_cleanup_ts <= now) {
+ or_connect_failure_map_cleanup(cutoff);
+ or_connect_failure_map_next_cleanup_ts =
+ now + OR_CONNECT_FAILURE_CLEANUP_INTERVAL;
+ }
+
+  /* Check whether we have previously failed to connect to the same
+   * destination as this OR connection. */
+ ocf = or_connect_failure_find(or_conn);
+ if (ocf) {
+ connect_failed_since_ts = ocf->last_failed_connect_ts;
+ }
+  /* If the last failure is more recent than the cutoff, refuse to connect.
+   * If we have never failed, or the failure is old enough, let the
+   * connection proceed. */
+ if (connect_failed_since_ts > cutoff) {
+ goto no_connect;
+ }
+
+ /* Ok we can connect! */
+ return 1;
+ no_connect:
+ return 0;
+}
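+
+/* Example timeline (with the 60-second OR_CONNECT_FAILURE_LIFETIME above):
+ * if a connect to some triplet fails at time T, should_connect_to_relay()
+ * returns 0 for that triplet until T+60; after that the entry ages out of
+ * the cache and connection attempts are allowed again. */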
+
+/** <b>conn</b> is in the 'connecting' state, and it failed to complete
+ * a TCP connection. Send notifications appropriately.
+ *
+ * <b>reason</b> specifies the or_conn_end_reason for the failure;
+ * <b>msg</b> specifies the strerror-style error message.
+ */
+void
+connection_or_connect_failed(or_connection_t *conn,
+ int reason, const char *msg)
+{
+ control_event_or_conn_status(conn, OR_CONN_EVENT_FAILED, reason);
+ if (!authdir_mode_tests_reachability(get_options()))
+ control_event_bootstrap_prob_or(msg, reason, conn);
+ note_or_connect_failed(conn);
+}
+
+/** <b>conn</b> got an error in connection_handle_read_impl() or
+ * connection_handle_write_impl() and is going to die soon.
+ *
+ * <b>reason</b> specifies the or_conn_end_reason for the failure;
+ * <b>msg</b> specifies the strerror-style error message.
+ */
+void
+connection_or_notify_error(or_connection_t *conn,
+ int reason, const char *msg)
+{
+ channel_t *chan;
+
+ tor_assert(conn);
+
+ /* If we're connecting, call connect_failed() too */
+ if (TO_CONN(conn)->state == OR_CONN_STATE_CONNECTING)
+ connection_or_connect_failed(conn, reason, msg);
+
+ /* Tell the controlling channel if we have one */
+ if (conn->chan) {
+ chan = TLS_CHAN_TO_BASE(conn->chan);
+ /* Don't transition if we're already in closing, closed or error */
+ if (!CHANNEL_CONDEMNED(chan)) {
+ channel_close_for_error(chan);
+ }
+ }
+
+ /* No need to mark for error because connection.c is about to do that */
+}
+
+/** Launch a new OR connection to <b>addr</b>:<b>port</b> and expect to
+ * handshake with an OR with identity digest <b>id_digest</b>. Optionally,
+ * pass in a pointer to a channel using this connection.
+ *
+ * If <b>id_digest</b> is me, do nothing. If we're already connected to it,
+ * return that connection. If the connect() is in progress, set the
+ * new conn's state to 'connecting' and return it. If connect() succeeds,
+ * call connection_tls_start_handshake() on it.
+ *
+ * This function is called from router_retry_connections(), for
+ * ORs connecting to ORs, and circuit_establish_circuit(), for
+ * OPs connecting to ORs.
+ *
+ * Return the launched conn, or NULL if it failed.
+ */
+
+MOCK_IMPL(or_connection_t *,
+connection_or_connect, (const tor_addr_t *_addr, uint16_t port,
+ const char *id_digest,
+ const ed25519_public_key_t *ed_id,
+ channel_tls_t *chan))
+{
+ or_connection_t *conn;
+ const or_options_t *options = get_options();
+ int socket_error = 0;
+ tor_addr_t addr;
+
+ int r;
+ tor_addr_t proxy_addr;
+ uint16_t proxy_port;
+ int proxy_type;
+
+ tor_assert(_addr);
+ tor_assert(id_digest);
+ tor_addr_copy(&addr, _addr);
+
+ if (server_mode(options) && router_digest_is_me(id_digest)) {
+ log_info(LD_PROTOCOL,"Client asked me to connect to myself. Refusing.");
+ return NULL;
+ }
+ if (server_mode(options) && router_ed25519_id_is_me(ed_id)) {
+ log_info(LD_PROTOCOL,"Client asked me to connect to myself by Ed25519 "
+ "identity. Refusing.");
+ return NULL;
+ }
+
+ conn = or_connection_new(CONN_TYPE_OR, tor_addr_family(&addr));
+
+ /*
+ * Set up conn so it's got all the data we need to remember for channels
+ *
+ * This stuff needs to happen before connection_or_init_conn_from_address()
+ * so connection_or_set_identity_digest() and such know where to look to
+ * keep the channel up to date.
+ */
+ conn->chan = chan;
+ chan->conn = conn;
+ connection_or_init_conn_from_address(conn, &addr, port, id_digest, ed_id, 1);
+
+  /* We have a proper OR connection set up; now check whether we may connect
+   * to it, that is, whether we have had a recent failure to this
+   * destination. This avoids constantly retrying relays that we think are
+   * unreachable. */
+ if (!should_connect_to_relay(conn)) {
+ log_info(LD_GENERAL, "Can't connect to identity %s at %s:%u because we "
+ "failed earlier. Refusing.",
+ hex_str(id_digest, DIGEST_LEN), fmt_addr(&TO_CONN(conn)->addr),
+ TO_CONN(conn)->port);
+ connection_free_(TO_CONN(conn));
+ return NULL;
+ }
+
+ connection_or_change_state(conn, OR_CONN_STATE_CONNECTING);
+ control_event_or_conn_status(conn, OR_CONN_EVENT_LAUNCHED, 0);
+
+ conn->is_outgoing = 1;
+
+ /* If we are using a proxy server, find it and use it. */
+ r = get_proxy_addrport(&proxy_addr, &proxy_port, &proxy_type, TO_CONN(conn));
+ if (r == 0) {
+ conn->proxy_type = proxy_type;
+ if (proxy_type != PROXY_NONE) {
+ tor_addr_copy(&addr, &proxy_addr);
+ port = proxy_port;
+ conn->base_.proxy_state = PROXY_INFANT;
+ }
+ } else {
+ /* get_proxy_addrport() might fail if we have a Bridge line that
+ references a transport, but no ClientTransportPlugin lines
+ defining its transport proxy. If this is the case, let's try to
+ output a useful log message to the user. */
+ const char *transport_name =
+ find_transport_name_by_bridge_addrport(&TO_CONN(conn)->addr,
+ TO_CONN(conn)->port);
+
+ if (transport_name) {
+ log_warn(LD_GENERAL, "We were supposed to connect to bridge '%s' "
+ "using pluggable transport '%s', but we can't find a pluggable "
+ "transport proxy supporting '%s'. This can happen if you "
+ "haven't provided a ClientTransportPlugin line, or if "
+ "your pluggable transport proxy stopped running.",
+ fmt_addrport(&TO_CONN(conn)->addr, TO_CONN(conn)->port),
+ transport_name, transport_name);
+
+ control_event_bootstrap_prob_or(
+ "Can't connect to bridge",
+ END_OR_CONN_REASON_PT_MISSING,
+ conn);
+
+ } else {
+ log_warn(LD_GENERAL, "Tried to connect to '%s' through a proxy, but "
+ "the proxy address could not be found.",
+ fmt_addrport(&TO_CONN(conn)->addr, TO_CONN(conn)->port));
+ }
+
+ connection_free_(TO_CONN(conn));
+ return NULL;
+ }
+
+ switch (connection_connect(TO_CONN(conn), conn->base_.address,
+ &addr, port, &socket_error)) {
+ case -1:
+ /* We failed to establish a connection probably because of a local
+ * error. No need to blame the guard in this case. Notify the networking
+ * system of this failure. */
+ connection_or_connect_failed(conn,
+ errno_to_orconn_end_reason(socket_error),
+ tor_socket_strerror(socket_error));
+ connection_free_(TO_CONN(conn));
+ return NULL;
+ case 0:
+ connection_watch_events(TO_CONN(conn), READ_EVENT | WRITE_EVENT);
+ /* writable indicates finish, readable indicates broken link,
+ error indicates broken link on windows */
+ return conn;
+ /* case 1: fall through */
+ }
+
+ if (connection_or_finished_connecting(conn) < 0) {
+ /* already marked for close */
+ return NULL;
+ }
+ return conn;
+}
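+
+/* A sketch of the usual call path (assumed from the channel layer, not
+ * shown in this file): channel_tls_connect() allocates the channel_tls_t
+ * and hands it in as <b>chan</b>, so the connection and channel point at
+ * each other before the TCP connect() is even launched. */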
+
+/** Mark orconn for close and transition the associated channel, if any, to
+ * the closing state.
+ *
+ * It's safe to call this and connection_or_close_for_error() any time, and
+ * channel layer will treat it as a connection closing for reasons outside
+ * its control, like the remote end closing it. It can also be a local
+ * reason that's specific to connection_t/or_connection_t rather than
+ * the channel mechanism, such as expiration of old connections in
+ * run_connection_housekeeping(). If you want to close a channel_t
+ * from somewhere that logically works in terms of generic channels
+ * rather than connections, use channel_mark_for_close(); see also
+ * the comment on that function in channel.c.
+ */
+
+void
+connection_or_close_normally(or_connection_t *orconn, int flush)
+{
+ channel_t *chan = NULL;
+
+ tor_assert(orconn);
+ if (flush) connection_mark_and_flush_internal(TO_CONN(orconn));
+ else connection_mark_for_close_internal(TO_CONN(orconn));
+ if (orconn->chan) {
+ chan = TLS_CHAN_TO_BASE(orconn->chan);
+ /* Don't transition if we're already in closing, closed or error */
+ if (!CHANNEL_CONDEMNED(chan)) {
+ channel_close_from_lower_layer(chan);
+ }
+ }
+}
+
+/** Mark orconn for close and transition the associated channel, if any, to
+ * the error state.
+ */
+
+MOCK_IMPL(void,
+connection_or_close_for_error,(or_connection_t *orconn, int flush))
+{
+ channel_t *chan = NULL;
+
+ tor_assert(orconn);
+ if (flush) connection_mark_and_flush_internal(TO_CONN(orconn));
+ else connection_mark_for_close_internal(TO_CONN(orconn));
+ if (orconn->chan) {
+ chan = TLS_CHAN_TO_BASE(orconn->chan);
+ /* Don't transition if we're already in closing, closed or error */
+ if (!CHANNEL_CONDEMNED(chan)) {
+ channel_close_for_error(chan);
+ }
+ }
+}
+
+/** Begin the tls handshake with <b>conn</b>. <b>receiving</b> is 0 if
+ * we initiated the connection, else it's 1.
+ *
+ * Assign a new tls object to conn->tls, begin reading on <b>conn</b>, and
+ * pass <b>conn</b> to connection_tls_continue_handshake().
+ *
+ * Return -1 if <b>conn</b> is broken, else return 0.
+ */
+MOCK_IMPL(int,
+connection_tls_start_handshake,(or_connection_t *conn, int receiving))
+{
+ channel_listener_t *chan_listener;
+ channel_t *chan;
+
+ /* Incoming connections will need a new channel passed to the
+ * channel_tls_listener */
+ if (receiving) {
+ /* It shouldn't already be set */
+ tor_assert(!(conn->chan));
+ chan_listener = channel_tls_get_listener();
+ if (!chan_listener) {
+ chan_listener = channel_tls_start_listener();
+ command_setup_listener(chan_listener);
+ }
+ chan = channel_tls_handle_incoming(conn);
+ channel_listener_queue_incoming(chan_listener, chan);
+ }
+
+ connection_or_change_state(conn, OR_CONN_STATE_TLS_HANDSHAKING);
+ tor_assert(!conn->tls);
+ conn->tls = tor_tls_new(conn->base_.s, receiving);
+ if (!conn->tls) {
+ log_warn(LD_BUG,"tor_tls_new failed. Closing.");
+ return -1;
+ }
+ tor_tls_set_logged_address(conn->tls, // XXX client and relay?
+ escaped_safe_str(conn->base_.address));
+
+ connection_start_reading(TO_CONN(conn));
+ log_debug(LD_HANDSHAKE,"starting TLS handshake on fd "TOR_SOCKET_T_FORMAT,
+ conn->base_.s);
+
+ if (connection_tls_continue_handshake(conn) < 0)
+ return -1;
+
+ return 0;
+}
+
+/** Block all future attempts to renegotiate on 'conn' */
+void
+connection_or_block_renegotiation(or_connection_t *conn)
+{
+ tor_tls_t *tls = conn->tls;
+ if (!tls)
+ return;
+ tor_tls_set_renegotiate_callback(tls, NULL, NULL);
+ tor_tls_block_renegotiation(tls);
+}
+
+/** Invoked on the server side from inside tor_tls_read() when the server
+ * gets a successful TLS renegotiation from the client. */
+static void
+connection_or_tls_renegotiated_cb(tor_tls_t *tls, void *_conn)
+{
+ or_connection_t *conn = _conn;
+ (void)tls;
+
+ /* Don't invoke this again. */
+ connection_or_block_renegotiation(conn);
+
+ if (connection_tls_finish_handshake(conn) < 0) {
+ /* XXXX_TLS double-check that it's ok to do this from inside read. */
+ /* XXXX_TLS double-check that this verifies certificates. */
+ connection_or_close_for_error(conn, 0);
+ }
+}
+
+/** Move forward with the tls handshake. If it finishes, hand
+ * <b>conn</b> to connection_tls_finish_handshake().
+ *
+ * Return -1 if <b>conn</b> is broken, else return 0.
+ */
+int
+connection_tls_continue_handshake(or_connection_t *conn)
+{
+ int result;
+ check_no_tls_errors();
+
+ tor_assert(conn->base_.state == OR_CONN_STATE_TLS_HANDSHAKING);
+ // log_notice(LD_OR, "Continue handshake with %p", conn->tls);
+ result = tor_tls_handshake(conn->tls);
+ // log_notice(LD_OR, "Result: %d", result);
+
+ switch (result) {
+ CASE_TOR_TLS_ERROR_ANY:
+ log_info(LD_OR,"tls error [%s]. breaking connection.",
+ tor_tls_err_to_string(result));
+ return -1;
+ case TOR_TLS_DONE:
+ if (! tor_tls_used_v1_handshake(conn->tls)) {
+ if (!tor_tls_is_server(conn->tls)) {
+ tor_assert(conn->base_.state == OR_CONN_STATE_TLS_HANDSHAKING);
+ return connection_or_launch_v3_or_handshake(conn);
+ } else {
+ /* v2/v3 handshake, but we are not a client. */
+ log_debug(LD_OR, "Done with initial SSL handshake (server-side). "
+ "Expecting renegotiation or VERSIONS cell");
+ tor_tls_set_renegotiate_callback(conn->tls,
+ connection_or_tls_renegotiated_cb,
+ conn);
+ connection_or_change_state(conn,
+ OR_CONN_STATE_TLS_SERVER_RENEGOTIATING);
+ connection_stop_writing(TO_CONN(conn));
+ connection_start_reading(TO_CONN(conn));
+ return 0;
+ }
+ }
+ tor_assert(tor_tls_is_server(conn->tls));
+ return connection_tls_finish_handshake(conn);
+ case TOR_TLS_WANTWRITE:
+ connection_start_writing(TO_CONN(conn));
+ log_debug(LD_OR,"wanted write");
+ return 0;
+ case TOR_TLS_WANTREAD: /* handshaking conns are *always* reading */
+ log_debug(LD_OR,"wanted read");
+ return 0;
+ case TOR_TLS_CLOSE:
+ log_info(LD_OR,"tls closed. breaking connection.");
+ return -1;
+ }
+ return 0;
+}
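+
+/* Rough summary of the state transitions implemented above (informative,
+ * not normative):
+ *
+ *    TLS_HANDSHAKING --(v1 done)---------> connection_tls_finish_handshake()
+ *    TLS_HANDSHAKING --(v2/v3, server)---> TLS_SERVER_RENEGOTIATING,
+ *                                          awaiting renegotiation or a
+ *                                          VERSIONS cell
+ *    TLS_HANDSHAKING --(v2/v3, client)---> OR_HANDSHAKING_V3 via
+ *                                    connection_or_launch_v3_or_handshake()
+ */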
+
+/** Return 1 if we initiated this connection, or 0 if it started
+ * out as an incoming connection.
+ */
+int
+connection_or_nonopen_was_started_here(or_connection_t *conn)
+{
+ tor_assert(conn->base_.type == CONN_TYPE_OR ||
+ conn->base_.type == CONN_TYPE_EXT_OR);
+ if (!conn->tls)
+ return 1; /* it's still in proxy states or something */
+ if (conn->handshake_state)
+ return conn->handshake_state->started_here;
+ return !tor_tls_is_server(conn->tls);
+}
+
+/** <b>Conn</b> just completed its handshake. Return 0 if all is well, and
+ * return -1 if they are lying, broken, or otherwise something is wrong.
+ *
+ * If we initiated this connection (<b>started_here</b> is true), make sure
+ * the other side sent a correctly formed certificate, and check that it
+ * identifies the relay we meant to connect to.
+ *
+ * Otherwise (if we _didn't_ initiate this connection), it's okay for
+ * the certificate to be weird or absent.
+ *
+ * If we return 0, and the certificate is as expected, write a hash of the
+ * identity key into <b>digest_rcvd_out</b>, which must have DIGEST_LEN
+ * space in it.
+ * If the certificate is invalid or missing on an incoming connection,
+ * we return 0 and set <b>digest_rcvd_out</b> to DIGEST_LEN NUL bytes.
+ * (If we return -1, the contents of this buffer are undefined.)
+ *
+ * As side effects,
+ * 1) Set conn->circ_id_type according to tor-spec.txt.
+ * 2) If we're an authdirserver and we initiated the connection: drop all
+ * descriptors that claim to be on that IP/port but that aren't
+ * this relay; and note that this relay is reachable.
+ * 3) If this is a bridge and we didn't configure its identity
+ * fingerprint, remember the keyid we just learned.
+ */
+static int
+connection_or_check_valid_tls_handshake(or_connection_t *conn,
+ int started_here,
+ char *digest_rcvd_out)
+{
+ crypto_pk_t *identity_rcvd=NULL;
+ const or_options_t *options = get_options();
+ int severity = server_mode(options) ? LOG_PROTOCOL_WARN : LOG_WARN;
+ const char *safe_address =
+ started_here ? conn->base_.address :
+ safe_str_client(conn->base_.address);
+ const char *conn_type = started_here ? "outgoing" : "incoming";
+ int has_cert = 0;
+
+ check_no_tls_errors();
+ has_cert = tor_tls_peer_has_cert(conn->tls);
+ if (started_here && !has_cert) {
+ log_info(LD_HANDSHAKE,"Tried connecting to router at %s:%d, but it didn't "
+ "send a cert! Closing.",
+ safe_address, conn->base_.port);
+ return -1;
+ } else if (!has_cert) {
+ log_debug(LD_HANDSHAKE,"Got incoming connection with no certificate. "
+ "That's ok.");
+ }
+ check_no_tls_errors();
+
+ if (has_cert) {
+ int v = tor_tls_verify(started_here?severity:LOG_INFO,
+ conn->tls, &identity_rcvd);
+ if (started_here && v<0) {
+ log_fn(severity,LD_HANDSHAKE,"Tried connecting to router at %s:%d: It"
+ " has a cert but it's invalid. Closing.",
+ safe_address, conn->base_.port);
+ return -1;
+ } else if (v<0) {
+ log_info(LD_HANDSHAKE,"Incoming connection gave us an invalid cert "
+ "chain; ignoring.");
+ } else {
+ log_debug(LD_HANDSHAKE,
+ "The certificate seems to be valid on %s connection "
+ "with %s:%d", conn_type, safe_address, conn->base_.port);
+ }
+ check_no_tls_errors();
+ }
+
+ if (identity_rcvd) {
+ if (crypto_pk_get_digest(identity_rcvd, digest_rcvd_out) < 0) {
+ crypto_pk_free(identity_rcvd);
+ return -1;
+ }
+ } else {
+ memset(digest_rcvd_out, 0, DIGEST_LEN);
+ }
+
+ tor_assert(conn->chan);
+ channel_set_circid_type(TLS_CHAN_TO_BASE(conn->chan), identity_rcvd, 1);
+
+ crypto_pk_free(identity_rcvd);
+
+ if (started_here) {
+ /* A TLS handshake can't teach us an Ed25519 ID, so we set it to NULL
+ * here. */
+ log_debug(LD_HANDSHAKE, "Calling client_learned_peer_id from "
+ "check_valid_tls_handshake");
+ return connection_or_client_learned_peer_id(conn,
+ (const uint8_t*)digest_rcvd_out,
+ NULL);
+ }
+
+ return 0;
+}
+
+/** Called when we (as a connection initiator) have definitively,
+ * authenticatedly, learned that the ID of the Tor instance on the other
+ * side of <b>conn</b> is <b>rsa_peer_id</b> and optionally <b>ed_peer_id</b>.
+ * For v1 and v2 handshakes,
+ * this is right after we get a certificate chain in a TLS handshake
+ * or renegotiation. For v3+ handshakes, this is right after we get a
+ * certificate chain in a CERTS cell.
+ *
+ * If we did not know the ID before, record the one we got.
+ *
+ * If we wanted an ID, but we didn't get the one we expected, log a message
+ * and return -1.
+ * On relays:
+ * - log a protocol warning whenever the fingerprints don't match;
+ * On clients:
+ * - if a relay's fingerprint doesn't match, log a warning;
+ * - if we don't have updated relay fingerprints from a recent consensus, and
+ * a fallback directory mirror's hard-coded fingerprint has changed, log an
+ * info explaining that we will try another fallback.
+ *
+ * If we're testing reachability, remember what we learned.
+ *
+ * Return 0 on success, -1 on failure.
+ */
+int
+connection_or_client_learned_peer_id(or_connection_t *conn,
+ const uint8_t *rsa_peer_id,
+ const ed25519_public_key_t *ed_peer_id)
+{
+ const or_options_t *options = get_options();
+ channel_tls_t *chan_tls = conn->chan;
+ channel_t *chan = channel_tls_to_base(chan_tls);
+ int changed_identity = 0;
+ tor_assert(chan);
+
+ const int expected_rsa_key =
+ ! tor_digest_is_zero(conn->identity_digest);
+ const int expected_ed_key =
+ ! ed25519_public_key_is_zero(&chan->ed25519_identity);
+
+ log_info(LD_HANDSHAKE, "learned peer id for %p (%s): %s, %s",
+ conn,
+ safe_str_client(conn->base_.address),
+ hex_str((const char*)rsa_peer_id, DIGEST_LEN),
+ ed25519_fmt(ed_peer_id));
+
+ if (! expected_rsa_key && ! expected_ed_key) {
+ log_info(LD_HANDSHAKE, "(we had no ID in mind when we made this "
+ "connection.");
+ connection_or_set_identity_digest(conn,
+ (const char*)rsa_peer_id, ed_peer_id);
+ tor_free(conn->nickname);
+ conn->nickname = tor_malloc(HEX_DIGEST_LEN+2);
+ conn->nickname[0] = '$';
+ base16_encode(conn->nickname+1, HEX_DIGEST_LEN+1,
+ conn->identity_digest, DIGEST_LEN);
+ log_info(LD_HANDSHAKE, "Connected to router %s at %s:%d without knowing "
+ "its key. Hoping for the best.",
+ conn->nickname, conn->base_.address, conn->base_.port);
+ /* if it's a bridge and we didn't know its identity fingerprint, now
+ * we do -- remember it for future attempts. */
+ learned_router_identity(&conn->base_.addr, conn->base_.port,
+ (const char*)rsa_peer_id, ed_peer_id);
+ changed_identity = 1;
+ }
+
+ const int rsa_mismatch = expected_rsa_key &&
+ tor_memneq(rsa_peer_id, conn->identity_digest, DIGEST_LEN);
+ /* It only counts as an ed25519 mismatch if we wanted an ed25519 identity
+ * and didn't get it. It's okay if we get one that we didn't ask for. */
+ const int ed25519_mismatch =
+ expected_ed_key &&
+ (ed_peer_id == NULL ||
+ ! ed25519_pubkey_eq(&chan->ed25519_identity, ed_peer_id));
+
+ if (rsa_mismatch || ed25519_mismatch) {
+ /* I was aiming for a particular digest. I didn't get it! */
+ char seen_rsa[HEX_DIGEST_LEN+1];
+ char expected_rsa[HEX_DIGEST_LEN+1];
+ char seen_ed[ED25519_BASE64_LEN+1];
+ char expected_ed[ED25519_BASE64_LEN+1];
+ base16_encode(seen_rsa, sizeof(seen_rsa),
+ (const char*)rsa_peer_id, DIGEST_LEN);
+ base16_encode(expected_rsa, sizeof(expected_rsa), conn->identity_digest,
+ DIGEST_LEN);
+ if (ed_peer_id) {
+ ed25519_public_to_base64(seen_ed, ed_peer_id);
+ } else {
+ strlcpy(seen_ed, "no ed25519 key", sizeof(seen_ed));
+ }
+ if (! ed25519_public_key_is_zero(&chan->ed25519_identity)) {
+ ed25519_public_to_base64(expected_ed, &chan->ed25519_identity);
+ } else {
+ strlcpy(expected_ed, "no ed25519 key", sizeof(expected_ed));
+ }
+ const int using_hardcoded_fingerprints =
+ !networkstatus_get_reasonably_live_consensus(time(NULL),
+ usable_consensus_flavor());
+ const int is_fallback_fingerprint = router_digest_is_fallback_dir(
+ conn->identity_digest);
+ const int is_authority_fingerprint = router_digest_is_trusted_dir(
+ conn->identity_digest);
+ int severity;
+ const char *extra_log = "";
+
+ if (server_mode(options)) {
+ severity = LOG_PROTOCOL_WARN;
+ } else {
+ if (using_hardcoded_fingerprints) {
+ /* We need to do the checks in this order, because the list of
+ * fallbacks includes the list of authorities */
+ if (is_authority_fingerprint) {
+ severity = LOG_WARN;
+ } else if (is_fallback_fingerprint) {
+ /* we expect a small number of fallbacks to change from their
+ * hard-coded fingerprints over the life of a release */
+ severity = LOG_INFO;
+ extra_log = " Tor will try a different fallback.";
+ } else {
+ /* it's a bridge, it's either a misconfiguration, or unexpected */
+ severity = LOG_WARN;
+ }
+ } else {
+ /* a relay has changed its fingerprint from the one in the consensus */
+ severity = LOG_WARN;
+ }
+ }
+
+ log_fn(severity, LD_HANDSHAKE,
+ "Tried connecting to router at %s:%d, but RSA identity key was not "
+ "as expected: wanted %s + %s but got %s + %s.%s",
+ conn->base_.address, conn->base_.port,
+ expected_rsa, expected_ed, seen_rsa, seen_ed, extra_log);
+
+ /* Tell the new guard API about the channel failure */
+ entry_guard_chan_failed(TLS_CHAN_TO_BASE(conn->chan));
+ control_event_or_conn_status(conn, OR_CONN_EVENT_FAILED,
+ END_OR_CONN_REASON_OR_IDENTITY);
+ if (!authdir_mode_tests_reachability(options))
+ control_event_bootstrap_prob_or(
+ "Unexpected identity in router certificate",
+ END_OR_CONN_REASON_OR_IDENTITY,
+ conn);
+ return -1;
+ }
+
+ if (!expected_ed_key && ed_peer_id) {
+ log_info(LD_HANDSHAKE, "(we had no Ed25519 ID in mind when we made this "
+ "connection.");
+ connection_or_set_identity_digest(conn,
+ (const char*)rsa_peer_id, ed_peer_id);
+ changed_identity = 1;
+ }
+
+ if (changed_identity) {
+ /* If we learned an identity for this connection, then we might have
+ * just discovered it to be canonical. */
+ connection_or_check_canonicity(conn, conn->handshake_state->started_here);
+ }
+
+ if (authdir_mode_tests_reachability(options)) {
+ dirserv_orconn_tls_done(&conn->base_.addr, conn->base_.port,
+ (const char*)rsa_peer_id, ed_peer_id);
+ }
+
+ return 0;
+}
+
+/** Return when we last used this channel for client activity (origin
+ * circuits). This is called from connection.c, since client_used is now one
+ * of the timestamps in channel_t */
+
+time_t
+connection_or_client_used(or_connection_t *conn)
+{
+ tor_assert(conn);
+
+ if (conn->chan) {
+ return channel_when_last_client(TLS_CHAN_TO_BASE(conn->chan));
+ } else return 0;
+}
+
+/** The v1/v2 TLS handshake is finished.
+ *
+ * Make sure we are happy with the peer we just handshaked with.
+ *
+ * If they initiated the connection, make sure they're not already connected,
+ * then initialize conn from the information we have about the router.
+ *
+ * If all is successful, call circuit_n_conn_done() to handle events
+ * that have been pending on the TLS handshake completion. Also mark the
+ * directory as dirty (only matters if we're an authdirserver).
+ *
+ * If this is a v2 TLS handshake, send a versions cell.
+ */
+static int
+connection_tls_finish_handshake(or_connection_t *conn)
+{
+ char digest_rcvd[DIGEST_LEN];
+ int started_here = connection_or_nonopen_was_started_here(conn);
+
+ tor_assert(!started_here);
+
+ log_debug(LD_HANDSHAKE,"%s tls handshake on %p with %s done, using "
+ "ciphersuite %s. verifying.",
+ started_here?"outgoing":"incoming",
+ conn,
+ safe_str_client(conn->base_.address),
+ tor_tls_get_ciphersuite_name(conn->tls));
+
+ if (connection_or_check_valid_tls_handshake(conn, started_here,
+ digest_rcvd) < 0)
+ return -1;
+
+ circuit_build_times_network_is_live(get_circuit_build_times_mutable());
+
+ if (tor_tls_used_v1_handshake(conn->tls)) {
+ conn->link_proto = 1;
+ connection_or_init_conn_from_address(conn, &conn->base_.addr,
+ conn->base_.port, digest_rcvd,
+ NULL, 0);
+ tor_tls_block_renegotiation(conn->tls);
+ rep_hist_note_negotiated_link_proto(1, started_here);
+ return connection_or_set_state_open(conn);
+ } else {
+ connection_or_change_state(conn, OR_CONN_STATE_OR_HANDSHAKING_V2);
+ if (connection_init_or_handshake_state(conn, started_here) < 0)
+ return -1;
+ connection_or_init_conn_from_address(conn, &conn->base_.addr,
+ conn->base_.port, digest_rcvd,
+ NULL, 0);
+ return connection_or_send_versions(conn, 0);
+ }
+}
+
+/**
+ * Called as client when initial TLS handshake is done, and we notice
+ * that we got a v3-handshake signalling certificate from the server.
+ * Set up structures, do bookkeeping, and send the versions cell.
+ * Return 0 on success and -1 on failure.
+ */
+static int
+connection_or_launch_v3_or_handshake(or_connection_t *conn)
+{
+ tor_assert(connection_or_nonopen_was_started_here(conn));
+
+ circuit_build_times_network_is_live(get_circuit_build_times_mutable());
+
+ connection_or_change_state(conn, OR_CONN_STATE_OR_HANDSHAKING_V3);
+ if (connection_init_or_handshake_state(conn, 1) < 0)
+ return -1;
+
+ return connection_or_send_versions(conn, 1);
+}
+
+/** Allocate a new connection handshake state for the connection
+ * <b>conn</b>. Return 0 on success, -1 on failure. */
+int
+connection_init_or_handshake_state(or_connection_t *conn, int started_here)
+{
+ or_handshake_state_t *s;
+ if (conn->handshake_state) {
+ log_warn(LD_BUG, "Duplicate call to connection_init_or_handshake_state!");
+ return 0;
+ }
+ s = conn->handshake_state = tor_malloc_zero(sizeof(or_handshake_state_t));
+ s->started_here = started_here ? 1 : 0;
+ s->digest_sent_data = 1;
+ s->digest_received_data = 1;
+ if (! started_here && get_current_link_cert_cert()) {
+ s->own_link_cert = tor_cert_dup(get_current_link_cert_cert());
+ }
+ s->certs = or_handshake_certs_new();
+ s->certs->started_here = s->started_here;
+ return 0;
+}
+
+/** Free all storage held by <b>state</b>. */
+void
+or_handshake_state_free_(or_handshake_state_t *state)
+{
+ if (!state)
+ return;
+ crypto_digest_free(state->digest_sent);
+ crypto_digest_free(state->digest_received);
+ or_handshake_certs_free(state->certs);
+ tor_cert_free(state->own_link_cert);
+ memwipe(state, 0xBE, sizeof(or_handshake_state_t));
+ tor_free(state);
+}
+
+/**
+ * Remember that <b>cell</b> has been transmitted (if <b>incoming</b> is
+ * false) or received (if <b>incoming</b> is true) during a V3 handshake using
+ * <b>state</b>.
+ *
+ * (We don't record the cell, but we keep a digest of everything sent or
+ * received during the v3 handshake, and the client signs it in an
+ * authenticate cell.)
+ */
+void
+or_handshake_state_record_cell(or_connection_t *conn,
+ or_handshake_state_t *state,
+ const cell_t *cell,
+ int incoming)
+{
+ size_t cell_network_size = get_cell_network_size(conn->wide_circ_ids);
+ crypto_digest_t *d, **dptr;
+ packed_cell_t packed;
+ if (incoming) {
+ if (!state->digest_received_data)
+ return;
+ } else {
+ if (!state->digest_sent_data)
+ return;
+ }
+ if (!incoming) {
+ log_warn(LD_BUG, "We shouldn't be sending any non-variable-length cells "
+ "while making a handshake digest. But we think we are sending "
+ "one with type %d.", (int)cell->command);
+ }
+ dptr = incoming ? &state->digest_received : &state->digest_sent;
+ if (! *dptr)
+ *dptr = crypto_digest256_new(DIGEST_SHA256);
+
+ d = *dptr;
+ /* Re-packing like this is a little inefficient, but we don't have to do
+ this very often at all. */
+ cell_pack(&packed, cell, conn->wide_circ_ids);
+ crypto_digest_add_bytes(d, packed.body, cell_network_size);
+ memwipe(&packed, 0, sizeof(packed));
+}
+
+/** Remember that a variable-length <b>cell</b> has been transmitted (if
+ * <b>incoming</b> is false) or received (if <b>incoming</b> is true) during a
+ * V3 handshake using <b>state</b>.
+ *
+ * (We don't record the cell, but we keep a digest of everything sent or
+ * received during the v3 handshake, and the client signs it in an
+ * authenticate cell.)
+ */
+void
+or_handshake_state_record_var_cell(or_connection_t *conn,
+ or_handshake_state_t *state,
+ const var_cell_t *cell,
+ int incoming)
+{
+ crypto_digest_t *d, **dptr;
+ int n;
+ char buf[VAR_CELL_MAX_HEADER_SIZE];
+ if (incoming) {
+ if (!state->digest_received_data)
+ return;
+ } else {
+ if (!state->digest_sent_data)
+ return;
+ }
+ dptr = incoming ? &state->digest_received : &state->digest_sent;
+ if (! *dptr)
+ *dptr = crypto_digest256_new(DIGEST_SHA256);
+
+ d = *dptr;
+
+ n = var_cell_pack_header(cell, buf, conn->wide_circ_ids);
+ crypto_digest_add_bytes(d, buf, n);
+ crypto_digest_add_bytes(d, (const char *)cell->payload, cell->payload_len);
+
+ memwipe(buf, 0, sizeof(buf));
+}
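+
+/* For orientation, var_cell_pack_header() above emits roughly
+ *
+ *    CircID  [2 or 4 bytes]
+ *    Command [1 byte]
+ *    Length  [2 bytes, big-endian]
+ *
+ * (at most VAR_CELL_MAX_HEADER_SIZE bytes); the payload is digested
+ * separately right after it. */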
+
+/** Set <b>conn</b>'s state to OR_CONN_STATE_OPEN, and tell other subsystems
+ * as appropriate. Called when we are done with all TLS and OR handshaking.
+ */
+int
+connection_or_set_state_open(or_connection_t *conn)
+{
+ connection_or_change_state(conn, OR_CONN_STATE_OPEN);
+ control_event_or_conn_status(conn, OR_CONN_EVENT_CONNECTED, 0);
+
+ /* Link protocol 3 appeared in Tor 0.2.3.6-alpha, so any connection
+ * that uses an earlier link protocol should not be treated as a relay. */
+ if (conn->link_proto < 3) {
+ channel_mark_client(TLS_CHAN_TO_BASE(conn->chan));
+ }
+
+ or_handshake_state_free(conn->handshake_state);
+ conn->handshake_state = NULL;
+ connection_start_reading(TO_CONN(conn));
+
+ return 0;
+}
+
+/** Pack <b>cell</b> into wire-format, and write it onto <b>conn</b>'s outbuf.
+ * For cells that use or affect a circuit, this should only be called by
+ * connection_or_flush_from_first_active_circuit().
+ */
+void
+connection_or_write_cell_to_buf(const cell_t *cell, or_connection_t *conn)
+{
+ packed_cell_t networkcell;
+ size_t cell_network_size = get_cell_network_size(conn->wide_circ_ids);
+
+ tor_assert(cell);
+ tor_assert(conn);
+
+ cell_pack(&networkcell, cell, conn->wide_circ_ids);
+
+ rep_hist_padding_count_write(PADDING_TYPE_TOTAL);
+ if (cell->command == CELL_PADDING)
+ rep_hist_padding_count_write(PADDING_TYPE_CELL);
+
+ connection_buf_add(networkcell.body, cell_network_size, TO_CONN(conn));
+
+ /* Touch the channel's active timestamp if there is one */
+ if (conn->chan) {
+ channel_timestamp_active(TLS_CHAN_TO_BASE(conn->chan));
+
+ if (TLS_CHAN_TO_BASE(conn->chan)->currently_padding) {
+ rep_hist_padding_count_write(PADDING_TYPE_ENABLED_TOTAL);
+ if (cell->command == CELL_PADDING)
+ rep_hist_padding_count_write(PADDING_TYPE_ENABLED_CELL);
+ }
+ }
+
+ if (conn->base_.state == OR_CONN_STATE_OR_HANDSHAKING_V3)
+ or_handshake_state_record_cell(conn, conn->handshake_state, cell, 0);
+}
+
+/** Pack a variable-length <b>cell</b> into wire-format, and write it onto
+ * <b>conn</b>'s outbuf. Right now, this <em>DOES NOT</em> support cells that
+ * affect a circuit.
+ */
+MOCK_IMPL(void,
+connection_or_write_var_cell_to_buf,(const var_cell_t *cell,
+ or_connection_t *conn))
+{
+ int n;
+ char hdr[VAR_CELL_MAX_HEADER_SIZE];
+ tor_assert(cell);
+ tor_assert(conn);
+ n = var_cell_pack_header(cell, hdr, conn->wide_circ_ids);
+ connection_buf_add(hdr, n, TO_CONN(conn));
+ connection_buf_add((char*)cell->payload,
+ cell->payload_len, TO_CONN(conn));
+ if (conn->base_.state == OR_CONN_STATE_OR_HANDSHAKING_V3)
+ or_handshake_state_record_var_cell(conn, conn->handshake_state, cell, 0);
+
+ /* Touch the channel's active timestamp if there is one */
+ if (conn->chan)
+ channel_timestamp_active(TLS_CHAN_TO_BASE(conn->chan));
+}
+
+/** See whether there's a variable-length cell waiting on <b>or_conn</b>'s
+ * inbuf. Return values as for fetch_var_cell_from_buf(). */
+static int
+connection_fetch_var_cell_from_buf(or_connection_t *or_conn, var_cell_t **out)
+{
+ connection_t *conn = TO_CONN(or_conn);
+ return fetch_var_cell_from_buf(conn->inbuf, out, or_conn->link_proto);
+}
+
+/** Process cells from <b>conn</b>'s inbuf.
+ *
+ * Loop: while inbuf contains a cell, pull it off the inbuf, unpack it,
+ * and hand it to command_process_cell().
+ *
+ * Always return 0.
+ */
+static int
+connection_or_process_cells_from_inbuf(or_connection_t *conn)
+{
+ var_cell_t *var_cell;
+
+ /*
+ * Note on memory management for incoming cells: below the channel layer,
+ * we shouldn't need to consider its internal queueing/copying logic. It
+ * is safe to pass cells to it on the stack or on the heap, but in the
+ * latter case we must be sure we free them later.
+ *
+ * The incoming cell queue code in channel.c will (in the common case)
+ * decide it can pass them to the upper layer immediately, in which case
+ * those functions may run directly on the cell pointers we pass here, or
+ * it may decide to queue them, in which case it will allocate its own
+ * buffer and copy the cell.
+ */
+
+ while (1) {
+ log_debug(LD_OR,
+ TOR_SOCKET_T_FORMAT": starting, inbuf_datalen %d "
+ "(%d pending in tls object).",
+ conn->base_.s,(int)connection_get_inbuf_len(TO_CONN(conn)),
+ tor_tls_get_pending_bytes(conn->tls));
+ if (connection_fetch_var_cell_from_buf(conn, &var_cell)) {
+ if (!var_cell)
+ return 0; /* not yet. */
+
+ /* Touch the channel's active timestamp if there is one */
+ if (conn->chan)
+ channel_timestamp_active(TLS_CHAN_TO_BASE(conn->chan));
+
+ circuit_build_times_network_is_live(get_circuit_build_times_mutable());
+ channel_tls_handle_var_cell(var_cell, conn);
+ var_cell_free(var_cell);
+ } else {
+ const int wide_circ_ids = conn->wide_circ_ids;
+ size_t cell_network_size = get_cell_network_size(conn->wide_circ_ids);
+ char buf[CELL_MAX_NETWORK_SIZE];
+ cell_t cell;
+ if (connection_get_inbuf_len(TO_CONN(conn))
+ < cell_network_size) /* whole response available? */
+ return 0; /* not yet */
+
+ /* Touch the channel's active timestamp if there is one */
+ if (conn->chan)
+ channel_timestamp_active(TLS_CHAN_TO_BASE(conn->chan));
+
+ circuit_build_times_network_is_live(get_circuit_build_times_mutable());
+ connection_buf_get_bytes(buf, cell_network_size, TO_CONN(conn));
+
+ /* retrieve cell info from buf (create the host-order struct from the
+ * network-order string) */
+ cell_unpack(&cell, buf, wide_circ_ids);
+
+ channel_tls_handle_cell(&cell, conn);
+ }
+ }
+}
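+
+/* Rough taxonomy for the loop above: variable-length cells (VERSIONS, and,
+ * with link protocol 3 or later, VPADDING, CERTS, AUTH_CHALLENGE,
+ * AUTHENTICATE, ...) carry an explicit two-byte length field; every other
+ * command is read as a fixed-size cell of cell_network_size bytes. */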
+
+/** Array of recognized link protocol versions. */
+static const uint16_t or_protocol_versions[] = { 1, 2, 3, 4, 5 };
+/** Number of versions in <b>or_protocol_versions</b>. */
+static const int n_or_protocol_versions =
+ (int)( sizeof(or_protocol_versions)/sizeof(uint16_t) );
+
+/** Return true iff <b>v</b> is a link protocol version that this Tor
+ * implementation believes it can support. */
+int
+is_or_protocol_version_known(uint16_t v)
+{
+ int i;
+ for (i = 0; i < n_or_protocol_versions; ++i) {
+ if (or_protocol_versions[i] == v)
+ return 1;
+ }
+ return 0;
+}
+
+/** Send a VERSIONS cell on <b>conn</b>, telling the other host about the
+ * link protocol versions that this Tor can support.
+ *
+ * If <b>v3_plus</b>, this is part of a V3 protocol handshake, so only
+ * allow protocol version v3 or later. If not <b>v3_plus</b>, this is
+ * not part of a v3 protocol handshake, so don't allow protocol v3 or
+ * later.
+ **/
+int
+connection_or_send_versions(or_connection_t *conn, int v3_plus)
+{
+ var_cell_t *cell;
+ int i;
+ int n_versions = 0;
+ const int min_version = v3_plus ? 3 : 0;
+ const int max_version = v3_plus ? UINT16_MAX : 2;
+ tor_assert(conn->handshake_state &&
+ !conn->handshake_state->sent_versions_at);
+ cell = var_cell_new(n_or_protocol_versions * 2);
+ cell->command = CELL_VERSIONS;
+ for (i = 0; i < n_or_protocol_versions; ++i) {
+ uint16_t v = or_protocol_versions[i];
+ if (v < min_version || v > max_version)
+ continue;
+ set_uint16(cell->payload+(2*n_versions), htons(v));
+ ++n_versions;
+ }
+ cell->payload_len = n_versions * 2;
+
+ connection_or_write_var_cell_to_buf(cell, conn);
+ conn->handshake_state->sent_versions_at = time(NULL);
+
+ var_cell_free(cell);
+ return 0;
+}
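+
+/* Example: with the version table above, a v3_plus VERSIONS cell advertises
+ * versions 3, 4, and 5, each as a big-endian uint16, so its six-byte payload
+ * is 00 03 00 04 00 05; without v3_plus it would carry 00 01 00 02
+ * instead. */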
+
+/** Send a NETINFO cell on <b>conn</b>, telling the other server what we know
+ * about their address, our address, and the current time. */
+MOCK_IMPL(int,
+connection_or_send_netinfo,(or_connection_t *conn))
+{
+ cell_t cell;
+ time_t now = time(NULL);
+ const routerinfo_t *me;
+ int len;
+ uint8_t *out;
+
+ tor_assert(conn->handshake_state);
+
+ if (conn->handshake_state->sent_netinfo) {
+ log_warn(LD_BUG, "Attempted to send an extra netinfo cell on a connection "
+ "where we already sent one.");
+ return 0;
+ }
+
+ memset(&cell, 0, sizeof(cell_t));
+ cell.command = CELL_NETINFO;
+
+ /* Timestamp: included if we're a public relay, or if this is an incoming
+ * connection. */
+ if (public_server_mode(get_options()) || ! conn->is_outgoing)
+ set_uint32(cell.payload, htonl((uint32_t)now));
+
+ /* Their address. */
+ out = cell.payload + 4;
+ /* We use &conn->real_addr below, unless it hasn't yet been set. If it
+ * hasn't yet been set, we know that base_.addr hasn't been tampered with
+ * yet either. */
+ len = append_address_to_payload(out, !tor_addr_is_null(&conn->real_addr)
+ ? &conn->real_addr : &conn->base_.addr);
+ if (len<0)
+ return -1;
+ out += len;
+
+ /* My address -- only include it if I'm a public relay, or if I'm a
+ * bridge and this is an incoming connection. If I'm a bridge and this
+ * is an outgoing connection, act like a normal client and omit it. */
+ if ((public_server_mode(get_options()) || !conn->is_outgoing) &&
+ (me = router_get_my_routerinfo())) {
+ tor_addr_t my_addr;
+ *out++ = 1 + !tor_addr_is_null(&me->ipv6_addr);
+
+ tor_addr_from_ipv4h(&my_addr, me->addr);
+ len = append_address_to_payload(out, &my_addr);
+ if (len < 0)
+ return -1;
+ out += len;
+
+ if (!tor_addr_is_null(&me->ipv6_addr)) {
+ len = append_address_to_payload(out, &me->ipv6_addr);
+ if (len < 0)
+ return -1;
+ }
+ } else {
+ *out = 0;
+ }
+
+ conn->handshake_state->digest_sent_data = 0;
+ conn->handshake_state->sent_netinfo = 1;
+ connection_or_write_cell_to_buf(&cell, conn);
+
+ return 0;
+}
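+
+/* A rough sketch of the NETINFO payload assembled above:
+ *
+ *    Timestamp      [4 bytes; left zero when we choose not to send a time]
+ *    Their address  [type / length / value, via append_address_to_payload()]
+ *    N              [1 byte: number of our own addresses that follow]
+ *    Our addresses  [N type / length / value entries]
+ */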
+
+/** Helper used to add an encoded certificate to a CERTS cell. */
+static void
+add_certs_cell_cert_helper(certs_cell_t *certs_cell,
+ uint8_t cert_type,
+ const uint8_t *cert_encoded,
+ size_t cert_len)
+{
+ tor_assert(cert_len <= UINT16_MAX);
+ certs_cell_cert_t *ccc = certs_cell_cert_new();
+ ccc->cert_type = cert_type;
+ ccc->cert_len = cert_len;
+ certs_cell_cert_setlen_body(ccc, cert_len);
+ memcpy(certs_cell_cert_getarray_body(ccc), cert_encoded, cert_len);
+
+ certs_cell_add_certs(certs_cell, ccc);
+}
+
+/** Add an encoded X509 cert (stored as <b>cert_len</b> bytes at
+ * <b>cert_encoded</b>) to the trunnel certs_cell_t object that we are
+ * building in <b>certs_cell</b>. Set its type field to <b>cert_type</b>.
+ * (If <b>cert</b> is NULL, take no action.) */
+static void
+add_x509_cert(certs_cell_t *certs_cell,
+ uint8_t cert_type,
+ const tor_x509_cert_t *cert)
+{
+ if (NULL == cert)
+ return;
+
+ const uint8_t *cert_encoded = NULL;
+ size_t cert_len;
+ tor_x509_cert_get_der(cert, &cert_encoded, &cert_len);
+
+ add_certs_cell_cert_helper(certs_cell, cert_type, cert_encoded, cert_len);
+}
+
+/** Add an Ed25519 cert from <b>cert</b> to the trunnel certs_cell_t object
+ * that we are building in <b>certs_cell</b>. Set its type field to
+ * <b>cert_type</b>. (If <b>cert</b> is NULL, take no action.) */
+static void
+add_ed25519_cert(certs_cell_t *certs_cell,
+ uint8_t cert_type,
+ const tor_cert_t *cert)
+{
+ if (NULL == cert)
+ return;
+
+ add_certs_cell_cert_helper(certs_cell, cert_type,
+ cert->encoded, cert->encoded_len);
+}
+
+#ifdef TOR_UNIT_TESTS
+int certs_cell_ed25519_disabled_for_testing = 0;
+#else
+#define certs_cell_ed25519_disabled_for_testing 0
+#endif
+
+/** Send a CERTS cell on the connection <b>conn</b>. Return 0 on success, -1
+ * on failure. */
+int
+connection_or_send_certs_cell(or_connection_t *conn)
+{
+ const tor_x509_cert_t *global_link_cert = NULL, *id_cert = NULL;
+ tor_x509_cert_t *own_link_cert = NULL;
+ var_cell_t *cell;
+
+ certs_cell_t *certs_cell = NULL;
+
+ tor_assert(conn->base_.state == OR_CONN_STATE_OR_HANDSHAKING_V3);
+
+ if (! conn->handshake_state)
+ return -1;
+
+ const int conn_in_server_mode = ! conn->handshake_state->started_here;
+
+ /* Get the encoded values of the X509 certificates */
+ if (tor_tls_get_my_certs(conn_in_server_mode,
+ &global_link_cert, &id_cert) < 0)
+ return -1;
+
+ if (conn_in_server_mode) {
+ own_link_cert = tor_tls_get_own_cert(conn->tls);
+ }
+ tor_assert(id_cert);
+
+ certs_cell = certs_cell_new();
+
+ /* Start adding certs. First the link cert or auth1024 cert. */
+ if (conn_in_server_mode) {
+ tor_assert_nonfatal(own_link_cert);
+ add_x509_cert(certs_cell,
+ OR_CERT_TYPE_TLS_LINK, own_link_cert);
+ } else {
+ tor_assert(global_link_cert);
+ add_x509_cert(certs_cell,
+ OR_CERT_TYPE_AUTH_1024, global_link_cert);
+ }
+
+ /* Next the RSA->RSA ID cert */
+ add_x509_cert(certs_cell,
+ OR_CERT_TYPE_ID_1024, id_cert);
+
+ /* Next the Ed25519 certs */
+ add_ed25519_cert(certs_cell,
+ CERTTYPE_ED_ID_SIGN,
+ get_master_signing_key_cert());
+ if (conn_in_server_mode) {
+ tor_assert_nonfatal(conn->handshake_state->own_link_cert ||
+ certs_cell_ed25519_disabled_for_testing);
+ add_ed25519_cert(certs_cell,
+ CERTTYPE_ED_SIGN_LINK,
+ conn->handshake_state->own_link_cert);
+ } else {
+ add_ed25519_cert(certs_cell,
+ CERTTYPE_ED_SIGN_AUTH,
+ get_current_auth_key_cert());
+ }
+
+ /* And finally the crosscert. */
+ {
+ const uint8_t *crosscert=NULL;
+ size_t crosscert_len;
+ get_master_rsa_crosscert(&crosscert, &crosscert_len);
+ if (crosscert) {
+ add_certs_cell_cert_helper(certs_cell,
+ CERTTYPE_RSA1024_ID_EDID,
+ crosscert, crosscert_len);
+ }
+ }
+
+ /* We've added all the certs; make the cell. */
+ certs_cell->n_certs = certs_cell_getlen_certs(certs_cell);
+
+ ssize_t alloc_len = certs_cell_encoded_len(certs_cell);
+ tor_assert(alloc_len >= 0 && alloc_len <= UINT16_MAX);
+ cell = var_cell_new(alloc_len);
+ cell->command = CELL_CERTS;
+ ssize_t enc_len = certs_cell_encode(cell->payload, alloc_len, certs_cell);
+ tor_assert(enc_len > 0 && enc_len <= alloc_len);
+ cell->payload_len = enc_len;
+
+ connection_or_write_var_cell_to_buf(cell, conn);
+ var_cell_free(cell);
+ certs_cell_free(certs_cell);
+ tor_x509_cert_free(own_link_cert);
+
+ return 0;
+}
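+
+/* A rough sketch of the trunnel-encoded CERTS payload built above:
+ *
+ *    N_certs  [1 byte]
+ *    N_certs entries of:
+ *      CertType     [1 byte]
+ *      CertLen      [2 bytes, big-endian]
+ *      Certificate  [CertLen bytes]
+ */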
+
+/** Return true iff <b>challenge_type</b> is an AUTHCHALLENGE type that
+ * we can send and receive. */
+int
+authchallenge_type_is_supported(uint16_t challenge_type)
+{
+ switch (challenge_type) {
+ case AUTHTYPE_RSA_SHA256_TLSSECRET:
+ case AUTHTYPE_ED25519_SHA256_RFC5705:
+ return 1;
+ case AUTHTYPE_RSA_SHA256_RFC5705:
+ default:
+ return 0;
+ }
+}
+
+/** Return true iff <b>challenge_type_a</b> is one that we would rather
+ * use than <b>challenge_type_b</b>. */
+int
+authchallenge_type_is_better(uint16_t challenge_type_a,
+ uint16_t challenge_type_b)
+{
+ /* Any supported type is better than an unsupported one;
+ * all unsupported types are equally bad. */
+ if (!authchallenge_type_is_supported(challenge_type_a))
+ return 0;
+ if (!authchallenge_type_is_supported(challenge_type_b))
+ return 1;
+ /* It happens that types are superior in numerically ascending order.
+ * If that ever changes, this must change too. */
+ return (challenge_type_a > challenge_type_b);
+}
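+
+/* Example: if the peer offers AUTHTYPE_RSA_SHA256_TLSSECRET and
+ * AUTHTYPE_ED25519_SHA256_RFC5705, both supported, the Ed25519 method wins
+ * since its type code is numerically higher; an unsupported method such as
+ * AUTHTYPE_RSA_SHA256_RFC5705 never wins. */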
+
+/** Send an AUTH_CHALLENGE cell on the connection <b>conn</b>. Return 0
+ * on success, -1 on failure. */
+int
+connection_or_send_auth_challenge_cell(or_connection_t *conn)
+{
+ var_cell_t *cell = NULL;
+ int r = -1;
+ tor_assert(conn->base_.state == OR_CONN_STATE_OR_HANDSHAKING_V3);
+
+ if (! conn->handshake_state)
+ return -1;
+
+ auth_challenge_cell_t *ac = auth_challenge_cell_new();
+
+ tor_assert(sizeof(ac->challenge) == 32);
+ crypto_rand((char*)ac->challenge, sizeof(ac->challenge));
+
+ auth_challenge_cell_add_methods(ac, AUTHTYPE_RSA_SHA256_TLSSECRET);
+ /* Disabled, because everything that supports this method also supports
+ * the much-superior ED25519_SHA256_RFC5705 */
+ /* auth_challenge_cell_add_methods(ac, AUTHTYPE_RSA_SHA256_RFC5705); */
+ auth_challenge_cell_add_methods(ac, AUTHTYPE_ED25519_SHA256_RFC5705);
+ auth_challenge_cell_set_n_methods(ac,
+ auth_challenge_cell_getlen_methods(ac));
+
+ cell = var_cell_new(auth_challenge_cell_encoded_len(ac));
+ ssize_t len = auth_challenge_cell_encode(cell->payload, cell->payload_len,
+ ac);
+ if (len != cell->payload_len) {
+ /* LCOV_EXCL_START */
+ log_warn(LD_BUG, "Encoded auth challenge cell length not as expected");
+ goto done;
+ /* LCOV_EXCL_STOP */
+ }
+ cell->command = CELL_AUTH_CHALLENGE;
+
+ connection_or_write_var_cell_to_buf(cell, conn);
+ r = 0;
+
+ done:
+ var_cell_free(cell);
+ auth_challenge_cell_free(ac);
+
+ return r;
+}
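+
+/* A rough sketch of the AUTH_CHALLENGE payload encoded above:
+ *
+ *    Challenge  [32 random bytes]
+ *    N_Methods  [2 bytes, big-endian]
+ *    Methods    [2 bytes each; here the TLSSECRET and Ed25519 methods]
+ */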
+
+/** Compute the main body of an AUTHENTICATE cell that a client can use
+ * to authenticate itself on a v3 handshake for <b>conn</b>. Return it
+ * in a var_cell_t.
+ *
+ * If <b>server</b> is true, only calculate the first
+ * V3_AUTH_FIXED_PART_LEN bytes -- the part of the authenticator that's
+ * determined by the rest of the handshake, and which match the provided value
+ * exactly.
+ *
+ * If <b>server</b> is false and <b>signing_key</b> is NULL, calculate the
+ * first V3_AUTH_BODY_LEN bytes of the authenticator (that is, everything
+ * that should be signed), but don't actually sign it.
+ *
+ * If <b>server</b> is false and <b>signing_key</b> is provided, calculate the
+ * entire authenticator, signed with <b>signing_key</b>.
+ *
+ * Return the newly allocated cell on success, or NULL on failure.
+ */
+var_cell_t *
+connection_or_compute_authenticate_cell_body(or_connection_t *conn,
+ const int authtype,
+ crypto_pk_t *signing_key,
+ const ed25519_keypair_t *ed_signing_key,
+ int server)
+{
+ auth1_t *auth = NULL;
+ auth_ctx_t *ctx = auth_ctx_new();
+ var_cell_t *result = NULL;
+ int old_tlssecrets_algorithm = 0;
+ const char *authtype_str = NULL;
+
+ int is_ed = 0;
+
+ /* assert state is reasonable XXXX */
+ switch (authtype) {
+ case AUTHTYPE_RSA_SHA256_TLSSECRET:
+ authtype_str = "AUTH0001";
+ old_tlssecrets_algorithm = 1;
+ break;
+ case AUTHTYPE_RSA_SHA256_RFC5705:
+ authtype_str = "AUTH0002";
+ break;
+ case AUTHTYPE_ED25519_SHA256_RFC5705:
+ authtype_str = "AUTH0003";
+ is_ed = 1;
+ break;
+ default:
+ tor_assert(0);
+ break;
+ }
+
+ auth = auth1_new();
+ ctx->is_ed = is_ed;
+
+ /* Type: 8 bytes. */
+ memcpy(auth1_getarray_type(auth), authtype_str, 8);
+
+ {
+ const tor_x509_cert_t *id_cert=NULL;
+ const common_digests_t *my_digests, *their_digests;
+ const uint8_t *my_id, *their_id, *client_id, *server_id;
+ if (tor_tls_get_my_certs(server, NULL, &id_cert))
+ goto err;
+ my_digests = tor_x509_cert_get_id_digests(id_cert);
+ their_digests =
+ tor_x509_cert_get_id_digests(conn->handshake_state->certs->id_cert);
+ tor_assert(my_digests);
+ tor_assert(their_digests);
+ my_id = (uint8_t*)my_digests->d[DIGEST_SHA256];
+ their_id = (uint8_t*)their_digests->d[DIGEST_SHA256];
+
+ client_id = server ? their_id : my_id;
+ server_id = server ? my_id : their_id;
+
+ /* Client ID digest: 32 octets. */
+ memcpy(auth->cid, client_id, 32);
+
+ /* Server ID digest: 32 octets. */
+ memcpy(auth->sid, server_id, 32);
+ }
+
+ if (is_ed) {
+ const ed25519_public_key_t *my_ed_id, *their_ed_id;
+ if (!conn->handshake_state->certs->ed_id_sign) {
+ log_warn(LD_OR, "Ed authenticate without Ed ID cert from peer.");
+ goto err;
+ }
+ my_ed_id = get_master_identity_key();
+ their_ed_id = &conn->handshake_state->certs->ed_id_sign->signing_key;
+
+ const uint8_t *cid_ed = (server ? their_ed_id : my_ed_id)->pubkey;
+ const uint8_t *sid_ed = (server ? my_ed_id : their_ed_id)->pubkey;
+
+ memcpy(auth->u1_cid_ed, cid_ed, ED25519_PUBKEY_LEN);
+ memcpy(auth->u1_sid_ed, sid_ed, ED25519_PUBKEY_LEN);
+ }
+
+ {
+ crypto_digest_t *server_d, *client_d;
+ if (server) {
+ server_d = conn->handshake_state->digest_sent;
+ client_d = conn->handshake_state->digest_received;
+ } else {
+ client_d = conn->handshake_state->digest_sent;
+ server_d = conn->handshake_state->digest_received;
+ }
+
+ /* Server log digest : 32 octets */
+ crypto_digest_get_digest(server_d, (char*)auth->slog, 32);
+
+ /* Client log digest : 32 octets */
+ crypto_digest_get_digest(client_d, (char*)auth->clog, 32);
+ }
+
+ {
+ /* Digest of cert used on TLS link : 32 octets. */
+ tor_x509_cert_t *cert = NULL;
+ if (server) {
+ cert = tor_tls_get_own_cert(conn->tls);
+ } else {
+ cert = tor_tls_get_peer_cert(conn->tls);
+ }
+ if (!cert) {
+ log_warn(LD_OR, "Unable to find cert when making %s data.",
+ authtype_str);
+ goto err;
+ }
+
+ memcpy(auth->scert,
+ tor_x509_cert_get_cert_digests(cert)->d[DIGEST_SHA256], 32);
+
+ tor_x509_cert_free(cert);
+ }
+
+ /* HMAC of clientrandom and serverrandom using master key : 32 octets */
+ if (old_tlssecrets_algorithm) {
+ tor_tls_get_tlssecrets(conn->tls, auth->tlssecrets);
+ } else {
+ char label[128];
+ tor_snprintf(label, sizeof(label),
+ "EXPORTER FOR TOR TLS CLIENT BINDING %s", authtype_str);
+ tor_tls_export_key_material(conn->tls, auth->tlssecrets,
+ auth->cid, sizeof(auth->cid),
+ label);
+ }
+
+ /* 8 octets were reserved for the current time, but we're trying to get out
+ * of the habit of sending time around willy-nilly. Fortunately, nothing
+ * checks it. That's followed by 16 bytes of nonce. */
+ crypto_rand((char*)auth->rand, 24);
+
+ ssize_t maxlen = auth1_encoded_len(auth, ctx);
+ if (ed_signing_key && is_ed) {
+ maxlen += ED25519_SIG_LEN;
+ } else if (signing_key && !is_ed) {
+ maxlen += crypto_pk_keysize(signing_key);
+ }
+
+ const int AUTH_CELL_HEADER_LEN = 4; /* 2 bytes of type, 2 bytes of length */
+ result = var_cell_new(AUTH_CELL_HEADER_LEN + maxlen);
+ uint8_t *const out = result->payload + AUTH_CELL_HEADER_LEN;
+ const size_t outlen = maxlen;
+ ssize_t len;
+
+ result->command = CELL_AUTHENTICATE;
+ set_uint16(result->payload, htons(authtype));
+
+ if ((len = auth1_encode(out, outlen, auth, ctx)) < 0) {
+ /* LCOV_EXCL_START */
+ log_warn(LD_BUG, "Unable to encode signed part of AUTH1 data.");
+ goto err;
+ /* LCOV_EXCL_STOP */
+ }
+
+ if (server) {
+ auth1_t *tmp = NULL;
+ ssize_t len2 = auth1_parse(&tmp, out, len, ctx);
+ if (!tmp) {
+ /* LCOV_EXCL_START */
+ log_warn(LD_BUG, "Unable to parse signed part of AUTH1 data that "
+ "we just encoded");
+ goto err;
+ /* LCOV_EXCL_STOP */
+ }
+ result->payload_len = (tmp->end_of_signed - result->payload);
+
+ auth1_free(tmp);
+ if (len2 != len) {
+ /* LCOV_EXCL_START */
+ log_warn(LD_BUG, "Mismatched length when re-parsing AUTH1 data.");
+ goto err;
+ /* LCOV_EXCL_STOP */
+ }
+ goto done;
+ }
+
+ if (ed_signing_key && is_ed) {
+ ed25519_signature_t sig;
+ if (ed25519_sign(&sig, out, len, ed_signing_key) < 0) {
+ /* LCOV_EXCL_START */
+ log_warn(LD_BUG, "Unable to sign ed25519 authentication data");
+ goto err;
+ /* LCOV_EXCL_STOP */
+ }
+ auth1_setlen_sig(auth, ED25519_SIG_LEN);
+ memcpy(auth1_getarray_sig(auth), sig.sig, ED25519_SIG_LEN);
+
+ } else if (signing_key && !is_ed) {
+ auth1_setlen_sig(auth, crypto_pk_keysize(signing_key));
+
+ char d[32];
+ crypto_digest256(d, (char*)out, len, DIGEST_SHA256);
+ int siglen = crypto_pk_private_sign(signing_key,
+ (char*)auth1_getarray_sig(auth),
+ auth1_getlen_sig(auth),
+ d, 32);
+ if (siglen < 0) {
+ log_warn(LD_OR, "Unable to sign AUTH1 data.");
+ goto err;
+ }
+
+ auth1_setlen_sig(auth, siglen);
+ }
+
+ len = auth1_encode(out, outlen, auth, ctx);
+ if (len < 0) {
+ /* LCOV_EXCL_START */
+ log_warn(LD_BUG, "Unable to encode signed AUTH1 data.");
+ goto err;
+ /* LCOV_EXCL_STOP */
+ }
+ tor_assert(len + AUTH_CELL_HEADER_LEN <= result->payload_len);
+ result->payload_len = len + AUTH_CELL_HEADER_LEN;
+ set_uint16(result->payload+2, htons(len));
+
+ goto done;
+
+ err:
+ var_cell_free(result);
+ result = NULL;
+ done:
+ auth1_free(auth);
+ auth_ctx_free(ctx);
+ return result;
+}
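+
+/* A rough sketch of the AUTH1 body assembled above (see tor-spec.txt for
+ * the definitive layout):
+ *
+ *    TYPE            [8 bytes: "AUTH0001", "AUTH0002", or "AUTH0003"]
+ *    CID, SID        [32 bytes each: client/server RSA identity digests]
+ *    CID_ED, SID_ED  [32 bytes each: Ed25519 identities, AUTH0003 only]
+ *    SLOG, CLOG      [32 bytes each: server/client transcript digests]
+ *    SCERT           [32 bytes: digest of the TLS link certificate]
+ *    TLSSECRETS      [32 bytes]
+ *    RAND            [24 bytes: 8 reserved + 16 bytes of nonce]
+ *    SIG             [signature over the above; omitted on the server side]
+ */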
+
+/** Send an AUTHENTICATE cell on the connection <b>conn</b>. Return 0 on
+ * success, -1 on failure */
+MOCK_IMPL(int,
+connection_or_send_authenticate_cell,(or_connection_t *conn, int authtype))
+{
+ var_cell_t *cell;
+ crypto_pk_t *pk = tor_tls_get_my_client_auth_key();
+ /* XXXX make sure we're actually supposed to send this! */
+
+ if (!pk) {
+ log_warn(LD_BUG, "Can't compute authenticate cell: no client auth key");
+ return -1;
+ }
+ if (! authchallenge_type_is_supported(authtype)) {
+ log_warn(LD_BUG, "Tried to send authenticate cell with unknown "
+ "authentication type %d", authtype);
+ return -1;
+ }
+
+ cell = connection_or_compute_authenticate_cell_body(conn,
+ authtype,
+ pk,
+ get_current_auth_keypair(),
+ 0 /* not server */);
+ if (! cell) {
+ /* LCOV_EXCL_START */
+ log_warn(LD_BUG, "Unable to compute authenticate cell!");
+ return -1;
+ /* LCOV_EXCL_STOP */
+ }
+ connection_or_write_var_cell_to_buf(cell, conn);
+ var_cell_free(cell);
+
+ return 0;
+}
diff --git a/src/core/or/connection_or.h b/src/core/or/connection_or.h
new file mode 100644
index 0000000000..2d95fdea18
--- /dev/null
+++ b/src/core/or/connection_or.h
@@ -0,0 +1,165 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file connection_or.h
+ * \brief Header file for connection_or.c.
+ **/
+
+#ifndef TOR_CONNECTION_OR_H
+#define TOR_CONNECTION_OR_H
+
+struct ed25519_public_key_t;
+struct ed25519_keypair_t;
+
+or_connection_t *TO_OR_CONN(connection_t *);
+
+#define OR_CONN_STATE_MIN_ 1
+/** State for a connection to an OR: waiting for connect() to finish. */
+#define OR_CONN_STATE_CONNECTING 1
+/** State for a connection to an OR: waiting for proxy handshake to complete */
+#define OR_CONN_STATE_PROXY_HANDSHAKING 2
+/** State for an OR connection client: SSL is handshaking, not done
+ * yet. */
+#define OR_CONN_STATE_TLS_HANDSHAKING 3
+/** State for a connection to an OR: We're doing a second SSL handshake for
+ * renegotiation purposes. (V2 handshake only.) */
+#define OR_CONN_STATE_TLS_CLIENT_RENEGOTIATING 4
+/** State for a connection at an OR: We're waiting for the client to
+ * renegotiate (to indicate a v2 handshake) or send a versions cell (to
+ * indicate a v3 handshake) */
+#define OR_CONN_STATE_TLS_SERVER_RENEGOTIATING 5
+/** State for an OR connection: We're done with our SSL handshake, we've done
+ * renegotiation, but we haven't yet negotiated link protocol versions and
+ * sent a netinfo cell. */
+#define OR_CONN_STATE_OR_HANDSHAKING_V2 6
+/** State for an OR connection: We're done with our SSL handshake, but we
+ * haven't yet negotiated link protocol versions, done a V3 handshake, and
+ * sent a netinfo cell. */
+#define OR_CONN_STATE_OR_HANDSHAKING_V3 7
+/** State for an OR connection: Ready to send/receive cells. */
+#define OR_CONN_STATE_OPEN 8
+#define OR_CONN_STATE_MAX_ 8
+
+void connection_or_clear_identity(or_connection_t *conn);
+void connection_or_clear_identity_map(void);
+void clear_broken_connection_map(int disable);
+or_connection_t *connection_or_get_for_extend(const char *digest,
+ const tor_addr_t *target_addr,
+ const char **msg_out,
+ int *launch_out);
+
+void connection_or_block_renegotiation(or_connection_t *conn);
+int connection_or_reached_eof(or_connection_t *conn);
+int connection_or_process_inbuf(or_connection_t *conn);
+ssize_t connection_or_num_cells_writeable(or_connection_t *conn);
+int connection_or_flushed_some(or_connection_t *conn);
+int connection_or_finished_flushing(or_connection_t *conn);
+int connection_or_finished_connecting(or_connection_t *conn);
+void connection_or_about_to_close(or_connection_t *conn);
+int connection_or_digest_is_known_relay(const char *id_digest);
+void connection_or_update_token_buckets(smartlist_t *conns,
+ const or_options_t *options);
+
+void connection_or_connect_failed(or_connection_t *conn,
+ int reason, const char *msg);
+void connection_or_notify_error(or_connection_t *conn,
+ int reason, const char *msg);
+MOCK_DECL(or_connection_t *,
+ connection_or_connect,
+ (const tor_addr_t *addr, uint16_t port,
+ const char *id_digest,
+ const struct ed25519_public_key_t *ed_id,
+ channel_tls_t *chan));
+
+void connection_or_close_normally(or_connection_t *orconn, int flush);
+MOCK_DECL(void,connection_or_close_for_error,
+ (or_connection_t *orconn, int flush));
+
+void connection_or_report_broken_states(int severity, int domain);
+
+MOCK_DECL(int,connection_tls_start_handshake,(or_connection_t *conn,
+ int receiving));
+int connection_tls_continue_handshake(or_connection_t *conn);
+void connection_or_set_canonical(or_connection_t *or_conn,
+ int is_canonical);
+
+int connection_init_or_handshake_state(or_connection_t *conn,
+ int started_here);
+void connection_or_init_conn_from_address(or_connection_t *conn,
+ const tor_addr_t *addr,
+ uint16_t port,
+ const char *rsa_id_digest,
+ const struct ed25519_public_key_t *ed_id,
+ int started_here);
+int connection_or_client_learned_peer_id(or_connection_t *conn,
+ const uint8_t *rsa_peer_id,
+ const struct ed25519_public_key_t *ed_peer_id);
+time_t connection_or_client_used(or_connection_t *conn);
+MOCK_DECL(int, connection_or_get_num_circuits, (or_connection_t *conn));
+void or_handshake_state_free_(or_handshake_state_t *state);
+#define or_handshake_state_free(state) \
+ FREE_AND_NULL(or_handshake_state_t, or_handshake_state_free_, (state))
+void or_handshake_state_record_cell(or_connection_t *conn,
+ or_handshake_state_t *state,
+ const cell_t *cell,
+ int incoming);
+void or_handshake_state_record_var_cell(or_connection_t *conn,
+ or_handshake_state_t *state,
+ const var_cell_t *cell,
+ int incoming);
+
+int connection_or_set_state_open(or_connection_t *conn);
+void connection_or_write_cell_to_buf(const cell_t *cell,
+ or_connection_t *conn);
+MOCK_DECL(void,connection_or_write_var_cell_to_buf,(const var_cell_t *cell,
+ or_connection_t *conn));
+int connection_or_send_versions(or_connection_t *conn, int v3_plus);
+MOCK_DECL(int,connection_or_send_netinfo,(or_connection_t *conn));
+int connection_or_send_certs_cell(or_connection_t *conn);
+int connection_or_send_auth_challenge_cell(or_connection_t *conn);
+int authchallenge_type_is_supported(uint16_t challenge_type);
+int authchallenge_type_is_better(uint16_t challenge_type_a,
+ uint16_t challenge_type_b);
+var_cell_t *connection_or_compute_authenticate_cell_body(
+ or_connection_t *conn,
+ const int authtype,
+ crypto_pk_t *signing_key,
+ const struct ed25519_keypair_t *ed_signing_key,
+ int server);
+MOCK_DECL(int,connection_or_send_authenticate_cell,
+ (or_connection_t *conn, int type));
+
+int is_or_protocol_version_known(uint16_t version);
+
+void cell_pack(packed_cell_t *dest, const cell_t *src, int wide_circ_ids);
+int var_cell_pack_header(const var_cell_t *cell, char *hdr_out,
+ int wide_circ_ids);
+var_cell_t *var_cell_new(uint16_t payload_len);
+var_cell_t *var_cell_copy(const var_cell_t *src);
+void var_cell_free_(var_cell_t *cell);
+#define var_cell_free(cell) FREE_AND_NULL(var_cell_t, var_cell_free_, (cell))
+
+/* DOCDOC */
+#define MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS 4
+#define MIN_LINK_PROTO_FOR_CHANNEL_PADDING 5
+#define MAX_LINK_PROTO MIN_LINK_PROTO_FOR_CHANNEL_PADDING
+
+int connection_or_single_set_badness_(time_t now,
+ or_connection_t *or_conn,
+ int force);
+void connection_or_group_set_badness_(smartlist_t *group, int force);
+
+#ifdef CONNECTION_OR_PRIVATE
+STATIC int should_connect_to_relay(const or_connection_t *or_conn);
+STATIC void note_or_connect_failed(const or_connection_t *or_conn);
+#endif
+
+#ifdef TOR_UNIT_TESTS
+extern int certs_cell_ed25519_disabled_for_testing;
+#endif
+
+#endif /* !defined(TOR_CONNECTION_OR_H) */
diff --git a/src/core/or/connection_st.h b/src/core/or/connection_st.h
new file mode 100644
index 0000000000..6c22478689
--- /dev/null
+++ b/src/core/or/connection_st.h
@@ -0,0 +1,149 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef CONNECTION_ST_H
+#define CONNECTION_ST_H
+
+struct buf_t;
+
+/* Values for connection_t.magic: used to make sure that downcasts (casts from
+ * connection_t to foo_connection_t) are safe. */
+#define BASE_CONNECTION_MAGIC 0x7C3C304Eu
+#define OR_CONNECTION_MAGIC 0x7D31FF03u
+#define EDGE_CONNECTION_MAGIC 0xF0374013u
+#define ENTRY_CONNECTION_MAGIC 0xbb4a5703
+#define DIR_CONNECTION_MAGIC 0x9988ffeeu
+#define CONTROL_CONNECTION_MAGIC 0x8abc765du
+#define LISTENER_CONNECTION_MAGIC 0x1a1ac741u
+
+/** Description of a connection to another host or process, and associated
+ * data.
+ *
+ * A connection is named based on what it's connected to -- an "OR
+ * connection" has a Tor node on the other end, an "exit
+ * connection" has a website or other server on the other end, and an
+ * "AP connection" has an application proxy (and thus a user) on the
+ * other end.
+ *
+ * Every connection has a type and a state. Connections never change
+ * their type, but can go through many state changes in their lifetime.
+ *
+ * Every connection has two associated input and output buffers.
+ * Listeners don't use them. For non-listener connections, incoming
+ * data is appended to conn->inbuf, and outgoing data is taken from
+ * conn->outbuf. Connections differ primarily in the functions called
+ * to fill and drain these buffers.
+ */
+struct connection_t {
+ uint32_t magic; /**< For memory debugging: must equal one of
+ * *_CONNECTION_MAGIC. */
+
+ uint8_t state; /**< Current state of this connection. */
+ unsigned int type:5; /**< What kind of connection is this? */
+ unsigned int purpose:5; /**< Only used for DIR and EXIT types currently. */
+
+ /* The next fields are all one-bit booleans. Some are only applicable to
+ * connection subtypes, but we hold them here anyway, to save space.
+ */
+ unsigned int read_blocked_on_bw:1; /**< Boolean: should we start reading
+ * again once the bandwidth throttler allows it? */
+ unsigned int write_blocked_on_bw:1; /**< Boolean: should we start writing
+ * again once the bandwidth throttler allows
+ * writes? */
+ unsigned int hold_open_until_flushed:1; /**< Despite this connection's being
+ * marked for close, do we flush it
+ * before closing it? */
+ unsigned int inbuf_reached_eof:1; /**< Boolean: did read() return 0 on this
+ * conn? */
+ /** Set to 1 when we're inside connection_flushed_some to keep us from
+ * calling connection_handle_write() recursively. */
+ unsigned int in_flushed_some:1;
+ /** True if connection_handle_write is currently running on this connection.
+ */
+ unsigned int in_connection_handle_write:1;
+
+ /* For linked connections:
+ */
+ unsigned int linked:1; /**< True if there is, or has been, a linked_conn. */
+ /** True iff we'd like to be notified about read events from the
+ * linked conn. */
+ unsigned int reading_from_linked_conn:1;
+ /** True iff we're willing to write to the linked conn. */
+ unsigned int writing_to_linked_conn:1;
+ /** True iff we're currently able to read on the linked conn, and our
+ * read_event should be made active with libevent. */
+ unsigned int active_on_link:1;
+ /** True iff we've called connection_close_immediate() on this linked
+ * connection. */
+ unsigned int linked_conn_is_closed:1;
+
+ /** CONNECT/SOCKS proxy client handshake state (for outgoing connections). */
+ unsigned int proxy_state:4;
+
+ /** Our socket; set to TOR_INVALID_SOCKET if this connection is closed,
+ * or has no socket. */
+ tor_socket_t s;
+ int conn_array_index; /**< Index into the global connection array. */
+
+ struct event *read_event; /**< Libevent event structure. */
+ struct event *write_event; /**< Libevent event structure. */
+ struct buf_t *inbuf; /**< Buffer holding data read over this connection. */
+ struct buf_t *outbuf; /**< Buffer holding data to write over this
+ * connection. */
+ size_t outbuf_flushlen; /**< How much data should we try to flush from the
+ * outbuf? */
+ time_t timestamp_last_read_allowed; /**< When was the last time libevent said
+ * we could read? */
+ time_t timestamp_last_write_allowed; /**< When was the last time libevent
+ * said we could write? */
+
+ time_t timestamp_created; /**< When was this connection_t created? */
+
+ int socket_family; /**< Address family of this connection's socket. Usually
+ * AF_INET, but it can also be AF_UNIX, or AF_INET6 */
+ tor_addr_t addr; /**< IP that socket "s" is directly connected to;
+ * may be the IP address for a proxy or pluggable transport,
+ * see "address" for the address of the final destination.
+ */
+ uint16_t port; /**< If non-zero, port that socket "s" is directly connected
+ * to; may be the port for a proxy or pluggable transport,
+ * see "address" for the port at the final destination. */
+ uint16_t marked_for_close; /**< Should we close this conn on the next
+ * iteration of the main loop? (If true, holds
+ * the line number where this connection was
+ * marked.) */
+ const char *marked_for_close_file; /**< For debugging: in which file were
+ * we marked for close? */
+ char *address; /**< FQDN (or IP) and port of the final destination for this
+ * connection; this is always the remote address, it is
+ * passed to a proxy or pluggable transport if one in use.
+ * See "addr" and "port" for the address that socket "s" is
+ * directly connected to.
+ * strdup into this, because free_connection() frees it. */
+ /** Another connection that's connected to this one in lieu of a socket. */
+ struct connection_t *linked_conn;
+
+ /** Unique identifier for this connection on this Tor instance. */
+ uint64_t global_identifier;
+
+ /** Bytes read since last call to control_event_conn_bandwidth_used().
+ * Only used if we're configured to emit CONN_BW events. */
+ uint32_t n_read_conn_bw;
+
+ /** Bytes written since last call to control_event_conn_bandwidth_used().
+ * Only used if we're configured to emit CONN_BW events. */
+ uint32_t n_written_conn_bw;
+};
+
+/** True iff <b>x</b> is an edge connection. */
+#define CONN_IS_EDGE(x) \
+ ((x)->type == CONN_TYPE_EXIT || (x)->type == CONN_TYPE_AP)
+
+/** True iff the purpose of <b>conn</b> means that it's a server-side
+ * directory connection. */
+#define DIR_CONN_IS_SERVER(conn) ((conn)->purpose == DIR_PURPOSE_SERVER)
+
+#endif
diff --git a/src/core/or/cpath_build_state_st.h b/src/core/or/cpath_build_state_st.h
new file mode 100644
index 0000000000..1db7251132
--- /dev/null
+++ b/src/core/or/cpath_build_state_st.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef CIRCUIT_BUILD_STATE_ST_ST_H
+#define CIRCUIT_BUILD_STATE_ST_ST_H
+
+/** Information used to build a circuit. */
+struct cpath_build_state_t {
+ /** Intended length of the final circuit. */
+ int desired_path_len;
+ /** How to extend to the planned exit node. */
+ extend_info_t *chosen_exit;
+ /** Whether every node in the circ must have adequate uptime. */
+ unsigned int need_uptime : 1;
+ /** Whether every node in the circ must have adequate capacity. */
+ unsigned int need_capacity : 1;
+ /** Whether the last hop was picked with exiting in mind. */
+ unsigned int is_internal : 1;
+ /** Did we pick this as a one-hop tunnel (not safe for other streams)?
+ * These are for encrypted dir conns that exit to this router, not
+ * for arbitrary exits from the circuit. */
+ unsigned int onehop_tunnel : 1;
+ /** The crypt_path_t to append after rendezvous: used for rendezvous. */
+ crypt_path_t *pending_final_cpath;
+ /** A ref-counted reference to the crypt_path_t to append after
+ * rendezvous; used on the service side. */
+ crypt_path_reference_t *service_pending_final_cpath_ref;
+ /** How many times has building a circuit for this task failed? */
+ int failure_count;
+ /** At what time should we give up on this task? */
+ time_t expiry_time;
+};
+
+#endif
+
diff --git a/src/core/or/crypt_path_reference_st.h b/src/core/or/crypt_path_reference_st.h
new file mode 100644
index 0000000000..bb0e519233
--- /dev/null
+++ b/src/core/or/crypt_path_reference_st.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef CRYPT_PATH_REFERENCE_ST_H
+#define CRYPT_PATH_REFERENCE_ST_H
+
+/** A reference-counted pointer to a crypt_path_t, used only to share
+ * the final rendezvous cpath to be used on a service-side rendezvous
+ * circuit among multiple circuits built in parallel to the same
+ * destination rendezvous point. */
+struct crypt_path_reference_t {
+ /** The reference count. */
+ unsigned int refcount;
+ /** The pointer. Set to NULL when the crypt_path_t is put into use
+ * on an opened rendezvous circuit. */
+ crypt_path_t *cpath;
+};
+
+#endif
+
diff --git a/src/core/or/crypt_path_st.h b/src/core/or/crypt_path_st.h
new file mode 100644
index 0000000000..0fde1fab0e
--- /dev/null
+++ b/src/core/or/crypt_path_st.h
@@ -0,0 +1,70 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef CRYPT_PATH_ST_H
+#define CRYPT_PATH_ST_H
+
+#include "or/relay_crypto_st.h"
+struct crypto_dh_t;
+
+#define CRYPT_PATH_MAGIC 0x70127012u
+
+struct fast_handshake_state_t;
+struct ntor_handshake_state_t;
+struct crypto_dh_t;
+struct onion_handshake_state_t {
+ uint16_t tag;
+ union {
+ struct fast_handshake_state_t *fast;
+ struct crypto_dh_t *tap;
+ struct ntor_handshake_state_t *ntor;
+ } u;
+};
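+
+/* The tag field above selects which union member is in use; it holds one of
+ * the ONION_HANDSHAKE_TYPE_* constants (TAP, FAST, or NTOR) defined with the
+ * handshake code. */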
+
+/** Holds accounting information for a single step in the layered encryption
+ * performed by a circuit. Used only at the client edge of a circuit. */
+struct crypt_path_t {
+ uint32_t magic;
+
+ /** Cryptographic state used for encrypting and authenticating relay
+ * cells to and from this hop. */
+ relay_crypto_t crypto;
+
+ /** Current state of the handshake as performed with the OR at this
+ * step. */
+ onion_handshake_state_t handshake_state;
+ /** Diffie-Hellman handshake state for performing an introduction
+ * operation. */
+ struct crypto_dh_t *rend_dh_handshake_state;
+
+ /** Negotiated key material shared with the OR at this step. */
+ char rend_circ_nonce[DIGEST_LEN];/* KH in tor-spec.txt */
+
+ /** Information to extend to the OR at this step. */
+ extend_info_t *extend_info;
+
+ /** Is the circuit built to this step? Must be one of:
+ * - CPATH_STATE_CLOSED (The circuit has not been extended to this step)
+ * - CPATH_STATE_AWAITING_KEYS (We have sent an EXTEND/CREATE to this step
+ * and not received an EXTENDED/CREATED)
+ * - CPATH_STATE_OPEN (The circuit has been extended to this step) */
+ uint8_t state;
+#define CPATH_STATE_CLOSED 0
+#define CPATH_STATE_AWAITING_KEYS 1
+#define CPATH_STATE_OPEN 2
+ struct crypt_path_t *next; /**< Link to next crypt_path_t in the circuit.
+ * (The list is circular, so the last node
+ * links to the first.) */
+ struct crypt_path_t *prev; /**< Link to previous crypt_path_t in the
+ * circuit. */
+
+ int package_window; /**< How many cells are we allowed to originate ending
+ * at this step? */
+ int deliver_window; /**< How many cells are we willing to deliver originating
+ * at this step? */
+};
+
+#endif
diff --git a/src/core/or/destroy_cell_queue_st.h b/src/core/or/destroy_cell_queue_st.h
new file mode 100644
index 0000000000..2839b0bd11
--- /dev/null
+++ b/src/core/or/destroy_cell_queue_st.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef DESTROY_CELL_QUEUE_ST_H
+#define DESTROY_CELL_QUEUE_ST_H
+
+/** A single queued destroy cell. */
+struct destroy_cell_t {
+ TOR_SIMPLEQ_ENTRY(destroy_cell_t) next;
+ circid_t circid;
+ uint32_t inserted_timestamp; /**< Time (in timestamp units) when this cell
+ * was inserted */
+ uint8_t reason;
+};
+
+/** A queue of destroy cells on a channel. */
+struct destroy_cell_queue_t {
+ /** Linked list of destroy_cell_t */
+ TOR_SIMPLEQ_HEAD(dcell_simpleq, destroy_cell_t) head;
+ int n; /**< The number of cells in the queue. */
+};
+
+#endif
+
diff --git a/src/core/or/dos.c b/src/core/or/dos.c
new file mode 100644
index 0000000000..d86ede02cb
--- /dev/null
+++ b/src/core/or/dos.c
@@ -0,0 +1,801 @@
+/* Copyright (c) 2018-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file dos.c
+ * \brief Implement the Denial of Service mitigation subsystem.
+ */
+
+#define DOS_PRIVATE
+
+#include "or/or.h"
+#include "or/channel.h"
+#include "or/config.h"
+#include "or/connection.h"
+#include "or/connection_or.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "or/geoip.h"
+#include "or/main.h"
+#include "or/networkstatus.h"
+#include "or/nodelist.h"
+#include "or/relay.h"
+#include "or/router.h"
+
+#include "or/dos.h"
+
+#include "or/or_connection_st.h"
+
+/*
+ * Circuit creation denial of service mitigation.
+ *
+ * Namespace used for this mitigation framework is "dos_cc_" where "cc" is for
+ * Circuit Creation.
+ */
+
+/* Is the circuit creation DoS mitigation enabled? */
+static unsigned int dos_cc_enabled = 0;
+
+/* Consensus parameters. They can be changed when a new consensus arrives.
+ * They are initialized with the hardcoded default values. */
+static uint32_t dos_cc_min_concurrent_conn;
+static uint32_t dos_cc_circuit_rate;
+static uint32_t dos_cc_circuit_burst;
+static dos_cc_defense_type_t dos_cc_defense_type;
+static int32_t dos_cc_defense_time_period;
+
+/* Keep some stats for the heartbeat so we can report out. */
+static uint64_t cc_num_rejected_cells;
+static uint32_t cc_num_marked_addrs;
+
+/*
+ * Concurrent connection denial of service mitigation.
+ *
+ * Namespace used for this mitigation framework is "dos_conn_".
+ */
+
+/* Is the connection DoS mitigation enabled? */
+static unsigned int dos_conn_enabled = 0;
+
+/* Consensus parameters. They can be changed when a new consensus arrives.
+ * They are initialized with the hardcoded default values. */
+static uint32_t dos_conn_max_concurrent_count;
+static dos_conn_defense_type_t dos_conn_defense_type;
+
+/* Keep some stats for the heartbeat so we can report out. */
+static uint64_t conn_num_addr_rejected;
+
+/*
+ * General interface of the denial of service mitigation subsystem.
+ */
+
+/* Keep stats for the heartbeat. */
+static uint64_t num_single_hop_client_refused;
+
+/* Return true iff the circuit creation mitigation is enabled. We look at the
+ * consensus for this; else a default value is returned. */
+MOCK_IMPL(STATIC unsigned int,
+get_param_cc_enabled, (const networkstatus_t *ns))
+{
+ if (get_options()->DoSCircuitCreationEnabled != -1) {
+ return get_options()->DoSCircuitCreationEnabled;
+ }
+
+ return !!networkstatus_get_param(ns, "DoSCircuitCreationEnabled",
+ DOS_CC_ENABLED_DEFAULT, 0, 1);
+}
+
+/* Return the parameter for the minimum number of concurrent connections at
+ * which we'll start counting circuits for a specific client address. */
+STATIC uint32_t
+get_param_cc_min_concurrent_connection(const networkstatus_t *ns)
+{
+ if (get_options()->DoSCircuitCreationMinConnections) {
+ return get_options()->DoSCircuitCreationMinConnections;
+ }
+ return networkstatus_get_param(ns, "DoSCircuitCreationMinConnections",
+ DOS_CC_MIN_CONCURRENT_CONN_DEFAULT,
+ 1, INT32_MAX);
+}
+
+/* Return the circuit rate parameter, that is, how many circuits are allowed
+ * over a given time span. */
+static uint32_t
+get_param_cc_circuit_rate(const networkstatus_t *ns)
+{
+ /* This is in seconds. */
+ if (get_options()->DoSCircuitCreationRate) {
+ return get_options()->DoSCircuitCreationRate;
+ }
+ return networkstatus_get_param(ns, "DoSCircuitCreationRate",
+ DOS_CC_CIRCUIT_RATE_DEFAULT,
+ 1, INT32_MAX);
+}
+
+/* Return the parameter for the maximum circuit count (the burst) allowed on
+ * top of the circuit rate. */
+STATIC uint32_t
+get_param_cc_circuit_burst(const networkstatus_t *ns)
+{
+ if (get_options()->DoSCircuitCreationBurst) {
+ return get_options()->DoSCircuitCreationBurst;
+ }
+ return networkstatus_get_param(ns, "DoSCircuitCreationBurst",
+ DOS_CC_CIRCUIT_BURST_DEFAULT,
+ 1, INT32_MAX);
+}
+
+/* Return the consensus parameter of the circuit creation defense type. */
+static uint32_t
+get_param_cc_defense_type(const networkstatus_t *ns)
+{
+ if (get_options()->DoSCircuitCreationDefenseType) {
+ return get_options()->DoSCircuitCreationDefenseType;
+ }
+ return networkstatus_get_param(ns, "DoSCircuitCreationDefenseType",
+ DOS_CC_DEFENSE_TYPE_DEFAULT,
+ DOS_CC_DEFENSE_NONE, DOS_CC_DEFENSE_MAX);
+}
+
+/* Return the consensus parameter of the defense time period, which is how
+ * long we should defend against a malicious client address. */
+static int32_t
+get_param_cc_defense_time_period(const networkstatus_t *ns)
+{
+ /* Time in seconds. */
+ if (get_options()->DoSCircuitCreationDefenseTimePeriod) {
+ return get_options()->DoSCircuitCreationDefenseTimePeriod;
+ }
+ return networkstatus_get_param(ns, "DoSCircuitCreationDefenseTimePeriod",
+ DOS_CC_DEFENSE_TIME_PERIOD_DEFAULT,
+ 0, INT32_MAX);
+}
+
+/* Return true iff connection mitigation is enabled. We look at the consensus
+ * for this; else a default value is returned. */
+MOCK_IMPL(STATIC unsigned int,
+get_param_conn_enabled, (const networkstatus_t *ns))
+{
+ if (get_options()->DoSConnectionEnabled != -1) {
+ return get_options()->DoSConnectionEnabled;
+ }
+ return !!networkstatus_get_param(ns, "DoSConnectionEnabled",
+ DOS_CONN_ENABLED_DEFAULT, 0, 1);
+}
+
+/* Return the consensus parameter for the maximum concurrent connection
+ * allowed. */
+STATIC uint32_t
+get_param_conn_max_concurrent_count(const networkstatus_t *ns)
+{
+ if (get_options()->DoSConnectionMaxConcurrentCount) {
+ return get_options()->DoSConnectionMaxConcurrentCount;
+ }
+ return networkstatus_get_param(ns, "DoSConnectionMaxConcurrentCount",
+ DOS_CONN_MAX_CONCURRENT_COUNT_DEFAULT,
+ 1, INT32_MAX);
+}
+
+/* Return the consensus parameter of the connection defense type. */
+static uint32_t
+get_param_conn_defense_type(const networkstatus_t *ns)
+{
+ if (get_options()->DoSConnectionDefenseType) {
+ return get_options()->DoSConnectionDefenseType;
+ }
+ return networkstatus_get_param(ns, "DoSConnectionDefenseType",
+ DOS_CONN_DEFENSE_TYPE_DEFAULT,
+ DOS_CONN_DEFENSE_NONE, DOS_CONN_DEFENSE_MAX);
+}
+
+/* Set the circuit creation parameters from the consensus, or to their
+ * defaults if none are present. Called at initialization or when the
+ * consensus changes. */
+static void
+set_dos_parameters(const networkstatus_t *ns)
+{
+ /* Get the default consensus param values. */
+ dos_cc_enabled = get_param_cc_enabled(ns);
+ dos_cc_min_concurrent_conn = get_param_cc_min_concurrent_connection(ns);
+ dos_cc_circuit_rate = get_param_cc_circuit_rate(ns);
+ dos_cc_circuit_burst = get_param_cc_circuit_burst(ns);
+ dos_cc_defense_time_period = get_param_cc_defense_time_period(ns);
+ dos_cc_defense_type = get_param_cc_defense_type(ns);
+
+ /* Connection detection. */
+ dos_conn_enabled = get_param_conn_enabled(ns);
+ dos_conn_max_concurrent_count = get_param_conn_max_concurrent_count(ns);
+ dos_conn_defense_type = get_param_conn_defense_type(ns);
+}
+
+/* Free everything for the circuit creation DoS mitigation subsystem. */
+static void
+cc_free_all(void)
+{
+ /* Once everything is freed, the circuit creation subsystem counts as
+ * disabled. */
+ dos_cc_enabled = 0;
+}
+
+/* Called when the consensus has changed. Do appropriate actions for the
+ * circuit creation subsystem. */
+static void
+cc_consensus_has_changed(const networkstatus_t *ns)
+{
+ /* Looking at the consensus, is the circuit creation subsystem enabled? If
+ * not and it was enabled before, clean it up. */
+ if (dos_cc_enabled && !get_param_cc_enabled(ns)) {
+ cc_free_all();
+ }
+}
+
+/** Return the number of circuits we allow per second under the current
+ * configuration. */
+STATIC uint64_t
+get_circuit_rate_per_second(void)
+{
+ return dos_cc_circuit_rate;
+}
+
+/* Given the circuit creation client statistics object, refill the circuit
+ * bucket if needed. This also works if the bucket was never filled in the
+ * first place. The addr is only used for logging purposes. */
+STATIC void
+cc_stats_refill_bucket(cc_client_stats_t *stats, const tor_addr_t *addr)
+{
+ uint32_t new_circuit_bucket_count;
+ uint64_t num_token, elapsed_time_last_refill = 0, circuit_rate = 0;
+ time_t now;
+ int64_t last_refill_ts;
+
+ tor_assert(stats);
+ tor_assert(addr);
+
+ now = approx_time();
+ last_refill_ts = (int64_t)stats->last_circ_bucket_refill_ts;
+
+ /* If less than a second has elapsed, don't add any tokens.
+ * Note: If a relay's clock is ever 0, any new clients won't get a refill
+ * until the next second. But a relay that thinks it is 1970 will never
+ * validate the public consensus. */
+ if ((int64_t)now == last_refill_ts) {
+ goto done;
+ }
+
+ /* At this point, we know we might need to add tokens to the bucket. We'll
+ * first get the circuit rate, that is, how many circuits we are allowed to
+ * create per second. */
+ circuit_rate = get_circuit_rate_per_second();
+
+ /* We've never filled the bucket, so fill it to the maximum (the burst
+ * value) and we are done.
+ * Note: If a relay's clock is ever 0, all clients that were last refilled
+ * in that zero second will get a full refill here. */
+ if (last_refill_ts == 0) {
+ num_token = dos_cc_circuit_burst;
+ goto end;
+ }
+
+ /* Our clock jumped backward so fill it up to the maximum. Not filling it
+ * could trigger a detection for a valid client. Also, if the clock jumped
+ * negative but we didn't notice until the elapsed time became positive
+ * again, then we potentially spent many seconds not refilling the bucket
+ * when we should have been refilling it. But the fact that we didn't notice
+ * until now means that no circuit creation requests came in during that
+ * time, so the client doesn't end up punished that much from this hopefully
+ * rare situation.*/
+ if ((int64_t)now < last_refill_ts) {
+ /* Use the maximum allowed value of token. */
+ num_token = dos_cc_circuit_burst;
+ goto end;
+ }
+
+ /* How many seconds have elapsed between now and the last refill?
+ * This subtraction can't underflow, because now >= last_refill_ts.
+ * And it can't overflow, because INT64_MAX - (-INT64_MIN) == UINT64_MAX. */
+ elapsed_time_last_refill = (uint64_t)now - last_refill_ts;
+
+ /* If the elapsed time is very large, it means our clock jumped forward.
+ * If the multiplication would overflow, use the maximum allowed value. */
+ if (elapsed_time_last_refill > UINT32_MAX) {
+ num_token = dos_cc_circuit_burst;
+ goto end;
+ }
+
+ /* Compute how many circuits we are allowed in that time frame which we'll
+ * add to the bucket. This can't overflow, because both multiplicands
+ * are less than or equal to UINT32_MAX, and num_token is uint64_t. */
+ num_token = elapsed_time_last_refill * circuit_rate;
+
+ end:
+ /* If the sum would overflow, use the maximum allowed value. */
+ if (num_token > UINT32_MAX - stats->circuit_bucket) {
+ new_circuit_bucket_count = dos_cc_circuit_burst;
+ } else {
+ /* We cap the bucket to the burst value else this could overflow uint32_t
+ * over time. */
+ new_circuit_bucket_count = MIN(stats->circuit_bucket + (uint32_t)num_token,
+ dos_cc_circuit_burst);
+ }
+
+ /* This function is not allowed to make the bucket count larger than the
+ * burst value */
+ tor_assert_nonfatal(new_circuit_bucket_count <= dos_cc_circuit_burst);
+ /* This function is not allowed to make the bucket count smaller, unless it
+ * is decreasing it to a newly configured, lower burst value. We allow the
+ * bucket to stay the same size, in case the circuit rate is zero. */
+ tor_assert_nonfatal(new_circuit_bucket_count >= stats->circuit_bucket ||
+ new_circuit_bucket_count == dos_cc_circuit_burst);
+
+ log_debug(LD_DOS, "DoS address %s has its circuit bucket value: %" PRIu32
+ ". Filling it to %" PRIu32 ". Circuit rate is %" PRIu64
+ ". Elapsed time is %" PRIi64,
+ fmt_addr(addr), stats->circuit_bucket, new_circuit_bucket_count,
+ circuit_rate, (int64_t)elapsed_time_last_refill);
+
+ stats->circuit_bucket = new_circuit_bucket_count;
+ stats->last_circ_bucket_refill_ts = now;
+
+ done:
+ return;
+}
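+
+/* Worked example (hypothetical numbers): with a circuit rate of 3 per second
+ * and a burst of 90, a client whose bucket was last refilled 10 seconds ago
+ * gains 10 * 3 = 30 tokens, and the bucket is then capped so it never
+ * exceeds 90. */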
+
+/* Return true iff the circuit bucket is down to 0 and the number of
+ * concurrent connections is greater than or equal to the minimum threshold
+ * set by the consensus parameter. */
+static int
+cc_has_exhausted_circuits(const dos_client_stats_t *stats)
+{
+ tor_assert(stats);
+ return stats->cc_stats.circuit_bucket == 0 &&
+ stats->concurrent_count >= dos_cc_min_concurrent_conn;
+}
+
+/* Mark the client address by setting a timestamp in the stats object that
+ * tells us until when it is marked as positively detected. */
+static void
+cc_mark_client(cc_client_stats_t *stats)
+{
+ tor_assert(stats);
+ /* We add a random offset of a maximum of half the defense time so it is
+ * less predictable. */
+ stats->marked_until_ts =
+ approx_time() + dos_cc_defense_time_period +
+ crypto_rand_int_range(1, dos_cc_defense_time_period / 2);
+}
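
As a worked example with the defaults defined in dos.h below (a defense
period of 3600 seconds): crypto_rand_int_range(1, 1800) returns an offset in
[1, 1799], since its upper bound is exclusive, so a freshly marked address
stays marked for between 3601 and 5399 seconds.
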
+
+/* Return true iff the given channel address is marked as malicious. This is
+ * called a lot and is part of the fast path of handling cells, so it has to
+ * remain as fast as possible. */
+static int
+cc_channel_addr_is_marked(channel_t *chan)
+{
+ time_t now;
+ tor_addr_t addr;
+ clientmap_entry_t *entry;
+ cc_client_stats_t *stats = NULL;
+
+ if (chan == NULL) {
+ goto end;
+ }
+ /* Must be a client connection else we ignore. */
+ if (!channel_is_client(chan)) {
+ goto end;
+ }
+ /* Without an IP address, nothing can work. */
+ if (!channel_get_addr_if_possible(chan, &addr)) {
+ goto end;
+ }
+
+ /* We are only interested in client connections from the geoip cache. */
+ entry = geoip_lookup_client(&addr, NULL, GEOIP_CLIENT_CONNECT);
+ if (entry == NULL) {
+ /* We can have a connection creating circuits but not tracked by the geoip
+ * cache. Once this DoS subsystem is enabled, we can end up here with no
+ * entry for the channel. */
+ goto end;
+ }
+ now = approx_time();
+ stats = &entry->dos_stats.cc_stats;
+
+ end:
+ return stats && stats->marked_until_ts >= now;
+}
+
+/* Concurrent connection private API. */
+
+/* Free everything for the connection DoS mitigation subsystem. */
+static void
+conn_free_all(void)
+{
+ dos_conn_enabled = 0;
+}
+
+/* Called when the consensus has changed. Do appropriate actions for the
+ * connection mitigation subsystem. */
+static void
+conn_consensus_has_changed(const networkstatus_t *ns)
+{
+ /* Looking at the consensus, is the connection mitigation subsystem enabled?
+ * If not and it was enabled before, clean it up. */
+ if (dos_conn_enabled && !get_param_conn_enabled(ns)) {
+ conn_free_all();
+ }
+}
+
+/* General private API */
+
+/* Return true iff we have at least one DoS detection enabled. This is used to
+ * decide if we need to allocate any kind of high level DoS object. */
+static inline int
+dos_is_enabled(void)
+{
+ return (dos_cc_enabled || dos_conn_enabled);
+}
+
+/* Circuit creation public API. */
+
+/* Called when a CREATE cell is received from the given channel. */
+void
+dos_cc_new_create_cell(channel_t *chan)
+{
+ tor_addr_t addr;
+ clientmap_entry_t *entry;
+
+ tor_assert(chan);
+
+ /* Skip everything if not enabled. */
+ if (!dos_cc_enabled) {
+ goto end;
+ }
+
+ /* Must be a client connection else we ignore. */
+ if (!channel_is_client(chan)) {
+ goto end;
+ }
+ /* Without an IP address, nothing can work. */
+ if (!channel_get_addr_if_possible(chan, &addr)) {
+ goto end;
+ }
+
+ /* We are only interested in client connections from the geoip cache. */
+ entry = geoip_lookup_client(&addr, NULL, GEOIP_CLIENT_CONNECT);
+ if (entry == NULL) {
+ /* We can have a connection creating circuits but not tracked by the geoip
+ * cache. Once this DoS subsystem is enabled, we can end up here with no
+ * entry for the channel. */
+ goto end;
+ }
+
+ /* General comment. Even though the client can already be marked as
+ * malicious, we continue to track statistics. If it keeps going above
+ * threshold while marked, the defense period time will grow longer. There
+ * is really no point in unmarking a client that keeps DoSing us. */
+
+ /* First of all, we'll try to refill the circuit bucket opportunistically
+ * before we assess. */
+ cc_stats_refill_bucket(&entry->dos_stats.cc_stats, &addr);
+
+ /* Take a token out of the circuit bucket if we are above 0 so we don't
+ * underflow the bucket. */
+ if (entry->dos_stats.cc_stats.circuit_bucket > 0) {
+ entry->dos_stats.cc_stats.circuit_bucket--;
+ }
+
+ /* This is the detection. Assess at every CREATE cell if the client should
+ * get marked as malicious. This should be kept as fast as possible. */
+ if (cc_has_exhausted_circuits(&entry->dos_stats)) {
+ /* If this is the first time we mark this entry, log it at debug level.
+ * Under heavy DDoS, logging each time we mark an address would result in
+ * lots and lots of logs. */
+ if (entry->dos_stats.cc_stats.marked_until_ts == 0) {
+ log_debug(LD_DOS, "Detected circuit creation DoS by address: %s",
+ fmt_addr(&addr));
+ cc_num_marked_addrs++;
+ }
+ cc_mark_client(&entry->dos_stats.cc_stats);
+ }
+
+ end:
+ return;
+}
+
+/* Return the defense type that should be used for this circuit.
+ *
+ * This is part of the fast path and called a lot. */
+dos_cc_defense_type_t
+dos_cc_get_defense_type(channel_t *chan)
+{
+ tor_assert(chan);
+
+ /* Skip everything if not enabled. */
+ if (!dos_cc_enabled) {
+ goto end;
+ }
+
+ /* On an OR circuit, we'll check if the previous channel is a marked client
+ * connection detected by our DoS circuit creation mitigation subsystem. */
+ if (cc_channel_addr_is_marked(chan)) {
+ /* We've just assessed that this circuit should trigger a defense for the
+ * cell it has just seen. Note it down. */
+ cc_num_rejected_cells++;
+ return dos_cc_defense_type;
+ }
+
+ end:
+ return DOS_CC_DEFENSE_NONE;
+}
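
A hypothetical sketch of how the two public entry points above are meant to
compose on the CREATE-cell path; the real wiring lives in the cell-handling
code, and channel_send_destroy() with END_CIRC_REASON_TORPROTOCOL is shown
here as an assumed way to refuse the cell:

    /* Hypothetical caller, for illustration only. */
    static void
    handle_create_cell_sketch(const cell_t *cell, channel_t *chan)
    {
      /* Account for this CREATE cell first: refill the bucket, take a
       * token, and possibly mark the client address. */
      dos_cc_new_create_cell(chan);

      /* Then ask whether a defense applies to this channel's address. */
      if (dos_cc_get_defense_type(chan) == DOS_CC_DEFENSE_REFUSE_CELL) {
        /* Refusing the cell means a DESTROY cell is sent back. */
        channel_send_destroy(cell->circ_id, chan,
                             END_CIRC_REASON_TORPROTOCOL);
        return;
      }
      /* ... otherwise, process the CREATE cell normally ... */
    }
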
+
+/* Concurrent connection detection public API. */
+
+/* Return the defense type for the given address: DOS_CONN_DEFENSE_NONE if it
+ * is permitted to open another connection, else a defense value on which the
+ * caller should take appropriate action. */
+dos_conn_defense_type_t
+dos_conn_addr_get_defense_type(const tor_addr_t *addr)
+{
+ clientmap_entry_t *entry;
+
+ tor_assert(addr);
+
+ /* Skip everything if not enabled. */
+ if (!dos_conn_enabled) {
+ goto end;
+ }
+
+ /* We are only interested in client connections from the geoip cache. */
+ entry = geoip_lookup_client(addr, NULL, GEOIP_CLIENT_CONNECT);
+ if (entry == NULL) {
+ goto end;
+ }
+
+ /* Need to be above the maximum concurrent connection count to trigger a
+ * defense. */
+ if (entry->dos_stats.concurrent_count > dos_conn_max_concurrent_count) {
+ conn_num_addr_rejected++;
+ return dos_conn_defense_type;
+ }
+
+ end:
+ return DOS_CONN_DEFENSE_NONE;
+}
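
Similarly, a hypothetical sketch for the connection-accept path; the caller
shape and the use of connection_mark_for_close() are assumptions here, as
the real check lives in the connection handling code:

    /* Hypothetical caller, for illustration only. */
    static int
    maybe_refuse_or_conn_sketch(or_connection_t *or_conn)
    {
      const tor_addr_t *addr = &TO_CONN(or_conn)->addr;

      if (dos_conn_addr_get_defense_type(addr) == DOS_CONN_DEFENSE_CLOSE) {
        /* The defense says to refuse this connection: close it. */
        connection_mark_for_close(TO_CONN(or_conn));
        return -1;
      }
      return 0; /* Connection may proceed. */
    }
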
+
+/* General API */
+
+/* Take any appropriate actions for the given geoip entry that is about to get
+ * freed. This is called for every entry that is being freed.
+ *
+ * This function clears the connection tracked flag if the concurrent count
+ * of the entry is above 0, so that if those connections are later seen by
+ * this subsystem, we won't try to decrement the counter of a new geoip entry
+ * that might have been added for the same address after this call. */
+void
+dos_geoip_entry_about_to_free(const clientmap_entry_t *geoip_ent)
+{
+ tor_assert(geoip_ent);
+
+ /* The count is down to 0, meaning there are no connections right now, so
+ * the geoip entry can safely be removed from the cache. */
+ if (geoip_ent->dos_stats.concurrent_count == 0) {
+ goto end;
+ }
+
+ /* For each connection matching the geoip entry address, we'll clear the
+ * tracked flag because the entry is about to get removed from the geoip
+ * cache. We do not try to decrement if the flag is not set. */
+ SMARTLIST_FOREACH_BEGIN(get_connection_array(), connection_t *, conn) {
+ if (conn->type == CONN_TYPE_OR) {
+ or_connection_t *or_conn = TO_OR_CONN(conn);
+ if (!tor_addr_compare(&geoip_ent->addr, &or_conn->real_addr,
+ CMP_EXACT)) {
+ or_conn->tracked_for_dos_mitigation = 0;
+ }
+ }
+ } SMARTLIST_FOREACH_END(conn);
+
+ end:
+ return;
+}
+
+/* Note down that we've just refused a single hop client. This increments a
+ * counter later used for the heartbeat. */
+void
+dos_note_refuse_single_hop_client(void)
+{
+ num_single_hop_client_refused++;
+}
+
+/* Return true iff single hop client connection (ESTABLISH_RENDEZVOUS) should
+ * be refused. */
+int
+dos_should_refuse_single_hop_client(void)
+{
+ /* If we aren't a public relay, this shouldn't apply to anything. */
+ if (!public_server_mode(get_options())) {
+ return 0;
+ }
+
+ if (get_options()->DoSRefuseSingleHopClientRendezvous != -1) {
+ return get_options()->DoSRefuseSingleHopClientRendezvous;
+ }
+
+ return (int) networkstatus_get_param(NULL,
+ "DoSRefuseSingleHopClientRendezvous",
+ 0 /* default */, 0, 1);
+}
+
+/* Log a heartbeat message with some statistics. */
+void
+dos_log_heartbeat(void)
+{
+ char *conn_msg = NULL;
+ char *cc_msg = NULL;
+ char *single_hop_client_msg = NULL;
+ char *circ_stats_msg = NULL;
+
+ /* Stats number coming from relay.c append_cell_to_circuit_queue(). */
+ tor_asprintf(&circ_stats_msg,
+ " %" PRIu64 " circuits killed with too many cells.",
+ stats_n_circ_max_cell_reached);
+
+ if (dos_cc_enabled) {
+ tor_asprintf(&cc_msg,
+ " %" PRIu64 " circuits rejected,"
+ " %" PRIu32 " marked addresses.",
+ cc_num_rejected_cells, cc_num_marked_addrs);
+ }
+
+ if (dos_conn_enabled) {
+ tor_asprintf(&conn_msg,
+ " %" PRIu64 " connections closed.",
+ conn_num_addr_rejected);
+ }
+
+ if (dos_should_refuse_single_hop_client()) {
+ tor_asprintf(&single_hop_client_msg,
+ " %" PRIu64 " single hop clients refused.",
+ num_single_hop_client_refused);
+ }
+
+ log_notice(LD_HEARTBEAT,
+ "DoS mitigation since startup:%s%s%s%s",
+ circ_stats_msg,
+ (cc_msg != NULL) ? cc_msg : " [cc not enabled]",
+ (conn_msg != NULL) ? conn_msg : " [conn not enabled]",
+ (single_hop_client_msg != NULL) ? single_hop_client_msg : "");
+
+ tor_free(conn_msg);
+ tor_free(cc_msg);
+ tor_free(single_hop_client_msg);
+ tor_free(circ_stats_msg);
+ return;
+}
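
For reference, with both subsystems enabled and single hop refusal active,
the format strings above render a single heartbeat log line of this shape
(wrapped for display here; the counts are illustrative, not real output):

    DoS mitigation since startup: 2 circuits killed with too many cells.
    112 circuits rejected, 3 marked addresses. 4 connections closed.
    1 single hop clients refused.
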
+
+/* Called when a new client connection has been established on the given
+ * address. */
+void
+dos_new_client_conn(or_connection_t *or_conn)
+{
+ clientmap_entry_t *entry;
+
+ tor_assert(or_conn);
+
+ /* Past that point, we know we have at least one DoS detection subsystem
+ * enabled so we'll start allocating stuff. */
+ if (!dos_is_enabled()) {
+ goto end;
+ }
+
+ /* We ignore any known address, meaning an address of a known relay. The
+ * reason to do so is that network reentry is possible, where a client
+ * connection comes from an Exit node. Even once we fix reentry, this is a
+ * robust defense to keep in place. */
+ if (nodelist_probably_contains_address(&or_conn->real_addr)) {
+ goto end;
+ }
+
+ /* We are only interested in client connections from the geoip cache. */
+ entry = geoip_lookup_client(&or_conn->real_addr, NULL,
+ GEOIP_CLIENT_CONNECT);
+ if (BUG(entry == NULL)) {
+ /* Should never happen because we note down the address in the geoip
+ * cache before this is called. */
+ goto end;
+ }
+
+ entry->dos_stats.concurrent_count++;
+ or_conn->tracked_for_dos_mitigation = 1;
+ log_debug(LD_DOS, "Client address %s has now %u concurrent connections.",
+ fmt_addr(&or_conn->real_addr),
+ entry->dos_stats.concurrent_count);
+
+ end:
+ return;
+}
+
+/* Called when a client connection for the given IP address has been closed. */
+void
+dos_close_client_conn(const or_connection_t *or_conn)
+{
+ clientmap_entry_t *entry;
+
+ tor_assert(or_conn);
+
+ /* We only decrement the count for tracked connections, even if the
+ * subsystem has been disabled at runtime, because it might be re-enabled
+ * later and we need to keep the counter synchronized at all times. */
+ if (!or_conn->tracked_for_dos_mitigation) {
+ goto end;
+ }
+
+ /* We are only interested in client connections from the geoip cache. */
+ entry = geoip_lookup_client(&or_conn->real_addr, NULL,
+ GEOIP_CLIENT_CONNECT);
+ if (entry == NULL) {
+ /* This can happen because we can close a connection before the channel
+ * has been noted down in the geoip cache. */
+ goto end;
+ }
+
+ /* Extra super duper safety. Going below 0 means an underflow, which would
+ * most likely lead to a false positive. In theory this should never happen,
+ * but let's be extra safe. */
+ if (BUG(entry->dos_stats.concurrent_count == 0)) {
+ goto end;
+ }
+
+ entry->dos_stats.concurrent_count--;
+ log_debug(LD_DOS, "Client address %s has lost a connection. Concurrent "
+ "connections are now at %u",
+ fmt_addr(&or_conn->real_addr),
+ entry->dos_stats.concurrent_count);
+
+ end:
+ return;
+}
+
+/* Called when the consensus has changed. We might have new consensus
+ * parameters to look at. */
+void
+dos_consensus_has_changed(const networkstatus_t *ns)
+{
+ /* There are two ways to configure this subsystem: at startup through
+ * dos_init(), which is called when the options are parsed, and here through
+ * the consensus. We don't want to enable any DoS mitigation if we aren't a
+ * public relay. */
+ if (!public_server_mode(get_options())) {
+ return;
+ }
+
+ cc_consensus_has_changed(ns);
+ conn_consensus_has_changed(ns);
+
+ /* Whether we were already enabled or have just become enabled, set the
+ * consensus parameters for all subsystems. */
+ set_dos_parameters(ns);
+}
+
+/* Return true iff the DoS mitigation subsystem is enabled. */
+int
+dos_enabled(void)
+{
+ return dos_is_enabled();
+}
+
+/* Free everything from the Denial of Service subsystem. */
+void
+dos_free_all(void)
+{
+ /* Free the circuit creation mitigation subsystem. It is safe to do this
+ * even if it wasn't initialized. */
+ cc_free_all();
+
+ /* Free the connection mitigation subsystem. It is safe to do this even if
+ * it wasn't initialized. */
+ conn_free_all();
+}
+
+/* Initialize the Denial of Service subsystem. */
+void
+dos_init(void)
+{
+ /* To initialize, we only need to get the parameters. */
+ set_dos_parameters(NULL);
+}
diff --git a/src/core/or/dos.h b/src/core/or/dos.h
new file mode 100644
index 0000000000..760ef11057
--- /dev/null
+++ b/src/core/or/dos.h
@@ -0,0 +1,140 @@
+/* Copyright (c) 2018-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file dos.h
+ * \brief Header file for dos.c
+ */
+
+#ifndef TOR_DOS_H
+#define TOR_DOS_H
+
+/* Structure that keeps per-IP stats of client connections. */
+typedef struct cc_client_stats_t {
+ /* Number of allocated circuits remaining for this address. It is
+ * decremented every time a new circuit is seen for this client address and
+ * if the count goes to 0, we have a positive detection. */
+ uint32_t circuit_bucket;
+
+ /* When was the last time we refilled the circuit bucket? This is used to
+ * know if we need to refill the bucket when a new circuit is seen. It is
+ * synchronized using approx_time(). */
+ time_t last_circ_bucket_refill_ts;
+
+ /* This client address was detected to be above the circuit creation rate
+ * and this timestamp indicates until when it should remain marked as
+ * detected so we can apply a defense for the address. It is synchronized
+ * using approx_time(). */
+ time_t marked_until_ts;
+} cc_client_stats_t;
+
+/* This object is a top level object that contains everything related to the
+ * per-IP client DoS mitigation. Because it is per-IP, it is used in the geoip
+ * clientmap_entry_t object. */
+typedef struct dos_client_stats_t {
+ /* Concurrent connection count from the specific address. 2^32 is most
+ * likely far more than the number of allowed file descriptors. */
+ uint32_t concurrent_count;
+
+ /* Circuit creation statistics. This is only used if the circuit creation
+ * subsystem has been enabled (dos_cc_enabled). */
+ cc_client_stats_t cc_stats;
+} dos_client_stats_t;
+
+/* General API. */
+
+/* Stub. */
+struct clientmap_entry_t;
+
+void dos_init(void);
+void dos_free_all(void);
+void dos_consensus_has_changed(const networkstatus_t *ns);
+int dos_enabled(void);
+void dos_log_heartbeat(void);
+void dos_geoip_entry_about_to_free(const struct clientmap_entry_t *geoip_ent);
+
+void dos_new_client_conn(or_connection_t *or_conn);
+void dos_close_client_conn(const or_connection_t *or_conn);
+
+int dos_should_refuse_single_hop_client(void);
+void dos_note_refuse_single_hop_client(void);
+
+/*
+ * Circuit creation DoS mitigation subsystem interface.
+ */
+
+/* DoSCircuitCreationEnabled default. Disabled by default. */
+#define DOS_CC_ENABLED_DEFAULT 0
+/* DoSCircuitCreationDefenseType maps to the dos_cc_defense_type_t enum. */
+#define DOS_CC_DEFENSE_TYPE_DEFAULT DOS_CC_DEFENSE_REFUSE_CELL
+/* DoSCircuitCreationMinConnections default */
+#define DOS_CC_MIN_CONCURRENT_CONN_DEFAULT 3
+/* DoSCircuitCreationRateTenths is 3 per second. */
+#define DOS_CC_CIRCUIT_RATE_DEFAULT 3
+/* DoSCircuitCreationBurst default. */
+#define DOS_CC_CIRCUIT_BURST_DEFAULT 90
+/* DoSCircuitCreationDefenseTimePeriod in seconds. */
+#define DOS_CC_DEFENSE_TIME_PERIOD_DEFAULT (60 * 60)
+
+/* Type of defense that we can use for the circuit creation DoS mitigation. */
+typedef enum dos_cc_defense_type_t {
+ /* No defense used. */
+ DOS_CC_DEFENSE_NONE = 1,
+ /* Refuse any cells which means a DESTROY cell will be sent back. */
+ DOS_CC_DEFENSE_REFUSE_CELL = 2,
+
+ /* Maximum value that can be used. Useful for the boundaries of the
+ * consensus parameter. */
+ DOS_CC_DEFENSE_MAX = 2,
+} dos_cc_defense_type_t;
+
+void dos_cc_new_create_cell(channel_t *channel);
+dos_cc_defense_type_t dos_cc_get_defense_type(channel_t *chan);
+
+/*
+ * Concurrent connection DoS mitigation interface.
+ */
+
+/* DoSConnectionEnabled default. Disabled by default. */
+#define DOS_CONN_ENABLED_DEFAULT 0
+/* DoSConnectionMaxConcurrentCount default. */
+#define DOS_CONN_MAX_CONCURRENT_COUNT_DEFAULT 100
+/* DoSConnectionDefenseType maps to the dos_conn_defense_type_t enum. */
+#define DOS_CONN_DEFENSE_TYPE_DEFAULT DOS_CONN_DEFENSE_CLOSE
+
+/* Type of defense that we can use for the concurrent connection DoS
+ * mitigation. */
+typedef enum dos_conn_defense_type_t {
+ /* No defense used. */
+ DOS_CONN_DEFENSE_NONE = 1,
+ /* Immediately close the connection, meaning refuse it. */
+ DOS_CONN_DEFENSE_CLOSE = 2,
+
+ /* Maximum value that can be used. Useful for the boundaries of the
+ * consensus parameter. */
+ DOS_CONN_DEFENSE_MAX = 2,
+} dos_conn_defense_type_t;
+
+dos_conn_defense_type_t dos_conn_addr_get_defense_type(const tor_addr_t *addr);
+
+#ifdef DOS_PRIVATE
+
+STATIC uint32_t get_param_conn_max_concurrent_count(
+ const networkstatus_t *ns);
+STATIC uint32_t get_param_cc_circuit_burst(const networkstatus_t *ns);
+STATIC uint32_t get_param_cc_min_concurrent_connection(
+ const networkstatus_t *ns);
+
+STATIC uint64_t get_circuit_rate_per_second(void);
+STATIC void cc_stats_refill_bucket(cc_client_stats_t *stats,
+ const tor_addr_t *addr);
+
+MOCK_DECL(STATIC unsigned int, get_param_cc_enabled,
+ (const networkstatus_t *ns));
+MOCK_DECL(STATIC unsigned int, get_param_conn_enabled,
+ (const networkstatus_t *ns));
+
+#endif /* defined(DOS_PRIVATE) */
+
+#endif /* TOR_DOS_H */
+
diff --git a/src/core/or/edge_connection_st.h b/src/core/or/edge_connection_st.h
new file mode 100644
index 0000000000..d58e1c2b8c
--- /dev/null
+++ b/src/core/or/edge_connection_st.h
@@ -0,0 +1,77 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef EDGE_CONNECTION_ST_H
+#define EDGE_CONNECTION_ST_H
+
+#include "or/or.h"
+
+#include "or/connection_st.h"
+
+/** Subtype of connection_t for an "edge connection" -- that is, an entry (ap)
+ * connection, or an exit. */
+struct edge_connection_t {
+ connection_t base_;
+
+ struct edge_connection_t *next_stream; /**< Points to the next stream at this
+ * edge, if any */
+ int package_window; /**< How many more relay cells can I send into the
+ * circuit? */
+ int deliver_window; /**< How many more relay cells can end at me? */
+
+ struct circuit_t *on_circuit; /**< The circuit (if any) that this edge
+ * connection is using. */
+
+ /** A pointer to which node in the circ this conn exits at. Set for AP
+ * connections and for hidden service exit connections. */
+ struct crypt_path_t *cpath_layer;
+ /** What rendezvous service are we querying for (if an AP) or providing (if
+ * an exit)? */
+ rend_data_t *rend_data;
+
+ /* Hidden service connection identifier for edge connections. Used by the HS
+ * client-side code to identify client SOCKS connections and by the
+ * service-side code to match HS circuits with their streams. */
+ struct hs_ident_edge_conn_t *hs_ident;
+
+ uint32_t address_ttl; /**< TTL for address-to-addr mapping on exit
+ * connection. Exit connections only. */
+ uint32_t begincell_flags; /**< Flags sent or received in the BEGIN cell
+ * for this connection */
+
+ streamid_t stream_id; /**< The stream ID used for this edge connection on its
+ * circuit */
+
+ /** The reason why this connection is closing; passed to the controller. */
+ uint16_t end_reason;
+
+ /** Bytes read since last call to control_event_stream_bandwidth_used() */
+ uint32_t n_read;
+
+ /** Bytes written since last call to control_event_stream_bandwidth_used() */
+ uint32_t n_written;
+
+ /** True iff this connection is for a DNS request only. */
+ unsigned int is_dns_request:1;
+ /** True iff this connection is for a PTR DNS request. (exit only) */
+ unsigned int is_reverse_dns_lookup:1;
+
+ unsigned int edge_has_sent_end:1; /**< For debugging; only used on edge
+ * connections. Set once we've set the stream end,
+ * and checked in connection_about_to_close_connection().
+ */
+ /** True iff we've blocked reading until the circuit has fewer queued
+ * cells. */
+ unsigned int edge_blocked_on_circ:1;
+
+ /** Unique ID for directory requests; this used to be in connection_t, but
+ * that's going away and being used on channels instead. We still tag
+ * edge connections with dirreq_id from circuits, so it's copied here. */
+ uint64_t dirreq_id;
+};
+
+#endif
+
diff --git a/src/core/or/entry_connection_st.h b/src/core/or/entry_connection_st.h
new file mode 100644
index 0000000000..2f9676088c
--- /dev/null
+++ b/src/core/or/entry_connection_st.h
@@ -0,0 +1,100 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef ENTRY_CONNECTION_ST_H
+#define ENTRY_CONNECTION_ST_H
+
+#include "or/edge_connection_st.h"
+
+/** Subtype of edge_connection_t for an "entry connection" -- that is, a SOCKS
+ * connection, a DNS request, a TransPort connection or a NATD connection */
+struct entry_connection_t {
+ struct edge_connection_t edge_;
+
+ /** Nickname of planned exit node -- used with .exit support. */
+ /* XXX prop220: we need to make chosen_exit_name able to encode Ed IDs too.
+ * That's logically part of the UI parts for prop220 though. */
+ char *chosen_exit_name;
+
+ socks_request_t *socks_request; /**< SOCKS structure describing request (AP
+ * only.) */
+
+ /* === Isolation related, AP only. === */
+ entry_port_cfg_t entry_cfg;
+ /** AP only: The newnym epoch in which we created this connection. */
+ unsigned nym_epoch;
+
+ /** AP only: The original requested address before we rewrote it. */
+ char *original_dest_address;
+ /* Other fields to isolate on already exist. The ClientAddr is addr. The
+ ClientProtocol is a combination of type and socks_request->
+ socks_version. SocksAuth is socks_request->username/password.
+ DestAddr is in socks_request->address. */
+
+ /** Number of times we've reassigned this application connection to
+ * a new circuit. We keep track because the timeout is longer if we've
+ * already retried several times. */
+ uint8_t num_socks_retries;
+
+ /** For AP connections only: buffer for data that we have sent
+ * optimistically, which we might need to re-send if we have to
+ * retry this connection. */
+ struct buf_t *pending_optimistic_data;
+ /** For AP connections only: buffer for data that we previously sent
+ * optimistically which we are currently re-sending as we retry this
+ * connection. */
+ struct buf_t *sending_optimistic_data;
+
+ /** If this is a DNSPort connection, this field holds the pending DNS
+ * request that we're going to try to answer. */
+ struct evdns_server_request *dns_server_request;
+
+#define DEBUGGING_17659
+
+#ifdef DEBUGGING_17659
+ uint16_t marked_pending_circ_line;
+ const char *marked_pending_circ_file;
+#endif
+
+#define NUM_CIRCUITS_LAUNCHED_THRESHOLD 10
+ /** Number of times we've launched a circuit to handle this stream. If
+ * it gets too high, that could indicate an inconsistency between our
+ * "launch a circuit to handle this stream" logic and our "attach our
+ * stream to one of the available circuits" logic. */
+ unsigned int num_circuits_launched:4;
+
+ /** True iff this stream must attach to a one-hop circuit (e.g. for
+ * begin_dir). */
+ unsigned int want_onehop:1;
+ /** True iff this stream should use a BEGIN_DIR relay command to establish
+ * itself rather than BEGIN (either via onehop or via a whole circuit). */
+ unsigned int use_begindir:1;
+
+ /** For AP connections only. If 1, and we fail to reach the chosen exit,
+ * stop requiring it. */
+ unsigned int chosen_exit_optional:1;
+ /** For AP connections only. If non-zero, this exit node was picked as
+ * a result of the TrackHostExit, and the value decrements every time
+ * we fail to complete a circuit to our chosen exit -- if it reaches
+ * zero, abandon the associated mapaddress. */
+ unsigned int chosen_exit_retries:3;
+
+ /** True iff this is an AP connection that came from a transparent or
+ * NATd connection */
+ unsigned int is_transparent_ap:1;
+
+ /** For AP connections only: Set if this connection's target exit node
+ * allows optimistic data (that is, data sent on this stream before
+ * the exit has sent a CONNECTED cell) and we have chosen to use it.
+ */
+ unsigned int may_use_optimistic_data : 1;
+};
+
+/** Cast an entry_connection_t subtype pointer to an edge_connection_t **/
+#define ENTRY_TO_EDGE_CONN(c) (&(((c))->edge_))
+
+#endif
+
diff --git a/src/core/or/entry_port_cfg_st.h b/src/core/or/entry_port_cfg_st.h
new file mode 100644
index 0000000000..9b07ccb067
--- /dev/null
+++ b/src/core/or/entry_port_cfg_st.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef ENTRY_PORT_CFG_ST_H
+#define ENTRY_PORT_CFG_ST_H
+
+#include "lib/cc/torint.h"
+#include "or/or.h"
+
+struct entry_port_cfg_t {
+ /* Client port types (socks, dns, trans, natd) only: */
+ uint8_t isolation_flags; /**< Zero or more isolation flags */
+ int session_group; /**< A session group, or -1 if this port is not in a
+ * session group. */
+
+ /* Socks only: */
+ /** When both no-auth and user/pass are advertised by a SOCKS client, select
+ * no-auth. */
+ unsigned int socks_prefer_no_auth : 1;
+ /** When ISO_SOCKSAUTH is in use, keep circuits alive indefinitely. */
+ unsigned int socks_iso_keep_alive : 1;
+
+ /* Client port types only: */
+ unsigned int ipv4_traffic : 1;
+ unsigned int ipv6_traffic : 1;
+ unsigned int prefer_ipv6 : 1;
+ unsigned int dns_request : 1;
+ unsigned int onion_traffic : 1;
+
+ /** For a socks listener: should we cache IPv4/IPv6 DNS information that
+ * exit nodes tell us?
+ *
+ * @{ */
+ unsigned int cache_ipv4_answers : 1;
+ unsigned int cache_ipv6_answers : 1;
+ /** @} */
+ /** For a socks listener: if we find an answer in our client-side DNS cache,
+ * should we use it?
+ *
+ * @{ */
+ unsigned int use_cached_ipv4_answers : 1;
+ unsigned int use_cached_ipv6_answers : 1;
+ /** @} */
+ /** For socks listeners: When we can automap an address to IPv4 or IPv6,
+ * do we prefer IPv6? */
+ unsigned int prefer_ipv6_virtaddr : 1;
+
+};
+
+#endif
+
diff --git a/src/core/or/extend_info_st.h b/src/core/or/extend_info_st.h
new file mode 100644
index 0000000000..277766c4d6
--- /dev/null
+++ b/src/core/or/extend_info_st.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef EXTEND_INFO_ST_H
+#define EXTEND_INFO_ST_H
+
+#include "lib/crypt_ops/crypto_curve25519.h"
+#include "lib/crypt_ops/crypto_ed25519.h"
+
+/** Information on router used when extending a circuit. We don't need a
+ * full routerinfo_t to extend: we only need addr:port:keyid to build an OR
+ * connection, and onion_key to create the onionskin. Note that for onehop
+ * general-purpose tunnels, the onion_key is NULL. */
+struct extend_info_t {
+ char nickname[MAX_HEX_NICKNAME_LEN+1]; /**< This router's nickname for
+ * display. */
+ /** Hash of this router's RSA identity key. */
+ char identity_digest[DIGEST_LEN];
+ /** Ed25519 identity for this router, if any. */
+ ed25519_public_key_t ed_identity;
+ uint16_t port; /**< OR port. */
+ tor_addr_t addr; /**< IP address. */
+ crypto_pk_t *onion_key; /**< Current onionskin key. */
+ curve25519_public_key_t curve25519_onion_key;
+};
+
+#endif
diff --git a/src/core/or/git_revision.c b/src/core/or/git_revision.c
new file mode 100644
index 0000000000..be6f67423c
--- /dev/null
+++ b/src/core/or/git_revision.c
@@ -0,0 +1,17 @@
+/* Copyright 2001-2004 Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#include "or/git_revision.h"
+
+/** String describing which Tor Git repository version the source was
+ * built from. This string is generated by a bit of shell kludging in
+ * src/or/include.am, and is usually right.
+ */
+const char tor_git_revision[] =
+#ifndef _MSC_VER
+#include "micro-revision.i"
+#endif
+ "";
+
diff --git a/src/core/or/git_revision.h b/src/core/or/git_revision.h
new file mode 100644
index 0000000000..02070cfd5e
--- /dev/null
+++ b/src/core/or/git_revision.h
@@ -0,0 +1,12 @@
+/* Copyright 2001-2004 Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef TOR_GIT_REVISION_H
+#define TOR_GIT_REVISION_H
+
+extern const char tor_git_revision[];
+
+#endif /* !defined(TOR_GIT_REVISION_H) */
+
diff --git a/src/core/or/listener_connection_st.h b/src/core/or/listener_connection_st.h
new file mode 100644
index 0000000000..7b5aafcb58
--- /dev/null
+++ b/src/core/or/listener_connection_st.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef LISTENER_CONNECTION_ST_H
+#define LISTENER_CONNECTION_ST_H
+
+#include "or/connection_st.h"
+
+/** Subtype of connection_t; used for a listener socket. */
+struct listener_connection_t {
+ connection_t base_;
+
+ /** If the connection is a CONN_TYPE_AP_DNS_LISTENER, this field points
+ * to the evdns_server_port it uses to listen to and answer connections. */
+ struct evdns_server_port *dns_server_port;
+
+ entry_port_cfg_t entry_cfg;
+
+};
+
+#endif
+
diff --git a/src/core/or/or.h b/src/core/or/or.h
new file mode 100644
index 0000000000..826e81e468
--- /dev/null
+++ b/src/core/or/or.h
@@ -0,0 +1,1088 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file or.h
+ * \brief Master header file for Tor-specific functionality.
+ **/
+
+#ifndef TOR_OR_H
+#define TOR_OR_H
+
+#include "orconfig.h"
+#include "lib/cc/torint.h"
+
+#ifdef HAVE_SIGNAL_H
+#include <signal.h>
+#endif
+#ifdef HAVE_TIME_H
+#include <time.h>
+#endif
+
+#include "lib/arch/bytes.h"
+#include "lib/cc/compat_compiler.h"
+#include "lib/cc/torint.h"
+#include "lib/container/map.h"
+#include "lib/container/smartlist.h"
+#include "lib/crypt_ops/crypto.h"
+#include "lib/ctime/di_ops.h"
+#include "lib/defs/dh_sizes.h"
+#include "lib/encoding/binascii.h"
+#include "lib/encoding/cstring.h"
+#include "lib/encoding/time_fmt.h"
+#include "lib/err/torerr.h"
+#include "lib/fs/dir.h"
+#include "lib/fs/files.h"
+#include "lib/fs/mmap.h"
+#include "lib/fs/path.h"
+#include "lib/fs/userdb.h"
+#include "lib/intmath/addsub.h"
+#include "lib/intmath/bits.h"
+#include "lib/intmath/cmp.h"
+#include "lib/intmath/logic.h"
+#include "lib/intmath/muldiv.h"
+#include "lib/log/escape.h"
+#include "lib/log/ratelim.h"
+#include "lib/log/util_bug.h"
+#include "lib/malloc/util_malloc.h"
+#include "lib/net/address.h"
+#include "lib/net/ipv4.h"
+#include "lib/net/ipv6.h"
+#include "lib/net/resolve.h"
+#include "lib/net/socket.h"
+#include "lib/string/compat_ctype.h"
+#include "lib/string/compat_string.h"
+#include "lib/string/parse_int.h"
+#include "lib/string/printf.h"
+#include "lib/string/scanf.h"
+#include "lib/string/util_string.h"
+#include "lib/testsupport/testsupport.h"
+#include "lib/thread/threads.h"
+#include "lib/time/compat_time.h"
+#include "lib/wallclock/approx_time.h"
+#include "lib/wallclock/timeval.h"
+
+#include "ht.h"
+
+// These, more than other includes, are for keeping the other struct
+// definitions working. We should remove them when we minimize our includes.
+#include "or/entry_port_cfg_st.h"
+
+struct ed25519_public_key_t;
+struct curve25519_public_key_t;
+
+/* These signals are defined to help handle_control_signal work.
+ */
+#ifndef SIGHUP
+#define SIGHUP 1
+#endif
+#ifndef SIGINT
+#define SIGINT 2
+#endif
+#ifndef SIGUSR1
+#define SIGUSR1 10
+#endif
+#ifndef SIGUSR2
+#define SIGUSR2 12
+#endif
+#ifndef SIGTERM
+#define SIGTERM 15
+#endif
+/* Controller signals start at a high number so we don't
+ * conflict with system-defined signals. */
+#define SIGNEWNYM 129
+#define SIGCLEARDNSCACHE 130
+#define SIGHEARTBEAT 131
+
+#if (SIZEOF_CELL_T != 0)
+/* On Irix, stdlib.h defines a cell_t type, so we need to make sure
+ * that our stuff always calls cell_t something different. */
+#define cell_t tor_cell_t
+#endif
+
+#ifdef ENABLE_TOR2WEB_MODE
+#define NON_ANONYMOUS_MODE_ENABLED 1
+#endif
+
+/** Helper macro: Given a pointer to to.base_, of type from*, return &to. */
+#define DOWNCAST(to, ptr) ((to*)SUBTYPE_P(ptr, to, base_))
+
+/** Length of longest allowable configured nickname. */
+#define MAX_NICKNAME_LEN 19
+/** Length of a router identity encoded as a hexadecimal digest, plus
+ * possible dollar sign. */
+#define MAX_HEX_NICKNAME_LEN (HEX_DIGEST_LEN+1)
+/** Maximum length of verbose router identifier: dollar sign, hex ID digest,
+ * equal sign or tilde, nickname. */
+#define MAX_VERBOSE_NICKNAME_LEN (1+HEX_DIGEST_LEN+1+MAX_NICKNAME_LEN)
+
+/** For HTTP parsing: Maximum number of bytes we'll accept in the headers
+ * of an HTTP request or response. */
+#define MAX_HEADERS_SIZE 50000
+
+/** Maximum size, in bytes, of a single router descriptor uploaded to us
+ * as a directory authority. Caches and clients fetch whatever descriptors
+ * the authorities tell them to fetch, and don't care about size. */
+#define MAX_DESCRIPTOR_UPLOAD_SIZE 20000
+
+/** Maximum size of a single extrainfo document, as above. */
+#define MAX_EXTRAINFO_UPLOAD_SIZE 50000
+
+/** Minimum lifetime for an onion key in days. */
+#define MIN_ONION_KEY_LIFETIME_DAYS (1)
+
+/** Maximum lifetime for an onion key in days. */
+#define MAX_ONION_KEY_LIFETIME_DAYS (90)
+
+/** Default lifetime for an onion key in days. */
+#define DEFAULT_ONION_KEY_LIFETIME_DAYS (28)
+
+/** Minimum grace period for acceptance of an onion key in days.
+ * The maximum value is defined in proposal #274 as being the current network
+ * consensus parameter for "onion-key-rotation-days". */
+#define MIN_ONION_KEY_GRACE_PERIOD_DAYS (1)
+
+/** Default grace period for acceptance of an onion key in days. */
+#define DEFAULT_ONION_KEY_GRACE_PERIOD_DAYS (7)
+
+/** How often we should check the network consensus if it is time to rotate or
+ * expire onion keys. */
+#define ONION_KEY_CONSENSUS_CHECK_INTERVAL (60*60)
+
+/** How often do we rotate TLS contexts? */
+#define MAX_SSL_KEY_LIFETIME_INTERNAL (2*60*60)
+
+/** How old do we allow a router to get before removing it
+ * from the router list? In seconds. */
+#define ROUTER_MAX_AGE (60*60*48)
+/** How old can a router get before we (as a server) will no longer
+ * consider it live? In seconds. */
+#define ROUTER_MAX_AGE_TO_PUBLISH (60*60*24)
+/** How old do we let a saved descriptor get before force-removing it? */
+#define OLD_ROUTER_DESC_MAX_AGE (60*60*24*5)
+
+/* Proxy client types */
+#define PROXY_NONE 0
+#define PROXY_CONNECT 1
+#define PROXY_SOCKS4 2
+#define PROXY_SOCKS5 3
+/* !!!! If there is ever a PROXY_* type over 3, we must grow the proxy_type
+ * field in or_connection_t */
+
+/* Pluggable transport proxy type. Don't use this in or_connection_t,
+ * instead use the actual underlying proxy type (see above). */
+#define PROXY_PLUGGABLE 4
+
+/** How many circuits do we want simultaneously in-progress to handle
+ * a given stream? */
+#define MIN_CIRCUITS_HANDLING_STREAM 2
+
+/* These RELAY_COMMAND constants define values for relay cell commands, and
+ * must match those defined in tor-spec.txt. */
+#define RELAY_COMMAND_BEGIN 1
+#define RELAY_COMMAND_DATA 2
+#define RELAY_COMMAND_END 3
+#define RELAY_COMMAND_CONNECTED 4
+#define RELAY_COMMAND_SENDME 5
+#define RELAY_COMMAND_EXTEND 6
+#define RELAY_COMMAND_EXTENDED 7
+#define RELAY_COMMAND_TRUNCATE 8
+#define RELAY_COMMAND_TRUNCATED 9
+#define RELAY_COMMAND_DROP 10
+#define RELAY_COMMAND_RESOLVE 11
+#define RELAY_COMMAND_RESOLVED 12
+#define RELAY_COMMAND_BEGIN_DIR 13
+#define RELAY_COMMAND_EXTEND2 14
+#define RELAY_COMMAND_EXTENDED2 15
+
+#define RELAY_COMMAND_ESTABLISH_INTRO 32
+#define RELAY_COMMAND_ESTABLISH_RENDEZVOUS 33
+#define RELAY_COMMAND_INTRODUCE1 34
+#define RELAY_COMMAND_INTRODUCE2 35
+#define RELAY_COMMAND_RENDEZVOUS1 36
+#define RELAY_COMMAND_RENDEZVOUS2 37
+#define RELAY_COMMAND_INTRO_ESTABLISHED 38
+#define RELAY_COMMAND_RENDEZVOUS_ESTABLISHED 39
+#define RELAY_COMMAND_INTRODUCE_ACK 40
+
+/* Reasons why an OR connection is closed. */
+#define END_OR_CONN_REASON_DONE 1
+#define END_OR_CONN_REASON_REFUSED 2 /* connection refused */
+#define END_OR_CONN_REASON_OR_IDENTITY 3
+#define END_OR_CONN_REASON_CONNRESET 4 /* connection reset by peer */
+#define END_OR_CONN_REASON_TIMEOUT 5
+#define END_OR_CONN_REASON_NO_ROUTE 6 /* no route to host/net */
+#define END_OR_CONN_REASON_IO_ERROR 7 /* read/write error */
+#define END_OR_CONN_REASON_RESOURCE_LIMIT 8 /* sockets, buffers, etc */
+#define END_OR_CONN_REASON_PT_MISSING 9 /* PT failed or not available */
+#define END_OR_CONN_REASON_MISC 10
+
+/* Reasons why we (or a remote OR) might close a stream. See tor-spec.txt for
+ * documentation of these. The values must match. */
+#define END_STREAM_REASON_MISC 1
+#define END_STREAM_REASON_RESOLVEFAILED 2
+#define END_STREAM_REASON_CONNECTREFUSED 3
+#define END_STREAM_REASON_EXITPOLICY 4
+#define END_STREAM_REASON_DESTROY 5
+#define END_STREAM_REASON_DONE 6
+#define END_STREAM_REASON_TIMEOUT 7
+#define END_STREAM_REASON_NOROUTE 8
+#define END_STREAM_REASON_HIBERNATING 9
+#define END_STREAM_REASON_INTERNAL 10
+#define END_STREAM_REASON_RESOURCELIMIT 11
+#define END_STREAM_REASON_CONNRESET 12
+#define END_STREAM_REASON_TORPROTOCOL 13
+#define END_STREAM_REASON_NOTDIRECTORY 14
+#define END_STREAM_REASON_ENTRYPOLICY 15
+
+/* These high-numbered end reasons are not part of the official spec,
+ * and are not intended to be put in relay end cells. They are here
+ * to be more informative when sending back socks replies to the
+ * application. */
+/* XXXX 256 is no longer used; feel free to reuse it. */
+/** We were unable to attach the connection to any circuit at all. */
+/* XXXX the ways we use this one don't make a lot of sense. */
+#define END_STREAM_REASON_CANT_ATTACH 257
+/** We can't connect to any directories at all, so we killed our streams
+ * before they can time out. */
+#define END_STREAM_REASON_NET_UNREACHABLE 258
+/** This is a SOCKS connection, and the client used (or misused) the SOCKS
+ * protocol in a way we couldn't handle. */
+#define END_STREAM_REASON_SOCKSPROTOCOL 259
+/** This is a transparent proxy connection, but we can't extract the original
+ * target address:port. */
+#define END_STREAM_REASON_CANT_FETCH_ORIG_DEST 260
+/** This is a connection on the NATD port, and the destination IP:Port was
+ * either ill-formed or out-of-range. */
+#define END_STREAM_REASON_INVALID_NATD_DEST 261
+/** The target address is in a private network (like 127.0.0.1 or 10.0.0.1);
+ * you don't want to do that over a randomly chosen exit */
+#define END_STREAM_REASON_PRIVATE_ADDR 262
+/** This is an HTTP tunnel connection and the client used or misused HTTP in a
+ * way we can't handle.
+ */
+#define END_STREAM_REASON_HTTPPROTOCOL 263
+
+/** Bitwise-and this value with endreason to mask out all flags. */
+#define END_STREAM_REASON_MASK 511
+
+/** Bitwise-or this with the argument to control_event_stream_status
+ * to indicate that the reason came from an END cell. */
+#define END_STREAM_REASON_FLAG_REMOTE 512
+/** Bitwise-or this with the argument to control_event_stream_status
+ * to indicate that we already sent a CLOSED stream event. */
+#define END_STREAM_REASON_FLAG_ALREADY_SENT_CLOSED 1024
+/** Bitwise-or this with endreason to indicate that we already sent
+ * a socks reply, and no further reply needs to be sent from
+ * connection_mark_unattached_ap(). */
+#define END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED 2048
+
+/* 'type' values to use in RESOLVED cells. Specified in tor-spec.txt. */
+#define RESOLVED_TYPE_HOSTNAME 0
+#define RESOLVED_TYPE_IPV4 4
+#define RESOLVED_TYPE_IPV6 6
+#define RESOLVED_TYPE_ERROR_TRANSIENT 0xF0
+#define RESOLVED_TYPE_ERROR 0xF1
+
+/* Negative reasons are internal: we never send them in a DESTROY or TRUNCATE
+ * call; they only go to the controller for tracking */
+
+/* Closing introduction points that were opened in parallel. */
+#define END_CIRC_REASON_IP_NOW_REDUNDANT -4
+
+/** Our post-timeout circuit time measurement period expired.
+ * We must give up now */
+#define END_CIRC_REASON_MEASUREMENT_EXPIRED -3
+
+/** We couldn't build a path for this circuit. */
+#define END_CIRC_REASON_NOPATH -2
+/** Catch-all "other" reason for closing origin circuits. */
+#define END_CIRC_AT_ORIGIN -1
+
+/* Reasons why we (or a remote OR) might close a circuit. See tor-spec.txt
+ * section 5.4 for documentation of these. */
+#define END_CIRC_REASON_MIN_ 0
+#define END_CIRC_REASON_NONE 0
+#define END_CIRC_REASON_TORPROTOCOL 1
+#define END_CIRC_REASON_INTERNAL 2
+#define END_CIRC_REASON_REQUESTED 3
+#define END_CIRC_REASON_HIBERNATING 4
+#define END_CIRC_REASON_RESOURCELIMIT 5
+#define END_CIRC_REASON_CONNECTFAILED 6
+#define END_CIRC_REASON_OR_IDENTITY 7
+#define END_CIRC_REASON_CHANNEL_CLOSED 8
+#define END_CIRC_REASON_FINISHED 9
+#define END_CIRC_REASON_TIMEOUT 10
+#define END_CIRC_REASON_DESTROYED 11
+#define END_CIRC_REASON_NOSUCHSERVICE 12
+#define END_CIRC_REASON_MAX_ 12
+
+/** Bitwise-OR this with the argument to circuit_mark_for_close() or
+ * control_event_circuit_status() to indicate that the reason was
+ * passed through from a destroy or truncate cell. */
+#define END_CIRC_REASON_FLAG_REMOTE 512
+
+/** Length of 'y' portion of 'y.onion' URL. */
+#define REND_SERVICE_ID_LEN_BASE32 16
+
+/** Length of 'y.onion' including '.onion' URL. */
+#define REND_SERVICE_ADDRESS_LEN (16+1+5)
+
+/** Length of a binary-encoded rendezvous service ID. */
+#define REND_SERVICE_ID_LEN 10
+
+/** Time period for which a v2 descriptor will be valid. */
+#define REND_TIME_PERIOD_V2_DESC_VALIDITY (24*60*60)
+
+/** Time period within which two sets of v2 descriptors will be uploaded in
+ * parallel. */
+#define REND_TIME_PERIOD_OVERLAPPING_V2_DESCS (60*60)
+
+/** Number of non-consecutive replicas (i.e. distributed somewhere
+ * in the ring) for a descriptor. */
+#define REND_NUMBER_OF_NON_CONSECUTIVE_REPLICAS 2
+
+/** Number of consecutive replicas for a descriptor. */
+#define REND_NUMBER_OF_CONSECUTIVE_REPLICAS 3
+
+/** Length of v2 descriptor ID (32 base32 chars = 160 bits). */
+#define REND_DESC_ID_V2_LEN_BASE32 BASE32_DIGEST_LEN
+
+/** Length of the base32-encoded secret ID part of versioned hidden service
+ * descriptors. */
+#define REND_SECRET_ID_PART_LEN_BASE32 BASE32_DIGEST_LEN
+
+/** Length of the base32-encoded hash of an introduction point's
+ * identity key. */
+#define REND_INTRO_POINT_ID_LEN_BASE32 BASE32_DIGEST_LEN
+
+/** Length of the descriptor cookie that is used for client authorization
+ * to hidden services. */
+#define REND_DESC_COOKIE_LEN 16
+
+/** Length of the base64-encoded descriptor cookie that is used for
+ * exchanging client authorization between hidden service and client. */
+#define REND_DESC_COOKIE_LEN_BASE64 22
+
+/** Length of client identifier in encrypted introduction points for hidden
+ * service authorization type 'basic'. */
+#define REND_BASIC_AUTH_CLIENT_ID_LEN 4
+
+/** Multiple of the number of clients to which the real number of clients
+ * is padded with fake clients for hidden service authorization type
+ * 'basic'. */
+#define REND_BASIC_AUTH_CLIENT_MULTIPLE 16
+
+/** Length of client entry consisting of client identifier and encrypted
+ * session key for hidden service authorization type 'basic'. */
+#define REND_BASIC_AUTH_CLIENT_ENTRY_LEN (REND_BASIC_AUTH_CLIENT_ID_LEN \
+ + CIPHER_KEY_LEN)
+
+/** Maximum size of v2 hidden service descriptors. */
+#define REND_DESC_MAX_SIZE (20 * 1024)
+
+/** Legal characters for use in authorized client names for a hidden
+ * service. */
+#define REND_LEGAL_CLIENTNAME_CHARACTERS \
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+-_"
+
+/** Maximum length of authorized client names for a hidden service. */
+#define REND_CLIENTNAME_MAX_LEN 16
+
+/** Length of the rendezvous cookie that is used to connect circuits at the
+ * rendezvous point. */
+#define REND_COOKIE_LEN DIGEST_LEN
+
+/** Client authorization type that a hidden service performs. */
+typedef enum rend_auth_type_t {
+ REND_NO_AUTH = 0,
+ REND_BASIC_AUTH = 1,
+ REND_STEALTH_AUTH = 2,
+} rend_auth_type_t;
+
+/** Client-side configuration of authorization for a hidden service. */
+typedef struct rend_service_authorization_t {
+ uint8_t descriptor_cookie[REND_DESC_COOKIE_LEN];
+ char onion_address[REND_SERVICE_ADDRESS_LEN+1];
+ rend_auth_type_t auth_type;
+} rend_service_authorization_t;
+
+/** Client- and server-side data that is used for hidden service connection
+ * establishment. Not all fields contain data depending on where this struct
+ * is used. */
+typedef struct rend_data_t {
+ /* Hidden service protocol version of this base object. */
+ uint32_t version;
+
+ /** List of HSDir fingerprints to which this request has been sent. This
+ * contains the binary identity digest of each directory, of size
+ * DIGEST_LEN. */
+ smartlist_t *hsdirs_fp;
+
+ /** Rendezvous cookie used by both client and service. */
+ char rend_cookie[REND_COOKIE_LEN];
+
+ /** Number of streams associated with this rendezvous circuit. */
+ int nr_streams;
+} rend_data_t;
+
+typedef struct rend_data_v2_t {
+ /* Rendezvous base data. */
+ rend_data_t base_;
+
+ /** Onion address (without the .onion part) that a client requests. */
+ char onion_address[REND_SERVICE_ID_LEN_BASE32+1];
+
+ /** Descriptor ID for each replica, computed from the onion address. If
+ * the onion address is empty, this array MUST be empty. We keep them so
+ * we know when to purge our entry in the last hsdir request table. */
+ char descriptor_id[REND_NUMBER_OF_NON_CONSECUTIVE_REPLICAS][DIGEST_LEN];
+
+ /** (Optional) descriptor cookie that is used by a client. */
+ char descriptor_cookie[REND_DESC_COOKIE_LEN];
+
+ /** Authorization type for accessing a service used by a client. */
+ rend_auth_type_t auth_type;
+
+ /** Descriptor ID for a client request. The control port command HSFETCH
+ * uses this. It's set if the descriptor query should only use this
+ * descriptor ID. */
+ char desc_id_fetch[DIGEST_LEN];
+
+ /** Hash of the hidden service's PK used by a service. */
+ char rend_pk_digest[DIGEST_LEN];
+} rend_data_v2_t;
+
+/* From a base rend_data_t object <b>d</b>, return the v2 object. */
+static inline
+rend_data_v2_t *TO_REND_DATA_V2(const rend_data_t *d)
+{
+ tor_assert(d);
+ tor_assert(d->version == 2);
+ return DOWNCAST(rend_data_v2_t, d);
+}
+
+/* Stub because we can't include hs_ident.h. */
+struct hs_ident_edge_conn_t;
+struct hs_ident_dir_conn_t;
+struct hs_ident_circuit_t;
+
+typedef struct hsdir_index_t hsdir_index_t;
+
+/** Time interval for tracking replays of DH public keys received in
+ * INTRODUCE2 cells. Used only to avoid launching multiple
+ * simultaneous attempts to connect to the same rendezvous point. */
+#define REND_REPLAY_TIME_INTERVAL (5 * 60)
+
+/** Used to indicate which way a cell is going on a circuit. */
+typedef enum {
+ CELL_DIRECTION_IN=1, /**< The cell is moving towards the origin. */
+ CELL_DIRECTION_OUT=2, /**< The cell is moving away from the origin. */
+} cell_direction_t;
+
+/** Initial value for both sides of a circuit transmission window when the
+ * circuit is initialized. Measured in cells. */
+#define CIRCWINDOW_START 1000
+#define CIRCWINDOW_START_MIN 100
+#define CIRCWINDOW_START_MAX 1000
+/** Amount to increment a circuit window when we get a circuit SENDME. */
+#define CIRCWINDOW_INCREMENT 100
+/** Initial value on both sides of a stream transmission window when the
+ * stream is initialized. Measured in cells. */
+#define STREAMWINDOW_START 500
+#define STREAMWINDOW_START_MAX 500
+/** Amount to increment a stream window when we get a stream SENDME. */
+#define STREAMWINDOW_INCREMENT 50
+
+/** Maximum number of queued cells on a circuit for which we are the
+ * midpoint before we give up and kill it. This must be >= circwindow
+ * to avoid killing innocent circuits, and >= circwindow*2 to give
+ * leaky-pipe a chance of working someday. The ORCIRC_MAX_MIDDLE_KILL_THRESH
+ * ratio controls the margin of error between emitting a warning and
+ * killing the circuit.
+ */
+#define ORCIRC_MAX_MIDDLE_CELLS (CIRCWINDOW_START_MAX*2)
+/** Ratio of hard (circuit kill) to soft (warning) thresholds for the
+ * ORCIRC_MAX_MIDDLE_CELLS tests.
+ */
+#define ORCIRC_MAX_MIDDLE_KILL_THRESH (1.1f)
+
+/* Cell commands. These values are defined in tor-spec.txt. */
+#define CELL_PADDING 0
+#define CELL_CREATE 1
+#define CELL_CREATED 2
+#define CELL_RELAY 3
+#define CELL_DESTROY 4
+#define CELL_CREATE_FAST 5
+#define CELL_CREATED_FAST 6
+#define CELL_VERSIONS 7
+#define CELL_NETINFO 8
+#define CELL_RELAY_EARLY 9
+#define CELL_CREATE2 10
+#define CELL_CREATED2 11
+#define CELL_PADDING_NEGOTIATE 12
+
+#define CELL_VPADDING 128
+#define CELL_CERTS 129
+#define CELL_AUTH_CHALLENGE 130
+#define CELL_AUTHENTICATE 131
+#define CELL_AUTHORIZE 132
+#define CELL_COMMAND_MAX_ 132
+
+/** How long to test reachability before complaining to the user. */
+#define TIMEOUT_UNTIL_UNREACHABILITY_COMPLAINT (20*60)
+
+/** Legal characters in a nickname. */
+#define LEGAL_NICKNAME_CHARACTERS \
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+
+/** Name to use in client TLS certificates if no nickname is given. Once
+ * Tor 0.1.2.x is obsolete, we can remove this. */
+#define DEFAULT_CLIENT_NICKNAME "client"
+
+/** Name chosen by routers that don't configure nicknames */
+#define UNNAMED_ROUTER_NICKNAME "Unnamed"
+
+/** Number of bytes in a SOCKS4 header. */
+#define SOCKS4_NETWORK_LEN 8
+
+/*
+ * Relay payload:
+ * Relay command [1 byte]
+ * Recognized [2 bytes]
+ * Stream ID [2 bytes]
+ * Partial SHA-1 [4 bytes]
+ * Length [2 bytes]
+ * Relay payload [498 bytes]
+ */
+
+/** Number of bytes in a cell, minus cell header. */
+#define CELL_PAYLOAD_SIZE 509
+/** Number of bytes in a cell transmitted over the network, in the longest
+ * form */
+#define CELL_MAX_NETWORK_SIZE 514
+
+/** Maximum length of a header on a variable-length cell. */
+#define VAR_CELL_MAX_HEADER_SIZE 7
+
+static int get_cell_network_size(int wide_circ_ids);
+static inline int get_cell_network_size(int wide_circ_ids)
+{
+ return wide_circ_ids ? CELL_MAX_NETWORK_SIZE : CELL_MAX_NETWORK_SIZE - 2;
+}
+static int get_var_cell_header_size(int wide_circ_ids);
+static inline int get_var_cell_header_size(int wide_circ_ids)
+{
+ return wide_circ_ids ? VAR_CELL_MAX_HEADER_SIZE :
+ VAR_CELL_MAX_HEADER_SIZE - 2;
+}
+static int get_circ_id_size(int wide_circ_ids);
+static inline int get_circ_id_size(int wide_circ_ids)
+{
+ return wide_circ_ids ? 4 : 2;
+}
+
+/** Number of bytes in a relay cell's header (not including general cell
+ * header). */
+#define RELAY_HEADER_SIZE (1+2+2+4+2)
+/** Largest number of bytes that can fit in a relay cell payload. */
+#define RELAY_PAYLOAD_SIZE (CELL_PAYLOAD_SIZE-RELAY_HEADER_SIZE)
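
Worked out from the layout comment above: the relay header is 1+2+2+4+2 = 11
bytes, so RELAY_PAYLOAD_SIZE is 509 - 11 = 498 bytes, matching the "Relay
payload [498 bytes]" line. Likewise, the get_cell_network_size() helper above
yields 514 bytes with 4-byte wide circuit IDs and 512 with legacy 2-byte IDs,
and var-cell headers are 7 and 5 bytes respectively.
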
+
+/** Identifies a circuit on an or_connection */
+typedef uint32_t circid_t;
+/** Identifies a stream on a circuit */
+typedef uint16_t streamid_t;
+
+/* channel_t typedef; struct channel_s is in channel.h */
+
+typedef struct channel_s channel_t;
+
+/* channel_listener_t typedef; struct channel_listener_s is in channel.h */
+
+typedef struct channel_listener_s channel_listener_t;
+
+/* TLS channel stuff */
+
+typedef struct channel_tls_s channel_tls_t;
+
+/* circuitmux_t typedef; struct circuitmux_s is in circuitmux.h */
+
+typedef struct circuitmux_s circuitmux_t;
+
+typedef struct cell_t cell_t;
+typedef struct var_cell_t var_cell_t;
+typedef struct packed_cell_t packed_cell_t;
+typedef struct cell_queue_t cell_queue_t;
+typedef struct destroy_cell_t destroy_cell_t;
+typedef struct destroy_cell_queue_t destroy_cell_queue_t;
+typedef struct ext_or_cmd_t ext_or_cmd_t;
+
+/** Beginning of a RELAY cell payload. */
+typedef struct {
+ uint8_t command; /**< The end-to-end relay command. */
+ uint16_t recognized; /**< Used to tell whether cell is for us. */
+ streamid_t stream_id; /**< Which stream is this cell associated with? */
+ char integrity[4]; /**< Used to tell whether cell is corrupted. */
+ uint16_t length; /**< How long is the payload body? */
+} relay_header_t;
+
+typedef struct socks_request_t socks_request_t;
+typedef struct entry_port_cfg_t entry_port_cfg_t;
+typedef struct server_port_cfg_t server_port_cfg_t;
+
+/** Minimum length of the random part of an AUTH_CHALLENGE cell. */
+#define OR_AUTH_CHALLENGE_LEN 32
+
+/**
+ * @name Certificate types for CERTS cells.
+ *
+ * These values are defined by the protocol, and affect how an X509
+ * certificate in a CERTS cell is interpreted and used.
+ *
+ * @{ */
+/** A certificate that authenticates a TLS link key. The subject key
+ * must match the key used in the TLS handshake; it must be signed by
+ * the identity key. */
+#define OR_CERT_TYPE_TLS_LINK 1
+/** A self-signed identity certificate. The subject key must be a
+ * 1024-bit RSA key. */
+#define OR_CERT_TYPE_ID_1024 2
+/** A certificate that authenticates a key used in an AUTHENTICATE cell
+ * in the v3 handshake. The subject key must be a 1024-bit RSA key; it
+ * must be signed by the identity key */
+#define OR_CERT_TYPE_AUTH_1024 3
+/* DOCDOC */
+#define OR_CERT_TYPE_RSA_ED_CROSSCERT 7
+/**@}*/
+
+/** The first supported type of AUTHENTICATE cell. It contains
+ * a bunch of structures signed with an RSA1024 key. The signed
+ * structures include a HMAC using negotiated TLS secrets, and a digest
+ * of all cells sent or received before the AUTHENTICATE cell (including
+ * the random server-generated AUTH_CHALLENGE cell).
+ */
+#define AUTHTYPE_RSA_SHA256_TLSSECRET 1
+/** As AUTHTYPE_RSA_SHA256_TLSSECRET, but instead of using the
+ * negotiated TLS secrets, uses exported keying material from the TLS
+ * session as described in RFC 5705.
+ *
+ * Not used by today's tors, since everything that supports this
+ * also supports ED25519_SHA256_5705, which is better.
+ **/
+#define AUTHTYPE_RSA_SHA256_RFC5705 2
+/** As AUTHTYPE_RSA_SHA256_RFC5705, but uses an Ed25519 identity key to
+ * authenticate. */
+#define AUTHTYPE_ED25519_SHA256_RFC5705 3
+/*
+ * NOTE: authchallenge_type_is_better() relies on these AUTHTYPE codes
+ * being sorted in order of preference. If we someday add one with
+ * a higher numerical value that we don't like as much, we should revise
+ * authchallenge_type_is_better().
+ */
+
+/** The length of the part of the AUTHENTICATE cell body that the client and
+ * server can generate independently (when using RSA_SHA256_TLSSECRET). It
+ * contains everything except the client's timestamp, the client's randomly
+ * generated nonce, and the signature. */
+#define V3_AUTH_FIXED_PART_LEN (8+(32*6))
+/** The length of the part of the AUTHENTICATE cell body that the client
+ * signs. */
+#define V3_AUTH_BODY_LEN (V3_AUTH_FIXED_PART_LEN + 8 + 16)
+
+typedef struct or_handshake_certs_t or_handshake_certs_t;
+typedef struct or_handshake_state_t or_handshake_state_t;
+
+/** Length of Extended ORPort connection identifier. */
+#define EXT_OR_CONN_ID_LEN DIGEST_LEN /* 20 */
+/*
+ * OR_CONN_HIGHWATER and OR_CONN_LOWWATER moved from connection_or.c so
+ * channeltls.c can see them too.
+ */
+
+/** When adding cells to an OR connection's outbuf, keep adding until the
+ * outbuf is at least this long, or we run out of cells. */
+#define OR_CONN_HIGHWATER (32*1024)
+
+/** Add cells to an OR connection's outbuf whenever the outbuf's data length
+ * drops below this size. */
+#define OR_CONN_LOWWATER (16*1024)
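+
+/* Together these implement a high/low watermark scheme. A minimal sketch of
+ * the refill logic (chan_has_queued_cells() and chan_write_next_cell() are
+ * hypothetical names; the real code is spread across connection_or.c and
+ * channeltls.c):
+ *
+ *   // Keep packing cells until the outbuf is "full enough" or we run dry.
+ *   while (connection_get_outbuf_len(TO_CONN(orconn)) < OR_CONN_HIGHWATER
+ *          && chan_has_queued_cells(chan))
+ *     chan_write_next_cell(chan, orconn);
+ *
+ *   // Refilling starts again only once the outbuf drains below
+ *   // OR_CONN_LOWWATER, so we avoid waking up once per cell.
+ */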
+
+typedef struct connection_t connection_t;
+typedef struct control_connection_t control_connection_t;
+typedef struct dir_connection_t dir_connection_t;
+typedef struct edge_connection_t edge_connection_t;
+typedef struct entry_connection_t entry_connection_t;
+typedef struct listener_connection_t listener_connection_t;
+typedef struct or_connection_t or_connection_t;
+
+/** Cast a connection_t subtype pointer to a connection_t **/
+#define TO_CONN(c) (&(((c)->base_)))
+
+/** Cast an entry_connection_t subtype pointer to a connection_t **/
+#define ENTRY_TO_CONN(c) (TO_CONN(ENTRY_TO_EDGE_CONN(c)))
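+
+/* Usage sketch: these upcasts let subtype code reuse generic connection_t
+ * helpers. For example, given a valid or_connection_t *orconn:
+ *
+ *   connection_t *conn = TO_CONN(orconn);
+ *   connection_mark_for_close(conn); // any connection_t-level API applies
+ */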
+
+typedef struct addr_policy_t addr_policy_t;
+
+typedef struct cached_dir_t cached_dir_t;
+
+/** Enum used to remember where a signed_descriptor_t is stored and how to
+ * manage the memory for signed_descriptor_body. */
+typedef enum {
+ /** The descriptor isn't stored on disk at all: the copy in memory is
+ * canonical; the saved_offset field is meaningless. */
+ SAVED_NOWHERE=0,
+ /** The descriptor is stored in the cached_routers file: the
+ * signed_descriptor_body is meaningless; the signed_descriptor_len and
+ * saved_offset are used to index into the mmaped cache file. */
+ SAVED_IN_CACHE,
+ /** The descriptor is stored in the cached_routers.new file: the
+ * signed_descriptor_body and saved_offset fields are both set. */
+ /* FFFF (We could also mmap the file and grow the mmap as needed, or
+ * lazy-load the descriptor text by using seek and read. We don't, for
+ * now.)
+ */
+ SAVED_IN_JOURNAL
+} saved_location_t;
+#define saved_location_bitfield_t ENUM_BF(saved_location_t)
+
+/** Enumeration: what directory object is being downloaded?
+ * This determines which schedule is selected to perform the download. */
+typedef enum {
+ DL_SCHED_GENERIC = 0,
+ DL_SCHED_CONSENSUS = 1,
+ DL_SCHED_BRIDGE = 2,
+} download_schedule_t;
+#define download_schedule_bitfield_t ENUM_BF(download_schedule_t)
+
+/** Enumeration: is the download schedule for downloading from an authority,
+ * or from any available directory mirror?
+ * During bootstrap, "any" means a fallback (or an authority, if there
+ * are no fallbacks).
+ * When we have a valid consensus, "any" means any directory server. */
+typedef enum {
+ DL_WANT_ANY_DIRSERVER = 0,
+ DL_WANT_AUTHORITY = 1,
+} download_want_authority_t;
+#define download_want_authority_bitfield_t \
+ ENUM_BF(download_want_authority_t)
+
+/** Enumeration: do we want to increment the schedule position each time a
+ * connection is attempted (these attempts can be concurrent), or do we want
+ * to increment the schedule position after a connection fails? */
+typedef enum {
+ DL_SCHED_INCREMENT_FAILURE = 0,
+ DL_SCHED_INCREMENT_ATTEMPT = 1,
+} download_schedule_increment_t;
+#define download_schedule_increment_bitfield_t \
+ ENUM_BF(download_schedule_increment_t)
+
+typedef struct download_status_t download_status_t;
+
+/** If n_download_failures is this high, the download can never happen. */
+#define IMPOSSIBLE_TO_DOWNLOAD 255
+
+/** The maximum size we expect the router descriptor annotations we create
+ * to be. We'll accept larger ones if we see them on disk, but we won't
+ * create any that are larger than this. */
+#define ROUTER_ANNOTATION_BUF_LEN 256
+
+typedef struct signed_descriptor_t signed_descriptor_t;
+
+/** A signed integer representing a country code. */
+typedef int16_t country_t;
+
+/** Flags used to summarize the declared protocol versions of a relay,
+ * so we don't need to parse them again and again. */
+typedef struct protover_summary_flags_t {
+ /** True iff we have a proto line for this router, or a versions line
+ * from which we could infer the protocols. */
+ unsigned int protocols_known:1;
+
+ /** True iff this router has a version or protocol list that allows it to
+ * accept EXTEND2 cells. This requires Relay=2. */
+ unsigned int supports_extend2_cells:1;
+
+ /** True iff this router has a protocol list that allows it to negotiate
+ * ed25519 identity keys on a link handshake with us. This
+ * requires LinkAuth=3. */
+ unsigned int supports_ed25519_link_handshake_compat:1;
+
+ /** True iff this router has a protocol list that allows it to negotiate
+ * ed25519 identity keys on a link handshake, at all. This requires some
+ * LinkAuth=X for X >= 3. */
+ unsigned int supports_ed25519_link_handshake_any:1;
+
+ /** True iff this router has a protocol list that allows it to be an
+ * introduction point supporting the ed25519 authentication key that is
+ * part of the v3 protocol detailed in proposal 224. This requires
+ * HSIntro=4. */
+ unsigned int supports_ed25519_hs_intro : 1;
+
+ /** True iff this router has a protocol list that allows it to be a hidden
+ * service directory supporting version 3 as seen in proposal 224. This
+ * requires HSDir=2. */
+ unsigned int supports_v3_hsdir : 1;
+
+ /** True iff this router has a protocol list that allows it to be a hidden
+ * service rendezvous point supporting version 3 as seen in proposal 224.
+ * This requires HSRend=2. */
+ unsigned int supports_v3_rendezvous_point: 1;
+} protover_summary_flags_t;
+
+typedef struct routerinfo_t routerinfo_t;
+typedef struct extrainfo_t extrainfo_t;
+typedef struct routerstatus_t routerstatus_t;
+
+typedef struct microdesc_t microdesc_t;
+typedef struct node_t node_t;
+typedef struct vote_microdesc_hash_t vote_microdesc_hash_t;
+typedef struct vote_routerstatus_t vote_routerstatus_t;
+typedef struct document_signature_t document_signature_t;
+typedef struct networkstatus_voter_info_t networkstatus_voter_info_t;
+typedef struct networkstatus_sr_info_t networkstatus_sr_info_t;
+
+/** Enumerates recognized flavors of a consensus networkstatus document. All
+ * flavors of a consensus are generated from the same set of votes, but they
+ * present different types of information to different versions of Tor. */
+typedef enum {
+ FLAV_NS = 0,
+ FLAV_MICRODESC = 1,
+} consensus_flavor_t;
+
+/** How many different consensus flavors are there? */
+#define N_CONSENSUS_FLAVORS ((int)(FLAV_MICRODESC)+1)
+
+typedef struct networkstatus_t networkstatus_t;
+typedef struct ns_detached_signatures_t ns_detached_signatures_t;
+typedef struct desc_store_t desc_store_t;
+typedef struct routerlist_t routerlist_t;
+typedef struct extend_info_t extend_info_t;
+typedef struct authority_cert_t authority_cert_t;
+
+/** Bitfield enum type listing types of information that directory authorities
+ * can be authoritative about, and that directory caches may or may not cache.
+ *
+ * Note that the granularity here is based on authority granularity and on
+ * cache capabilities. Thus, one particular bit may correspond in practice to
+ * a few types of directory info, so long as every authority that pronounces
+ * officially about one of the types prounounces officially about all of them,
+ * and so long as every cache that caches one of them caches all of them.
+ */
+typedef enum {
+ NO_DIRINFO = 0,
+ /** Serves/signs v3 directory information: votes, consensuses, certs */
+ V3_DIRINFO = 1 << 2,
+ /** Serves bridge descriptors. */
+ BRIDGE_DIRINFO = 1 << 4,
+ /** Serves extrainfo documents. */
+ EXTRAINFO_DIRINFO=1 << 5,
+ /** Serves microdescriptors. */
+ MICRODESC_DIRINFO=1 << 6,
+} dirinfo_type_t;
+
+#define ALL_DIRINFO ((dirinfo_type_t)((1<<7)-1))
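+
+/* ((1<<7)-1) is 0x7f: every bit from 0 through 6, which covers all the
+ * flags above plus the currently unassigned bits below bit 7. A new flag
+ * using bit 7 or higher would also require widening this mask. */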
+
+#define ONION_HANDSHAKE_TYPE_TAP 0x0000
+#define ONION_HANDSHAKE_TYPE_FAST 0x0001
+#define ONION_HANDSHAKE_TYPE_NTOR 0x0002
+#define MAX_ONION_HANDSHAKE_TYPE 0x0002
+
+typedef struct onion_handshake_state_t onion_handshake_state_t;
+typedef struct relay_crypto_t relay_crypto_t;
+typedef struct crypt_path_t crypt_path_t;
+typedef struct crypt_path_reference_t crypt_path_reference_t;
+
+#define CPATH_KEY_MATERIAL_LEN (20*2+16*2)
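+
+/* That is, 72 bytes per hop: a forward and a backward digest seed (20 bytes
+ * each, SHA-1 sized; Df and Db in tor-spec) plus a forward and a backward
+ * cipher key (16 bytes each, AES-128 sized; Kf and Kb). */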
+
+typedef struct cpath_build_state_t cpath_build_state_t;
+
+struct create_cell_t;
+
+/** Entry in the cell stats list of a circuit; used only if CELL_STATS
+ * events are enabled. */
+typedef struct testing_cell_stats_entry_t {
+ uint8_t command; /**< cell command number. */
+ /** Waiting time in centiseconds if this event is for a removed cell,
+ * or 0 if this event is for adding a cell to the queue. 22 bits can
+ * store more than 11 hours, enough to assume that a circuit with this
+ * delay would long since have been closed. */
+ unsigned int waiting_time:22;
+ unsigned int removed:1; /**< 0 for added to, 1 for removed from queue. */
+ unsigned int exitward:1; /**< 0 for app-ward, 1 for exit-ward. */
+} testing_cell_stats_entry_t;
+
+typedef struct circuit_t circuit_t;
+typedef struct origin_circuit_t origin_circuit_t;
+typedef struct or_circuit_t or_circuit_t;
+
+/** Largest number of relay_early cells that we can send on a given
+ * circuit. */
+#define MAX_RELAY_EARLY_CELLS_PER_CIRCUIT 8
+
+typedef enum path_state_t path_state_t;
+#define path_state_bitfield_t ENUM_BF(path_state_t)
+
+#if REND_COOKIE_LEN != DIGEST_LEN
+#error "The REND_TOKEN_LEN macro assumes REND_COOKIE_LEN == DIGEST_LEN"
+#endif
+#define REND_TOKEN_LEN DIGEST_LEN
+
+/** Convert a circuit subtype to a circuit_t. */
+#define TO_CIRCUIT(x) (&((x)->base_))
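+
+/* Usage sketch: TO_CIRCUIT() upcasts; the checked downcasts are
+ * TO_ORIGIN_CIRCUIT() and TO_OR_CIRCUIT() in circuitlist.h, guarded by
+ * CIRCUIT_IS_ORIGIN(). For example (handle_origin() is a hypothetical
+ * helper):
+ *
+ *   if (CIRCUIT_IS_ORIGIN(circ))
+ *     handle_origin(TO_ORIGIN_CIRCUIT(circ));
+ */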
+
+/** @name Isolation flags
+
+ Ways to isolate client streams
+
+ @{
+*/
+/** Isolate based on destination port */
+#define ISO_DESTPORT (1u<<0)
+/** Isolate based on destination address */
+#define ISO_DESTADDR (1u<<1)
+/** Isolate based on SOCKS authentication */
+#define ISO_SOCKSAUTH (1u<<2)
+/** Isolate based on client protocol choice */
+#define ISO_CLIENTPROTO (1u<<3)
+/** Isolate based on client address */
+#define ISO_CLIENTADDR (1u<<4)
+/** Isolate based on session group (always on). */
+#define ISO_SESSIONGRP (1u<<5)
+/** Isolate based on newnym epoch (always on). */
+#define ISO_NYM_EPOCH (1u<<6)
+/** Isolate all streams (Internal only). */
+#define ISO_STREAM (1u<<7)
+/**@}*/
+
+/** Default isolation level for ports. */
+#define ISO_DEFAULT (ISO_CLIENTADDR|ISO_SOCKSAUTH|ISO_SESSIONGRP|ISO_NYM_EPOCH)
+
+/** Indicates that we haven't yet set a session group on a port_cfg_t. */
+#define SESSION_GROUP_UNSET -1
+/** Session group reserved for directory connections */
+#define SESSION_GROUP_DIRCONN -2
+/** Session group reserved for resolve requests launched by a controller */
+#define SESSION_GROUP_CONTROL_RESOLVE -3
+/** First automatically allocated session group number */
+#define SESSION_GROUP_FIRST_AUTO -4
+
+typedef struct port_cfg_t port_cfg_t;
+typedef struct routerset_t routerset_t;
+
+/** A magic value for the (Socks|OR|...)Port options below, telling Tor
+ * to pick its own port. */
+#define CFG_AUTO_PORT 0xc4005e
+
+typedef struct or_options_t or_options_t;
+
+#define LOG_PROTOCOL_WARN (get_protocol_warning_severity_level())
+
+typedef struct or_state_t or_state_t;
+
+#define MAX_SOCKS_ADDR_LEN 256
+
+/********************************* circuitbuild.c **********************/
+
+/** How many hops does a general-purpose circuit have by default? */
+#define DEFAULT_ROUTE_LEN 3
+
+/* Circuit Build Timeout "public" structures. */
+
+/** Precision multiplier for the Bw weights */
+#define BW_WEIGHT_SCALE 10000
+#define BW_MIN_WEIGHT_SCALE 1
+#define BW_MAX_WEIGHT_SCALE INT32_MAX
+
+typedef struct circuit_build_times_s circuit_build_times_t;
+
+/********************************* config.c ***************************/
+
+/********************************* connection_edge.c *************************/
+
+/** Enumerates possible origins of a client-side address mapping. */
+typedef enum {
+ /** We're remapping this address because the controller told us to. */
+ ADDRMAPSRC_CONTROLLER,
+ /** We're remapping this address because of an AutomapHostsOnResolve
+ * configuration. */
+ ADDRMAPSRC_AUTOMAP,
+ /** We're remapping this address because our configuration (via torrc, the
+ * command line, or a SETCONF command) told us to. */
+ ADDRMAPSRC_TORRC,
+ /** We're remapping this address because we have TrackHostExit configured,
+ * and we want to remember to use the same exit next time. */
+ ADDRMAPSRC_TRACKEXIT,
+ /** We're remapping this address because we got a DNS resolution from a
+ * Tor server that told us what its value was. */
+ ADDRMAPSRC_DNS,
+
+ /** No remapping has occurred. This isn't a possible value for an
+ * addrmap_entry_t; it's used as a null value when we need to answer "Why
+ * did this remapping happen?" */
+ ADDRMAPSRC_NONE
+} addressmap_entry_source_t;
+#define addressmap_entry_source_bitfield_t ENUM_BF(addressmap_entry_source_t)
+
+#define WRITE_STATS_INTERVAL (24*60*60)
+
+/********************************* dirvote.c ************************/
+
+typedef struct vote_timing_t vote_timing_t;
+
+/********************************* microdesc.c *************************/
+
+typedef struct microdesc_cache_t microdesc_cache_t;
+
+/********************************* rendcommon.c ***************************/
+
+typedef struct rend_authorized_client_t rend_authorized_client_t;
+typedef struct rend_encoded_v2_service_descriptor_t
+ rend_encoded_v2_service_descriptor_t;
+
+/** The maximum number of non-circuit-build-timeout failures a hidden
+ * service client will tolerate while trying to build a circuit to an
+ * introduction point. See also rend_intro_point_t.unreachable_count. */
+#define MAX_INTRO_POINT_REACHABILITY_FAILURES 5
+
+/** The minimum and maximum number of distinct INTRODUCE2 cells which a
+ * hidden service's introduction point will receive before it begins to
+ * expire. */
+#define INTRO_POINT_MIN_LIFETIME_INTRODUCTIONS 16384
+/* Double the minimum value so the interval is [min, min * 2]. */
+#define INTRO_POINT_MAX_LIFETIME_INTRODUCTIONS \
+ (INTRO_POINT_MIN_LIFETIME_INTRODUCTIONS * 2)
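+
+/* I.e., each introduction point picks a random INTRODUCE2 limit in the
+ * interval [16384, 32768]. */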
+
+/** The minimum number of seconds that an introduction point will last
+ * before expiring due to old age. (If it receives its randomly chosen
+ * limit of INTRODUCE2 cells, which lies between
+ * INTRO_POINT_MIN_LIFETIME_INTRODUCTIONS and
+ * INTRO_POINT_MAX_LIFETIME_INTRODUCTIONS, it may expire sooner.)
+ *
+ * XXX Should this be configurable? */
+#define INTRO_POINT_LIFETIME_MIN_SECONDS (18*60*60)
+/** The maximum number of seconds that an introduction point will last
+ * before expiring due to old age.
+ *
+ * XXX Should this be configurable? */
+#define INTRO_POINT_LIFETIME_MAX_SECONDS (24*60*60)
+
+/** The maximum number of circuit creation retries we make to an intro point
+ * before giving up. We try to reuse an intro point that fails during its
+ * lifetime, so this is a hard limit on the number of retries we will make. */
+#define MAX_INTRO_POINT_CIRCUIT_RETRIES 3
+
+typedef struct rend_intro_point_t rend_intro_point_t;
+typedef struct rend_service_descriptor_t rend_service_descriptor_t;
+
+/********************************* routerlist.c ***************************/
+
+typedef struct dir_server_t dir_server_t;
+
+#define RELAY_REQUIRED_MIN_BANDWIDTH (75*1024)
+#define BRIDGE_REQUIRED_MIN_BANDWIDTH (50*1024)
+
+#define ROUTER_MAX_DECLARED_BANDWIDTH INT32_MAX
+
+typedef struct tor_version_t tor_version_t;
+
+#endif /* !defined(TOR_OR_H) */
diff --git a/src/core/or/or_circuit_st.h b/src/core/or/or_circuit_st.h
new file mode 100644
index 0000000000..158a5314ef
--- /dev/null
+++ b/src/core/or/or_circuit_st.h
@@ -0,0 +1,80 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef OR_CIRCUIT_ST_H
+#define OR_CIRCUIT_ST_H
+
+#include "or/or.h"
+
+#include "or/circuit_st.h"
+#include "or/crypt_path_st.h"
+
+struct onion_queue_t;
+
+/** An or_circuit_t holds information needed to implement a circuit at an
+ * OR. */
+struct or_circuit_t {
+ circuit_t base_;
+
+ /** Pointer to an entry on the onion queue, if this circuit is waiting for a
+ * chance to give an onionskin to a cpuworker. Used only in onion.c */
+ struct onion_queue_t *onionqueue_entry;
+ /** Pointer to a workqueue entry, if this circuit has given an onionskin to
+ * a cpuworker and is waiting for a response. Used to decide whether it is
+ * safe to free a circuit or if it is still in use by a cpuworker. */
+ struct workqueue_entry_s *workqueue_entry;
+
+ /** The circuit_id used in the previous (backward) hop of this circuit. */
+ circid_t p_circ_id;
+ /** Queue of cells waiting to be transmitted on p_conn. */
+ cell_queue_t p_chan_cells;
+ /** The channel that is previous in this circuit. */
+ channel_t *p_chan;
+ /**
+ * Circuit mux associated with p_chan to which this circuit is attached;
+ * NULL if we have no p_chan.
+ */
+ circuitmux_t *p_mux;
+ /** Linked list of Exit streams associated with this circuit. */
+ edge_connection_t *n_streams;
+ /** Linked list of Exit streams associated with this circuit that are
+ * still being resolved. */
+ edge_connection_t *resolving_streams;
+
+ /** Cryptographic state used for encrypting and authenticating relay
+ * cells to and from this hop. */
+ relay_crypto_t crypto;
+
+ /** Points to spliced circuit if purpose is REND_ESTABLISHED, and circuit
+ * is not marked for close. */
+ struct or_circuit_t *rend_splice;
+
+ /** Stores KH for the handshake. */
+ char rend_circ_nonce[DIGEST_LEN];/* KH in tor-spec.txt */
+
+ /** How many more relay_early cells can we send on this circuit, according
+ * to the specification? */
+ unsigned int remaining_relay_early_cells : 4;
+
+ /** True iff we have already received an INTRODUCE1 cell on this
+ * circuit. */
+ unsigned int already_received_introduce1 : 1;
+
+ /** If set, this circuit carries HS traffic. Consider it in any HS
+ * statistics. */
+ unsigned int circuit_carries_hs_traffic_stats : 1;
+
+ /** Number of cells that were removed from the circuit queue; reset each
+ * time buffer stats are written to disk. */
+ uint32_t processed_cells;
+
+ /** Total time in milliseconds that cells spent in both the app-ward and
+ * exit-ward queues of this circuit; reset each time buffer stats are
+ * written to disk. */
+ uint64_t total_cell_waiting_time;
+};
+
+#endif
+
diff --git a/src/core/or/or_connection_st.h b/src/core/or/or_connection_st.h
new file mode 100644
index 0000000000..8c2c1f89c6
--- /dev/null
+++ b/src/core/or/or_connection_st.h
@@ -0,0 +1,92 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef OR_CONNECTION_ST_H
+#define OR_CONNECTION_ST_H
+
+#include "or/connection_st.h"
+#include "lib/evloop/token_bucket.h"
+
+struct tor_tls_t;
+
+/** Subtype of connection_t for an "OR connection" -- that is, one that speaks
+ * cells over TLS. */
+struct or_connection_t {
+ connection_t base_;
+
+ /** Hash of the public RSA key for the other side's identity key, or zeroes
+ * if the other side hasn't shown us a valid identity key. */
+ char identity_digest[DIGEST_LEN];
+
+ /** Extended ORPort connection identifier. */
+ char *ext_or_conn_id;
+ /** This is the ClientHash value we expect to receive from the
+ * client during the Extended ORPort authentication protocol. We
+ * compute it upon receiving the ClientNonce from the client, and we
+ * compare it with the actual ClientHash value sent by the
+ * client. */
+ char *ext_or_auth_correct_client_hash;
+ /** String carrying the name of the pluggable transport
+ * (e.g. "obfs2") that is obfuscating this connection. If no
+ * pluggable transports are used, it's NULL. */
+ char *ext_or_transport;
+
+ char *nickname; /**< Nickname of OR on other side (if any). */
+
+ struct tor_tls_t *tls; /**< TLS connection state. */
+ int tls_error; /**< Last tor_tls error code. */
+
+ /* Channel using this connection */
+ channel_tls_t *chan;
+
+ tor_addr_t real_addr; /**< The actual address that this connection came from
+ * or went to. The <b>addr</b> field is prone to
+ * getting overridden by the address from the router
+ * descriptor matching <b>identity_digest</b>. */
+
+ /** Should this connection be used for extending circuits to the server
+ * matching the <b>identity_digest</b> field? Set to true if we're pretty
+ * sure we aren't getting MITMed, either because we're connected to an
+ * address listed in a server descriptor, or because an authenticated
+ * NETINFO cell listed the address we're connected to as recognized. */
+ unsigned int is_canonical:1;
+
+ /** True iff this is an outgoing connection. */
+ unsigned int is_outgoing:1;
+ unsigned int proxy_type:2; /**< One of PROXY_NONE...PROXY_SOCKS5 */
+ unsigned int wide_circ_ids:1;
+ /** True iff this connection has had its bootstrap failure logged with
+ * control_event_bootstrap_problem. */
+ unsigned int have_noted_bootstrap_problem:1;
+ /** True iff this is a client connection and its address has been put in
+ * the geoip cache and handled by the DoS mitigation subsystem. We use this
+ * to ensure we have a coherent count of concurrent connections. */
+ unsigned int tracked_for_dos_mitigation : 1;
+
+ uint16_t link_proto; /**< What protocol version are we using? 0 for
+ * "none negotiated yet." */
+ uint16_t idle_timeout; /**< How long can this connection sit with no
+ * circuits on it before we close it? Based on
+ * IDLE_CIRCUIT_TIMEOUT_{NON,}CANONICAL and
+ * on is_canonical, randomized. */
+ or_handshake_state_t *handshake_state; /**< If we are setting this connection
+ * up, state information to do so. */
+
+ time_t timestamp_lastempty; /**< When was the outbuf last completely empty?*/
+
+ token_bucket_rw_t bucket; /**< Used for rate limiting when the connection is
+ * in state CONN_OPEN. */
+
+ /*
+ * Count the number of bytes flushed out on this orconn, and the number of
+ * bytes TLS actually sent; used for overhead estimation when scheduling.
+ */
+ uint64_t bytes_xmitted, bytes_xmitted_by_tls;
+};
+
+#endif
diff --git a/src/core/or/or_handshake_certs_st.h b/src/core/or/or_handshake_certs_st.h
new file mode 100644
index 0000000000..38e798b5e2
--- /dev/null
+++ b/src/core/or/or_handshake_certs_st.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef OR_HANDSHAKE_CERTS_ST
+#define OR_HANDSHAKE_CERTS_ST
+
+struct tor_x509_cert_t;
+
+/** Structure to hold all the certificates we've received on an OR connection
+ */
+struct or_handshake_certs_t {
+ /** True iff we originated this connection. */
+ int started_here;
+ /** The cert for the 'auth' RSA key that's supposed to sign the AUTHENTICATE
+ * cell. Signed with the RSA identity key. */
+ struct tor_x509_cert_t *auth_cert;
+ /** The cert for the 'link' RSA key that was used to negotiate the TLS
+ * connection. Signed with the RSA identity key. */
+ struct tor_x509_cert_t *link_cert;
+ /** A self-signed identity certificate: the RSA identity key signed
+ * with itself. */
+ struct tor_x509_cert_t *id_cert;
+ /** The Ed25519 signing key, signed with the Ed25519 identity key. */
+ struct tor_cert_st *ed_id_sign;
+ /** A digest of the X509 link certificate for the TLS connection, signed
+ * with the Ed25519 signing key. */
+ struct tor_cert_st *ed_sign_link;
+ /** The Ed25519 authentication key (which is supposed to sign an
+ * AUTHENTICATE cell), signed with the Ed25519 signing key. */
+ struct tor_cert_st *ed_sign_auth;
+ /** The Ed25519 identity key, cross-signed with the RSA identity key. */
+ uint8_t *ed_rsa_crosscert;
+ /** The length of <b>ed_rsa_crosscert</b> in bytes */
+ size_t ed_rsa_crosscert_len;
+};
+
+#endif
diff --git a/src/core/or/or_handshake_state_st.h b/src/core/or/or_handshake_state_st.h
new file mode 100644
index 0000000000..4ee095d9af
--- /dev/null
+++ b/src/core/or/or_handshake_state_st.h
@@ -0,0 +1,78 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef OR_HANDSHAKE_STATE_ST
+#define OR_HANDSHAKE_STATE_ST
+
+/** Stores flags and information related to the portion of a v2/v3 Tor OR
+ * connection handshake that happens after the TLS handshake is finished.
+ */
+struct or_handshake_state_t {
+ /** When was the VERSIONS cell sent on this connection? Used to get
+ * an estimate of the skew in the returning NETINFO reply. */
+ time_t sent_versions_at;
+ /** True iff we originated this connection */
+ unsigned int started_here : 1;
+ /** True iff we have received and processed a VERSIONS cell. */
+ unsigned int received_versions : 1;
+ /** True iff we have received and processed an AUTH_CHALLENGE cell */
+ unsigned int received_auth_challenge : 1;
+ /** True iff we have received and processed a CERTS cell. */
+ unsigned int received_certs_cell : 1;
+ /** True iff we have received and processed an AUTHENTICATE cell */
+ unsigned int received_authenticate : 1;
+
+ /** True iff we've received valid authentication to some identity. */
+ unsigned int authenticated : 1;
+ unsigned int authenticated_rsa : 1;
+ unsigned int authenticated_ed25519 : 1;
+
+ /** True iff we have sent a NETINFO cell. */
+ unsigned int sent_netinfo : 1;
+
+ /** The signing->ed25519 link certificate corresponding to the x509
+ * certificate we used on the TLS connection (if this is a server-side
+ * connection). We make a copy of this here to prevent a race condition
+ * caused by TLS context rotation. */
+ struct tor_cert_st *own_link_cert;
+
+ /** True iff we should feed outgoing cells into digest_sent and
+ * digest_received respectively.
+ *
+ * From the server's side of the v3 handshake, we want to capture everything
+ * from the VERSIONS cell through and including the AUTH_CHALLENGE cell.
+ * From the client's, we want to capture everything from the VERSIONS cell
+ * through but *not* including the AUTHENTICATE cell.
+ *
+ * @{ */
+ unsigned int digest_sent_data : 1;
+ unsigned int digest_received_data : 1;
+ /**@}*/
+
+ /** Identity RSA digest that we have received and authenticated for our peer
+ * on this connection. */
+ uint8_t authenticated_rsa_peer_id[DIGEST_LEN];
+ /** Identity Ed25519 public key that we have received and authenticated for
+ * our peer on this connection. */
+ ed25519_public_key_t authenticated_ed25519_peer_id;
+
+ /** Digests of the cells that we have sent or received as part of a V3
+ * handshake. Used for making and checking AUTHENTICATE cells.
+ *
+ * @{
+ */
+ crypto_digest_t *digest_sent;
+ crypto_digest_t *digest_received;
+ /** @} */
+
+ /** Certificates that a connection initiator sent us in a CERTS cell; we're
+ * holding on to them until we get an AUTHENTICATE cell.
+ */
+ or_handshake_certs_t *certs;
+};
+
+#endif
+
diff --git a/src/core/or/origin_circuit_st.h b/src/core/or/origin_circuit_st.h
new file mode 100644
index 0000000000..b885725edb
--- /dev/null
+++ b/src/core/or/origin_circuit_st.h
@@ -0,0 +1,290 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef ORIGIN_CIRCUIT_ST_H
+#define ORIGIN_CIRCUIT_ST_H
+
+#include "or/or.h"
+
+#include "or/circuit_st.h"
+
+struct onion_queue_t;
+
+/**
+ * Describes the circuit building process in simplified terms based
+ * on the path bias accounting state for a circuit.
+ *
+ * NOTE: These state values are enumerated in the order for which we
+ * expect circuits to transition through them. If you add states,
+ * you need to preserve this overall ordering. The various pathbias
+ * state transition and accounting functions (pathbias_mark_* and
+ * pathbias_count_*) contain ordinal comparisons to enforce proper
+ * state transitions for corrections.
+ *
+ * This state machine and the associated logic was created to prevent
+ * miscounting due to unknown cases of circuit reuse. See also tickets
+ * #6475 and #7802.
+ */
+enum path_state_t {
+ /** This circuit is "new". It has not yet completed a first hop
+ * or been counted by the path bias code. */
+ PATH_STATE_NEW_CIRC = 0,
+ /** This circuit has completed one or two hops, and has been counted by
+ * the path bias logic. */
+ PATH_STATE_BUILD_ATTEMPTED = 1,
+ /** This circuit has been completely built */
+ PATH_STATE_BUILD_SUCCEEDED = 2,
+ /** Did we try to attach any SOCKS streams or hidserv introductions to
+ * this circuit?
+ *
+ * Note: If we ever implement end-to-end stream timing through test
+ * stream probes (#5707), we must *not* set this for those probes
+ * (or any other automatic streams) because the adversary could
+ * just tag at a later point.
+ */
+ PATH_STATE_USE_ATTEMPTED = 3,
+ /** Did any SOCKS streams or hidserv introductions actually succeed on
+ * this circuit?
+ *
+ * If any streams detach/fail from this circuit, the code transitions
+ * the circuit back to PATH_STATE_USE_ATTEMPTED to ensure we probe. See
+ * pathbias_mark_use_rollback() for that.
+ */
+ PATH_STATE_USE_SUCCEEDED = 4,
+
+ /**
+ * This is a special state to indicate that we got a corrupted
+ * relay cell on a circuit and we don't intend to probe it.
+ */
+ PATH_STATE_USE_FAILED = 5,
+
+ /**
+ * This is a special state to indicate that we already counted
+ * the circuit. Used to guard against potential state machine
+ * violations.
+ */
+ PATH_STATE_ALREADY_COUNTED = 6,
+};
+
+/** An origin_circuit_t holds data necessary to build and use a circuit.
+ */
+struct origin_circuit_t {
+ circuit_t base_;
+
+ /** Linked list of AP streams (or EXIT streams if hidden service)
+ * associated with this circuit. */
+ edge_connection_t *p_streams;
+
+ /** Bytes read on this circuit since last call to
+ * control_event_circ_bandwidth_used(). Only used if we're configured
+ * to emit CIRC_BW events. */
+ uint32_t n_read_circ_bw;
+
+ /** Bytes written on this circuit since last call to
+ * control_event_circ_bandwidth_used(). Only used if we're configured
+ * to emit CIRC_BW events. */
+ uint32_t n_written_circ_bw;
+
+ /** Total known-valid relay cell bytes since last call to
+ * control_event_circ_bandwidth_used(). Only used if we're configured
+ * to emit CIRC_BW events. */
+ uint32_t n_delivered_read_circ_bw;
+
+ /** Total written relay cell bytes since last call to
+ * control_event_circ_bandwidth_used(). Only used if we're configured
+ * to emit CIRC_BW events. */
+ uint32_t n_delivered_written_circ_bw;
+
+ /** Total overhead data in all known-valid relay data cells since last
+ * call to control_event_circ_bandwidth_used(). Only used if we're
+ * configured to emit CIRC_BW events. */
+ uint32_t n_overhead_read_circ_bw;
+
+ /** Total written overhead data in all relay data cells since last call to
+ * control_event_circ_bandwidth_used(). Only used if we're configured
+ * to emit CIRC_BW events. */
+ uint32_t n_overhead_written_circ_bw;
+
+ /** Build state for this circuit. It includes the intended path
+ * length, the chosen exit router, rendezvous information, etc.
+ */
+ cpath_build_state_t *build_state;
+ /** The doubly-linked list of crypt_path_t entries, one per hop,
+ * for this circuit. This includes ciphers for each hop,
+ * integrity-checking digests for each hop, and package/delivery
+ * windows for each hop.
+ */
+ crypt_path_t *cpath;
+
+ /** Holds all rendezvous data on either client or service side. */
+ rend_data_t *rend_data;
+
+ /** Holds the hidden service identifier on either the client or service
+ * side. This is used for both introduction and rendezvous circuits. */
+ struct hs_ident_circuit_t *hs_ident;
+
+ /** Holds the data that the entry guard system uses to track the
+ * status of the guard this circuit is using, and thereby to determine
+ * whether this circuit can be used. */
+ struct circuit_guard_state_t *guard_state;
+
+ /** Index into global_origin_circuit_list for this circuit. -1 if not
+ * present. */
+ int global_origin_circuit_list_idx;
+
+ /** How many more relay_early cells can we send on this circuit, according
+ * to the specification? */
+ unsigned int remaining_relay_early_cells : 4;
+
+ /** Set if this circuit is insanely old and we already informed the user */
+ unsigned int is_ancient : 1;
+
+ /** Set if this circuit has already been opened. Used to detect
+ * cannibalized circuits. */
+ unsigned int has_opened : 1;
+
+ /**
+ * Path bias state machine. Used to ensure integrity of our
+ * circuit building and usage accounting. See path_state_t
+ * for more details.
+ */
+ path_state_bitfield_t path_state : 3;
+
+ /** If this flag is set, we should not consider attaching any more
+ * connections to this circuit. */
+ unsigned int unusable_for_new_conns : 1;
+
+ /**
+ * Tristate variable to guard against pathbias miscounting
+ * due to circuit purpose transitions changing the decision
+ * of pathbias_should_count(). This variable is informational
+ * only. The current results of pathbias_should_count() are
+ * the official decision for pathbias accounting.
+ */
+ uint8_t pathbias_shouldcount;
+#define PATHBIAS_SHOULDCOUNT_UNDECIDED 0
+#define PATHBIAS_SHOULDCOUNT_IGNORED 1
+#define PATHBIAS_SHOULDCOUNT_COUNTED 2
+
+ /** For path probing. Store the temporary probe stream ID
+ * for response comparison */
+ streamid_t pathbias_probe_id;
+
+ /** For path probing. Store the temporary probe address nonce
+ * (in host byte order) for response comparison. */
+ uint32_t pathbias_probe_nonce;
+
+ /** Set iff this is a hidden-service circuit which has timed out
+ * according to our current circuit-build timeout, but which has
+ * been kept around because it might still succeed in connecting to
+ * its destination, and which is not a fully-connected rendezvous
+ * circuit.
+ *
+ * (We clear this flag for client-side rendezvous circuits when they
+ * are 'joined' to the other side's rendezvous circuit, so that
+ * connection_ap_handshake_attach_circuit can put client streams on
+ * the circuit. We also clear this flag for service-side rendezvous
+ * circuits when they are 'joined' to a client's rend circ, but only
+ * for symmetry with the client case. Client-side introduction
+ * circuits are closed when we get a joined rend circ, and
+ * service-side introduction circuits never have this flag set.) */
+ unsigned int hs_circ_has_timed_out : 1;
+
+ /** Set iff this circuit has been given a relaxed timeout because
+ * no circuits have opened. Used to prevent spamming logs. */
+ unsigned int relaxed_timeout : 1;
+
+ /** Set iff this is a service-side rendezvous circuit for which a
+ * new connection attempt has been launched. We consider launching
+ * a new service-side rend circ to a client when the previous one
+ * fails; now that we don't necessarily close a service-side rend
+ * circ when we launch a new one to the same client, this flag keeps
+ * us from launching two retries for the same failed rend circ. */
+ unsigned int hs_service_side_rend_circ_has_been_relaunched : 1;
+
+ /** What commands were sent over this circuit that decremented the
+ * RELAY_EARLY counter? This is for debugging task 878. */
+ uint8_t relay_early_commands[MAX_RELAY_EARLY_CELLS_PER_CIRCUIT];
+
+ /** How many RELAY_EARLY cells have been sent over this circuit? This is
+ * for debugging task 878, too. */
+ int relay_early_cells_sent;
+
+ /** The next stream_id that will be tried when we're attempting to
+ * construct a new AP stream originating at this circuit. */
+ streamid_t next_stream_id;
+
+ /* The intro key replaces the hidden service's public key if purpose is
+ * S_ESTABLISH_INTRO or S_INTRO, provided that no unversioned rendezvous
+ * descriptor is used. */
+ crypto_pk_t *intro_key;
+
+ /** Quasi-global identifier for this circuit; used for control.c */
+ /* XXXX NM This can get re-used after 2**32 circuits. */
+ uint32_t global_identifier;
+
+ /** True if we have associated one stream to this circuit, thereby setting
+ * the isolation parameters for this circuit. Note that this doesn't
+ * necessarily mean that we've <em>attached</em> any streams to the circuit:
+ * we may only have marked up this circuit during the launch process.
+ */
+ unsigned int isolation_values_set : 1;
+ /** True iff any stream has <em>ever</em> been attached to this circuit.
+ *
+ * In a better world we could use timestamp_dirty for this, but
+ * timestamp_dirty is far too overloaded at the moment.
+ */
+ unsigned int isolation_any_streams_attached : 1;
+
+ /** A bitfield of ISO_* flags for every isolation field such that this
+ * circuit has had streams with more than one value for that field
+ * attached to it. */
+ uint8_t isolation_flags_mixed;
+
+ /** @name Isolation parameters
+ *
+ * If any streams have been associated with this circ (isolation_values_set
+ * == 1), and all streams associated with the circuit have had the same
+ * value for some field ((isolation_flags_mixed & ISO_FOO) == 0), then these
+ * elements hold the value for that field.
+ *
+ * Note again that "associated" is not the same as "attached": we
+ * preliminarily associate streams with a circuit while the circuit is being
+ * launched, so that we can tell whether we need to launch more circuits.
+ *
+ * @{
+ */
+ uint8_t client_proto_type;
+ uint8_t client_proto_socksver;
+ uint16_t dest_port;
+ tor_addr_t client_addr;
+ char *dest_address;
+ int session_group;
+ unsigned nym_epoch;
+ size_t socks_username_len;
+ uint8_t socks_password_len;
+ /* Note that the next two values are NOT NUL-terminated; see
+ socks_username_len and socks_password_len for their lengths. */
+ char *socks_username;
+ char *socks_password;
+ /** Global identifier for the first stream attached here; used by
+ * ISO_STREAM. */
+ uint64_t associated_isolated_stream_global_id;
+ /**@}*/
+ /** A list of addr_policy_t for this circuit in particular. Used by
+ * adjust_exit_policy_from_exitpolicy_failure.
+ */
+ smartlist_t *prepend_policy;
+
+ /** How long do we wait before closing this circuit if it remains
+ * completely idle after it was built, in seconds? This value
+ * is randomized on a per-circuit basis from CircuitsAvailableTimeout
+ * to 2*CircuitsAvailableTimeout. */
+ int circuit_idle_timeout;
+
+};
+
+#endif
diff --git a/src/core/or/policies.c b/src/core/or/policies.c
new file mode 100644
index 0000000000..53e00e8083
--- /dev/null
+++ b/src/core/or/policies.c
@@ -0,0 +1,3147 @@
+/* Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file policies.c
+ * \brief Code to parse and use address policies and exit policies.
+ *
+ * We have two key kinds of address policy: full and compressed. A full
+ * policy is an array of accept/reject patterns, to be applied in order. A
+ * compressed (or "short") policy is simply a list of ports. This module
+ * handles both
+ * kinds, including generic functions to apply them to addresses, and
+ * also including code to manage the global policies that we apply to
+ * incoming and outgoing connections.
+ **/
+
+#define POLICIES_PRIVATE
+
+#include "or/or.h"
+#include "or/bridges.h"
+#include "or/config.h"
+#include "or/dirserv.h"
+#include "or/microdesc.h"
+#include "or/networkstatus.h"
+#include "or/nodelist.h"
+#include "or/policies.h"
+#include "or/router.h"
+#include "or/routerparse.h"
+#include "or/geoip.h"
+#include "ht.h"
+#include "lib/encoding/confline.h"
+
+#include "or/addr_policy_st.h"
+#include "or/dir_server_st.h"
+#include "or/microdesc_st.h"
+#include "or/node_st.h"
+#include "or/port_cfg_st.h"
+#include "or/routerinfo_st.h"
+#include "or/routerstatus_st.h"
+
+/** Policy that addresses for incoming SOCKS connections must match. */
+static smartlist_t *socks_policy = NULL;
+/** Policy that addresses for incoming directory connections must match. */
+static smartlist_t *dir_policy = NULL;
+/** Policy that addresses for incoming router descriptors must match in order
+ * to be published by us. */
+static smartlist_t *authdir_reject_policy = NULL;
+/** Policy that addresses for incoming router descriptors must match in order
+ * to be marked as valid in our networkstatus. */
+static smartlist_t *authdir_invalid_policy = NULL;
+/** Policy that addresses for incoming router descriptors must <b>not</b>
+ * match in order to not be marked as BadExit. */
+static smartlist_t *authdir_badexit_policy = NULL;
+
+/** Parsed addr_policy_t describing which addresses we believe we can start
+ * circuits at. */
+static smartlist_t *reachable_or_addr_policy = NULL;
+/** Parsed addr_policy_t describing which addresses we believe we can connect
+ * to directories at. */
+static smartlist_t *reachable_dir_addr_policy = NULL;
+
+/** Element of an exit policy summary */
+typedef struct policy_summary_item_t {
+ uint16_t prt_min; /**< Lowest port number to accept/reject. */
+ uint16_t prt_max; /**< Highest port number to accept/reject. */
+ uint64_t reject_count; /**< Number of IP addresses that are rejected for
+ this port range. */
+ unsigned int accepted:1; /**< Has this port already been accepted? */
+} policy_summary_item_t;
+
+/** Private networks. This list is used in two places: first to expand the
+ * "private" keyword when parsing our own exit policy, and second to ignore
+ * just such networks when building exit policy summaries. It is important
+ * that all authorities agree on that list when creating summaries, so don't
+ * just change this without a proper migration plan and a proposal and stuff.
+ */
+static const char *private_nets[] = {
+ "0.0.0.0/8", "169.254.0.0/16",
+ "127.0.0.0/8", "192.168.0.0/16", "10.0.0.0/8", "172.16.0.0/12",
+ "[::]/8",
+ "[fc00::]/7", "[fe80::]/10", "[fec0::]/10", "[ff00::]/8", "[::]/127",
+ NULL
+};
+
+static int policies_parse_exit_policy_internal(
+ config_line_t *cfg,
+ smartlist_t **dest,
+ int ipv6_exit,
+ int rejectprivate,
+ const smartlist_t *configured_addresses,
+ int reject_interface_addresses,
+ int reject_configured_port_addresses,
+ int add_default_policy,
+ int add_reduced_policy);
+
+/** Replace all "private" entries in *<b>policy</b> with their expanded
+ * equivalents. */
+void
+policy_expand_private(smartlist_t **policy)
+{
+ uint16_t port_min, port_max;
+
+ int i;
+ smartlist_t *tmp;
+
+ if (!*policy) /*XXXX disallow NULL policies? */
+ return;
+
+ tmp = smartlist_new();
+
+ SMARTLIST_FOREACH_BEGIN(*policy, addr_policy_t *, p) {
+ if (! p->is_private) {
+ smartlist_add(tmp, p);
+ continue;
+ }
+ for (i = 0; private_nets[i]; ++i) {
+ addr_policy_t newpolicy;
+ memcpy(&newpolicy, p, sizeof(addr_policy_t));
+ newpolicy.is_private = 0;
+ newpolicy.is_canonical = 0;
+ if (tor_addr_parse_mask_ports(private_nets[i], 0,
+ &newpolicy.addr,
+ &newpolicy.maskbits, &port_min, &port_max)<0) {
+ tor_assert_unreached();
+ }
+ smartlist_add(tmp, addr_policy_get_canonical_entry(&newpolicy));
+ }
+ addr_policy_free(p);
+ } SMARTLIST_FOREACH_END(p);
+
+ smartlist_free(*policy);
+ *policy = tmp;
+}
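+
+/* Sketch of the effect: a single "reject private:*" entry expands, in
+ * order, into one "reject <net>:*" entry per element of private_nets[]
+ * above, each keeping the action and port range of the original. */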
+
+/** Expand each of the AF_UNSPEC elements in *<b>policy</b> (which indicate
+ * protocol-neutral wildcards) into a pair of wildcard elements: one IPv4-
+ * specific and one IPv6-specific. */
+void
+policy_expand_unspec(smartlist_t **policy)
+{
+ smartlist_t *tmp;
+ if (!*policy)
+ return;
+
+ tmp = smartlist_new();
+ SMARTLIST_FOREACH_BEGIN(*policy, addr_policy_t *, p) {
+ sa_family_t family = tor_addr_family(&p->addr);
+ if (family == AF_INET6 || family == AF_INET || p->is_private) {
+ smartlist_add(tmp, p);
+ } else if (family == AF_UNSPEC) {
+ addr_policy_t newpolicy_ipv4;
+ addr_policy_t newpolicy_ipv6;
+ memcpy(&newpolicy_ipv4, p, sizeof(addr_policy_t));
+ memcpy(&newpolicy_ipv6, p, sizeof(addr_policy_t));
+ newpolicy_ipv4.is_canonical = 0;
+ newpolicy_ipv6.is_canonical = 0;
+ if (p->maskbits != 0) {
+ log_warn(LD_BUG, "AF_UNSPEC policy with maskbits==%d", p->maskbits);
+ newpolicy_ipv4.maskbits = 0;
+ newpolicy_ipv6.maskbits = 0;
+ }
+ tor_addr_from_ipv4h(&newpolicy_ipv4.addr, 0);
+ tor_addr_from_ipv6_bytes(&newpolicy_ipv6.addr,
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0");
+ smartlist_add(tmp, addr_policy_get_canonical_entry(&newpolicy_ipv4));
+ smartlist_add(tmp, addr_policy_get_canonical_entry(&newpolicy_ipv6));
+ addr_policy_free(p);
+ } else {
+ log_warn(LD_BUG, "Funny-looking address policy with family %d", family);
+ smartlist_add(tmp, p);
+ }
+ } SMARTLIST_FOREACH_END(p);
+
+ smartlist_free(*policy);
+ *policy = tmp;
+}
+
+/**
+ * Given a linked list of config lines containing "accept[6]" and "reject[6]"
+ * tokens, parse them and append the result to <b>dest</b>. Return -1
+ * if any tokens are malformed (and don't append any), else return 0.
+ *
+ * If <b>assume_action</b> is nonnegative, then insert its action
+ * (ADDR_POLICY_ACCEPT or ADDR_POLICY_REJECT) for items that specify no
+ * action.
+ */
+static int
+parse_addr_policy(config_line_t *cfg, smartlist_t **dest,
+ int assume_action)
+{
+ smartlist_t *result;
+ smartlist_t *entries;
+ addr_policy_t *item;
+ int malformed_list;
+ int r = 0;
+
+ if (!cfg)
+ return 0;
+
+ result = smartlist_new();
+ entries = smartlist_new();
+ for (; cfg; cfg = cfg->next) {
+ smartlist_split_string(entries, cfg->value, ",",
+ SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, 0);
+ SMARTLIST_FOREACH_BEGIN(entries, const char *, ent) {
+ log_debug(LD_CONFIG,"Adding new entry '%s'",ent);
+ malformed_list = 0;
+ item = router_parse_addr_policy_item_from_string(ent, assume_action,
+ &malformed_list);
+ if (item) {
+ smartlist_add(result, item);
+ } else if (malformed_list) {
+ /* the error is so severe the entire list should be discarded */
+ log_warn(LD_CONFIG, "Malformed policy '%s'. Discarding entire policy "
+ "list.", ent);
+ r = -1;
+ } else {
+ /* the error is minor: don't add the item, but keep processing the
+ * rest of the policies in the list */
+ log_debug(LD_CONFIG, "Ignored policy '%s' due to non-fatal error. "
+ "The remainder of the policy list will be used.",
+ ent);
+ }
+ } SMARTLIST_FOREACH_END(ent);
+ SMARTLIST_FOREACH(entries, char *, ent, tor_free(ent));
+ smartlist_clear(entries);
+ }
+ smartlist_free(entries);
+ if (r == -1) {
+ addr_policy_list_free(result);
+ } else {
+ policy_expand_private(&result);
+ policy_expand_unspec(&result);
+
+ if (*dest) {
+ smartlist_add_all(*dest, result);
+ smartlist_free(result);
+ } else {
+ *dest = result;
+ }
+ }
+
+ return r;
+}
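+
+/* Usage sketch (hypothetical torrc-style input): parsing the line
+ * "accept 127.0.0.1:80, reject *:*" appends the resulting entries to
+ * *dest; the AF_UNSPEC "*" wildcard is expanded into an IPv4 and an IPv6
+ * entry by policy_expand_unspec() above:
+ *
+ *   smartlist_t *policy = NULL;
+ *   config_line_t *cfg = ...; // e.g. options->SocksPolicy
+ *   if (parse_addr_policy(cfg, &policy, ADDR_POLICY_ACCEPT) < 0) {
+ *     // The whole list was malformed; nothing was appended.
+ *   }
+ */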
+
+/** Helper: parse the Reachable(Dir|OR)?Addresses fields into
+ * reachable_(or|dir)_addr_policy. The options should already have
+ * been validated by validate_addr_policies.
+ */
+static int
+parse_reachable_addresses(void)
+{
+ const or_options_t *options = get_options();
+ int ret = 0;
+
+ if (options->ReachableDirAddresses &&
+ options->ReachableORAddresses &&
+ options->ReachableAddresses) {
+ log_warn(LD_CONFIG,
+ "Both ReachableDirAddresses and ReachableORAddresses are set. "
+ "ReachableAddresses setting will be ignored.");
+ }
+ addr_policy_list_free(reachable_or_addr_policy);
+ reachable_or_addr_policy = NULL;
+ if (!options->ReachableORAddresses && options->ReachableAddresses)
+ log_info(LD_CONFIG,
+ "Using ReachableAddresses as ReachableORAddresses.");
+ if (parse_addr_policy(options->ReachableORAddresses ?
+ options->ReachableORAddresses :
+ options->ReachableAddresses,
+ &reachable_or_addr_policy, ADDR_POLICY_ACCEPT)) {
+ log_warn(LD_CONFIG,
+ "Error parsing Reachable%sAddresses entry; ignoring.",
+ options->ReachableORAddresses ? "OR" : "");
+ ret = -1;
+ }
+
+ addr_policy_list_free(reachable_dir_addr_policy);
+ reachable_dir_addr_policy = NULL;
+ if (!options->ReachableDirAddresses && options->ReachableAddresses)
+ log_info(LD_CONFIG,
+ "Using ReachableAddresses as ReachableDirAddresses");
+ if (parse_addr_policy(options->ReachableDirAddresses ?
+ options->ReachableDirAddresses :
+ options->ReachableAddresses,
+ &reachable_dir_addr_policy, ADDR_POLICY_ACCEPT)) {
+ if (options->ReachableDirAddresses)
+ log_warn(LD_CONFIG,
+ "Error parsing ReachableDirAddresses entry; ignoring.");
+ ret = -1;
+ }
+
+ /* We ignore ReachableAddresses for relays */
+ if (!server_mode(options)) {
+ if (policy_is_reject_star(reachable_or_addr_policy, AF_UNSPEC, 0)
+ || policy_is_reject_star(reachable_dir_addr_policy, AF_UNSPEC,0)) {
+ log_warn(LD_CONFIG, "Tor cannot connect to the Internet if "
+ "ReachableAddresses, ReachableORAddresses, or "
+ "ReachableDirAddresses reject all addresses. Please accept "
+ "some addresses in these options.");
+ } else if (options->ClientUseIPv4 == 1
+ && (policy_is_reject_star(reachable_or_addr_policy, AF_INET, 0)
+ || policy_is_reject_star(reachable_dir_addr_policy, AF_INET, 0))) {
+ log_warn(LD_CONFIG, "You have set ClientUseIPv4 1, but "
+ "ReachableAddresses, ReachableORAddresses, or "
+ "ReachableDirAddresses reject all IPv4 addresses. "
+ "Tor will not connect using IPv4.");
+ } else if (fascist_firewall_use_ipv6(options)
+ && (policy_is_reject_star(reachable_or_addr_policy, AF_INET6, 0)
+ || policy_is_reject_star(reachable_dir_addr_policy, AF_INET6, 0))) {
+ log_warn(LD_CONFIG, "You have configured tor to use or prefer IPv6 "
+ "(or UseBridges 1), but "
+ "ReachableAddresses, ReachableORAddresses, or "
+ "ReachableDirAddresses reject all IPv6 addresses. "
+ "Tor will not connect using IPv6.");
+ }
+ }
+
+ return ret;
+}
+
+/* Return true iff ClientUseIPv4 0 or ClientUseIPv6 0 might block any OR or Dir
+ * address:port combination. */
+static int
+firewall_is_fascist_impl(void)
+{
+ const or_options_t *options = get_options();
+ /* Assume every non-bridge relay has an IPv4 address.
+ * Clients which use bridges may only know the IPv6 address of their
+ * bridge, but they will connect regardless of the ClientUseIPv6 setting. */
+ return options->ClientUseIPv4 == 0;
+}
+
+/** Return true iff the firewall options, including ClientUseIPv4 0 and
+ * ClientUseIPv6 0, might block any OR address:port combination.
+ * Address preferences may still change which address is selected even if
+ * this function returns false.
+ */
+int
+firewall_is_fascist_or(void)
+{
+ return (reachable_or_addr_policy != NULL || firewall_is_fascist_impl());
+}
+
+/** Return true iff the firewall options, including ClientUseIPv4 0 and
+ * ClientUseIPv6 0, might block any Dir address:port combination.
+ * Address preferences may still change which address is selected even if
+ * this function returns false.
+ */
+int
+firewall_is_fascist_dir(void)
+{
+ return (reachable_dir_addr_policy != NULL || firewall_is_fascist_impl());
+}
+
+/** Return true iff <b>policy</b> (possibly NULL) will allow a
+ * connection to <b>addr</b>:<b>port</b>.
+ */
+static int
+addr_policy_permits_tor_addr(const tor_addr_t *addr, uint16_t port,
+ smartlist_t *policy)
+{
+ addr_policy_result_t p;
+ p = compare_tor_addr_to_addr_policy(addr, port, policy);
+ switch (p) {
+ case ADDR_POLICY_PROBABLY_ACCEPTED:
+ case ADDR_POLICY_ACCEPTED:
+ return 1;
+ case ADDR_POLICY_PROBABLY_REJECTED:
+ case ADDR_POLICY_REJECTED:
+ return 0;
+ default:
+ log_warn(LD_BUG, "Unexpected result: %d", (int)p);
+ return 0;
+ }
+}
+
+/** Return true iff <b>policy</b> (possibly NULL) will allow a connection to
+ * <b>addr</b>:<b>port</b>. <b>addr</b> is an IPv4 address given in host
+ * order. */
+/* XXXX deprecate when possible. */
+static int
+addr_policy_permits_address(uint32_t addr, uint16_t port,
+ smartlist_t *policy)
+{
+ tor_addr_t a;
+ tor_addr_from_ipv4h(&a, addr);
+ return addr_policy_permits_tor_addr(&a, port, policy);
+}
+
+/** Return true iff we think our firewall will let us make a connection to
+ * addr:port.
+ *
+ * If we are configured as a server, ignore any address family preference and
+ * just use IPv4.
+ * Otherwise:
+ * - return false for all IPv4 addresses:
+ *   - if ClientUseIPv4 is 0, or
+ *   - if pref_only and pref_ipv6 are both true;
+ * - return false for all IPv6 addresses:
+ * - if fascist_firewall_use_ipv6() is 0, or
+ * - if pref_only is true and pref_ipv6 is false.
+ *
+ * Return false if addr is NULL or tor_addr_is_null(), or if port is 0. */
+STATIC int
+fascist_firewall_allows_address(const tor_addr_t *addr,
+ uint16_t port,
+ smartlist_t *firewall_policy,
+ int pref_only, int pref_ipv6)
+{
+ const or_options_t *options = get_options();
+ const int client_mode = !server_mode(options);
+
+ if (!addr || tor_addr_is_null(addr) || !port) {
+ return 0;
+ }
+
+ /* Clients stop using IPv4 if it's disabled. In most cases, clients also
+ * stop using IPv4 if it's not preferred.
+ * Servers must have IPv4 enabled and preferred. */
+ if (tor_addr_family(addr) == AF_INET && client_mode &&
+ (!options->ClientUseIPv4 || (pref_only && pref_ipv6))) {
+ return 0;
+ }
+
+ /* Clients and Servers won't use IPv6 unless it's enabled (and in most
+ * cases, IPv6 must also be preferred before it will be used). */
+ if (tor_addr_family(addr) == AF_INET6 &&
+ (!fascist_firewall_use_ipv6(options) || (pref_only && !pref_ipv6))) {
+ return 0;
+ }
+
+ return addr_policy_permits_tor_addr(addr, port,
+ firewall_policy);
+}
+
+/** Is this client configured to use IPv6?
+ * Returns true if the client might use IPv6 for some of its connections
+ * (including dual-stack and IPv6-only clients), and false if it will never
+ * use IPv6 for any connections.
+ * Use node_ipv6_or/dir_preferred() when checking a specific node and OR/Dir
+ * port: it supports bridge client per-node IPv6 preferences.
+ */
+int
+fascist_firewall_use_ipv6(const or_options_t *options)
+{
+ /* Clients use IPv6 if it's set, or they use bridges, or they don't use
+ * IPv4, or they prefer it.
+ * ClientPreferIPv6DirPort is deprecated, but check it anyway. */
+ return (options->ClientUseIPv6 == 1 || options->ClientUseIPv4 == 0 ||
+ options->ClientPreferIPv6ORPort == 1 ||
+ options->ClientPreferIPv6DirPort == 1 || options->UseBridges == 1);
+}
+
+/** Do we prefer to connect to IPv6, ignoring ClientPreferIPv6ORPort and
+ * ClientPreferIPv6DirPort?
+ * If we're unsure, return -1, otherwise, return 1 for IPv6 and 0 for IPv4.
+ */
+static int
+fascist_firewall_prefer_ipv6_impl(const or_options_t *options)
+{
+ /*
+ Cheap implementation of config options ClientUseIPv4 & ClientUseIPv6 --
+ If we're a server or IPv6 is disabled, use IPv4.
+ If IPv4 is disabled, use IPv6.
+ */
+
+ if (server_mode(options) || !fascist_firewall_use_ipv6(options)) {
+ return 0;
+ }
+
+ if (!options->ClientUseIPv4) {
+ return 1;
+ }
+
+ return -1;
+}
+
+/** Do we prefer to connect to IPv6 ORPorts?
+ * Use node_ipv6_or_preferred() whenever possible: it supports bridge client
+ * per-node IPv6 preferences.
+ */
+int
+fascist_firewall_prefer_ipv6_orport(const or_options_t *options)
+{
+ int pref_ipv6 = fascist_firewall_prefer_ipv6_impl(options);
+
+ if (pref_ipv6 >= 0) {
+ return pref_ipv6;
+ }
+
+ /* We can use both IPv4 and IPv6 - which do we prefer? */
+ if (options->ClientPreferIPv6ORPort == 1) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/** Do we prefer to connect to IPv6 DirPorts?
+ *
+ * (node_ipv6_dir_preferred() doesn't support bridge client per-node IPv6
+ * preferences. There's no reason to use it instead of this function.)
+ */
+int
+fascist_firewall_prefer_ipv6_dirport(const or_options_t *options)
+{
+ int pref_ipv6 = fascist_firewall_prefer_ipv6_impl(options);
+
+ if (pref_ipv6 >= 0) {
+ return pref_ipv6;
+ }
+
+ /* We can use both IPv4 and IPv6 - which do we prefer? */
+ if (options->ClientPreferIPv6DirPort == 1) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/** Return true iff we think our firewall will let us make a connection to
+ * addr:port. Uses ReachableORAddresses or ReachableDirAddresses based on
+ * fw_connection.
+ * If pref_only is true, return true if addr is in the client's preferred
+ * address family, which is IPv6 if pref_ipv6 is true, and IPv4 otherwise.
+ * If pref_only is false, ignore pref_ipv6, and return true if addr is allowed.
+ */
+int
+fascist_firewall_allows_address_addr(const tor_addr_t *addr, uint16_t port,
+ firewall_connection_t fw_connection,
+ int pref_only, int pref_ipv6)
+{
+ if (fw_connection == FIREWALL_OR_CONNECTION) {
+ return fascist_firewall_allows_address(addr, port,
+ reachable_or_addr_policy,
+ pref_only, pref_ipv6);
+ } else if (fw_connection == FIREWALL_DIR_CONNECTION) {
+ return fascist_firewall_allows_address(addr, port,
+ reachable_dir_addr_policy,
+ pref_only, pref_ipv6);
+ } else {
+ log_warn(LD_BUG, "Bad firewall_connection_t value %d.",
+ fw_connection);
+ return 0;
+ }
+}
+
+/** Return true iff we think our firewall will let us make a connection to
+ * addr:port (ap). Uses ReachableORAddresses or ReachableDirAddresses based on
+ * fw_connection.
+ * pref_only and pref_ipv6 work as in fascist_firewall_allows_address_addr().
+ */
+static int
+fascist_firewall_allows_address_ap(const tor_addr_port_t *ap,
+ firewall_connection_t fw_connection,
+ int pref_only, int pref_ipv6)
+{
+ tor_assert(ap);
+ return fascist_firewall_allows_address_addr(&ap->addr, ap->port,
+ fw_connection, pref_only,
+ pref_ipv6);
+}
+
+/** Return true iff we think our firewall will let us make a connection to
+ * ipv4h_or_addr:ipv4_or_port. ipv4h_or_addr is interpreted in host order.
+ * Uses ReachableORAddresses or ReachableDirAddresses based on
+ * fw_connection.
+ * pref_only and pref_ipv6 work as in fascist_firewall_allows_address_addr().
+ */
+static int
+fascist_firewall_allows_address_ipv4h(uint32_t ipv4h_or_addr,
+ uint16_t ipv4_or_port,
+ firewall_connection_t fw_connection,
+ int pref_only, int pref_ipv6)
+{
+ tor_addr_t ipv4_or_addr;
+ tor_addr_from_ipv4h(&ipv4_or_addr, ipv4h_or_addr);
+ return fascist_firewall_allows_address_addr(&ipv4_or_addr, ipv4_or_port,
+ fw_connection, pref_only,
+ pref_ipv6);
+}
+
+/** Return true iff we think our firewall will let us make a connection to
+ * ipv4h_addr/ipv6_addr. Uses ipv4_orport/ipv6_orport/ReachableORAddresses or
+ * ipv4_dirport/ipv6_dirport/ReachableDirAddresses based on IPv4/IPv6 and
+ * <b>fw_connection</b>.
+ * pref_only and pref_ipv6 work as in fascist_firewall_allows_address_addr().
+ */
+static int
+fascist_firewall_allows_base(uint32_t ipv4h_addr, uint16_t ipv4_orport,
+ uint16_t ipv4_dirport,
+ const tor_addr_t *ipv6_addr, uint16_t ipv6_orport,
+ uint16_t ipv6_dirport,
+ firewall_connection_t fw_connection,
+ int pref_only, int pref_ipv6)
+{
+ if (fascist_firewall_allows_address_ipv4h(ipv4h_addr,
+ (fw_connection == FIREWALL_OR_CONNECTION
+ ? ipv4_orport
+ : ipv4_dirport),
+ fw_connection,
+ pref_only, pref_ipv6)) {
+ return 1;
+ }
+
+ if (fascist_firewall_allows_address_addr(ipv6_addr,
+ (fw_connection == FIREWALL_OR_CONNECTION
+ ? ipv6_orport
+ : ipv6_dirport),
+ fw_connection,
+ pref_only, pref_ipv6)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/** Like fascist_firewall_allows_base(), but takes ri. */
+static int
+fascist_firewall_allows_ri_impl(const routerinfo_t *ri,
+ firewall_connection_t fw_connection,
+ int pref_only, int pref_ipv6)
+{
+ if (!ri) {
+ return 0;
+ }
+
+ /* Assume IPv4 and IPv6 DirPorts are the same */
+ return fascist_firewall_allows_base(ri->addr, ri->or_port, ri->dir_port,
+ &ri->ipv6_addr, ri->ipv6_orport,
+ ri->dir_port, fw_connection, pref_only,
+ pref_ipv6);
+}
+
+/** Like fascist_firewall_allows_rs(), but takes pref_ipv6. */
+static int
+fascist_firewall_allows_rs_impl(const routerstatus_t *rs,
+ firewall_connection_t fw_connection,
+ int pref_only, int pref_ipv6)
+{
+ if (!rs) {
+ return 0;
+ }
+
+ /* Assume IPv4 and IPv6 DirPorts are the same */
+ return fascist_firewall_allows_base(rs->addr, rs->or_port, rs->dir_port,
+ &rs->ipv6_addr, rs->ipv6_orport,
+ rs->dir_port, fw_connection, pref_only,
+ pref_ipv6);
+}
+
+/** Like fascist_firewall_allows_base(), but takes rs.
+ * When rs is a fake_status from a dir_server_t, it can have a reachable
+ * address, even when the corresponding node does not.
+ * nodes can be missing addresses when there's no consensus (IPv4 and IPv6),
+ * or when there is a microdescriptor consensus, but no microdescriptors
+ * (microdescriptors have IPv6, the microdesc consensus does not). */
+int
+fascist_firewall_allows_rs(const routerstatus_t *rs,
+ firewall_connection_t fw_connection, int pref_only)
+{
+ if (!rs) {
+ return 0;
+ }
+
+ /* We don't have access to the node-specific IPv6 preference, so use the
+ * generic IPv6 preference instead. */
+ const or_options_t *options = get_options();
+ int pref_ipv6 = (fw_connection == FIREWALL_OR_CONNECTION
+ ? fascist_firewall_prefer_ipv6_orport(options)
+ : fascist_firewall_prefer_ipv6_dirport(options));
+
+ return fascist_firewall_allows_rs_impl(rs, fw_connection, pref_only,
+ pref_ipv6);
+}
+
+/** Like fascist_firewall_allows_base(), but takes <b>md</b>.
+ * Microdescriptors have no IPv4 address and no DirPort, so this only checks
+ * md->ipv6_addr:md->ipv6_orport against ReachableORAddresses, and returns 0
+ * if <b>fw_connection</b> is FIREWALL_DIR_CONNECTION.
+ */
+static int
+fascist_firewall_allows_md_impl(const microdesc_t *md,
+ firewall_connection_t fw_connection,
+ int pref_only, int pref_ipv6)
+{
+ if (!md) {
+ return 0;
+ }
+
+ /* Can't check dirport, it doesn't have one */
+ if (fw_connection == FIREWALL_DIR_CONNECTION) {
+ return 0;
+ }
+
+ /* Also can't check IPv4, doesn't have that either */
+ return fascist_firewall_allows_address_addr(&md->ipv6_addr, md->ipv6_orport,
+ fw_connection, pref_only,
+ pref_ipv6);
+}
+
+/** Like fascist_firewall_allows_base(), but takes node, and looks up pref_ipv6
+ * from node_ipv6_or/dir_preferred(). */
+int
+fascist_firewall_allows_node(const node_t *node,
+ firewall_connection_t fw_connection,
+ int pref_only)
+{
+ if (!node) {
+ return 0;
+ }
+
+ node_assert_ok(node);
+
+ const int pref_ipv6 = (fw_connection == FIREWALL_OR_CONNECTION
+ ? node_ipv6_or_preferred(node)
+ : node_ipv6_dir_preferred(node));
+
+ /* Sometimes, the rs is missing the IPv6 address info, and we need to go
+ * all the way to the md */
+ if (node->ri && fascist_firewall_allows_ri_impl(node->ri, fw_connection,
+ pref_only, pref_ipv6)) {
+ return 1;
+ } else if (node->rs && fascist_firewall_allows_rs_impl(node->rs,
+ fw_connection,
+ pref_only,
+ pref_ipv6)) {
+ return 1;
+ } else if (node->md && fascist_firewall_allows_md_impl(node->md,
+ fw_connection,
+ pref_only,
+ pref_ipv6)) {
+ return 1;
+ } else {
+ /* If we know nothing, assume it's unreachable, we'll never get an address
+ * to connect to. */
+ return 0;
+ }
+}
+
+/** Like fascist_firewall_allows_rs(), but takes ds. */
+int
+fascist_firewall_allows_dir_server(const dir_server_t *ds,
+ firewall_connection_t fw_connection,
+ int pref_only)
+{
+ if (!ds) {
+ return 0;
+ }
+
+ /* A dir_server_t always has a fake_status. As long as it has the same
+ * addresses/ports in both fake_status and dir_server_t, this works fine.
+ * (See #17867.)
+ * fascist_firewall_allows_rs only checks the addresses in fake_status. */
+ return fascist_firewall_allows_rs(&ds->fake_status, fw_connection,
+ pref_only);
+}
+
+/** If a and b are both valid and allowed by fw_connection,
+ * choose one based on want_a and return it.
+ * Otherwise, return whichever is allowed.
+ * Otherwise, return NULL.
+ * pref_only and pref_ipv6 work as in fascist_firewall_allows_address_addr().
+ */
+static const tor_addr_port_t *
+fascist_firewall_choose_address_impl(const tor_addr_port_t *a,
+ const tor_addr_port_t *b,
+ int want_a,
+ firewall_connection_t fw_connection,
+ int pref_only, int pref_ipv6)
+{
+ const tor_addr_port_t *use_a = NULL;
+ const tor_addr_port_t *use_b = NULL;
+
+ if (fascist_firewall_allows_address_ap(a, fw_connection, pref_only,
+ pref_ipv6)) {
+ use_a = a;
+ }
+
+ if (fascist_firewall_allows_address_ap(b, fw_connection, pref_only,
+ pref_ipv6)) {
+ use_b = b;
+ }
+
+ /* If both are allowed */
+ if (use_a && use_b) {
+ /* Choose a if we want it */
+ return (want_a ? use_a : use_b);
+ } else {
+ /* Choose a if we have it */
+ return (use_a ? use_a : use_b);
+ }
+}
+
+/** If a and b are both valid and preferred by fw_connection,
+ * choose one based on want_a and return it.
+ * Otherwise, return whichever is preferred.
+ * If neither is preferred, and pref_only is false:
+ * - If a and b are both allowed by fw_connection,
+ * choose one based on want_a and return it.
+ * - Otherwise, return whichever is allowed.
+ * Otherwise, return NULL. */
+STATIC const tor_addr_port_t *
+fascist_firewall_choose_address(const tor_addr_port_t *a,
+ const tor_addr_port_t *b,
+ int want_a,
+ firewall_connection_t fw_connection,
+ int pref_only, int pref_ipv6)
+{
+ const tor_addr_port_t *pref = fascist_firewall_choose_address_impl(
+ a, b, want_a,
+ fw_connection,
+ 1, pref_ipv6);
+ if (pref_only || pref) {
+ /* If there is a preferred address, use it. If we can only use preferred
+ * addresses, and neither address is preferred, pref will be NULL, and we
+ * want to return NULL, so return it. */
+ return pref;
+ } else {
+ /* If there's no preferred address, and we can return addresses that are
+ * not preferred, use an address that's allowed */
+ return fascist_firewall_choose_address_impl(a, b, want_a, fw_connection,
+ 0, pref_ipv6);
+ }
+}
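+
+/* Illustrative call (192.0.2.1 and 2001:db8::1 are documentation
+ * addresses): with both ORPorts reachable and pref_ipv6 set, the IPv6
+ * address/port pair is returned:
+ *
+ *   tor_addr_port_t v4, v6;
+ *   tor_addr_parse(&v4.addr, "192.0.2.1");   v4.port = 9001;
+ *   tor_addr_parse(&v6.addr, "2001:db8::1"); v6.port = 9001;
+ *   const tor_addr_port_t *ap =
+ *     fascist_firewall_choose_address(&v4, &v6, 0, FIREWALL_OR_CONNECTION,
+ *                                     0, 1);
+ *   // ap == &v6 if allowed; otherwise &v4 if only IPv4 is allowed,
+ *   // or NULL if neither is.
+ */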
+
+/** Copy an address and port into <b>ap</b> that we think our firewall will
+ * let us connect to. Uses ipv4_addr/ipv6_addr and
+ * ipv4_orport/ipv6_orport/ReachableORAddresses or
+ * ipv4_dirport/ipv6_dirport/ReachableDirAddresses based on IPv4/IPv6 and
+ * <b>fw_connection</b>.
+ * If pref_only, only choose preferred addresses. In either case, choose
+ * a preferred address before an address that's not preferred.
+ * If both addresses could be chosen (they are both preferred or both allowed)
+ * choose IPv6 if pref_ipv6 is true, otherwise choose IPv4. */
+static void
+fascist_firewall_choose_address_base(const tor_addr_t *ipv4_addr,
+ uint16_t ipv4_orport,
+ uint16_t ipv4_dirport,
+ const tor_addr_t *ipv6_addr,
+ uint16_t ipv6_orport,
+ uint16_t ipv6_dirport,
+ firewall_connection_t fw_connection,
+ int pref_only,
+ int pref_ipv6,
+ tor_addr_port_t* ap)
+{
+ const tor_addr_port_t *result = NULL;
+ const int want_ipv4 = !pref_ipv6;
+
+ tor_assert(ipv6_addr);
+ tor_assert(ap);
+
+ tor_addr_make_null(&ap->addr, AF_UNSPEC);
+ ap->port = 0;
+
+ tor_addr_port_t ipv4_ap;
+ tor_addr_copy(&ipv4_ap.addr, ipv4_addr);
+ ipv4_ap.port = (fw_connection == FIREWALL_OR_CONNECTION
+ ? ipv4_orport
+ : ipv4_dirport);
+
+ tor_addr_port_t ipv6_ap;
+ tor_addr_copy(&ipv6_ap.addr, ipv6_addr);
+ ipv6_ap.port = (fw_connection == FIREWALL_OR_CONNECTION
+ ? ipv6_orport
+ : ipv6_dirport);
+
+ result = fascist_firewall_choose_address(&ipv4_ap, &ipv6_ap,
+ want_ipv4,
+ fw_connection, pref_only,
+ pref_ipv6);
+
+ if (result) {
+ tor_addr_copy(&ap->addr, &result->addr);
+ ap->port = result->port;
+ }
+}
+
+/** Like fascist_firewall_choose_address_base(), but takes a host-order IPv4
+ * address as the first parameter. */
+static void
+fascist_firewall_choose_address_ipv4h(uint32_t ipv4h_addr,
+ uint16_t ipv4_orport,
+ uint16_t ipv4_dirport,
+ const tor_addr_t *ipv6_addr,
+ uint16_t ipv6_orport,
+ uint16_t ipv6_dirport,
+ firewall_connection_t fw_connection,
+ int pref_only,
+ int pref_ipv6,
+ tor_addr_port_t* ap)
+{
+ tor_addr_t ipv4_addr;
+ tor_addr_from_ipv4h(&ipv4_addr, ipv4h_addr);
+ tor_assert(ap);
+
+ tor_addr_make_null(&ap->addr, AF_UNSPEC);
+ ap->port = 0;
+
+ fascist_firewall_choose_address_base(&ipv4_addr, ipv4_orport,
+ ipv4_dirport, ipv6_addr,
+ ipv6_orport, ipv6_dirport,
+ fw_connection, pref_only,
+ pref_ipv6, ap);
+}
+
+/** Some microdescriptor consensus methods have no IPv6 addresses in rs: they
+ * are in the microdescriptors. For these consensus methods, we can't rely on
+ * the node's IPv6 address until its microdescriptor is available (when using
+ * microdescs).
+ * But for bridges, rewrite_node_address_for_bridge() updates node->ri with
+ * the configured address, so we can trust bridge addresses.
+ * (Bridges could gain an IPv6 address if their microdescriptor arrives, but
+ * this will never be their preferred address: that is in the config.)
+ * Returns true if the node needs a microdescriptor for its IPv6 address, and
+ * false if the addresses in the node are already up-to-date.
+ */
+static int
+node_awaiting_ipv6(const or_options_t* options, const node_t *node)
+{
+ tor_assert(node);
+
+ /* There's no point waiting for an IPv6 address if we'd never use it */
+ if (!fascist_firewall_use_ipv6(options)) {
+ return 0;
+ }
+
+ /* If the node has an IPv6 address, we're not waiting */
+ if (node_has_ipv6_addr(node)) {
+ return 0;
+ }
+
+ /* If the current consensus method and flavour has IPv6 addresses, we're not
+ * waiting */
+ if (networkstatus_consensus_has_ipv6(options)) {
+ return 0;
+ }
+
+ /* Bridge clients never use the address from a bridge's md, so there's no
+ * need to wait for it. */
+ if (node_is_a_configured_bridge(node)) {
+ return 0;
+ }
+
+ /* We are waiting if we_use_microdescriptors_for_circuits() and we have no
+ * md. */
+ return (!node->md && we_use_microdescriptors_for_circuits(options));
+}
+
+/** Like fascist_firewall_choose_address_base(), but takes <b>rs</b>.
+ * Consults the corresponding node, then falls back to rs if node is NULL.
+ * This should only happen when there's no valid consensus, and rs doesn't
+ * correspond to a bridge client's bridge.
+ */
+void
+fascist_firewall_choose_address_rs(const routerstatus_t *rs,
+ firewall_connection_t fw_connection,
+ int pref_only, tor_addr_port_t* ap)
+{
+ tor_assert(ap);
+
+ tor_addr_make_null(&ap->addr, AF_UNSPEC);
+ ap->port = 0;
+
+ if (!rs) {
+ return;
+ }
+
+ const or_options_t *options = get_options();
+ const node_t *node = node_get_by_id(rs->identity_digest);
+
+ if (node && !node_awaiting_ipv6(options, node)) {
+ fascist_firewall_choose_address_node(node, fw_connection, pref_only, ap);
+ } else {
+ /* There's no node-specific IPv6 preference, so use the generic IPv6
+ * preference instead. */
+ int pref_ipv6 = (fw_connection == FIREWALL_OR_CONNECTION
+ ? fascist_firewall_prefer_ipv6_orport(options)
+ : fascist_firewall_prefer_ipv6_dirport(options));
+
+ /* Assume IPv4 and IPv6 DirPorts are the same.
+ * Assume the IPv6 OR and Dir addresses are the same. */
+ fascist_firewall_choose_address_ipv4h(rs->addr, rs->or_port, rs->dir_port,
+ &rs->ipv6_addr, rs->ipv6_orport,
+ rs->dir_port, fw_connection,
+ pref_only, pref_ipv6, ap);
+ }
+}
+
+/** Like fascist_firewall_choose_address_base(), but takes <b>node</b>, and
+ * looks up the node's IPv6 preference rather than taking an argument
+ * for pref_ipv6. */
+void
+fascist_firewall_choose_address_node(const node_t *node,
+ firewall_connection_t fw_connection,
+ int pref_only, tor_addr_port_t *ap)
+{
+ tor_assert(ap);
+
+ tor_addr_make_null(&ap->addr, AF_UNSPEC);
+ ap->port = 0;
+
+ if (!node) {
+ return;
+ }
+
+ node_assert_ok(node);
+ /* Calling fascist_firewall_choose_address_node() when the node is missing
+ * IPv6 information breaks IPv6-only clients.
+ * If the node is a hard-coded fallback directory or authority, call
+ * fascist_firewall_choose_address_rs() on the fake (hard-coded) routerstatus
+ * for the node.
+ * If it is not hard-coded, check that the node has a microdescriptor, full
+ * descriptor (routerinfo), or is one of our configured bridges before
+ * calling this function. */
+ if (BUG(node_awaiting_ipv6(get_options(), node))) {
+ return;
+ }
+
+ const int pref_ipv6_node = (fw_connection == FIREWALL_OR_CONNECTION
+ ? node_ipv6_or_preferred(node)
+ : node_ipv6_dir_preferred(node));
+
+ tor_addr_port_t ipv4_or_ap;
+ node_get_prim_orport(node, &ipv4_or_ap);
+ tor_addr_port_t ipv4_dir_ap;
+ node_get_prim_dirport(node, &ipv4_dir_ap);
+
+ tor_addr_port_t ipv6_or_ap;
+ node_get_pref_ipv6_orport(node, &ipv6_or_ap);
+ tor_addr_port_t ipv6_dir_ap;
+ node_get_pref_ipv6_dirport(node, &ipv6_dir_ap);
+
+ /* Assume the IPv6 OR and Dir addresses are the same. */
+ fascist_firewall_choose_address_base(&ipv4_or_ap.addr, ipv4_or_ap.port,
+ ipv4_dir_ap.port, &ipv6_or_ap.addr,
+ ipv6_or_ap.port, ipv6_dir_ap.port,
+ fw_connection, pref_only,
+ pref_ipv6_node, ap);
+}
+
+/** Like fascist_firewall_choose_address_rs(), but takes <b>ds</b>. */
+void
+fascist_firewall_choose_address_dir_server(const dir_server_t *ds,
+ firewall_connection_t fw_connection,
+ int pref_only,
+ tor_addr_port_t *ap)
+{
+ tor_assert(ap);
+
+ tor_addr_make_null(&ap->addr, AF_UNSPEC);
+ ap->port = 0;
+
+ if (!ds) {
+ return;
+ }
+
+ /* A dir_server_t always has a fake_status. As long as it has the same
+ * addresses/ports in both fake_status and dir_server_t, this works fine.
+ * (See #17867.)
+ * This function relies on fascist_firewall_choose_address_rs looking up the
+ * node if it can, because that will get the latest info for the relay. */
+ fascist_firewall_choose_address_rs(&ds->fake_status, fw_connection,
+ pref_only, ap);
+}
+
+/** Return 1 if <b>addr</b> is permitted to connect to our dir port,
+ * based on <b>dir_policy</b>. Else return 0.
+ */
+int
+dir_policy_permits_address(const tor_addr_t *addr)
+{
+ return addr_policy_permits_tor_addr(addr, 1, dir_policy);
+}
+
+/** Return 1 if <b>addr</b> is permitted to connect to our socks port,
+ * based on <b>socks_policy</b>. Else return 0.
+ */
+int
+socks_policy_permits_address(const tor_addr_t *addr)
+{
+ return addr_policy_permits_tor_addr(addr, 1, socks_policy);
+}
+
+/** Return true iff the address <b>addr</b> is in a country listed in the
+ * case-insensitive list of country codes <b>cc_list</b>. */
+static int
+addr_is_in_cc_list(uint32_t addr, const smartlist_t *cc_list)
+{
+ country_t country;
+ const char *name;
+ tor_addr_t tar;
+
+ if (!cc_list)
+ return 0;
+ /* XXXXipv6 */
+ tor_addr_from_ipv4h(&tar, addr);
+ country = geoip_get_country_by_addr(&tar);
+ name = geoip_get_country_name(country);
+ return smartlist_contains_string_case(cc_list, name);
+}
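+
+/* Example (hypothetical config): with "AuthDirRejectCCs de,fr", an address
+ * that geolocates to "DE" matches, because the comparison against the
+ * country-code list is case-insensitive. */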
+
+/** Return 1 if <b>addr</b>:<b>port</b> is permitted to publish to our
+ * directory, based on <b>authdir_reject_policy</b>. Else return 0.
+ */
+int
+authdir_policy_permits_address(uint32_t addr, uint16_t port)
+{
+ if (! addr_policy_permits_address(addr, port, authdir_reject_policy))
+ return 0;
+ return !addr_is_in_cc_list(addr, get_options()->AuthDirRejectCCs);
+}
+
+/** Return 1 if <b>addr</b>:<b>port</b> is considered valid in our
+ * directory, based on <b>authdir_invalid_policy</b>. Else return 0.
+ */
+int
+authdir_policy_valid_address(uint32_t addr, uint16_t port)
+{
+ if (! addr_policy_permits_address(addr, port, authdir_invalid_policy))
+ return 0;
+ return !addr_is_in_cc_list(addr, get_options()->AuthDirInvalidCCs);
+}
+
+/** Return 1 if <b>addr</b>:<b>port</b> should be marked as a bad exit,
+ * based on <b>authdir_badexit_policy</b>. Else return 0.
+ */
+int
+authdir_policy_badexit_address(uint32_t addr, uint16_t port)
+{
+ if (! addr_policy_permits_address(addr, port, authdir_badexit_policy))
+ return 1;
+ return addr_is_in_cc_list(addr, get_options()->AuthDirBadExitCCs);
+}
+
+#define REJECT(arg) \
+ STMT_BEGIN *msg = tor_strdup(arg); goto err; STMT_END
+
+/** Config helper: If there's any problem with the policy configuration
+ * options in <b>options</b>, return -1 and set <b>msg</b> to a newly
+ * allocated description of the error. Else return 0. */
+int
+validate_addr_policies(const or_options_t *options, char **msg)
+{
+ /* XXXX Maybe merge this into parse_policies_from_options, to make sure
+ * that the two can't go out of sync. */
+
+ smartlist_t *addr_policy=NULL;
+ *msg = NULL;
+
+ if (policies_parse_exit_policy_from_options(options,0,NULL,&addr_policy)) {
+ REJECT("Error in ExitPolicy entry.");
+ }
+
+ static int warned_about_exitrelay = 0;
+
+ const int exitrelay_setting_is_auto = options->ExitRelay == -1;
+ const int policy_accepts_something =
+ ! (policy_is_reject_star(addr_policy, AF_INET, 1) &&
+ policy_is_reject_star(addr_policy, AF_INET6, 1));
+
+ if (server_mode(options) &&
+ ! warned_about_exitrelay &&
+ exitrelay_setting_is_auto &&
+ policy_accepts_something) {
+ /* Policy accepts something */
+ warned_about_exitrelay = 1;
+ log_warn(LD_CONFIG,
+ "Tor is running as an exit relay%s. If you did not want this "
+ "behavior, please set the ExitRelay option to 0. If you do "
+ "want to run an exit Relay, please set the ExitRelay option "
+ "to 1 to disable this warning, and for forward compatibility.",
+ options->ExitPolicy == NULL ?
+ " with the default exit policy" : "");
+ if (options->ExitPolicy == NULL && options->ReducedExitPolicy == 0) {
+ log_warn(LD_CONFIG,
+ "In a future version of Tor, ExitRelay 0 may become the "
+ "default when no ExitPolicy is given.");
+ }
+ }
+
+ /* The rest of these calls *append* to addr_policy. So don't actually
+ * use the results for anything other than checking if they parse! */
+ if (parse_addr_policy(options->DirPolicy, &addr_policy, -1))
+ REJECT("Error in DirPolicy entry.");
+ if (parse_addr_policy(options->SocksPolicy, &addr_policy, -1))
+ REJECT("Error in SocksPolicy entry.");
+ if (parse_addr_policy(options->AuthDirReject, &addr_policy,
+ ADDR_POLICY_REJECT))
+ REJECT("Error in AuthDirReject entry.");
+ if (parse_addr_policy(options->AuthDirInvalid, &addr_policy,
+ ADDR_POLICY_REJECT))
+ REJECT("Error in AuthDirInvalid entry.");
+ if (parse_addr_policy(options->AuthDirBadExit, &addr_policy,
+ ADDR_POLICY_REJECT))
+ REJECT("Error in AuthDirBadExit entry.");
+
+ if (parse_addr_policy(options->ReachableAddresses, &addr_policy,
+ ADDR_POLICY_ACCEPT))
+ REJECT("Error in ReachableAddresses entry.");
+ if (parse_addr_policy(options->ReachableORAddresses, &addr_policy,
+ ADDR_POLICY_ACCEPT))
+ REJECT("Error in ReachableORAddresses entry.");
+ if (parse_addr_policy(options->ReachableDirAddresses, &addr_policy,
+ ADDR_POLICY_ACCEPT))
+ REJECT("Error in ReachableDirAddresses entry.");
+
+ err:
+ addr_policy_list_free(addr_policy);
+ return *msg ? -1 : 0;
+#undef REJECT
+}
+
+/** Parse the policy in <b>config</b> in the same way that the exit policy
+ * is parsed, and put the processed version in *<b>policy</b>.
+ * Ignore port specifiers.
+ */
+static int
+load_policy_from_option(config_line_t *config, const char *option_name,
+ smartlist_t **policy,
+ int assume_action)
+{
+ int r;
+ int killed_any_ports = 0;
+ addr_policy_list_free(*policy);
+ *policy = NULL;
+ r = parse_addr_policy(config, policy, assume_action);
+ if (r < 0) {
+ return -1;
+ }
+ if (*policy) {
+ SMARTLIST_FOREACH_BEGIN(*policy, addr_policy_t *, n) {
+ /* ports aren't used in these. */
+ if (n->prt_min > 1 || n->prt_max != 65535) {
+ addr_policy_t newp, *c;
+ memcpy(&newp, n, sizeof(newp));
+ newp.prt_min = 1;
+ newp.prt_max = 65535;
+ newp.is_canonical = 0;
+ c = addr_policy_get_canonical_entry(&newp);
+ SMARTLIST_REPLACE_CURRENT(*policy, n, c);
+ addr_policy_free(n);
+ killed_any_ports = 1;
+ }
+ } SMARTLIST_FOREACH_END(n);
+ }
+ if (killed_any_ports) {
+ log_warn(LD_CONFIG, "Ignoring ports in %s option.", option_name);
+ }
+ return 0;
+}
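+
+/* Example (hypothetical torrc line): "SocksPolicy accept 192.0.2.0/24:80"
+ * is loaded as if it were "accept 192.0.2.0/24:*", and "Ignoring ports in
+ * SocksPolicy option." is logged once, since these policies match
+ * addresses only. */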
+
+/** Set all policies based on <b>options</b>, which should have been validated
+ * first by validate_addr_policies. */
+int
+policies_parse_from_options(const or_options_t *options)
+{
+ int ret = 0;
+ if (load_policy_from_option(options->SocksPolicy, "SocksPolicy",
+ &socks_policy, -1) < 0)
+ ret = -1;
+ if (load_policy_from_option(options->DirPolicy, "DirPolicy",
+ &dir_policy, -1) < 0)
+ ret = -1;
+ if (load_policy_from_option(options->AuthDirReject, "AuthDirReject",
+ &authdir_reject_policy, ADDR_POLICY_REJECT) < 0)
+ ret = -1;
+ if (load_policy_from_option(options->AuthDirInvalid, "AuthDirInvalid",
+ &authdir_invalid_policy, ADDR_POLICY_REJECT) < 0)
+ ret = -1;
+ if (load_policy_from_option(options->AuthDirBadExit, "AuthDirBadExit",
+ &authdir_badexit_policy, ADDR_POLICY_REJECT) < 0)
+ ret = -1;
+ if (parse_reachable_addresses() < 0)
+ ret = -1;
+ return ret;
+}
+
+/** Compare two provided address policy items; return 1 if they are equal,
+ * and 0 otherwise. */
+static int
+single_addr_policy_eq(const addr_policy_t *a, const addr_policy_t *b)
+{
+ int r;
+#define CMP_FIELD(field) do { \
+ if (a->field != b->field) { \
+ return 0; \
+ } \
+ } while (0)
+ CMP_FIELD(policy_type);
+ CMP_FIELD(is_private);
+ /* refcnt and is_canonical are irrelevant to equality,
+ * they are hash table implementation details */
+ if ((r=tor_addr_compare(&a->addr, &b->addr, CMP_EXACT)))
+ return 0;
+ CMP_FIELD(maskbits);
+ CMP_FIELD(prt_min);
+ CMP_FIELD(prt_max);
+#undef CMP_FIELD
+ return 1;
+}
+
+/** As single_addr_policy_eq, but compare every element of two policies.
+ */
+int
+addr_policies_eq(const smartlist_t *a, const smartlist_t *b)
+{
+ int i;
+ int len_a = a ? smartlist_len(a) : 0;
+ int len_b = b ? smartlist_len(b) : 0;
+
+ if (len_a != len_b)
+ return 0;
+
+ for (i = 0; i < len_a; ++i) {
+ if (! single_addr_policy_eq(smartlist_get(a, i), smartlist_get(b, i)))
+ return 0;
+ }
+
+ return 1;
+}
+
+/** Node in hashtable used to store address policy entries. */
+typedef struct policy_map_ent_t {
+ HT_ENTRY(policy_map_ent_t) node;
+ addr_policy_t *policy;
+} policy_map_ent_t;
+
+/** Hashtable of canonical addr_policy_t entries; used by
+ * addr_policy_get_canonical_entry() to deduplicate policies. */
+static HT_HEAD(policy_map, policy_map_ent_t) policy_root = HT_INITIALIZER();
+
+/** Return true iff a and b are equal. */
+static inline int
+policy_eq(policy_map_ent_t *a, policy_map_ent_t *b)
+{
+ return single_addr_policy_eq(a->policy, b->policy);
+}
+
+/** Return a hashcode for <b>ent</b> */
+static unsigned int
+policy_hash(const policy_map_ent_t *ent)
+{
+ const addr_policy_t *a = ent->policy;
+ addr_policy_t aa;
+ memset(&aa, 0, sizeof(aa));
+
+ aa.prt_min = a->prt_min;
+ aa.prt_max = a->prt_max;
+ aa.maskbits = a->maskbits;
+ aa.policy_type = a->policy_type;
+ aa.is_private = a->is_private;
+
+ if (a->is_private) {
+ aa.is_private = 1;
+ } else {
+ tor_addr_copy_tight(&aa.addr, &a->addr);
+ }
+
+ return (unsigned) siphash24g(&aa, sizeof(aa));
+}
+
+HT_PROTOTYPE(policy_map, policy_map_ent_t, node, policy_hash,
+ policy_eq)
+HT_GENERATE2(policy_map, policy_map_ent_t, node, policy_hash,
+ policy_eq, 0.6, tor_reallocarray_, tor_free_)
+
+/** Given a pointer to an addr_policy_t, return a copy of the pointer to the
+ * "canonical" copy of that addr_policy_t; the canonical copy is a single
+ * reference-counted object. */
+addr_policy_t *
+addr_policy_get_canonical_entry(addr_policy_t *e)
+{
+ policy_map_ent_t search, *found;
+ if (e->is_canonical)
+ return e;
+
+ search.policy = e;
+ found = HT_FIND(policy_map, &policy_root, &search);
+ if (!found) {
+ found = tor_malloc_zero(sizeof(policy_map_ent_t));
+ found->policy = tor_memdup(e, sizeof(addr_policy_t));
+ found->policy->is_canonical = 1;
+ found->policy->refcnt = 0;
+ HT_INSERT(policy_map, &policy_root, found);
+ }
+
+ tor_assert(single_addr_policy_eq(found->policy, e));
+ ++found->policy->refcnt;
+ return found->policy;
+}
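+
+/* Sketch of the resulting sharing (parsed_a/parsed_b are hypothetical
+ * addr_policy_t values describing the same "reject 192.0.2.1:*" entry,
+ * and no earlier references to it exist):
+ *
+ *   addr_policy_t *p1 = addr_policy_get_canonical_entry(&parsed_a);
+ *   addr_policy_t *p2 = addr_policy_get_canonical_entry(&parsed_b);
+ *   tor_assert(p1 == p2);        // one shared canonical object
+ *   tor_assert(p1->refcnt == 2); // one reference per caller
+ */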
+
+/** Helper for compare_tor_addr_to_addr_policy. Implements the case where
+ * addr and port are both known. */
+static addr_policy_result_t
+compare_known_tor_addr_to_addr_policy(const tor_addr_t *addr, uint16_t port,
+ const smartlist_t *policy)
+{
+ /* We know the address and port, and we know the policy, so we can just
+ * compute an exact match. */
+ SMARTLIST_FOREACH_BEGIN(policy, addr_policy_t *, tmpe) {
+ if (tmpe->addr.family == AF_UNSPEC) {
+ log_warn(LD_BUG, "Policy contains an AF_UNSPEC address, which only "
+ "matches other AF_UNSPEC addresses.");
+ }
+ /* Address is known */
+ if (!tor_addr_compare_masked(addr, &tmpe->addr, tmpe->maskbits,
+ CMP_EXACT)) {
+ if (port >= tmpe->prt_min && port <= tmpe->prt_max) {
+ /* Exact match for the policy */
+ return tmpe->policy_type == ADDR_POLICY_ACCEPT ?
+ ADDR_POLICY_ACCEPTED : ADDR_POLICY_REJECTED;
+ }
+ }
+ } SMARTLIST_FOREACH_END(tmpe);
+
+ /* accept all by default. */
+ return ADDR_POLICY_ACCEPTED;
+}
+
+/** Helper for compare_tor_addr_to_addr_policy. Implements the case where
+ * addr is known but port is not. */
+static addr_policy_result_t
+compare_known_tor_addr_to_addr_policy_noport(const tor_addr_t *addr,
+ const smartlist_t *policy)
+{
+ /* We look to see if there's a definite match. If so, we return that
+ match's value, unless there's an intervening possible match that says
+ something different. */
+ int maybe_accept = 0, maybe_reject = 0;
+
+ SMARTLIST_FOREACH_BEGIN(policy, addr_policy_t *, tmpe) {
+ if (tmpe->addr.family == AF_UNSPEC) {
+ log_warn(LD_BUG, "Policy contains an AF_UNSPEC address, which only "
+ "matches other AF_UNSPEC addresses.");
+ }
+ if (!tor_addr_compare_masked(addr, &tmpe->addr, tmpe->maskbits,
+ CMP_EXACT)) {
+ if (tmpe->prt_min <= 1 && tmpe->prt_max >= 65535) {
+ /* Definitely matches, since it covers all ports. */
+ if (tmpe->policy_type == ADDR_POLICY_ACCEPT) {
+ /* If we already hit a clause that might trigger a 'reject', then we
+ * can't be sure of this definite 'accept'. */
+ return maybe_reject ? ADDR_POLICY_PROBABLY_ACCEPTED :
+ ADDR_POLICY_ACCEPTED;
+ } else {
+ return maybe_accept ? ADDR_POLICY_PROBABLY_REJECTED :
+ ADDR_POLICY_REJECTED;
+ }
+ } else {
+ /* Might match. */
+ if (tmpe->policy_type == ADDR_POLICY_REJECT)
+ maybe_reject = 1;
+ else
+ maybe_accept = 1;
+ }
+ }
+ } SMARTLIST_FOREACH_END(tmpe);
+
+ /* accept all by default. */
+ return maybe_reject ? ADDR_POLICY_PROBABLY_ACCEPTED : ADDR_POLICY_ACCEPTED;
+}
+
+/** Helper for compare_tor_addr_to_addr_policy. Implements the case where
+ * port is known but address is not. */
+static addr_policy_result_t
+compare_unknown_tor_addr_to_addr_policy(uint16_t port,
+ const smartlist_t *policy)
+{
+ /* We look to see if there's a definite match. If so, we return that
+ match's value, unless there's an intervening possible match that says
+ something different. */
+ int maybe_accept = 0, maybe_reject = 0;
+
+ SMARTLIST_FOREACH_BEGIN(policy, addr_policy_t *, tmpe) {
+ if (tmpe->addr.family == AF_UNSPEC) {
+ log_warn(LD_BUG, "Policy contains an AF_UNSPEC address, which only "
+ "matches other AF_UNSPEC addresses.");
+ }
+ if (tmpe->prt_min <= port && port <= tmpe->prt_max) {
+ if (tmpe->maskbits == 0) {
+ /* Definitely matches, since it covers all addresses. */
+ if (tmpe->policy_type == ADDR_POLICY_ACCEPT) {
+ /* If we already hit a clause that might trigger a 'reject', then we
+ * can't be sure of this definite 'accept'. */
+ return maybe_reject ? ADDR_POLICY_PROBABLY_ACCEPTED :
+ ADDR_POLICY_ACCEPTED;
+ } else {
+ return maybe_accept ? ADDR_POLICY_PROBABLY_REJECTED :
+ ADDR_POLICY_REJECTED;
+ }
+ } else {
+ /* Might match. */
+ if (tmpe->policy_type == ADDR_POLICY_REJECT)
+ maybe_reject = 1;
+ else
+ maybe_accept = 1;
+ }
+ }
+ } SMARTLIST_FOREACH_END(tmpe);
+
+ /* accept all by default. */
+ return maybe_reject ? ADDR_POLICY_PROBABLY_ACCEPTED : ADDR_POLICY_ACCEPTED;
+}
+
+/** Decide whether a given addr:port is definitely accepted,
+ * definitely rejected, probably accepted, or probably rejected by a
+ * given policy. If <b>addr</b> is NULL or the null address, we don't know
+ * the IP of the target address. If <b>port</b> is 0, we don't know the port
+ * of the target address. (At least one of <b>addr</b> and <b>port</b> must be
+ * provided. If you want to know whether a policy would definitely reject
+ * an unknown address:port, use policy_is_reject_star().)
+ *
+ * We could do better by assuming that some ranges never match typical
+ * addresses (127.0.0.1, and so on). But we'll try this for now.
+ */
+MOCK_IMPL(addr_policy_result_t,
+compare_tor_addr_to_addr_policy,(const tor_addr_t *addr, uint16_t port,
+ const smartlist_t *policy))
+{
+ if (!policy) {
+ /* no policy? accept all. */
+ return ADDR_POLICY_ACCEPTED;
+ } else if (addr == NULL || tor_addr_is_null(addr)) {
+ if (port == 0) {
+ log_info(LD_BUG, "Rejecting null address with 0 port (family %d)",
+ addr ? tor_addr_family(addr) : -1);
+ return ADDR_POLICY_REJECTED;
+ }
+ return compare_unknown_tor_addr_to_addr_policy(port, policy);
+ } else if (port == 0) {
+ return compare_known_tor_addr_to_addr_policy_noport(addr, policy);
+ } else {
+ return compare_known_tor_addr_to_addr_policy(addr, port, policy);
+ }
+}
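+
+/* Worked example for the illustrative policy
+ * "accept 192.0.2.0/24:80, reject *4:*":
+ *   addr=192.0.2.5, port=80 -> ADDR_POLICY_ACCEPTED (exact match)
+ *   addr=192.0.2.5, port=0  -> ADDR_POLICY_PROBABLY_REJECTED (the accept
+ *                              entry might match, depending on the port)
+ *   addr unknown,   port=80 -> ADDR_POLICY_PROBABLY_REJECTED (the accept
+ *                              entry might match, depending on the address)
+ */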
+
+/** Return true iff the address policy <b>a</b> covers every case that
+ * would be covered by <b>b</b>, so that a,b is redundant. */
+static int
+addr_policy_covers(addr_policy_t *a, addr_policy_t *b)
+{
+ if (tor_addr_family(&a->addr) != tor_addr_family(&b->addr)) {
+ /* You can't cover a different family. */
+ return 0;
+ }
+ /* We can ignore accept/reject, since "accept *:80, reject *:80" reduces
+ * to "accept *:80". */
+ if (a->maskbits > b->maskbits) {
+ /* a has more fixed bits than b; it can't possibly cover b. */
+ return 0;
+ }
+ if (tor_addr_compare_masked(&a->addr, &b->addr, a->maskbits, CMP_EXACT)) {
+ /* There's a fixed bit in a that's set differently in b. */
+ return 0;
+ }
+ return (a->prt_min <= b->prt_min && a->prt_max >= b->prt_max);
+}
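+
+/* Example: "reject 192.0.2.0/24:*" covers "reject 192.0.2.1:80" (fewer mask
+ * bits, same masked prefix, wider port range), so the latter is redundant
+ * when it follows the former; the reverse does not hold, since a /32 can
+ * never cover a /24. */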
+
+/** Return true iff the address policies <b>a</b> and <b>b</b> intersect,
+ * that is, there exists an address/port that is covered by <b>a</b> that
+ * is also covered by <b>b</b>.
+ */
+static int
+addr_policy_intersects(addr_policy_t *a, addr_policy_t *b)
+{
+ maskbits_t minbits;
+ /* All the bits we care about are those that are set in both
+ * netmasks. If they are equal in a and b's networkaddresses
+ * then the networks intersect. If there is a difference,
+ * then they do not. */
+ if (a->maskbits < b->maskbits)
+ minbits = a->maskbits;
+ else
+ minbits = b->maskbits;
+ if (tor_addr_compare_masked(&a->addr, &b->addr, minbits, CMP_EXACT))
+ return 0;
+ if (a->prt_max < b->prt_min || b->prt_max < a->prt_min)
+ return 0;
+ return 1;
+}
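+
+/* Example: "accept 192.0.2.0/24:1-100" and "reject 192.0.2.128/25:80-200"
+ * intersect: the prefixes agree in the first 24 bits, and ports 80-100 are
+ * shared. "accept 192.0.2.0/24:1-79" and "reject *:80-200" do not
+ * intersect, since the port ranges are disjoint. */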
+
+/** Add the exit policy described by <b>more</b> to <b>policy</b>.
+ */
+STATIC void
+append_exit_policy_string(smartlist_t **policy, const char *more)
+{
+ config_line_t tmp;
+
+ tmp.key = NULL;
+ tmp.value = (char*) more;
+ tmp.next = NULL;
+ if (parse_addr_policy(&tmp, policy, -1)<0) {
+ log_warn(LD_BUG, "Unable to parse internally generated policy %s",more);
+ }
+}
+
+/** Add "reject <b>addr</b>:*" to <b>dest</b>, creating the list as needed. */
+void
+addr_policy_append_reject_addr(smartlist_t **dest, const tor_addr_t *addr)
+{
+ tor_assert(dest);
+ tor_assert(addr);
+
+ addr_policy_t p, *add;
+ memset(&p, 0, sizeof(p));
+ p.policy_type = ADDR_POLICY_REJECT;
+ p.maskbits = tor_addr_family(addr) == AF_INET6 ? 128 : 32;
+ tor_addr_copy(&p.addr, addr);
+ p.prt_min = 1;
+ p.prt_max = 65535;
+
+ add = addr_policy_get_canonical_entry(&p);
+ if (!*dest)
+ *dest = smartlist_new();
+ smartlist_add(*dest, add);
+ log_debug(LD_CONFIG, "Adding a reject ExitPolicy 'reject %s:*'",
+ fmt_addr(addr));
+}
+
+/** Return true iff <b>addr</b> is public for the purposes of rejection:
+ * non-null, not internal, and not multicast. */
+static int
+tor_addr_is_public_for_reject(const tor_addr_t *addr)
+{
+ return (!tor_addr_is_null(addr) && !tor_addr_is_internal(addr, 0)
+ && !tor_addr_is_multicast(addr));
+}
+
+/* Add "reject <b>addr</b>:*" to <b>dest</b>, creating the list as needed.
+ * Filter the address, only adding an IPv4 reject rule if ipv4_rules
+ * is true, and similarly for ipv6_rules. Check each address returns true for
+ * tor_addr_is_public_for_reject before adding it.
+ */
+static void
+addr_policy_append_reject_addr_filter(smartlist_t **dest,
+ const tor_addr_t *addr,
+ int ipv4_rules,
+ int ipv6_rules)
+{
+ tor_assert(dest);
+ tor_assert(addr);
+
+ /* Only reject IP addresses which are public */
+ if (tor_addr_is_public_for_reject(addr)) {
+
+ /* Reject IPv4 addresses and IPv6 addresses based on the filters */
+ int is_ipv4 = tor_addr_is_v4(addr);
+ if ((is_ipv4 && ipv4_rules) || (!is_ipv4 && ipv6_rules)) {
+ addr_policy_append_reject_addr(dest, addr);
+ }
+ }
+}
+
+/** Add "reject addr:*" to <b>dest</b>, for each addr in addrs, creating the
+ * list as needed. */
+void
+addr_policy_append_reject_addr_list(smartlist_t **dest,
+ const smartlist_t *addrs)
+{
+ tor_assert(dest);
+ tor_assert(addrs);
+
+ SMARTLIST_FOREACH_BEGIN(addrs, tor_addr_t *, addr) {
+ addr_policy_append_reject_addr(dest, addr);
+ } SMARTLIST_FOREACH_END(addr);
+}
+
+/** Add "reject addr:*" to <b>dest</b>, for each addr in addrs, creating the
+ * list as needed. Filter using */
+static void
+addr_policy_append_reject_addr_list_filter(smartlist_t **dest,
+ const smartlist_t *addrs,
+ int ipv4_rules,
+ int ipv6_rules)
+{
+ tor_assert(dest);
+ tor_assert(addrs);
+
+ SMARTLIST_FOREACH_BEGIN(addrs, tor_addr_t *, addr) {
+ addr_policy_append_reject_addr_filter(dest, addr, ipv4_rules, ipv6_rules);
+ } SMARTLIST_FOREACH_END(addr);
+}
+
+/** Detect and excise "dead code" from the policy *<b>dest</b>. */
+static void
+exit_policy_remove_redundancies(smartlist_t *dest)
+{
+ addr_policy_t *ap, *tmp;
+ int i, j;
+
+ /* Step one: kill every ipv4 thing after *4:*, every IPv6 thing after *6:*
+ */
+ {
+ int kill_v4=0, kill_v6=0;
+ for (i = 0; i < smartlist_len(dest); ++i) {
+ sa_family_t family;
+ ap = smartlist_get(dest, i);
+ family = tor_addr_family(&ap->addr);
+ if ((family == AF_INET && kill_v4) ||
+ (family == AF_INET6 && kill_v6)) {
+ smartlist_del_keeporder(dest, i--);
+ addr_policy_free(ap);
+ continue;
+ }
+
+ if (ap->maskbits == 0 && ap->prt_min <= 1 && ap->prt_max >= 65535) {
+ /* This is a catch-all line -- later lines are unreachable. */
+ if (family == AF_INET) {
+ kill_v4 = 1;
+ } else if (family == AF_INET6) {
+ kill_v6 = 1;
+ }
+ }
+ }
+ }
+
+ /* Step two: for every entry, see if there's a redundant entry
+ * later on, and remove it. */
+ for (i = 0; i < smartlist_len(dest)-1; ++i) {
+ ap = smartlist_get(dest, i);
+ for (j = i+1; j < smartlist_len(dest); ++j) {
+ tmp = smartlist_get(dest, j);
+ tor_assert(j > i);
+ if (addr_policy_covers(ap, tmp)) {
+ char p1[POLICY_BUF_LEN], p2[POLICY_BUF_LEN];
+ policy_write_item(p1, sizeof(p1), tmp, 0);
+ policy_write_item(p2, sizeof(p2), ap, 0);
+ log_debug(LD_CONFIG, "Removing exit policy %s (%d). It is made "
+ "redundant by %s (%d).", p1, j, p2, i);
+ smartlist_del_keeporder(dest, j--);
+ addr_policy_free(tmp);
+ }
+ }
+ }
+
+ /* Step three: for every entry A, see if there's an entry B making this one
+ * redundant later on. This is the case if A and B are of the same type
+ * (accept/reject), A is a subset of B, and there is no other entry of
+ * different type in between those two that intersects with A.
+ *
+ * Anybody want to double-check the logic here? XXX
+ */
+ for (i = 0; i < smartlist_len(dest)-1; ++i) {
+ ap = smartlist_get(dest, i);
+ for (j = i+1; j < smartlist_len(dest); ++j) {
+ // tor_assert(j > i); // j starts out at i+1; j only increases; i only
+ // // decreases.
+ tmp = smartlist_get(dest, j);
+ if (ap->policy_type != tmp->policy_type) {
+ if (addr_policy_intersects(ap, tmp))
+ break;
+ } else { /* policy_types are equal. */
+ if (addr_policy_covers(tmp, ap)) {
+ char p1[POLICY_BUF_LEN], p2[POLICY_BUF_LEN];
+ policy_write_item(p1, sizeof(p1), ap, 0);
+ policy_write_item(p2, sizeof(p2), tmp, 0);
+ log_debug(LD_CONFIG, "Removing exit policy %s. It is already "
+ "covered by %s.", p1, p2);
+ smartlist_del_keeporder(dest, i--);
+ addr_policy_free(ap);
+ break;
+ }
+ }
+ }
+ }
+}
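+
+/* Worked example (single-family policy for brevity): the list
+ *   accept *4:80, accept 192.0.2.1:80, reject *4:*, accept *4:443
+ * reduces to
+ *   accept *4:80, reject *4:*
+ * Step one removes "accept *4:443" (unreachable after the catch-all);
+ * step two removes "accept 192.0.2.1:80" (covered by "accept *4:80"). */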
+
+/** Reject private helper for policies_parse_exit_policy_internal: rejects
+ * publicly routable addresses on this exit relay.
+ *
+ * Add reject entries to the linked list *<b>dest</b>:
+ * <ul>
+ * <li>if configured_addresses is non-NULL, add entries that reject each
+ * tor_addr_t in the list as a destination.
+ * <li>if reject_interface_addresses is true, add entries that reject each
+ * public IPv4 and IPv6 address of each interface on this machine.
+ * <li>if reject_configured_port_addresses is true, add entries that reject
+ * each IPv4 and IPv6 address configured for a port.
+ * </ul>
+ *
+ * IPv6 entries are only added if ipv6_exit is true. (All IPv6 addresses are
+ * already blocked by policies_parse_exit_policy_internal if ipv6_exit is
+ * false.)
+ *
+ * The list in <b>dest</b> is created as needed.
+ */
+void
+policies_parse_exit_policy_reject_private(
+ smartlist_t **dest,
+ int ipv6_exit,
+ const smartlist_t *configured_addresses,
+ int reject_interface_addresses,
+ int reject_configured_port_addresses)
+{
+ tor_assert(dest);
+
+ /* Reject configured addresses, if they are from public netblocks. */
+ if (configured_addresses) {
+ addr_policy_append_reject_addr_list_filter(dest, configured_addresses,
+ 1, ipv6_exit);
+ }
+
+ /* Reject configured port addresses, if they are from public netblocks. */
+ if (reject_configured_port_addresses) {
+ const smartlist_t *port_addrs = get_configured_ports();
+
+ SMARTLIST_FOREACH_BEGIN(port_addrs, port_cfg_t *, port) {
+
+ /* Only reject port IP addresses, not port unix sockets */
+ if (!port->is_unix_addr) {
+ addr_policy_append_reject_addr_filter(dest, &port->addr, 1, ipv6_exit);
+ }
+ } SMARTLIST_FOREACH_END(port);
+ }
+
+ /* Reject local addresses from public netblocks on any interface. */
+ if (reject_interface_addresses) {
+ smartlist_t *public_addresses = NULL;
+
+ /* Reject public IPv4 addresses on any interface */
+ public_addresses = get_interface_address6_list(LOG_INFO, AF_INET, 0);
+ addr_policy_append_reject_addr_list_filter(dest, public_addresses, 1, 0);
+ interface_address6_list_free(public_addresses);
+
+ /* Don't look for IPv6 addresses if we're configured as IPv4-only */
+ if (ipv6_exit) {
+ /* Reject public IPv6 addresses on any interface */
+ public_addresses = get_interface_address6_list(LOG_INFO, AF_INET6, 0);
+ addr_policy_append_reject_addr_list_filter(dest, public_addresses, 0, 1);
+ interface_address6_list_free(public_addresses);
+ }
+ }
+
+ /* If addresses were added multiple times, remove all but one of them. */
+ if (*dest) {
+ exit_policy_remove_redundancies(*dest);
+ }
+}
+
+/**
+ * Iterate through <b>policy</b> looking for redundant entries. Log a
+ * warning message with the first redundant entry, if any is found.
+ */
+static void
+policies_log_first_redundant_entry(const smartlist_t *policy)
+{
+ int found_final_effective_entry = 0;
+ int first_redundant_entry = 0;
+ tor_assert(policy);
+ SMARTLIST_FOREACH_BEGIN(policy, const addr_policy_t *, p) {
+ sa_family_t family;
+ int found_ipv4_wildcard = 0, found_ipv6_wildcard = 0;
+ const int i = p_sl_idx;
+
+ /* Look for accept/reject *[4|6|]:* entries */
+ if (p->prt_min <= 1 && p->prt_max == 65535 && p->maskbits == 0) {
+ family = tor_addr_family(&p->addr);
+ /* accept/reject *:* may have already been expanded into
+ * accept/reject *4:*,accept/reject *6:*
+ * But handle both forms.
+ */
+ if (family == AF_INET || family == AF_UNSPEC) {
+ found_ipv4_wildcard = 1;
+ }
+ if (family == AF_INET6 || family == AF_UNSPEC) {
+ found_ipv6_wildcard = 1;
+ }
+ }
+
+ /* We also find accept *4:*,reject *6:* ; and
+ * accept *4:*,<other policies>,accept *6:* ; and similar.
+ * That's ok, because they make any subsequent entries redundant. */
+ if (found_ipv4_wildcard && found_ipv6_wildcard) {
+ found_final_effective_entry = 1;
+ /* if we're not on the final entry in the list */
+ if (i < smartlist_len(policy) - 1) {
+ first_redundant_entry = i + 1;
+ }
+ break;
+ }
+ } SMARTLIST_FOREACH_END(p);
+
+ /* Work out if there are redundant trailing entries in the policy list */
+ if (found_final_effective_entry && first_redundant_entry > 0) {
+ const addr_policy_t *p;
+ /* Longest possible policy is
+ * "accept6 ffff:ffff:..255/128:10000-65535",
+ * which contains a max-length IPv6 address, plus 24 characters. */
+ char line[TOR_ADDR_BUF_LEN + 32];
+
+ tor_assert(first_redundant_entry < smartlist_len(policy));
+ p = smartlist_get(policy, first_redundant_entry);
+ /* since we've already parsed the policy into an addr_policy_t struct,
+ * we might not log exactly what the user typed in */
+ policy_write_item(line, TOR_ADDR_BUF_LEN + 32, p, 0);
+ log_warn(LD_DIR, "Exit policy '%s' and all following policies are "
+ "redundant, as it follows accept/reject *:* rules for both "
+ "IPv4 and IPv6. They will be removed from the exit policy. (Use "
+ "accept/reject *:* as the last entry in any exit policy.)",
+ line);
+ }
+}
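+
+/* Example (hypothetical torrc line): "ExitPolicy accept *:*, reject *:25"
+ * triggers this warning, because the wildcard accept already matches both
+ * address families, so the trailing reject can never take effect. */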
+
+#define DEFAULT_EXIT_POLICY \
+ "reject *:25,reject *:119,reject *:135-139,reject *:445," \
+ "reject *:563,reject *:1214,reject *:4661-4666," \
+ "reject *:6346-6429,reject *:6699,reject *:6881-6999,accept *:*"
+
+#define REDUCED_EXIT_POLICY \
+ "accept *:20-23,accept *:43,accept *:53,accept *:79-81,accept *:88," \
+ "accept *:110,accept *:143,accept *:194,accept *:220,accept *:389," \
+ "accept *:443,accept *:464,accept *:465,accept *:531,accept *:543-544," \
+ "accept *:554,accept *:563,accept *:587,accept *:636,accept *:706," \
+ "accept *:749,accept *:873,accept *:902-904,accept *:981,accept *:989-995," \
+ "accept *:1194,accept *:1220,accept *:1293,accept *:1500,accept *:1533," \
+ "accept *:1677,accept *:1723,accept *:1755,accept *:1863," \
+ "accept *:2082-2083,accept *:2086-2087,accept *:2095-2096," \
+ "accept *:2102-2104,accept *:3128,accept *:3389,accept *:3690," \
+ "accept *:4321,accept *:4643,accept *:5050,accept *:5190," \
+ "accept *:5222-5223,accept *:5228,accept *:5900,accept *:6660-6669," \
+ "accept *:6679,accept *:6697,accept *:8000,accept *:8008,accept *:8074," \
+ "accept *:8080,accept *:8082,accept *:8087-8088,accept *:8232-8233," \
+ "accept *:8332-8333,accept *:8443,accept *:8888,accept *:9418," \
+ "accept *:9999,accept *:10000,accept *:11371,accept *:19294," \
+ "accept *:19638,accept *:50002,accept *:64738,reject *:*"
+
+/** Parse the exit policy <b>cfg</b> into the linked list *<b>dest</b>.
+ *
+ * If <b>ipv6_exit</b> is false, prepend "reject *6:*" to the policy.
+ *
+ * If <b>configured_addresses</b> contains addresses:
+ * - prepend entries that reject the addresses in this list. These may be the
+ * advertised relay addresses and/or the outbound bind addresses,
+ * depending on the ExitPolicyRejectPrivate and
+ * ExitPolicyRejectLocalInterfaces settings.
+ * If <b>rejectprivate</b> is true:
+ * - prepend "reject private:*" to the policy.
+ * If <b>reject_interface_addresses</b> is true:
+ * - prepend entries that reject publicly routable interface addresses on
+ * this exit relay by calling policies_parse_exit_policy_reject_private
+ * If <b>reject_configured_port_addresses</b> is true:
+ * - prepend entries that reject all configured port addresses
+ *
+ * If cfg doesn't end in an absolute accept or reject and if
+ * <b>add_default_policy</b> is true, add the default exit
+ * policy afterwards.
+ *
+ * Return -1 if we can't parse cfg, else return 0.
+ *
+ * This function is used to parse the exit policy from our torrc. For
+ * the functions used to parse the exit policy from a router descriptor,
+ * see router_add_exit_policy.
+ */
+static int
+policies_parse_exit_policy_internal(config_line_t *cfg,
+ smartlist_t **dest,
+ int ipv6_exit,
+ int rejectprivate,
+ const smartlist_t *configured_addresses,
+ int reject_interface_addresses,
+ int reject_configured_port_addresses,
+ int add_default_policy,
+ int add_reduced_policy)
+{
+ if (!ipv6_exit) {
+ append_exit_policy_string(dest, "reject *6:*");
+ }
+ if (rejectprivate) {
+ /* Reject IPv4 and IPv6 reserved private netblocks */
+ append_exit_policy_string(dest, "reject private:*");
+ }
+
+ /* Consider rejecting IPv4 and IPv6 advertised relay addresses, outbound bind
+ * addresses, publicly routable addresses, and configured port addresses
+ * on this exit relay */
+ policies_parse_exit_policy_reject_private(dest, ipv6_exit,
+ configured_addresses,
+ reject_interface_addresses,
+ reject_configured_port_addresses);
+
+ if (parse_addr_policy(cfg, dest, -1))
+ return -1;
+
+ /* Before we add the default policy and final rejects, check to see if
+ * there are any lines after accept *:* or reject *:*. These lines have no
+ * effect, and are most likely an error. */
+ policies_log_first_redundant_entry(*dest);
+
+ if (add_reduced_policy) {
+ append_exit_policy_string(dest, REDUCED_EXIT_POLICY);
+ } else if (add_default_policy) {
+ append_exit_policy_string(dest, DEFAULT_EXIT_POLICY);
+ } else {
+ append_exit_policy_string(dest, "reject *4:*");
+ append_exit_policy_string(dest, "reject *6:*");
+ }
+ exit_policy_remove_redundancies(*dest);
+
+ return 0;
+}
+
+/** Parse exit policy in <b>cfg</b> into <b>dest</b> smartlist.
+ *
+ * Prepend an entry that rejects all IPv6 destinations unless
+ * <b>EXIT_POLICY_IPV6_ENABLED</b> bit is set in <b>options</b> bitmask.
+ *
+ * If <b>EXIT_POLICY_REJECT_PRIVATE</b> bit is set in <b>options</b>:
+ * - prepend an entry that rejects all destinations in all netblocks
+ * reserved for private use.
+ * - prepend entries that reject the advertised relay addresses in
+ * configured_addresses
+ * If <b>EXIT_POLICY_REJECT_LOCAL_INTERFACES</b> bit is set in <b>options</b>:
+ * - prepend entries that reject publicly routable addresses on this exit
+ * relay by calling policies_parse_exit_policy_internal
+ * - prepend entries that reject the outbound bind addresses in
+ * configured_addresses
+ * - prepend entries that reject all configured port addresses
+ *
+ * If <b>EXIT_POLICY_ADD_DEFAULT</b> bit is set in <b>options</b>, append
+ * default exit policy entries to the <b>dest</b> smartlist.
+ */
+int
+policies_parse_exit_policy(config_line_t *cfg, smartlist_t **dest,
+ exit_policy_parser_cfg_t options,
+ const smartlist_t *configured_addresses)
+{
+ int ipv6_enabled = (options & EXIT_POLICY_IPV6_ENABLED) ? 1 : 0;
+ int reject_private = (options & EXIT_POLICY_REJECT_PRIVATE) ? 1 : 0;
+ int add_default = (options & EXIT_POLICY_ADD_DEFAULT) ? 1 : 0;
+ int reject_local_interfaces = (options &
+ EXIT_POLICY_REJECT_LOCAL_INTERFACES) ? 1 : 0;
+ int add_reduced = (options & EXIT_POLICY_ADD_REDUCED) ? 1 : 0;
+
+ return policies_parse_exit_policy_internal(cfg,dest,ipv6_enabled,
+ reject_private,
+ configured_addresses,
+ reject_local_interfaces,
+ reject_local_interfaces,
+ add_default,
+ add_reduced);
+}
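+
+/* Illustrative call (cfg and addrs are assumed to be prepared by the
+ * caller): an IPv6-capable exit that rejects private netblocks and appends
+ * the default policy:
+ *
+ *   smartlist_t *result = NULL;
+ *   const exit_policy_parser_cfg_t flags = EXIT_POLICY_IPV6_ENABLED |
+ *                                          EXIT_POLICY_REJECT_PRIVATE |
+ *                                          EXIT_POLICY_ADD_DEFAULT;
+ *   if (policies_parse_exit_policy(cfg, &result, flags, addrs) < 0) {
+ *     // cfg contained an unparseable entry
+ *   }
+ */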
+
+/** Helper function that adds a copy of addr to a smartlist as long as it is
+ * non-NULL and not tor_addr_is_null().
+ *
+ * The caller is responsible for freeing all the tor_addr_t* in the smartlist.
+ */
+static void
+policies_copy_addr_to_smartlist(smartlist_t *addr_list, const tor_addr_t *addr)
+{
+ if (addr && !tor_addr_is_null(addr)) {
+ tor_addr_t *addr_copy = tor_malloc(sizeof(tor_addr_t));
+ tor_addr_copy(addr_copy, addr);
+ smartlist_add(addr_list, addr_copy);
+ }
+}
+
+/** Helper function that adds ipv4h_addr to a smartlist as a tor_addr_t *,
+ * as long as it is nonzero, by converting it to a tor_addr_t
+ * and passing it to policies_copy_addr_to_smartlist().
+ *
+ * The caller is responsible for freeing all the tor_addr_t* in the smartlist.
+ */
+static void
+policies_copy_ipv4h_to_smartlist(smartlist_t *addr_list, uint32_t ipv4h_addr)
+{
+ if (ipv4h_addr) {
+ tor_addr_t ipv4_tor_addr;
+ tor_addr_from_ipv4h(&ipv4_tor_addr, ipv4h_addr);
+ policies_copy_addr_to_smartlist(addr_list, &ipv4_tor_addr);
+ }
+}
+
+/** Helper function that adds copies of or_options->OutboundBindAddresses
+ * to a smartlist as tor_addr_t *, as long as or_options is non-NULL, and
+ * the addresses are not tor_addr_is_null(), by passing them to
+ * policies_copy_addr_to_smartlist().
+ *
+ * The caller is responsible for freeing all the tor_addr_t* in the smartlist.
+ */
+static void
+policies_copy_outbound_addresses_to_smartlist(smartlist_t *addr_list,
+ const or_options_t *or_options)
+{
+ if (or_options) {
+ for (int i=0;i<OUTBOUND_ADDR_MAX;i++) {
+ for (int j=0;j<2;j++) {
+ if (!tor_addr_is_null(&or_options->OutboundBindAddresses[i][j])) {
+ policies_copy_addr_to_smartlist(addr_list,
+ &or_options->OutboundBindAddresses[i][j]);
+ }
+ }
+ }
+ }
+}
+
+/** Parse <b>ExitPolicy</b> member of <b>or_options</b> into <b>result</b>
+ * smartlist.
+ * If <b>or_options->IPv6Exit</b> is false, prepend an entry that
+ * rejects all IPv6 destinations.
+ *
+ * If <b>or_options->ExitPolicyRejectPrivate</b> is true:
+ * - prepend an entry that rejects all destinations in all netblocks reserved
+ * for private use.
+ * - if local_address is non-zero, treat it as a host-order IPv4 address, and
+ * add it to the list of configured addresses.
+ * - if ipv6_local_address is non-NULL, and not the null tor_addr_t, add it
+ * to the list of configured addresses.
+ * If <b>or_options->ExitPolicyRejectLocalInterfaces</b> is true:
+ * - if or_options->OutboundBindAddresses[][0] (=IPv4) is not the null
+ * tor_addr_t, add it to the list of configured addresses.
+ * - if or_options->OutboundBindAddresses[][1] (=IPv6) is not the null
+ * tor_addr_t, add it to the list of configured addresses.
+ *
+ * If <b>or_options->BridgeRelay</b> is false, append entries of default
+ * Tor exit policy into <b>result</b> smartlist.
+ *
+ * If or_options->ExitRelay is false, then make our exit policy into
+ * "reject *:*" regardless.
+ */
+int
+policies_parse_exit_policy_from_options(const or_options_t *or_options,
+ uint32_t local_address,
+ const tor_addr_t *ipv6_local_address,
+ smartlist_t **result)
+{
+ exit_policy_parser_cfg_t parser_cfg = 0;
+ smartlist_t *configured_addresses = NULL;
+ int rv = 0;
+
+ /* Short-circuit for non-exit relays */
+ if (or_options->ExitRelay == 0) {
+ append_exit_policy_string(result, "reject *4:*");
+ append_exit_policy_string(result, "reject *6:*");
+ return 0;
+ }
+
+ configured_addresses = smartlist_new();
+
+ /* Configure the parser */
+ if (or_options->IPv6Exit) {
+ parser_cfg |= EXIT_POLICY_IPV6_ENABLED;
+ }
+
+ if (or_options->ExitPolicyRejectPrivate) {
+ parser_cfg |= EXIT_POLICY_REJECT_PRIVATE;
+ }
+
+ if (!or_options->BridgeRelay) {
+ if (or_options->ReducedExitPolicy)
+ parser_cfg |= EXIT_POLICY_ADD_REDUCED;
+ else
+ parser_cfg |= EXIT_POLICY_ADD_DEFAULT;
+ }
+
+ if (or_options->ExitPolicyRejectLocalInterfaces) {
+ parser_cfg |= EXIT_POLICY_REJECT_LOCAL_INTERFACES;
+ }
+
+ /* Copy the configured addresses into the tor_addr_t* list */
+ if (or_options->ExitPolicyRejectPrivate) {
+ policies_copy_ipv4h_to_smartlist(configured_addresses, local_address);
+ policies_copy_addr_to_smartlist(configured_addresses, ipv6_local_address);
+ }
+
+ if (or_options->ExitPolicyRejectLocalInterfaces) {
+ policies_copy_outbound_addresses_to_smartlist(configured_addresses,
+ or_options);
+ }
+
+ rv = policies_parse_exit_policy(or_options->ExitPolicy, result, parser_cfg,
+ configured_addresses);
+
+ SMARTLIST_FOREACH(configured_addresses, tor_addr_t *, a, tor_free(a));
+ smartlist_free(configured_addresses);
+
+ return rv;
+}
+
+/** Add "reject *:*" to the end of the policy in *<b>dest</b>, allocating
+ * *<b>dest</b> as needed. */
+void
+policies_exit_policy_append_reject_star(smartlist_t **dest)
+{
+ append_exit_policy_string(dest, "reject *4:*");
+ append_exit_policy_string(dest, "reject *6:*");
+}
+
+/** Replace the exit policy of <b>node</b> with reject *:* */
+void
+policies_set_node_exitpolicy_to_reject_all(node_t *node)
+{
+ node->rejects_all = 1;
+}
+
+/** Return 1 if there is at least one /8 subnet in <b>policy</b> that
+ * allows exiting to <b>port</b>. Otherwise, return 0. */
+static int
+exit_policy_is_general_exit_helper(smartlist_t *policy, int port)
+{
+ uint32_t mask, ip, i;
+ /* Is this /8 rejected (1), or undecided (0)? */
+ char subnet_status[256];
+
+ memset(subnet_status, 0, sizeof(subnet_status));
+ SMARTLIST_FOREACH_BEGIN(policy, addr_policy_t *, p) {
+ if (tor_addr_family(&p->addr) != AF_INET)
+ continue; /* IPv4 only for now */
+ if (p->prt_min > port || p->prt_max < port)
+ continue; /* Doesn't cover our port. */
+ mask = 0;
+ tor_assert(p->maskbits <= 32);
+
+ if (p->maskbits)
+ mask = UINT32_MAX<<(32-p->maskbits);
+ ip = tor_addr_to_ipv4h(&p->addr);
+
+ /* Calculate the first and last subnet that this exit policy touches
+ * and set it as loop boundaries. */
+ for (i = ((mask & ip)>>24); i <= (~((mask & ip) ^ mask)>>24); ++i) {
+ tor_addr_t addr;
+ if (subnet_status[i] != 0)
+ continue; /* We already reject some part of this /8 */
+ tor_addr_from_ipv4h(&addr, i<<24);
+ if (tor_addr_is_internal(&addr, 0) &&
+ !get_options()->DirAllowPrivateAddresses) {
+ continue; /* Local or non-routable addresses */
+ }
+ if (p->policy_type == ADDR_POLICY_ACCEPT) {
+ if (p->maskbits > 8)
+ continue; /* Narrower than a /8. */
+ /* We found an allowed subnet of at least size /8. Done
+ * for this port! */
+ return 1;
+ } else if (p->policy_type == ADDR_POLICY_REJECT) {
+ subnet_status[i] = 1;
+ }
+ }
+ } SMARTLIST_FOREACH_END(p);
+ return 0;
+}
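+
+/* Worked example of the loop bounds (hypothetical entry): for
+ * "reject 10.0.0.0/7:*", mask is 0xfe000000, so the loop runs from
+ * i = (mask & ip)>>24 = 10 to ~((mask & ip) ^ mask)>>24 = 11, marking
+ * exactly the 10/8 and 11/8 subnets as rejected. */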
+
+/** Return true iff <b>policy</b> describes a router that is "useful as an
+ * exit node", meaning it allows exit to at least one /8 address space for
+ * each of ports 80 and 443. */
+int
+exit_policy_is_general_exit(smartlist_t *policy)
+{
+ if (!policy) /*XXXX disallow NULL policies? */
+ return 0;
+
+ return (exit_policy_is_general_exit_helper(policy, 80) &&
+ exit_policy_is_general_exit_helper(policy, 443));
+}
+
+/** Return false if <b>policy</b> might permit access to some addr:port;
+ * otherwise if we are certain it rejects everything, return true. If no
+ * part of <b>policy</b> matches, return <b>default_reject</b>.
+ * NULL policies are allowed, and treated as empty. */
+int
+policy_is_reject_star(const smartlist_t *policy, sa_family_t family,
+ int default_reject)
+{
+ if (!policy)
+ return default_reject;
+ SMARTLIST_FOREACH_BEGIN(policy, const addr_policy_t *, p) {
+ if (p->policy_type == ADDR_POLICY_ACCEPT &&
+ (tor_addr_family(&p->addr) == family ||
+ tor_addr_family(&p->addr) == AF_UNSPEC)) {
+ return 0;
+ } else if (p->policy_type == ADDR_POLICY_REJECT &&
+ p->prt_min <= 1 && p->prt_max == 65535 &&
+ p->maskbits == 0 &&
+ (tor_addr_family(&p->addr) == family ||
+ tor_addr_family(&p->addr) == AF_UNSPEC)) {
+ return 1;
+ }
+ } SMARTLIST_FOREACH_END(p);
+ return default_reject;
+}
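+
+/* Example: "reject *4:*, reject *6:*" is reject-star for both AF_INET and
+ * AF_INET6; "reject *4:*, accept *6:80" is reject-star for AF_INET only,
+ * because the accept entry matches the AF_INET6 family. */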
+
+/** Write a single address policy to the <b>buflen</b>-byte buffer at
+ * <b>buf</b>. Return the number of characters written, or -1 on failure. */
+int
+policy_write_item(char *buf, size_t buflen, const addr_policy_t *policy,
+ int format_for_desc)
+{
+ size_t written = 0;
+ char addrbuf[TOR_ADDR_BUF_LEN];
+ const char *addrpart;
+ int result;
+ const int is_accept = policy->policy_type == ADDR_POLICY_ACCEPT;
+ const sa_family_t family = tor_addr_family(&policy->addr);
+ const int is_ip6 = (family == AF_INET6);
+
+ tor_addr_to_str(addrbuf, &policy->addr, sizeof(addrbuf), 1);
+
+ /* write accept/reject 1.2.3.4 */
+ if (policy->is_private) {
+ addrpart = "private";
+ } else if (policy->maskbits == 0) {
+ if (format_for_desc)
+ addrpart = "*";
+ else if (family == AF_INET6)
+ addrpart = "*6";
+ else if (family == AF_INET)
+ addrpart = "*4";
+ else
+ addrpart = "*";
+ } else {
+ addrpart = addrbuf;
+ }
+
+ result = tor_snprintf(buf, buflen, "%s%s %s",
+ is_accept ? "accept" : "reject",
+ (is_ip6&&format_for_desc)?"6":"",
+ addrpart);
+ if (result < 0)
+ return -1;
+ written += strlen(buf);
+ /* If the maskbits is 32 (IPv4) or 128 (IPv6) we don't need to give it. If
+ the mask is 0, we already wrote "*". */
+ if (policy->maskbits < (is_ip6?128:32) && policy->maskbits > 0) {
+ if (tor_snprintf(buf+written, buflen-written, "/%d", policy->maskbits)<0)
+ return -1;
+ written += strlen(buf+written);
+ }
+ if (policy->prt_min <= 1 && policy->prt_max == 65535) {
+ /* There is no port set; write ":*" */
+ if (written+4 > buflen)
+ return -1;
+ strlcat(buf+written, ":*", buflen-written);
+ written += 2;
+ } else if (policy->prt_min == policy->prt_max) {
+ /* There is only one port; write ":80". */
+ result = tor_snprintf(buf+written, buflen-written, ":%d", policy->prt_min);
+ if (result<0)
+ return -1;
+ written += result;
+ } else {
+ /* There is a range of ports; write ":79-80". */
+ result = tor_snprintf(buf+written, buflen-written, ":%d-%d",
+ policy->prt_min, policy->prt_max);
+ if (result<0)
+ return -1;
+ written += result;
+ }
+ if (written < buflen)
+ buf[written] = '\0';
+ else
+ return -1;
+
+ return (int)written;
+}
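+
+/* For example (illustrative), an accept entry for 1.2.3.0/24 covering
+ * ports 80-88 is written as "accept 1.2.3.0/24:80-88", while with
+ * <b>format_for_desc</b> set an IPv6 entry comes out as, e.g.,
+ * "accept6 [::1]:80". */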
+
+/** Create a new exit policy summary, initially with only a single item
+ * covering the full port range 1-65535. */
+/* XXXX This entire thing will do most stuff in O(N^2), or worse. Use an
+ * RB-tree if that turns out to matter. */
+static smartlist_t *
+policy_summary_create(void)
+{
+ smartlist_t *summary;
+ policy_summary_item_t* item;
+
+ item = tor_malloc_zero(sizeof(policy_summary_item_t));
+ item->prt_min = 1;
+ item->prt_max = 65535;
+ item->reject_count = 0;
+ item->accepted = 0;
+
+ summary = smartlist_new();
+ smartlist_add(summary, item);
+
+ return summary;
+}
+
+/** Split the summary item <b>old</b> at the port <b>new_starts</b>.
+ * The current item is changed to end at new_starts - 1; the new item
+ * copies reject_count and accepted from the old item, starts at
+ * new_starts, and ends at the port where the original item
+ * previously ended.
+ */
+static policy_summary_item_t*
+policy_summary_item_split(policy_summary_item_t* old, uint16_t new_starts)
+{
+ policy_summary_item_t* new;
+
+ new = tor_malloc_zero(sizeof(policy_summary_item_t));
+ new->prt_min = new_starts;
+ new->prt_max = old->prt_max;
+ new->reject_count = old->reject_count;
+ new->accepted = old->accepted;
+
+ old->prt_max = new_starts-1;
+
+ tor_assert(old->prt_min <= old->prt_max);
+ tor_assert(new->prt_min <= new->prt_max);
+ return new;
+}
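+
+/* For example, splitting the initial [1,65535] item at port 443 leaves
+ * the old item as [1,442] and returns a new [443,65535] item with the
+ * same reject_count and accepted values. */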
+
+/* XXXX Nick says I'm going to hell for this. If he feels charitably towards
+ * my immortal soul, he can clean it up himself. */
+#define AT(x) ((policy_summary_item_t*)smartlist_get(summary, x))
+
+#define IPV4_BITS (32)
+/* Every IPv4 address is counted as one rejection */
+#define REJECT_CUTOFF_SCALE_IPV4 (0)
+/* Ports are rejected in an IPv4 summary if they are rejected in more than two
+ * IPv4 /8 address blocks */
+#define REJECT_CUTOFF_COUNT_IPV4 (UINT64_C(1) << \
+ (IPV4_BITS - REJECT_CUTOFF_SCALE_IPV4 - 7))
+
+#define IPV6_BITS (128)
+/* IPv6 /64s are counted as one rejection, anything smaller is ignored */
+#define REJECT_CUTOFF_SCALE_IPV6 (64)
+/* Ports are rejected in an IPv6 summary if they are rejected in more than one
+ * IPv6 /16 address block.
+ * This is roughly equivalent to the IPv4 cutoff, as only five IPv6 /12s (and
+ * some scattered smaller blocks) have been allocated to the RIRs.
+ * Network providers are typically allocated one or more IPv6 /32s.
+ */
+#define REJECT_CUTOFF_COUNT_IPV6 (UINT64_C(1) << \
+ (IPV6_BITS - REJECT_CUTOFF_SCALE_IPV6 - 16))
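+
+/* Worked out: REJECT_CUTOFF_COUNT_IPV4 is 1<<25, i.e. two /8s of 1<<24
+ * addresses each; REJECT_CUTOFF_COUNT_IPV6 is 1<<48, i.e. one /16
+ * measured in /64 units (2^(112-64)). */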
+
+/** Split an exit policy summary so that prt_min and prt_max
+ * fall at exactly the start and end of an item respectively.
+ */
+static int
+policy_summary_split(smartlist_t *summary,
+ uint16_t prt_min, uint16_t prt_max)
+{
+ int start_at_index;
+
+ int i = 0;
+
+ while (AT(i)->prt_max < prt_min)
+ i++;
+ if (AT(i)->prt_min != prt_min) {
+ policy_summary_item_t* new_item;
+ new_item = policy_summary_item_split(AT(i), prt_min);
+ smartlist_insert(summary, i+1, new_item);
+ i++;
+ }
+ start_at_index = i;
+
+ while (AT(i)->prt_max < prt_max)
+ i++;
+ if (AT(i)->prt_max != prt_max) {
+ policy_summary_item_t* new_item;
+ new_item = policy_summary_item_split(AT(i), prt_max+1);
+ smartlist_insert(summary, i+1, new_item);
+ }
+
+ return start_at_index;
+}
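+
+/* For example, starting from the single item [1,65535], calling
+ * policy_summary_split(summary, 80, 443) leaves the list as
+ * [1,79][80,443][444,65535] and returns the index of [80,443]. */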
+
+/** Mark port ranges in <b>summary</b> as accepted if their reject count
+ * is at or below the rejection cutoff for <b>family</b>.
+ */
+static void
+policy_summary_accept(smartlist_t *summary,
+ uint16_t prt_min, uint16_t prt_max,
+ sa_family_t family)
+{
+ tor_assert_nonfatal_once(family == AF_INET || family == AF_INET6);
+ uint64_t family_reject_count = ((family == AF_INET) ?
+ REJECT_CUTOFF_COUNT_IPV4 :
+ REJECT_CUTOFF_COUNT_IPV6);
+
+ int i = policy_summary_split(summary, prt_min, prt_max);
+ while (i < smartlist_len(summary) &&
+ AT(i)->prt_max <= prt_max) {
+ if (!AT(i)->accepted &&
+ AT(i)->reject_count <= family_reject_count)
+ AT(i)->accepted = 1;
+ i++;
+ }
+ tor_assert(i < smartlist_len(summary) || prt_max==65535);
+}
+
+/** Add the (scaled) number of addresses in a <b>family</b> network with
+ * prefix length <b>maskbits</b> to the reject count of every summary item
+ * covering the given port range. */
+static void
+policy_summary_reject(smartlist_t *summary,
+ maskbits_t maskbits,
+ uint16_t prt_min, uint16_t prt_max,
+ sa_family_t family)
+{
+ tor_assert_nonfatal_once(family == AF_INET || family == AF_INET6);
+
+ int i = policy_summary_split(summary, prt_min, prt_max);
+
+ /* The length of a single address mask */
+ int addrbits = (family == AF_INET) ? IPV4_BITS : IPV6_BITS;
+ tor_assert_nonfatal_once(addrbits >= maskbits);
+
+ /* We divide IPv6 address counts by (1 << scale) to keep them in a uint64_t
+ */
+ int scale = ((family == AF_INET) ?
+ REJECT_CUTOFF_SCALE_IPV4 :
+ REJECT_CUTOFF_SCALE_IPV6);
+
+ tor_assert_nonfatal_once(addrbits >= scale);
+ if (maskbits > (addrbits - scale)) {
+ tor_assert_nonfatal_once(family == AF_INET6);
+ /* The address range is so small, we'd need billions of them to reach the
+ * rejection limit. So we ignore this range in the reject count. */
+ return;
+ }
+
+ uint64_t count = 0;
+ if (addrbits - scale - maskbits >= 64) {
+ tor_assert_nonfatal_once(family == AF_INET6);
+ /* The address range is so large, it's an automatic rejection for all ports
+ * in the range. */
+ count = UINT64_MAX;
+ } else {
+ count = (UINT64_C(1) << (addrbits - scale - maskbits));
+ }
+ tor_assert_nonfatal_once(count > 0);
+ while (i < smartlist_len(summary) &&
+ AT(i)->prt_max <= prt_max) {
+ if (AT(i)->reject_count <= UINT64_MAX - count) {
+ AT(i)->reject_count += count;
+ } else {
+ /* IPv4 would require a 4-billion address redundant policy to get here,
+ * but IPv6 just needs to have ::/0 */
+ if (family == AF_INET) {
+ tor_assert_nonfatal_unreached_once();
+ }
+ /* If we do get here, use saturating arithmetic */
+ AT(i)->reject_count = UINT64_MAX;
+ }
+ i++;
+ }
+ tor_assert(i < smartlist_len(summary) || prt_max==65535);
+}
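+
+/* For example, rejecting an IPv4 /8 adds 1<<24 to each item in the port
+ * range, while rejecting an IPv6 /32 adds 1<<32 (the number of /64s it
+ * contains, since 128 - 64 - 32 = 32). */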
+
+/** Add a single exit policy item to our summary:
+ *
+ * If it is an accept, ignore it unless it is for all IP addresses
+ * ("*", i.e. its prefixlen/maskbits is 0). Otherwise call
+ * policy_summary_accept().
+ *
+ * If it is a reject, ignore it if it is about one of the private
+ * networks. Otherwise call policy_summary_reject().
+ */
+static void
+policy_summary_add_item(smartlist_t *summary, addr_policy_t *p)
+{
+ if (p->policy_type == ADDR_POLICY_ACCEPT) {
+ if (p->maskbits == 0) {
+ policy_summary_accept(summary, p->prt_min, p->prt_max, p->addr.family);
+ }
+ } else if (p->policy_type == ADDR_POLICY_REJECT) {
+
+ int is_private = 0;
+ int i;
+ for (i = 0; private_nets[i]; ++i) {
+ tor_addr_t addr;
+ maskbits_t maskbits;
+ if (tor_addr_parse_mask_ports(private_nets[i], 0, &addr,
+ &maskbits, NULL, NULL)<0) {
+ tor_assert(0);
+ }
+ if (tor_addr_compare(&p->addr, &addr, CMP_EXACT) == 0 &&
+ p->maskbits == maskbits) {
+ is_private = 1;
+ break;
+ }
+ }
+
+ if (!is_private) {
+ policy_summary_reject(summary, p->maskbits, p->prt_min, p->prt_max,
+ p->addr.family);
+ }
+ } else
+ tor_assert(0);
+}
+
+/** Create a string representing a summary for an exit policy.
+ * The summary will either be an "accept" plus a comma-separated list of port
+ * ranges or a "reject" plus port-ranges, depending on which is shorter.
+ *
+ * If no exits are allowed at all then "reject 1-65535" is returned. If no
+ * ports are blocked at all, we return "accept 1-65535" instead. (These
+ * are exceptions to the shorter-representation-wins rule.)
+ */
+char *
+policy_summarize(smartlist_t *policy, sa_family_t family)
+{
+ smartlist_t *summary = policy_summary_create();
+ smartlist_t *accepts, *rejects;
+ int i, last, start_prt;
+ size_t accepts_len, rejects_len;
+ char *accepts_str = NULL, *rejects_str = NULL, *shorter_str, *result;
+ const char *prefix;
+
+ tor_assert(policy);
+
+ /* Create the summary list */
+ SMARTLIST_FOREACH_BEGIN(policy, addr_policy_t *, p) {
+ sa_family_t f = tor_addr_family(&p->addr);
+ if (f != AF_INET && f != AF_INET6) {
+ log_warn(LD_BUG, "Weird family when summarizing address policy");
+ }
+ if (f != family)
+ continue;
+ policy_summary_add_item(summary, p);
+ } SMARTLIST_FOREACH_END(p);
+
+ /* Now create two lists of strings, one for accepted and one
+ * for rejected ports. We take care to merge ranges so that
+ * we avoid getting stuff like "1-4,5-9,10", instead we want
+ * "1-10"
+ */
+ i = 0;
+ start_prt = 1;
+ accepts = smartlist_new();
+ rejects = smartlist_new();
+ while (1) {
+ last = i == smartlist_len(summary)-1;
+ if (last ||
+ AT(i)->accepted != AT(i+1)->accepted) {
+ char buf[POLICY_BUF_LEN];
+
+ if (start_prt == AT(i)->prt_max)
+ tor_snprintf(buf, sizeof(buf), "%d", start_prt);
+ else
+ tor_snprintf(buf, sizeof(buf), "%d-%d", start_prt, AT(i)->prt_max);
+
+ if (AT(i)->accepted)
+ smartlist_add_strdup(accepts, buf);
+ else
+ smartlist_add_strdup(rejects, buf);
+
+ if (last)
+ break;
+
+ start_prt = AT(i+1)->prt_min;
+    }
+    i++;
+  }
+
+ /* Figure out which of the two stringlists will be shorter and use
+ * that to build the result
+ */
+ if (smartlist_len(accepts) == 0) { /* no exits at all */
+ result = tor_strdup("reject 1-65535");
+ goto cleanup;
+ }
+ if (smartlist_len(rejects) == 0) { /* no rejects at all */
+ result = tor_strdup("accept 1-65535");
+ goto cleanup;
+ }
+
+ accepts_str = smartlist_join_strings(accepts, ",", 0, &accepts_len);
+ rejects_str = smartlist_join_strings(rejects, ",", 0, &rejects_len);
+
+ if (rejects_len > MAX_EXITPOLICY_SUMMARY_LEN-strlen("reject")-1 &&
+ accepts_len > MAX_EXITPOLICY_SUMMARY_LEN-strlen("accept")-1) {
+ char *c;
+ shorter_str = accepts_str;
+ prefix = "accept";
+
+ c = shorter_str + (MAX_EXITPOLICY_SUMMARY_LEN-strlen(prefix)-1);
+    while (c >= shorter_str && *c != ',')
+      c--;
+ tor_assert(c >= shorter_str);
+ tor_assert(*c == ',');
+ *c = '\0';
+
+ } else if (rejects_len < accepts_len) {
+ shorter_str = rejects_str;
+ prefix = "reject";
+ } else {
+ shorter_str = accepts_str;
+ prefix = "accept";
+ }
+
+ tor_asprintf(&result, "%s %s", prefix, shorter_str);
+
+ cleanup:
+ /* cleanup */
+ SMARTLIST_FOREACH(summary, policy_summary_item_t *, s, tor_free(s));
+ smartlist_free(summary);
+
+ tor_free(accepts_str);
+ SMARTLIST_FOREACH(accepts, char *, s, tor_free(s));
+ smartlist_free(accepts);
+
+ tor_free(rejects_str);
+ SMARTLIST_FOREACH(rejects, char *, s, tor_free(s));
+ smartlist_free(rejects);
+
+ return result;
+}
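+
+/* For example (illustrative), the policy "accept *:80, accept *:443,
+ * reject *:*" summarizes to "accept 80,443", while a default-accept
+ * policy that rejects only port 25 summarizes to "reject 25". */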
+
+/** Convert a summarized policy string into a short_policy_t. Return NULL
+ * if the string is not well-formed. */
+short_policy_t *
+parse_short_policy(const char *summary)
+{
+ const char *orig_summary = summary;
+ short_policy_t *result;
+ int is_accept;
+ int n_entries;
+ short_policy_entry_t entries[MAX_EXITPOLICY_SUMMARY_LEN]; /* overkill */
+ const char *next;
+
+ if (!strcmpstart(summary, "accept ")) {
+ is_accept = 1;
+ summary += strlen("accept ");
+ } else if (!strcmpstart(summary, "reject ")) {
+ is_accept = 0;
+ summary += strlen("reject ");
+ } else {
+ log_fn(LOG_PROTOCOL_WARN, LD_DIR, "Unrecognized policy summary keyword");
+ return NULL;
+ }
+
+ n_entries = 0;
+ for ( ; *summary; summary = next) {
+ const char *comma = strchr(summary, ',');
+ unsigned low, high;
+ char dummy;
+ char ent_buf[32];
+ size_t len;
+
+ next = comma ? comma+1 : strchr(summary, '\0');
+ len = comma ? (size_t)(comma - summary) : strlen(summary);
+
+ if (n_entries == MAX_EXITPOLICY_SUMMARY_LEN) {
+ log_fn(LOG_PROTOCOL_WARN, LD_DIR, "Impossibly long policy summary %s",
+ escaped(orig_summary));
+ return NULL;
+ }
+
+ if (! TOR_ISDIGIT(*summary) || len > (sizeof(ent_buf)-1)) {
+ /* unrecognized entry format. skip it. */
+ continue;
+ }
+ if (len < 1) {
+ /* empty; skip it. */
+ /* XXX This happens to be unreachable, since if len==0, then *summary is
+ * ',' or '\0', and the TOR_ISDIGIT test above would have failed. */
+ continue;
+ }
+
+ memcpy(ent_buf, summary, len);
+ ent_buf[len] = '\0';
+
+ if (tor_sscanf(ent_buf, "%u-%u%c", &low, &high, &dummy) == 2) {
+ if (low<1 || low>65535 || high<1 || high>65535 || low>high) {
+ log_fn(LOG_PROTOCOL_WARN, LD_DIR,
+ "Found bad entry in policy summary %s", escaped(orig_summary));
+ return NULL;
+ }
+ } else if (tor_sscanf(ent_buf, "%u%c", &low, &dummy) == 1) {
+ if (low<1 || low>65535) {
+ log_fn(LOG_PROTOCOL_WARN, LD_DIR,
+ "Found bad entry in policy summary %s", escaped(orig_summary));
+ return NULL;
+ }
+ high = low;
+ } else {
+ log_fn(LOG_PROTOCOL_WARN, LD_DIR,"Found bad entry in policy summary %s",
+ escaped(orig_summary));
+ return NULL;
+ }
+
+ entries[n_entries].min_port = low;
+ entries[n_entries].max_port = high;
+ n_entries++;
+ }
+
+ if (n_entries == 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_DIR,
+ "Found no port-range entries in summary %s", escaped(orig_summary));
+ return NULL;
+ }
+
+ {
+ size_t size = offsetof(short_policy_t, entries) +
+ sizeof(short_policy_entry_t)*(n_entries);
+ result = tor_malloc_zero(size);
+
+ tor_assert( (char*)&result->entries[n_entries-1] < ((char*)result)+size);
+ }
+
+ result->is_accept = is_accept;
+ result->n_entries = n_entries;
+ memcpy(result->entries, entries, sizeof(short_policy_entry_t)*n_entries);
+ return result;
+}
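+
+/* For example (illustrative sketch):
+ *
+ *   short_policy_t *sp = parse_short_policy("accept 80,443");
+ *
+ * yields sp->is_accept == 1 and sp->n_entries == 2, with entries
+ * {80,80} and {443,443}; release it with short_policy_free(sp). */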
+
+/** Write <b>policy</b> back out into a string. */
+char *
+write_short_policy(const short_policy_t *policy)
+{
+ int i;
+ char *answer;
+ smartlist_t *sl = smartlist_new();
+
+ smartlist_add_asprintf(sl, "%s", policy->is_accept ? "accept " : "reject ");
+
+ for (i=0; i < policy->n_entries; i++) {
+ const short_policy_entry_t *e = &policy->entries[i];
+ if (e->min_port == e->max_port) {
+ smartlist_add_asprintf(sl, "%d", e->min_port);
+ } else {
+ smartlist_add_asprintf(sl, "%d-%d", e->min_port, e->max_port);
+ }
+ if (i < policy->n_entries-1)
+ smartlist_add_strdup(sl, ",");
+ }
+ answer = smartlist_join_strings(sl, "", 0, NULL);
+ SMARTLIST_FOREACH(sl, char *, a, tor_free(a));
+ smartlist_free(sl);
+ return answer;
+}
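+
+/* Note that write_short_policy() is the inverse of parse_short_policy()
+ * for well-formed summaries: parsing "accept 80,443-448" and writing it
+ * back reproduces the same string. */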
+
+/** Release all storage held in <b>policy</b>. */
+void
+short_policy_free_(short_policy_t *policy)
+{
+ tor_free(policy);
+}
+
+/** See whether the <b>addr</b>:<b>port</b> address is likely to be accepted
+ * or rejected by the summarized policy <b>policy</b>. Return values are as
+ * for compare_tor_addr_to_addr_policy. Unlike the regular addr_policy
+ * functions, this requires that <b>port</b> be specified. */
+addr_policy_result_t
+compare_tor_addr_to_short_policy(const tor_addr_t *addr, uint16_t port,
+ const short_policy_t *policy)
+{
+ int i;
+ int found_match = 0;
+ int accept_;
+
+ tor_assert(port != 0);
+
+ if (addr && tor_addr_is_null(addr))
+ addr = NULL; /* Unspec means 'no address at all,' in this context. */
+
+ if (addr && get_options()->ClientRejectInternalAddresses &&
+ (tor_addr_is_internal(addr, 0) || tor_addr_is_loopback(addr)))
+ return ADDR_POLICY_REJECTED;
+
+ for (i=0; i < policy->n_entries; ++i) {
+ const short_policy_entry_t *e = &policy->entries[i];
+ if (e->min_port <= port && port <= e->max_port) {
+ found_match = 1;
+ break;
+ }
+ }
+
+ if (found_match)
+ accept_ = policy->is_accept;
+ else
+ accept_ = ! policy->is_accept;
+
+ /* ???? are these right? -NM */
+ /* We should be sure not to return ADDR_POLICY_ACCEPTED in the accept
+ * case here, because it would cause clients to believe that the node
+ * allows exit enclaving. Trying it anyway would open up a cool attack
+ * where the node refuses due to exitpolicy, the client reacts in
+ * surprise by rewriting the node's exitpolicy to reject *:*, and then
+ * an adversary targets users by causing them to attempt such connections
+ * to 98% of the exits.
+ *
+ * Once microdescriptors can handle addresses in special cases (e.g. if
+ * we ever solve ticket 1774), we can provide certainty here. -RD */
+ if (accept_)
+ return ADDR_POLICY_PROBABLY_ACCEPTED;
+ else
+ return ADDR_POLICY_REJECTED;
+}
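+
+/* For example, against the summary "accept 80,443" (and a routable
+ * address), port 80 yields ADDR_POLICY_PROBABLY_ACCEPTED and port 25
+ * yields ADDR_POLICY_REJECTED. */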
+
+/** Return true iff <b>policy</b> seems to reject all ports. */
+int
+short_policy_is_reject_star(const short_policy_t *policy)
+{
+ /* This doesn't need to be as much on the lookout as policy_is_reject_star,
+ * since policy summaries are from the consensus or from consensus
+ * microdescs.
+ */
+ tor_assert(policy);
+ /* Check for an exact match of "reject 1-65535". */
+ return (policy->is_accept == 0 && policy->n_entries == 1 &&
+ policy->entries[0].min_port == 1 &&
+ policy->entries[0].max_port == 65535);
+}
+
+/** Decide whether addr:port is probably or definitely accepted or rejected by
+ * <b>node</b>. See compare_tor_addr_to_addr_policy for details on addr/port
+ * interpretation. */
+addr_policy_result_t
+compare_tor_addr_to_node_policy(const tor_addr_t *addr, uint16_t port,
+ const node_t *node)
+{
+ if (node->rejects_all)
+ return ADDR_POLICY_REJECTED;
+
+ if (addr && tor_addr_family(addr) == AF_INET6) {
+ const short_policy_t *p = NULL;
+ if (node->ri)
+ p = node->ri->ipv6_exit_policy;
+ else if (node->md)
+ p = node->md->ipv6_exit_policy;
+ if (p)
+ return compare_tor_addr_to_short_policy(addr, port, p);
+ else
+ return ADDR_POLICY_REJECTED;
+ }
+
+ if (node->ri) {
+ return compare_tor_addr_to_addr_policy(addr, port, node->ri->exit_policy);
+ } else if (node->md) {
+ if (node->md->exit_policy == NULL)
+ return ADDR_POLICY_REJECTED;
+ else
+ return compare_tor_addr_to_short_policy(addr, port,
+ node->md->exit_policy);
+ } else {
+ return ADDR_POLICY_PROBABLY_REJECTED;
+ }
+}
+
+/**
+ * Given <b>policy_list</b>, a list of addr_policy_t, produce a string
+ * representation of the list.
+ * If <b>include_ipv4</b> is true, include IPv4 entries.
+ * If <b>include_ipv6</b> is true, include IPv6 entries.
+ */
+char *
+policy_dump_to_string(const smartlist_t *policy_list,
+ int include_ipv4,
+ int include_ipv6)
+{
+ smartlist_t *policy_string_list;
+ char *policy_string = NULL;
+
+ policy_string_list = smartlist_new();
+
+ SMARTLIST_FOREACH_BEGIN(policy_list, addr_policy_t *, tmpe) {
+ char *pbuf;
+ int bytes_written_to_pbuf;
+ if ((tor_addr_family(&tmpe->addr) == AF_INET6) && (!include_ipv6)) {
+ continue; /* Don't include IPv6 parts of address policy */
+ }
+ if ((tor_addr_family(&tmpe->addr) == AF_INET) && (!include_ipv4)) {
+ continue; /* Don't include IPv4 parts of address policy */
+ }
+
+ pbuf = tor_malloc(POLICY_BUF_LEN);
+ bytes_written_to_pbuf = policy_write_item(pbuf,POLICY_BUF_LEN, tmpe, 1);
+
+ if (bytes_written_to_pbuf < 0) {
+ log_warn(LD_BUG, "policy_dump_to_string ran out of room!");
+ tor_free(pbuf);
+ goto done;
+ }
+
+ smartlist_add(policy_string_list,pbuf);
+ } SMARTLIST_FOREACH_END(tmpe);
+
+ policy_string = smartlist_join_strings(policy_string_list, "\n", 0, NULL);
+
+ done:
+ SMARTLIST_FOREACH(policy_string_list, char *, str, tor_free(str));
+ smartlist_free(policy_string_list);
+
+ return policy_string;
+}
+
+/** Implementation for GETINFO control command: knows the answer for questions
+ * about "exit-policy/..." */
+int
+getinfo_helper_policies(control_connection_t *conn,
+ const char *question, char **answer,
+ const char **errmsg)
+{
+ (void) conn;
+ (void) errmsg;
+ if (!strcmp(question, "exit-policy/default")) {
+ *answer = tor_strdup(DEFAULT_EXIT_POLICY);
+ } else if (!strcmp(question, "exit-policy/reject-private/default")) {
+ smartlist_t *private_policy_strings;
+ const char **priv = private_nets;
+
+ private_policy_strings = smartlist_new();
+
+ while (*priv != NULL) {
+ /* IPv6 addresses are in "[]" and contain ":",
+ * IPv4 addresses are not in "[]" and contain "." */
+ smartlist_add_asprintf(private_policy_strings, "reject %s:*", *priv);
+ priv++;
+ }
+
+ *answer = smartlist_join_strings(private_policy_strings,
+ ",", 0, NULL);
+
+ SMARTLIST_FOREACH(private_policy_strings, char *, str, tor_free(str));
+ smartlist_free(private_policy_strings);
+ } else if (!strcmp(question, "exit-policy/reject-private/relay")) {
+ const or_options_t *options = get_options();
+ int err = 0;
+ const routerinfo_t *me = router_get_my_routerinfo_with_err(&err);
+
+ if (!me) {
+ *errmsg = routerinfo_err_to_string(err);
+ return routerinfo_err_is_transient(err) ? -1 : 0;
+ }
+
+ if (!options->ExitPolicyRejectPrivate &&
+ !options->ExitPolicyRejectLocalInterfaces) {
+ *answer = tor_strdup("");
+ return 0;
+ }
+
+ smartlist_t *private_policy_list = smartlist_new();
+ smartlist_t *configured_addresses = smartlist_new();
+
+ /* Copy the configured addresses into the tor_addr_t* list */
+ if (options->ExitPolicyRejectPrivate) {
+ policies_copy_ipv4h_to_smartlist(configured_addresses, me->addr);
+ policies_copy_addr_to_smartlist(configured_addresses, &me->ipv6_addr);
+ }
+
+ if (options->ExitPolicyRejectLocalInterfaces) {
+ policies_copy_outbound_addresses_to_smartlist(configured_addresses,
+ options);
+ }
+
+ policies_parse_exit_policy_reject_private(
+ &private_policy_list,
+ options->IPv6Exit,
+ configured_addresses,
+ options->ExitPolicyRejectLocalInterfaces,
+ options->ExitPolicyRejectLocalInterfaces);
+ *answer = policy_dump_to_string(private_policy_list, 1, 1);
+
+ addr_policy_list_free(private_policy_list);
+ SMARTLIST_FOREACH(configured_addresses, tor_addr_t *, a, tor_free(a));
+ smartlist_free(configured_addresses);
+ } else if (!strcmpstart(question, "exit-policy/")) {
+ int include_ipv4 = 0;
+ int include_ipv6 = 0;
+
+ int err = 0;
+ const routerinfo_t *me = router_get_my_routerinfo_with_err(&err);
+
+ if (!me) {
+ *errmsg = routerinfo_err_to_string(err);
+ return routerinfo_err_is_transient(err) ? -1 : 0;
+ }
+
+ if (!strcmp(question, "exit-policy/ipv4")) {
+ include_ipv4 = 1;
+ } else if (!strcmp(question, "exit-policy/ipv6")) {
+ include_ipv6 = 1;
+ } else if (!strcmp(question, "exit-policy/full")) {
+ include_ipv4 = include_ipv6 = 1;
+ } else {
+ return 0; /* No such key. */
+ }
+
+ *answer = router_dump_exit_policy_to_string(me,include_ipv4,
+ include_ipv6);
+ }
+
+ return 0;
+}
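+
+/* A controller can exercise this via e.g. "GETINFO exit-policy/full",
+ * which returns this relay's full exit policy, one entry per line. */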
+
+/** Release all storage held by <b>lst</b>. */
+void
+addr_policy_list_free_(smartlist_t *lst)
+{
+ if (!lst)
+ return;
+ SMARTLIST_FOREACH(lst, addr_policy_t *, policy, addr_policy_free(policy));
+ smartlist_free(lst);
+}
+
+/** Release a reference to <b>p</b>, and free it when no references
+ * remain. */
+void
+addr_policy_free_(addr_policy_t *p)
+{
+ if (!p)
+ return;
+
+ if (--p->refcnt <= 0) {
+ if (p->is_canonical) {
+ policy_map_ent_t search, *found;
+ search.policy = p;
+ found = HT_REMOVE(policy_map, &policy_root, &search);
+ if (found) {
+ tor_assert(p == found->policy);
+ tor_free(found);
+ }
+ }
+ tor_free(p);
+ }
+}
+
+/** Release all storage held by policy variables. */
+void
+policies_free_all(void)
+{
+ addr_policy_list_free(reachable_or_addr_policy);
+ reachable_or_addr_policy = NULL;
+ addr_policy_list_free(reachable_dir_addr_policy);
+ reachable_dir_addr_policy = NULL;
+ addr_policy_list_free(socks_policy);
+ socks_policy = NULL;
+ addr_policy_list_free(dir_policy);
+ dir_policy = NULL;
+ addr_policy_list_free(authdir_reject_policy);
+ authdir_reject_policy = NULL;
+ addr_policy_list_free(authdir_invalid_policy);
+ authdir_invalid_policy = NULL;
+ addr_policy_list_free(authdir_badexit_policy);
+ authdir_badexit_policy = NULL;
+
+ if (!HT_EMPTY(&policy_root)) {
+ policy_map_ent_t **ent;
+ int n = 0;
+ char buf[POLICY_BUF_LEN];
+
+ log_warn(LD_MM, "Still had %d address policies cached at shutdown.",
+ (int)HT_SIZE(&policy_root));
+
+ /* Note the first 10 cached policies to try to figure out where they
+ * might be coming from. */
+ HT_FOREACH(ent, policy_map, &policy_root) {
+ if (++n > 10)
+ break;
+ if (policy_write_item(buf, sizeof(buf), (*ent)->policy, 0) >= 0)
+ log_warn(LD_MM," %d [%d]: %s", n, (*ent)->policy->refcnt, buf);
+ }
+ }
+ HT_CLEAR(policy_map, &policy_root);
+}
diff --git a/src/core/or/policies.h b/src/core/or/policies.h
new file mode 100644
index 0000000000..7da3ba031f
--- /dev/null
+++ b/src/core/or/policies.h
@@ -0,0 +1,187 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file policies.h
+ * \brief Header file for policies.c.
+ **/
+
+#ifndef TOR_POLICIES_H
+#define TOR_POLICIES_H
+
+/* (length of
+ * "accept6 [ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff]/128:65535-65535\n"
+ * plus a terminating NUL, rounded up to a nice number.)
+ */
+#define POLICY_BUF_LEN 72
+
+#define EXIT_POLICY_IPV6_ENABLED (1 << 0)
+#define EXIT_POLICY_REJECT_PRIVATE (1 << 1)
+#define EXIT_POLICY_ADD_DEFAULT (1 << 2)
+#define EXIT_POLICY_REJECT_LOCAL_INTERFACES (1 << 3)
+#define EXIT_POLICY_ADD_REDUCED (1 << 4)
+#define EXIT_POLICY_OPTION_MAX EXIT_POLICY_ADD_REDUCED
+/* All options set: used for unit testing */
+#define EXIT_POLICY_OPTION_ALL ((EXIT_POLICY_OPTION_MAX << 1) - 1)
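+
+/* For example (illustrative), a caller that wants IPv6 exit entries plus
+ * the default policy appended would pass
+ * (EXIT_POLICY_IPV6_ENABLED | EXIT_POLICY_ADD_DEFAULT) as the
+ * exit_policy_parser_cfg_t options declared below. */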
+
+typedef enum firewall_connection_t {
+ FIREWALL_OR_CONNECTION = 0,
+ FIREWALL_DIR_CONNECTION = 1
+} firewall_connection_t;
+
+typedef int exit_policy_parser_cfg_t;
+
+/** Outcome of applying an address policy to an address. */
+typedef enum {
+ /** The address was accepted */
+ ADDR_POLICY_ACCEPTED=0,
+ /** The address was rejected */
+ ADDR_POLICY_REJECTED=-1,
+ /** Part of the address was unknown, but as far as we can tell, it was
+ * accepted. */
+ ADDR_POLICY_PROBABLY_ACCEPTED=1,
+ /** Part of the address was unknown, but as far as we can tell, it was
+ * rejected. */
+ ADDR_POLICY_PROBABLY_REJECTED=2,
+} addr_policy_result_t;
+
+/** A single entry in a parsed policy summary, describing a range of ports. */
+typedef struct short_policy_entry_t {
+ uint16_t min_port, max_port;
+} short_policy_entry_t;
+
+/** A short_policy_t is the parsed version of a policy summary. */
+typedef struct short_policy_t {
+ /** True if the members of 'entries' are port ranges to accept; false if
+ * they are port ranges to reject */
+ unsigned int is_accept : 1;
+ /** The actual number of values in 'entries'. */
+ unsigned int n_entries : 31;
+ /** An array of 0 or more short_policy_entry_t values, each describing a
+ * range of ports that this policy accepts or rejects (depending on the
+ * value of is_accept).
+ */
+ short_policy_entry_t entries[FLEXIBLE_ARRAY_MEMBER];
+} short_policy_t;
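+
+/* Since 'entries' is a flexible array member, a short_policy_t holding n
+ * entries is allocated as offsetof(short_policy_t, entries) +
+ * n * sizeof(short_policy_entry_t); parse_short_policy() in policies.c
+ * allocates exactly this way. */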
+
+int firewall_is_fascist_or(void);
+int firewall_is_fascist_dir(void);
+int fascist_firewall_use_ipv6(const or_options_t *options);
+int fascist_firewall_prefer_ipv6_orport(const or_options_t *options);
+int fascist_firewall_prefer_ipv6_dirport(const or_options_t *options);
+
+int fascist_firewall_allows_address_addr(const tor_addr_t *addr,
+ uint16_t port,
+ firewall_connection_t fw_connection,
+ int pref_only, int pref_ipv6);
+
+int fascist_firewall_allows_rs(const routerstatus_t *rs,
+ firewall_connection_t fw_connection,
+ int pref_only);
+int fascist_firewall_allows_node(const node_t *node,
+ firewall_connection_t fw_connection,
+ int pref_only);
+int fascist_firewall_allows_dir_server(const dir_server_t *ds,
+ firewall_connection_t fw_connection,
+ int pref_only);
+
+void fascist_firewall_choose_address_rs(const routerstatus_t *rs,
+ firewall_connection_t fw_connection,
+ int pref_only, tor_addr_port_t* ap);
+void fascist_firewall_choose_address_node(const node_t *node,
+ firewall_connection_t fw_connection,
+ int pref_only, tor_addr_port_t* ap);
+void fascist_firewall_choose_address_dir_server(const dir_server_t *ds,
+ firewall_connection_t fw_connection,
+ int pref_only, tor_addr_port_t* ap);
+
+int dir_policy_permits_address(const tor_addr_t *addr);
+int socks_policy_permits_address(const tor_addr_t *addr);
+int authdir_policy_permits_address(uint32_t addr, uint16_t port);
+int authdir_policy_valid_address(uint32_t addr, uint16_t port);
+int authdir_policy_badexit_address(uint32_t addr, uint16_t port);
+
+int validate_addr_policies(const or_options_t *options, char **msg);
+void policy_expand_private(smartlist_t **policy);
+void policy_expand_unspec(smartlist_t **policy);
+int policies_parse_from_options(const or_options_t *options);
+
+addr_policy_t *addr_policy_get_canonical_entry(addr_policy_t *ent);
+int addr_policies_eq(const smartlist_t *a, const smartlist_t *b);
+MOCK_DECL(addr_policy_result_t, compare_tor_addr_to_addr_policy,
+ (const tor_addr_t *addr, uint16_t port, const smartlist_t *policy));
+addr_policy_result_t compare_tor_addr_to_node_policy(const tor_addr_t *addr,
+ uint16_t port, const node_t *node);
+
+int policies_parse_exit_policy_from_options(
+ const or_options_t *or_options,
+ uint32_t local_address,
+ const tor_addr_t *ipv6_local_address,
+ smartlist_t **result);
+struct config_line_t;
+int policies_parse_exit_policy(struct config_line_t *cfg, smartlist_t **dest,
+ exit_policy_parser_cfg_t options,
+ const smartlist_t *configured_addresses);
+void policies_parse_exit_policy_reject_private(
+ smartlist_t **dest,
+ int ipv6_exit,
+ const smartlist_t *configured_addresses,
+ int reject_interface_addresses,
+ int reject_configured_port_addresses);
+void policies_exit_policy_append_reject_star(smartlist_t **dest);
+void addr_policy_append_reject_addr(smartlist_t **dest,
+ const tor_addr_t *addr);
+void addr_policy_append_reject_addr_list(smartlist_t **dest,
+ const smartlist_t *addrs);
+void policies_set_node_exitpolicy_to_reject_all(node_t *exitrouter);
+int exit_policy_is_general_exit(smartlist_t *policy);
+int policy_is_reject_star(const smartlist_t *policy, sa_family_t family,
+ int reject_by_default);
+char * policy_dump_to_string(const smartlist_t *policy_list,
+ int include_ipv4,
+ int include_ipv6);
+int getinfo_helper_policies(control_connection_t *conn,
+ const char *question, char **answer,
+ const char **errmsg);
+int policy_write_item(char *buf, size_t buflen, const addr_policy_t *item,
+ int format_for_desc);
+
+void addr_policy_list_free_(smartlist_t *p);
+#define addr_policy_list_free(lst) \
+ FREE_AND_NULL(smartlist_t, addr_policy_list_free_, (lst))
+void addr_policy_free_(addr_policy_t *p);
+#define addr_policy_free(p) \
+ FREE_AND_NULL(addr_policy_t, addr_policy_free_, (p))
+void policies_free_all(void);
+
+char *policy_summarize(smartlist_t *policy, sa_family_t family);
+
+short_policy_t *parse_short_policy(const char *summary);
+char *write_short_policy(const short_policy_t *policy);
+void short_policy_free_(short_policy_t *policy);
+#define short_policy_free(p) \
+ FREE_AND_NULL(short_policy_t, short_policy_free_, (p))
+int short_policy_is_reject_star(const short_policy_t *policy);
+addr_policy_result_t compare_tor_addr_to_short_policy(
+ const tor_addr_t *addr, uint16_t port,
+ const short_policy_t *policy);
+
+#ifdef POLICIES_PRIVATE
+STATIC void append_exit_policy_string(smartlist_t **policy, const char *more);
+STATIC int fascist_firewall_allows_address(const tor_addr_t *addr,
+ uint16_t port,
+ smartlist_t *firewall_policy,
+ int pref_only, int pref_ipv6);
+STATIC const tor_addr_port_t * fascist_firewall_choose_address(
+ const tor_addr_port_t *a,
+ const tor_addr_port_t *b,
+ int want_a,
+ firewall_connection_t fw_connection,
+ int pref_only, int pref_ipv6);
+
+#endif /* defined(POLICIES_PRIVATE) */
+
+#endif /* !defined(TOR_POLICIES_H) */
diff --git a/src/core/or/port_cfg_st.h b/src/core/or/port_cfg_st.h
new file mode 100644
index 0000000000..86a3b963bc
--- /dev/null
+++ b/src/core/or/port_cfg_st.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef PORT_CFG_ST_H
+#define PORT_CFG_ST_H
+
+#include "or/entry_port_cfg_st.h"
+#include "or/server_port_cfg_st.h"
+
+/** Configuration for a single port that we're listening on. */
+struct port_cfg_t {
+ tor_addr_t addr; /**< The actual IP to listen on, if !is_unix_addr. */
+ int port; /**< The configured port, or CFG_AUTO_PORT to tell Tor to pick its
+ * own port. */
+ uint8_t type; /**< One of CONN_TYPE_*_LISTENER */
+ unsigned is_unix_addr : 1; /**< True iff this is an AF_UNIX address. */
+
+ unsigned is_group_writable : 1;
+ unsigned is_world_writable : 1;
+ unsigned relax_dirmode_check : 1;
+
+ entry_port_cfg_t entry_cfg;
+
+ server_port_cfg_t server_cfg;
+
+ /* Unix sockets only: */
+ /** Path for an AF_UNIX address */
+ char unix_addr[FLEXIBLE_ARRAY_MEMBER];
+};
+
+#endif /* !defined(PORT_CFG_ST_H) */
+
diff --git a/src/core/or/reasons.c b/src/core/or/reasons.c
new file mode 100644
index 0000000000..7d8dcf374c
--- /dev/null
+++ b/src/core/or/reasons.c
@@ -0,0 +1,497 @@
+/* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file reasons.c
+ * \brief Convert circuit, stream, and orconn error reasons to and/or from
+ * strings and errno values.
+ *
+ * This module is just a bunch of functions full of case statements that
+ * convert from one representation of our error codes to another. These are
+ * mainly used in generating log messages, in sending messages to the
+ * controller in control.c, and in converting errors from one protocol layer
+ * to another.
+ **/
+
+#include "or/or.h"
+#include "or/config.h"
+#include "or/reasons.h"
+#include "or/routerlist.h"
+#include "lib/tls/tortls.h"
+
+/***************************** Edge (stream) reasons **********************/
+
+/** Convert the reason for ending a stream <b>reason</b> into the format used
+ * in STREAM events. Return NULL if the reason is unrecognized. */
+const char *
+stream_end_reason_to_control_string(int reason)
+{
+ reason &= END_STREAM_REASON_MASK;
+ switch (reason) {
+ case END_STREAM_REASON_MISC: return "MISC";
+ case END_STREAM_REASON_RESOLVEFAILED: return "RESOLVEFAILED";
+ case END_STREAM_REASON_CONNECTREFUSED: return "CONNECTREFUSED";
+ case END_STREAM_REASON_EXITPOLICY: return "EXITPOLICY";
+ case END_STREAM_REASON_DESTROY: return "DESTROY";
+ case END_STREAM_REASON_DONE: return "DONE";
+ case END_STREAM_REASON_TIMEOUT: return "TIMEOUT";
+ case END_STREAM_REASON_NOROUTE: return "NOROUTE";
+ case END_STREAM_REASON_HIBERNATING: return "HIBERNATING";
+ case END_STREAM_REASON_INTERNAL: return "INTERNAL";
+ case END_STREAM_REASON_RESOURCELIMIT: return "RESOURCELIMIT";
+ case END_STREAM_REASON_CONNRESET: return "CONNRESET";
+ case END_STREAM_REASON_TORPROTOCOL: return "TORPROTOCOL";
+ case END_STREAM_REASON_NOTDIRECTORY: return "NOTDIRECTORY";
+
+ case END_STREAM_REASON_CANT_ATTACH: return "CANT_ATTACH";
+ case END_STREAM_REASON_NET_UNREACHABLE: return "NET_UNREACHABLE";
+ case END_STREAM_REASON_SOCKSPROTOCOL: return "SOCKS_PROTOCOL";
+ // XXXX Controlspec
+ case END_STREAM_REASON_HTTPPROTOCOL: return "HTTP_PROTOCOL";
+
+ case END_STREAM_REASON_PRIVATE_ADDR: return "PRIVATE_ADDR";
+
+ default: return NULL;
+ }
+}
+
+/** Translate <b>reason</b>, which came from a relay 'end' cell,
+ * into a static const string describing why the stream is closing.
+ * <b>reason</b> is -1 if no reason was provided.
+ */
+const char *
+stream_end_reason_to_string(int reason)
+{
+ switch (reason) {
+ case -1:
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "End cell arrived with length 0. Should be at least 1.");
+ return "MALFORMED";
+ case END_STREAM_REASON_MISC: return "misc error";
+ case END_STREAM_REASON_RESOLVEFAILED: return "resolve failed";
+ case END_STREAM_REASON_CONNECTREFUSED: return "connection refused";
+ case END_STREAM_REASON_EXITPOLICY: return "exit policy failed";
+ case END_STREAM_REASON_DESTROY: return "destroyed";
+ case END_STREAM_REASON_DONE: return "closed normally";
+ case END_STREAM_REASON_TIMEOUT: return "gave up (timeout)";
+ case END_STREAM_REASON_NOROUTE: return "no route to host";
+ case END_STREAM_REASON_HIBERNATING: return "server is hibernating";
+ case END_STREAM_REASON_INTERNAL: return "internal error at server";
+ case END_STREAM_REASON_RESOURCELIMIT: return "server out of resources";
+ case END_STREAM_REASON_CONNRESET: return "connection reset";
+ case END_STREAM_REASON_TORPROTOCOL: return "Tor protocol error";
+ case END_STREAM_REASON_NOTDIRECTORY: return "not a directory";
+ default:
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Reason for ending (%d) not recognized.",reason);
+ return "unknown";
+ }
+}
+
+/** Translate <b>reason</b> (as from a relay 'end' cell) into an
+ * appropriate SOCKS5 reply code.
+ *
+ * A reason of 0 means that we're not actually expecting to send
+ * this code back to the socks client; we just call it 'succeeded'
+ * to keep things simple.
+ */
+socks5_reply_status_t
+stream_end_reason_to_socks5_response(int reason)
+{
+ switch (reason & END_STREAM_REASON_MASK) {
+ case 0:
+ return SOCKS5_SUCCEEDED;
+ case END_STREAM_REASON_MISC:
+ return SOCKS5_GENERAL_ERROR;
+ case END_STREAM_REASON_RESOLVEFAILED:
+ return SOCKS5_HOST_UNREACHABLE;
+ case END_STREAM_REASON_CONNECTREFUSED:
+ return SOCKS5_CONNECTION_REFUSED;
+ case END_STREAM_REASON_ENTRYPOLICY:
+ return SOCKS5_NOT_ALLOWED;
+ case END_STREAM_REASON_EXITPOLICY:
+ return SOCKS5_NOT_ALLOWED;
+ case END_STREAM_REASON_DESTROY:
+ return SOCKS5_GENERAL_ERROR;
+ case END_STREAM_REASON_DONE:
+ /* Note that 'DONE' usually indicates a successful close from the other
+ * side of the stream... but if we receive it before a connected cell --
+ * that is, before we have sent a SOCKS reply -- that means that the
+ * other side of the circuit closed the connection before telling us it
+ * was complete. */
+ return SOCKS5_CONNECTION_REFUSED;
+ case END_STREAM_REASON_TIMEOUT:
+ return SOCKS5_TTL_EXPIRED;
+ case END_STREAM_REASON_NOROUTE:
+ return SOCKS5_HOST_UNREACHABLE;
+ case END_STREAM_REASON_RESOURCELIMIT:
+ return SOCKS5_GENERAL_ERROR;
+ case END_STREAM_REASON_HIBERNATING:
+ return SOCKS5_GENERAL_ERROR;
+ case END_STREAM_REASON_INTERNAL:
+ return SOCKS5_GENERAL_ERROR;
+ case END_STREAM_REASON_CONNRESET:
+ return SOCKS5_CONNECTION_REFUSED;
+ case END_STREAM_REASON_TORPROTOCOL:
+ return SOCKS5_GENERAL_ERROR;
+
+ case END_STREAM_REASON_CANT_ATTACH:
+ return SOCKS5_GENERAL_ERROR;
+ case END_STREAM_REASON_NET_UNREACHABLE:
+ return SOCKS5_NET_UNREACHABLE;
+ case END_STREAM_REASON_SOCKSPROTOCOL:
+ return SOCKS5_GENERAL_ERROR;
+ case END_STREAM_REASON_HTTPPROTOCOL:
+ // LCOV_EXCL_START
+ tor_assert_nonfatal_unreached();
+ return SOCKS5_GENERAL_ERROR;
+ // LCOV_EXCL_STOP
+ case END_STREAM_REASON_PRIVATE_ADDR:
+ return SOCKS5_GENERAL_ERROR;
+
+ default:
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Reason for ending (%d) not recognized; "
+ "sending generic socks error.", reason);
+ return SOCKS5_GENERAL_ERROR;
+ }
+}
+
+/* We need to use a few macros to deal with the fact that Windows
+ * decided that their sockets interface should be a permakludge.
+ * E_CASE is for errors where windows has both a EFOO and a WSAEFOO
+ * version, and S_CASE is for errors where windows has only a WSAEFOO
+ * version. (The E is for 'error', the S is for 'socket'). */
+#ifdef _WIN32
+#define E_CASE(s) case s: case WSA ## s
+#define S_CASE(s) case WSA ## s
+#else
+#define E_CASE(s) case s
+#define S_CASE(s) case s
+#endif /* defined(_WIN32) */
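+
+/* For example, on Windows "E_CASE(EINVAL):" expands to
+ * "case EINVAL: case WSAEINVAL:", so both the CRT and Winsock spellings
+ * of an error select the same branch. */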
+
+/** Given an errno from a failed exit connection, return a reason code
+ * appropriate for use in a RELAY END cell. */
+uint8_t
+errno_to_stream_end_reason(int e)
+{
+ /* To add new errors here, find out if they exist on Windows, and if a WSA*
+ * equivalent exists on windows. Add a case, an S_CASE, or an E_CASE as
+ * appropriate. */
+ switch (e) {
+ case EPIPE:
+ return END_STREAM_REASON_DONE;
+ E_CASE(EBADF):
+ E_CASE(EFAULT):
+ E_CASE(EINVAL):
+ S_CASE(EISCONN):
+ S_CASE(ENOTSOCK):
+ S_CASE(EPROTONOSUPPORT):
+ S_CASE(EAFNOSUPPORT):
+ S_CASE(ENOTCONN):
+ return END_STREAM_REASON_INTERNAL;
+ S_CASE(ENETUNREACH):
+ S_CASE(EHOSTUNREACH):
+ E_CASE(EACCES):
+ case EPERM:
+ return END_STREAM_REASON_NOROUTE;
+ S_CASE(ECONNREFUSED):
+ return END_STREAM_REASON_CONNECTREFUSED;
+ S_CASE(ECONNRESET):
+ return END_STREAM_REASON_CONNRESET;
+ S_CASE(ETIMEDOUT):
+ return END_STREAM_REASON_TIMEOUT;
+ S_CASE(ENOBUFS):
+ case ENOMEM:
+ case ENFILE:
+ S_CASE(EADDRINUSE):
+ S_CASE(EADDRNOTAVAIL):
+ E_CASE(EMFILE):
+ return END_STREAM_REASON_RESOURCELIMIT;
+ default:
+ log_info(LD_EXIT, "Didn't recognize errno %d (%s); telling the client "
+ "that we are ending a stream for 'misc' reason.",
+ e, tor_socket_strerror(e));
+ return END_STREAM_REASON_MISC;
+ }
+}
+
+/***************************** ORConn reasons *****************************/
+
+/** Convert the reason for ending an OR connection <b>r</b> into the format
+ * used in ORCONN events. Return "UNKNOWN" if the reason is unrecognized. */
+const char *
+orconn_end_reason_to_control_string(int r)
+{
+ switch (r) {
+ case END_OR_CONN_REASON_DONE:
+ return "DONE";
+ case END_OR_CONN_REASON_REFUSED:
+ return "CONNECTREFUSED";
+ case END_OR_CONN_REASON_OR_IDENTITY:
+ return "IDENTITY";
+ case END_OR_CONN_REASON_CONNRESET:
+ return "CONNECTRESET";
+ case END_OR_CONN_REASON_TIMEOUT:
+ return "TIMEOUT";
+ case END_OR_CONN_REASON_NO_ROUTE:
+ return "NOROUTE";
+ case END_OR_CONN_REASON_IO_ERROR:
+ return "IOERROR";
+ case END_OR_CONN_REASON_RESOURCE_LIMIT:
+ return "RESOURCELIMIT";
+ case END_OR_CONN_REASON_MISC:
+ return "MISC";
+ case END_OR_CONN_REASON_PT_MISSING:
+ return "PT_MISSING";
+ case 0:
+ return "";
+ default:
+ log_warn(LD_BUG, "Unrecognized or_conn reason code %d", r);
+ return "UNKNOWN";
+ }
+}
+
+/** Convert a TOR_TLS_* error code into an END_OR_CONN_* reason. */
+int
+tls_error_to_orconn_end_reason(int e)
+{
+ switch (e) {
+ case TOR_TLS_ERROR_IO:
+ return END_OR_CONN_REASON_IO_ERROR;
+ case TOR_TLS_ERROR_CONNREFUSED:
+ return END_OR_CONN_REASON_REFUSED;
+ case TOR_TLS_ERROR_CONNRESET:
+ return END_OR_CONN_REASON_CONNRESET;
+ case TOR_TLS_ERROR_NO_ROUTE:
+ return END_OR_CONN_REASON_NO_ROUTE;
+ case TOR_TLS_ERROR_TIMEOUT:
+ return END_OR_CONN_REASON_TIMEOUT;
+ case TOR_TLS_WANTREAD:
+ case TOR_TLS_WANTWRITE:
+ case TOR_TLS_CLOSE:
+ case TOR_TLS_DONE:
+ return END_OR_CONN_REASON_DONE;
+ default:
+ return END_OR_CONN_REASON_MISC;
+ }
+}
+
+/** Given an errno from a failed ORConn connection, return a reason code
+ * appropriate for use in the controller orconn events. */
+int
+errno_to_orconn_end_reason(int e)
+{
+ switch (e) {
+ case EPIPE:
+ return END_OR_CONN_REASON_DONE;
+ S_CASE(ENOTCONN):
+ S_CASE(ENETUNREACH):
+ S_CASE(ENETDOWN):
+ S_CASE(EHOSTUNREACH):
+ return END_OR_CONN_REASON_NO_ROUTE;
+ S_CASE(ECONNREFUSED):
+ return END_OR_CONN_REASON_REFUSED;
+ S_CASE(ECONNRESET):
+ return END_OR_CONN_REASON_CONNRESET;
+ S_CASE(ETIMEDOUT):
+ return END_OR_CONN_REASON_TIMEOUT;
+ S_CASE(ENOBUFS):
+ case ENOMEM:
+ case ENFILE:
+ E_CASE(EMFILE):
+ E_CASE(EACCES):
+ E_CASE(EBADF):
+ E_CASE(EFAULT):
+ E_CASE(EINVAL):
+ return END_OR_CONN_REASON_RESOURCE_LIMIT;
+ default:
+ log_info(LD_OR, "Didn't recognize errno %d (%s).",
+ e, tor_socket_strerror(e));
+ return END_OR_CONN_REASON_MISC;
+ }
+}
+
+/***************************** Circuit reasons *****************************/
+
+/** Convert a numeric reason for destroying a circuit into a string for a
+ * CIRCUIT event. */
+const char *
+circuit_end_reason_to_control_string(int reason)
+{
+ int is_remote = 0;
+
+ if (reason >= 0 && reason & END_CIRC_REASON_FLAG_REMOTE) {
+ reason &= ~END_CIRC_REASON_FLAG_REMOTE;
+ is_remote = 1;
+ }
+
+ switch (reason) {
+ case END_CIRC_AT_ORIGIN:
+ /* This shouldn't get passed here; it's a catch-all reason. */
+ return "ORIGIN";
+ case END_CIRC_REASON_NONE:
+ /* This shouldn't get passed here; it's a catch-all reason. */
+ return "NONE";
+ case END_CIRC_REASON_TORPROTOCOL:
+ return "TORPROTOCOL";
+ case END_CIRC_REASON_INTERNAL:
+ return "INTERNAL";
+ case END_CIRC_REASON_REQUESTED:
+ return "REQUESTED";
+ case END_CIRC_REASON_HIBERNATING:
+ return "HIBERNATING";
+ case END_CIRC_REASON_RESOURCELIMIT:
+ return "RESOURCELIMIT";
+ case END_CIRC_REASON_CONNECTFAILED:
+ return "CONNECTFAILED";
+ case END_CIRC_REASON_OR_IDENTITY:
+ return "OR_IDENTITY";
+ case END_CIRC_REASON_CHANNEL_CLOSED:
+ return "CHANNEL_CLOSED";
+ case END_CIRC_REASON_FINISHED:
+ return "FINISHED";
+ case END_CIRC_REASON_TIMEOUT:
+ return "TIMEOUT";
+ case END_CIRC_REASON_DESTROYED:
+ return "DESTROYED";
+ case END_CIRC_REASON_NOPATH:
+ return "NOPATH";
+ case END_CIRC_REASON_NOSUCHSERVICE:
+ return "NOSUCHSERVICE";
+ case END_CIRC_REASON_MEASUREMENT_EXPIRED:
+ return "MEASUREMENT_EXPIRED";
+ case END_CIRC_REASON_IP_NOW_REDUNDANT:
+ return "IP_NOW_REDUNDANT";
+ default:
+ if (is_remote) {
+ /*
+ * If it's remote, it's not a bug *here*, so don't use LD_BUG, but
+ * do note that the someone we're talking to is speaking the Tor
+ * protocol with a weird accent.
+ */
+ log_warn(LD_PROTOCOL,
+ "Remote server sent bogus reason code %d", reason);
+ } else {
+ log_warn(LD_BUG,
+ "Unrecognized reason code %d", reason);
+ }
+ return NULL;
+ }
+}
+
+/** Return a string corresponding to a SOCKS4 response code. */
+const char *
+socks4_response_code_to_string(uint8_t code)
+{
+ switch (code) {
+ case 0x5a:
+ return "connection accepted";
+ case 0x5b:
+ return "server rejected connection";
+ case 0x5c:
+ return "server cannot connect to identd on this client";
+ case 0x5d:
+ return "user id does not match identd";
+ default:
+ return "invalid SOCKS 4 response code";
+ }
+}
+
+/** Return a string corresponding to a SOCKS5 response code. */
+const char *
+socks5_response_code_to_string(uint8_t code)
+{
+ switch (code) {
+ case 0x00:
+ return "connection accepted";
+ case 0x01:
+ return "general SOCKS server failure";
+ case 0x02:
+ return "connection not allowed by ruleset";
+ case 0x03:
+ return "Network unreachable";
+ case 0x04:
+ return "Host unreachable";
+ case 0x05:
+ return "Connection refused";
+ case 0x06:
+ return "TTL expired";
+ case 0x07:
+ return "Command not supported";
+ case 0x08:
+ return "Address type not supported";
+ default:
+ return "unknown reason";
+ }
+}
+
+/** Return a string corresponding to a bandwidth_weight_rule_t */
+const char *
+bandwidth_weight_rule_to_string(bandwidth_weight_rule_t rule)
+{
+ switch (rule)
+ {
+ case NO_WEIGHTING:
+ return "no weighting";
+ case WEIGHT_FOR_EXIT:
+ return "weight as exit";
+ case WEIGHT_FOR_MID:
+ return "weight as middle node";
+ case WEIGHT_FOR_GUARD:
+ return "weight as guard";
+ case WEIGHT_FOR_DIR:
+ return "weight as directory";
+ default:
+ return "unknown rule";
+ }
+}
+
+/** Given a RELAY_END reason value, convert it to an HTTP response to be
+ * sent over an HTTP tunnel connection. */
+const char *
+end_reason_to_http_connect_response_line(int endreason)
+{
+ endreason &= END_STREAM_REASON_MASK;
+ /* XXXX these are probably all wrong. Should they all be 502? */
+ switch (endreason) {
+ case 0:
+ return "HTTP/1.0 200 OK\r\n\r\n";
+ case END_STREAM_REASON_MISC:
+ return "HTTP/1.0 500 Internal Server Error\r\n\r\n";
+ case END_STREAM_REASON_RESOLVEFAILED:
+ return "HTTP/1.0 404 Not Found (resolve failed)\r\n\r\n";
+ case END_STREAM_REASON_NOROUTE:
+ return "HTTP/1.0 404 Not Found (no route)\r\n\r\n";
+ case END_STREAM_REASON_CONNECTREFUSED:
+ return "HTTP/1.0 403 Forbidden (connection refused)\r\n\r\n";
+ case END_STREAM_REASON_EXITPOLICY:
+ return "HTTP/1.0 403 Forbidden (exit policy)\r\n\r\n";
+ case END_STREAM_REASON_DESTROY:
+ return "HTTP/1.0 502 Bad Gateway (destroy cell received)\r\n\r\n";
+ case END_STREAM_REASON_DONE:
+ return "HTTP/1.0 502 Bad Gateway (unexpected close)\r\n\r\n";
+ case END_STREAM_REASON_TIMEOUT:
+ return "HTTP/1.0 504 Gateway Timeout\r\n\r\n";
+ case END_STREAM_REASON_HIBERNATING:
+ return "HTTP/1.0 502 Bad Gateway (hibernating server)\r\n\r\n";
+ case END_STREAM_REASON_INTERNAL:
+ return "HTTP/1.0 502 Bad Gateway (internal error)\r\n\r\n";
+ case END_STREAM_REASON_RESOURCELIMIT:
+ return "HTTP/1.0 502 Bad Gateway (resource limit)\r\n\r\n";
+ case END_STREAM_REASON_CONNRESET:
+ return "HTTP/1.0 403 Forbidden (connection reset)\r\n\r\n";
+ case END_STREAM_REASON_TORPROTOCOL:
+ return "HTTP/1.0 502 Bad Gateway (tor protocol violation)\r\n\r\n";
+ case END_STREAM_REASON_ENTRYPOLICY:
+ return "HTTP/1.0 403 Forbidden (entry policy violation)\r\n\r\n";
+ case END_STREAM_REASON_NOTDIRECTORY: /* Fall Through */
+ default:
+ tor_assert_nonfatal_unreached();
+ return "HTTP/1.0 500 Internal Server Error (weird end reason)\r\n\r\n";
+ }
+}
diff --git a/src/core/or/reasons.h b/src/core/or/reasons.h
new file mode 100644
index 0000000000..837b4a0f1a
--- /dev/null
+++ b/src/core/or/reasons.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file reasons.h
+ * \brief Header file for reasons.c.
+ **/
+
+#ifndef TOR_REASONS_H
+#define TOR_REASONS_H
+
+#include "lib/net/socks5_status.h"
+enum bandwidth_weight_rule_t;
+
+const char *stream_end_reason_to_control_string(int reason);
+const char *stream_end_reason_to_string(int reason);
+socks5_reply_status_t stream_end_reason_to_socks5_response(int reason);
+uint8_t errno_to_stream_end_reason(int e);
+
+const char *orconn_end_reason_to_control_string(int r);
+int tls_error_to_orconn_end_reason(int e);
+int errno_to_orconn_end_reason(int e);
+
+const char *circuit_end_reason_to_control_string(int reason);
+const char *socks4_response_code_to_string(uint8_t code);
+const char *socks5_response_code_to_string(uint8_t code);
+
+const char *bandwidth_weight_rule_to_string(enum bandwidth_weight_rule_t rule);
+const char *end_reason_to_http_connect_response_line(int endreason);
+
+#endif /* !defined(TOR_REASONS_H) */
diff --git a/src/core/or/relay.c b/src/core/or/relay.c
new file mode 100644
index 0000000000..e8e1762b40
--- /dev/null
+++ b/src/core/or/relay.c
@@ -0,0 +1,3088 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file relay.c
+ * \brief Handle relay cell encryption/decryption, plus packaging and
+ * receiving from circuits, plus queuing on circuits.
+ *
+ * This is a core module that makes Tor work. It's responsible for
+ * dealing with RELAY cells (the ones that travel more than one hop along a
+ * circuit), by:
+ * <ul>
+ * <li>constructing relay cells,
+ * <li>encrypting relay cells,
+ * <li>decrypting relay cells,
+ * <li>demultiplexing relay cells as they arrive on a connection,
+ * <li>queueing relay cells for retransmission,
+ * <li>or handling relay cells that are for us to receive (as an exit or a
+ * client).
+ * </ul>
+ *
+ * RELAY cells are generated throughout the code at the client or relay side,
+ * using relay_send_command_from_edge() or one of the functions like
+ * connection_edge_send_command() that calls it. Of particular interest is
+ * connection_edge_package_raw_inbuf(), which takes information that has
+ * arrived on an edge connection socket, and packages it as a RELAY_DATA cell
+ * -- this is how information is actually sent across the Tor network. The
+ * cryptography for these functions is handled deep in
+ * circuit_package_relay_cell(), which either adds a single layer of
+ * encryption (if we're an exit), or multiple layers (if we're the origin of
+ * the circuit). After construction and encryption, the RELAY cells are
+ * passed to append_cell_to_circuit_queue(), which queues them for
+ * transmission and tells the circuitmux (see circuitmux.c) that the circuit
+ * is waiting to send something.
+ *
+ * Incoming RELAY cells arrive at circuit_receive_relay_cell(), called from
+ * command.c. There they are decrypted and, if they are for us, are passed to
+ * connection_edge_process_relay_cell(). If they're not for us, they're
+ * re-queued for transmission with append_cell_to_circuit_queue().
+ *
+ * The connection_edge_process_relay_cell() function handles all the different
+ * types of relay cells, launching requests or transmitting data as needed.
+ **/
+
+#define RELAY_PRIVATE
+#include "or/or.h"
+#include "or/addressmap.h"
+#include "lib/err/backtrace.h"
+#include "lib/container/buffers.h"
+#include "or/channel.h"
+#include "or/circpathbias.h"
+#include "or/circuitbuild.h"
+#include "or/circuitlist.h"
+#include "or/circuituse.h"
+#include "lib/compress/compress.h"
+#include "or/config.h"
+#include "or/connection.h"
+#include "or/connection_edge.h"
+#include "or/connection_or.h"
+#include "or/control.h"
+#include "lib/crypt_ops/crypto_rand.h"
+#include "lib/crypt_ops/crypto_util.h"
+#include "or/directory.h"
+#include "or/geoip.h"
+#include "or/hs_cache.h"
+#include "or/main.h"
+#include "or/networkstatus.h"
+#include "or/nodelist.h"
+#include "or/onion.h"
+#include "or/policies.h"
+#include "or/reasons.h"
+#include "or/relay.h"
+#include "or/relay_crypto.h"
+#include "or/rendcache.h"
+#include "or/rendcommon.h"
+#include "or/router.h"
+#include "or/routerlist.h"
+#include "or/routerparse.h"
+#include "or/scheduler.h"
+#include "or/rephist.h"
+
+#include "or/cell_st.h"
+#include "or/cell_queue_st.h"
+#include "or/cpath_build_state_st.h"
+#include "or/dir_connection_st.h"
+#include "or/destroy_cell_queue_st.h"
+#include "or/entry_connection_st.h"
+#include "or/extend_info_st.h"
+#include "or/or_circuit_st.h"
+#include "or/origin_circuit_st.h"
+#include "or/routerinfo_st.h"
+#include "or/socks_request_st.h"
+
+#include "lib/intmath/weakrng.h"
+
+static edge_connection_t *relay_lookup_conn(circuit_t *circ, cell_t *cell,
+ cell_direction_t cell_direction,
+ crypt_path_t *layer_hint);
+
+static void circuit_consider_sending_sendme(circuit_t *circ,
+ crypt_path_t *layer_hint);
+static void circuit_resume_edge_reading(circuit_t *circ,
+ crypt_path_t *layer_hint);
+static int circuit_resume_edge_reading_helper(edge_connection_t *conn,
+ circuit_t *circ,
+ crypt_path_t *layer_hint);
+static int circuit_consider_stop_edge_reading(circuit_t *circ,
+ crypt_path_t *layer_hint);
+static int circuit_queue_streams_are_blocked(circuit_t *circ);
+static void adjust_exit_policy_from_exitpolicy_failure(origin_circuit_t *circ,
+ entry_connection_t *conn,
+ node_t *node,
+ const tor_addr_t *addr);
+
+/** Stop reading on edge connections when we have this many cells
+ * waiting on the appropriate queue. */
+#define CELL_QUEUE_HIGHWATER_SIZE 256
+/** Start reading from edge connections again when we get down to this many
+ * cells. */
+#define CELL_QUEUE_LOWWATER_SIZE 64
+
+/** Stats: how many relay cells have originated at this hop, or have
+ * been relayed onward (not recognized at this hop)?
+ */
+uint64_t stats_n_relay_cells_relayed = 0;
+/** Stats: how many relay cells have been delivered to streams at this
+ * hop?
+ */
+uint64_t stats_n_relay_cells_delivered = 0;
+/** Stats: how many circuits have we closed due to the cell queue limit being
+ * reached (see append_cell_to_circuit_queue()) */
+uint64_t stats_n_circ_max_cell_reached = 0;
+
+/** Used to tell which stream to read from first on a circuit. */
+static tor_weak_rng_t stream_choice_rng = TOR_WEAK_RNG_INIT;
+
+/**
+ * Update channel usage state based on the type of relay cell and
+ * circuit properties.
+ *
+ * This is needed to determine if a client channel is being
+ * used for application traffic, and if a relay channel is being
+ * used for multihop circuits and application traffic. The decision
+ * to pad in channelpadding.c depends upon this info (as well as
+ * consensus parameters) to decide what channels to pad.
+ */
+static void
+circuit_update_channel_usage(circuit_t *circ, cell_t *cell)
+{
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ /*
+ * The client state was first set much earlier in
+ * circuit_send_next_onion_skin(), so we can start padding as early as
+ * possible.
+ *
+ * However, if padding turns out to be expensive, we may want to not do
+ * it until actual application traffic starts flowing (which is controlled
+ * via consensus param nf_pad_before_usage).
+ *
+ * So: If we're an origin circuit and we've created a full length circuit,
+ * then any CELL_RELAY cell means application data. Increase the usage
+ * state of the channel to indicate this.
+ *
+ * We want to wait for CELL_RELAY specifically here, so we know that
+ * the channel was definitely being used for data and not for extends.
+ * By default, we pad as soon as a channel has been used for *any*
+ * circuits, so this state is irrelevant to the padding decision in
+ * the default case. However, if padding turns out to be expensive,
+ * we would like the ability to avoid padding until we're absolutely
+ * sure that a channel is used for enough application data to be worth
+ * padding.
+ *
+     * (So it does not matter that CELL_RELAY_EARLY can actually contain
+     * application data. This is only a load-reducing option, and that edge
+     * case does not matter if we're desperately trying to reduce overhead
+     * anyway. See also the consensus parameter nf_pad_before_usage.)
+ */
+ if (BUG(!circ->n_chan))
+ return;
+
+ if (circ->n_chan->channel_usage == CHANNEL_USED_FOR_FULL_CIRCS &&
+ cell->command == CELL_RELAY) {
+ circ->n_chan->channel_usage = CHANNEL_USED_FOR_USER_TRAFFIC;
+ }
+ } else {
+ /* If we're a relay circuit, the question is more complicated. Basically:
+ * we only want to pad connections that carry multihop (anonymous)
+ * circuits.
+ *
+ * We assume we're more than one hop if either the previous hop
+ * is not a client, or if the previous hop is a client and there's
+ * a next hop. Then, circuit traffic starts at RELAY_EARLY, and
+ * user application traffic starts when we see RELAY cells.
+ */
+ or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
+
+ if (BUG(!or_circ->p_chan))
+ return;
+
+ if (!channel_is_client(or_circ->p_chan) ||
+ (channel_is_client(or_circ->p_chan) && circ->n_chan)) {
+ if (cell->command == CELL_RELAY_EARLY) {
+ if (or_circ->p_chan->channel_usage < CHANNEL_USED_FOR_FULL_CIRCS) {
+ or_circ->p_chan->channel_usage = CHANNEL_USED_FOR_FULL_CIRCS;
+ }
+ } else if (cell->command == CELL_RELAY) {
+ or_circ->p_chan->channel_usage = CHANNEL_USED_FOR_USER_TRAFFIC;
+ }
+ }
+ }
+}
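+
+/* In short: channel_usage only ever moves upward here. On origin
+ * circuits (where CHANNEL_USED_FOR_FULL_CIRCS was set while building),
+ * the first RELAY cell upgrades the channel to
+ * CHANNEL_USED_FOR_USER_TRAFFIC; on relay circuits carrying multihop
+ * traffic, RELAY_EARLY cells mark the channel as used for full circuits
+ * and RELAY cells as used for user traffic. channelpadding.c consults
+ * this state when deciding which channels to pad. */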
+
+/** Receive a relay cell:
+ * - Crypt it (encrypt if headed toward the origin or if we <b>are</b> the
+ * origin; decrypt if we're headed toward the exit).
+ * - Check if recognized (if exitward).
+ * - If recognized and the digest checks out, then find if there's a stream
+ * that the cell is intended for, and deliver it to the right
+ * connection_edge.
+ * - If not recognized, then we need to relay it: append it to the appropriate
+ * cell_queue on <b>circ</b>.
+ *
+ * Return -<b>reason</b> on failure.
+ */
+int
+circuit_receive_relay_cell(cell_t *cell, circuit_t *circ,
+ cell_direction_t cell_direction)
+{
+ channel_t *chan = NULL;
+ crypt_path_t *layer_hint=NULL;
+ char recognized=0;
+ int reason;
+
+ tor_assert(cell);
+ tor_assert(circ);
+ tor_assert(cell_direction == CELL_DIRECTION_OUT ||
+ cell_direction == CELL_DIRECTION_IN);
+ if (circ->marked_for_close)
+ return 0;
+
+ if (relay_decrypt_cell(circ, cell, cell_direction, &layer_hint, &recognized)
+ < 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "relay crypt failed. Dropping connection.");
+ return -END_CIRC_REASON_INTERNAL;
+ }
+
+ circuit_update_channel_usage(circ, cell);
+
+ if (recognized) {
+ edge_connection_t *conn = NULL;
+
+ if (circ->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING) {
+ pathbias_check_probe_response(circ, cell);
+
+ /* We need to drop this cell no matter what to avoid code that expects
+ * a certain purpose (such as the hidserv code). */
+ return 0;
+ }
+
+ conn = relay_lookup_conn(circ, cell, cell_direction, layer_hint);
+ if (cell_direction == CELL_DIRECTION_OUT) {
+ ++stats_n_relay_cells_delivered;
+ log_debug(LD_OR,"Sending away from origin.");
+ if ((reason=connection_edge_process_relay_cell(cell, circ, conn, NULL))
+ < 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "connection_edge_process_relay_cell (away from origin) "
+ "failed.");
+ return reason;
+ }
+ }
+ if (cell_direction == CELL_DIRECTION_IN) {
+ ++stats_n_relay_cells_delivered;
+ log_debug(LD_OR,"Sending to origin.");
+ if ((reason = connection_edge_process_relay_cell(cell, circ, conn,
+ layer_hint)) < 0) {
+        /* If a client is trying to connect to an unknown hidden service
+         * port, END_CIRC_AT_ORIGIN is sent back so we can then close the
+         * circuit. Don't log a warning, since this is expected behavior
+         * for a service. */
+ if (reason != END_CIRC_AT_ORIGIN) {
+ log_warn(LD_OR,
+ "connection_edge_process_relay_cell (at origin) failed.");
+ }
+ return reason;
+ }
+ }
+ return 0;
+ }
+
+ /* not recognized. pass it on. */
+ if (cell_direction == CELL_DIRECTION_OUT) {
+ cell->circ_id = circ->n_circ_id; /* switch it */
+ chan = circ->n_chan;
+ } else if (! CIRCUIT_IS_ORIGIN(circ)) {
+ cell->circ_id = TO_OR_CIRCUIT(circ)->p_circ_id; /* switch it */
+ chan = TO_OR_CIRCUIT(circ)->p_chan;
+ } else {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Dropping unrecognized inbound cell on origin circuit.");
+ /* If we see unrecognized cells on path bias testing circs,
+ * it's bad mojo. Those circuits need to die.
+ * XXX: Shouldn't they always die? */
+ if (circ->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING) {
+ TO_ORIGIN_CIRCUIT(circ)->path_state = PATH_STATE_USE_FAILED;
+ return -END_CIRC_REASON_TORPROTOCOL;
+ } else {
+ return 0;
+ }
+ }
+
+ if (!chan) {
+ // XXXX Can this splice stuff be done more cleanly?
+ if (! CIRCUIT_IS_ORIGIN(circ) &&
+ TO_OR_CIRCUIT(circ)->rend_splice &&
+ cell_direction == CELL_DIRECTION_OUT) {
+ or_circuit_t *splice_ = TO_OR_CIRCUIT(circ)->rend_splice;
+ tor_assert(circ->purpose == CIRCUIT_PURPOSE_REND_ESTABLISHED);
+ tor_assert(splice_->base_.purpose == CIRCUIT_PURPOSE_REND_ESTABLISHED);
+ cell->circ_id = splice_->p_circ_id;
+ cell->command = CELL_RELAY; /* can't be relay_early anyway */
+ if ((reason = circuit_receive_relay_cell(cell, TO_CIRCUIT(splice_),
+ CELL_DIRECTION_IN)) < 0) {
+ log_warn(LD_REND, "Error relaying cell across rendezvous; closing "
+ "circuits");
+ /* XXXX Do this here, or just return -1? */
+ circuit_mark_for_close(circ, -reason);
+ return reason;
+ }
+ return 0;
+ }
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Didn't recognize cell, but circ stops here! Closing circ.");
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+
+ log_debug(LD_OR,"Passing on unrecognized cell.");
+
+ ++stats_n_relay_cells_relayed; /* XXXX no longer quite accurate {cells}
+ * we might kill the circ before we relay
+ * the cells. */
+
+ append_cell_to_circuit_queue(circ, chan, cell, cell_direction, 0);
+ return 0;
+}
+
+/** Package a relay cell from an edge:
+ * - Encrypt it to the right layer
+ * - Append it to the appropriate cell_queue on <b>circ</b>.
+ */
+static int
+circuit_package_relay_cell(cell_t *cell, circuit_t *circ,
+ cell_direction_t cell_direction,
+ crypt_path_t *layer_hint, streamid_t on_stream,
+ const char *filename, int lineno)
+{
+ channel_t *chan; /* where to send the cell */
+
+ if (circ->marked_for_close) {
+ /* Circuit is marked; send nothing. */
+ return 0;
+ }
+
+ if (cell_direction == CELL_DIRECTION_OUT) {
+ chan = circ->n_chan;
+ if (!chan) {
+ log_warn(LD_BUG,"outgoing relay cell sent from %s:%d has n_chan==NULL."
+ " Dropping. Circuit is in state %s (%d), and is "
+ "%smarked for close. (%s:%d, %d)", filename, lineno,
+ circuit_state_to_string(circ->state), circ->state,
+ circ->marked_for_close ? "" : "not ",
+ circ->marked_for_close_file?circ->marked_for_close_file:"",
+ circ->marked_for_close, circ->marked_for_close_reason);
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ circuit_log_path(LOG_WARN, LD_BUG, TO_ORIGIN_CIRCUIT(circ));
+ }
+ log_backtrace(LOG_WARN,LD_BUG,"");
+ return 0; /* just drop it */
+ }
+ if (!CIRCUIT_IS_ORIGIN(circ)) {
+ log_warn(LD_BUG,"outgoing relay cell sent from %s:%d on non-origin "
+ "circ. Dropping.", filename, lineno);
+ log_backtrace(LOG_WARN,LD_BUG,"");
+ return 0; /* just drop it */
+ }
+
+ relay_encrypt_cell_outbound(cell, TO_ORIGIN_CIRCUIT(circ), layer_hint);
+
+ /* Update circ written totals for control port */
+ origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
+ ocirc->n_written_circ_bw = tor_add_u32_nowrap(ocirc->n_written_circ_bw,
+ CELL_PAYLOAD_SIZE);
+
+ } else { /* incoming cell */
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ /* We should never package an _incoming_ cell from the circuit
+ * origin; that means we messed up somewhere. */
+ log_warn(LD_BUG,"incoming relay cell at origin circuit. Dropping.");
+ assert_circuit_ok(circ);
+ return 0; /* just drop it */
+ }
+ or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
+ relay_encrypt_cell_inbound(cell, or_circ);
+ chan = or_circ->p_chan;
+ }
+ ++stats_n_relay_cells_relayed;
+
+ append_cell_to_circuit_queue(circ, chan, cell, cell_direction, on_stream);
+ return 0;
+}
+
+/** If cell's stream_id matches the stream_id of any conn that's
+ * attached to circ, return that conn, else return NULL.
+ */
+static edge_connection_t *
+relay_lookup_conn(circuit_t *circ, cell_t *cell,
+ cell_direction_t cell_direction, crypt_path_t *layer_hint)
+{
+ edge_connection_t *tmpconn;
+ relay_header_t rh;
+
+ relay_header_unpack(&rh, cell->payload);
+
+ if (!rh.stream_id)
+ return NULL;
+
+ /* IN or OUT cells could have come from either direction, now
+ * that we allow rendezvous *to* an OP.
+ */
+
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ for (tmpconn = TO_ORIGIN_CIRCUIT(circ)->p_streams; tmpconn;
+ tmpconn=tmpconn->next_stream) {
+ if (rh.stream_id == tmpconn->stream_id &&
+ !tmpconn->base_.marked_for_close &&
+ tmpconn->cpath_layer == layer_hint) {
+ log_debug(LD_APP,"found conn for stream %d.", rh.stream_id);
+ return tmpconn;
+ }
+ }
+ } else {
+ for (tmpconn = TO_OR_CIRCUIT(circ)->n_streams; tmpconn;
+ tmpconn=tmpconn->next_stream) {
+ if (rh.stream_id == tmpconn->stream_id &&
+ !tmpconn->base_.marked_for_close) {
+ log_debug(LD_EXIT,"found conn for stream %d.", rh.stream_id);
+ if (cell_direction == CELL_DIRECTION_OUT ||
+ connection_edge_is_rendezvous_stream(tmpconn))
+ return tmpconn;
+ }
+ }
+ for (tmpconn = TO_OR_CIRCUIT(circ)->resolving_streams; tmpconn;
+ tmpconn=tmpconn->next_stream) {
+ if (rh.stream_id == tmpconn->stream_id &&
+ !tmpconn->base_.marked_for_close) {
+ log_debug(LD_EXIT,"found conn for stream %d.", rh.stream_id);
+ return tmpconn;
+ }
+ }
+ }
+ return NULL; /* probably a begin relay cell */
+}
+
+/** Pack the relay_header_t host-order structure <b>src</b> into
+ * network-order in the buffer <b>dest</b>. See tor-spec.txt for details
+ * about the wire format.
+ */
+void
+relay_header_pack(uint8_t *dest, const relay_header_t *src)
+{
+ set_uint8(dest, src->command);
+ set_uint16(dest+1, htons(src->recognized));
+ set_uint16(dest+3, htons(src->stream_id));
+ memcpy(dest+5, src->integrity, 4);
+ set_uint16(dest+9, htons(src->length));
+}
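+
+/* For reference, the wire layout implied by the offsets above and in
+ * relay_header_unpack() below:
+ *
+ *    command       1 byte   offset 0
+ *    recognized    2 bytes  offset 1
+ *    stream_id     2 bytes  offset 3
+ *    integrity     4 bytes  offset 5
+ *    length        2 bytes  offset 9
+ *
+ * for RELAY_HEADER_SIZE bytes in total; tor-spec.txt has the
+ * authoritative description. */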
+
+/** Unpack the network-order buffer <b>src</b> into a host-order
+ * relay_header_t structure <b>dest</b>.
+ */
+void
+relay_header_unpack(relay_header_t *dest, const uint8_t *src)
+{
+ dest->command = get_uint8(src);
+ dest->recognized = ntohs(get_uint16(src+1));
+ dest->stream_id = ntohs(get_uint16(src+3));
+ memcpy(dest->integrity, src+5, 4);
+ dest->length = ntohs(get_uint16(src+9));
+}
+
+/** Convert the relay <b>command</b> into a human-readable string. */
+static const char *
+relay_command_to_string(uint8_t command)
+{
+ static char buf[64];
+ switch (command) {
+ case RELAY_COMMAND_BEGIN: return "BEGIN";
+ case RELAY_COMMAND_DATA: return "DATA";
+ case RELAY_COMMAND_END: return "END";
+ case RELAY_COMMAND_CONNECTED: return "CONNECTED";
+ case RELAY_COMMAND_SENDME: return "SENDME";
+ case RELAY_COMMAND_EXTEND: return "EXTEND";
+ case RELAY_COMMAND_EXTENDED: return "EXTENDED";
+ case RELAY_COMMAND_TRUNCATE: return "TRUNCATE";
+ case RELAY_COMMAND_TRUNCATED: return "TRUNCATED";
+ case RELAY_COMMAND_DROP: return "DROP";
+ case RELAY_COMMAND_RESOLVE: return "RESOLVE";
+ case RELAY_COMMAND_RESOLVED: return "RESOLVED";
+ case RELAY_COMMAND_BEGIN_DIR: return "BEGIN_DIR";
+ case RELAY_COMMAND_ESTABLISH_INTRO: return "ESTABLISH_INTRO";
+ case RELAY_COMMAND_ESTABLISH_RENDEZVOUS: return "ESTABLISH_RENDEZVOUS";
+ case RELAY_COMMAND_INTRODUCE1: return "INTRODUCE1";
+ case RELAY_COMMAND_INTRODUCE2: return "INTRODUCE2";
+ case RELAY_COMMAND_RENDEZVOUS1: return "RENDEZVOUS1";
+ case RELAY_COMMAND_RENDEZVOUS2: return "RENDEZVOUS2";
+ case RELAY_COMMAND_INTRO_ESTABLISHED: return "INTRO_ESTABLISHED";
+ case RELAY_COMMAND_RENDEZVOUS_ESTABLISHED:
+ return "RENDEZVOUS_ESTABLISHED";
+ case RELAY_COMMAND_INTRODUCE_ACK: return "INTRODUCE_ACK";
+ case RELAY_COMMAND_EXTEND2: return "EXTEND2";
+ case RELAY_COMMAND_EXTENDED2: return "EXTENDED2";
+ default:
+ tor_snprintf(buf, sizeof(buf), "Unrecognized relay command %u",
+ (unsigned)command);
+ return buf;
+ }
+}
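+
+/* Note that the default case above formats into a static buffer, so the
+ * string returned for an unrecognized command is only valid until the
+ * next such call; callers should use or copy it immediately. */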
+
+/** Make a relay cell out of <b>relay_command</b> and <b>payload</b>, and send
+ * it onto the open circuit <b>circ</b>. <b>stream_id</b> is the ID on
+ * <b>circ</b> for the stream that's sending the relay cell, or 0 if it's a
+ * control cell. <b>cpath_layer</b> is NULL for OR->OP cells, or the
+ * destination hop for OP->OR cells.
+ *
+ * If you can't send the cell, mark the circuit for close and return -1. Else
+ * return 0.
+ */
+MOCK_IMPL(int,
+relay_send_command_from_edge_,(streamid_t stream_id, circuit_t *circ,
+ uint8_t relay_command, const char *payload,
+ size_t payload_len, crypt_path_t *cpath_layer,
+ const char *filename, int lineno))
+{
+ cell_t cell;
+ relay_header_t rh;
+ cell_direction_t cell_direction;
+  /* XXXX NM Split this function into separate versions per circuit type? */
+
+ tor_assert(circ);
+ tor_assert(payload_len <= RELAY_PAYLOAD_SIZE);
+
+ memset(&cell, 0, sizeof(cell_t));
+ cell.command = CELL_RELAY;
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ tor_assert(cpath_layer);
+ cell.circ_id = circ->n_circ_id;
+ cell_direction = CELL_DIRECTION_OUT;
+ } else {
+ tor_assert(! cpath_layer);
+ cell.circ_id = TO_OR_CIRCUIT(circ)->p_circ_id;
+ cell_direction = CELL_DIRECTION_IN;
+ }
+
+ memset(&rh, 0, sizeof(rh));
+ rh.command = relay_command;
+ rh.stream_id = stream_id;
+ rh.length = payload_len;
+ relay_header_pack(cell.payload, &rh);
+ if (payload_len)
+ memcpy(cell.payload+RELAY_HEADER_SIZE, payload, payload_len);
+
+ log_debug(LD_OR,"delivering %d cell %s.", relay_command,
+ cell_direction == CELL_DIRECTION_OUT ? "forward" : "backward");
+
+ if (relay_command == RELAY_COMMAND_DROP)
+ rep_hist_padding_count_write(PADDING_TYPE_DROP);
+
+ /* If we are sending an END cell and this circuit is used for a tunneled
+ * directory request, advance its state. */
+ if (relay_command == RELAY_COMMAND_END && circ->dirreq_id)
+ geoip_change_dirreq_state(circ->dirreq_id, DIRREQ_TUNNELED,
+ DIRREQ_END_CELL_SENT);
+
+ if (cell_direction == CELL_DIRECTION_OUT && circ->n_chan) {
+ /* if we're using relaybandwidthrate, this conn wants priority */
+ channel_timestamp_client(circ->n_chan);
+ }
+
+ if (cell_direction == CELL_DIRECTION_OUT) {
+ origin_circuit_t *origin_circ = TO_ORIGIN_CIRCUIT(circ);
+ if (origin_circ->remaining_relay_early_cells > 0 &&
+ (relay_command == RELAY_COMMAND_EXTEND ||
+ relay_command == RELAY_COMMAND_EXTEND2 ||
+ cpath_layer != origin_circ->cpath)) {
+ /* If we've got any relay_early cells left and (we're sending
+ * an extend cell or we're not talking to the first hop), use
+ * one of them. Don't worry about the conn protocol version:
+ * append_cell_to_circuit_queue will fix it up. */
+ cell.command = CELL_RELAY_EARLY;
+ --origin_circ->remaining_relay_early_cells;
+ log_debug(LD_OR, "Sending a RELAY_EARLY cell; %d remaining.",
+ (int)origin_circ->remaining_relay_early_cells);
+      /* Memorize the command that is sent as a RELAY_EARLY cell; helps
+       * debug task 878. */
+ origin_circ->relay_early_commands[
+ origin_circ->relay_early_cells_sent++] = relay_command;
+ } else if (relay_command == RELAY_COMMAND_EXTEND ||
+ relay_command == RELAY_COMMAND_EXTEND2) {
+ /* If no RELAY_EARLY cells can be sent over this circuit, log which
+ * commands have been sent as RELAY_EARLY cells before; helps debug
+ * task 878. */
+ smartlist_t *commands_list = smartlist_new();
+ int i = 0;
+ char *commands = NULL;
+ for (; i < origin_circ->relay_early_cells_sent; i++)
+ smartlist_add(commands_list, (char *)
+ relay_command_to_string(origin_circ->relay_early_commands[i]));
+ commands = smartlist_join_strings(commands_list, ",", 0, NULL);
+ log_warn(LD_BUG, "Uh-oh. We're sending a RELAY_COMMAND_EXTEND cell, "
+ "but we have run out of RELAY_EARLY cells on that circuit. "
+ "Commands sent before: %s", commands);
+ tor_free(commands);
+ smartlist_free(commands_list);
+ }
+
+ /* Let's assume we're well-behaved: Anything that we decide to send is
+ * valid, delivered data. */
+ circuit_sent_valid_data(origin_circ, rh.length);
+ }
+
+ if (circuit_package_relay_cell(&cell, circ, cell_direction, cpath_layer,
+ stream_id, filename, lineno) < 0) {
+ log_warn(LD_BUG,"circuit_package_relay_cell failed. Closing.");
+ circuit_mark_for_close(circ, END_CIRC_REASON_INTERNAL);
+ return -1;
+ }
+ return 0;
+}
+
+/** Make a relay cell out of <b>relay_command</b> and <b>payload</b>, and
+ * send it onto the open circuit <b>circ</b>. <b>fromconn</b> is the stream
+ * that's sending the relay cell, or NULL if it's a control cell.
+ * <b>cpath_layer</b> is NULL for OR->OP cells, or the destination hop
+ * for OP->OR cells.
+ *
+ * If you can't send the cell, mark the circuit for close and
+ * return -1. Else return 0.
+ */
+int
+connection_edge_send_command(edge_connection_t *fromconn,
+ uint8_t relay_command, const char *payload,
+ size_t payload_len)
+{
+  /* XXXX NM Split this function into separate versions per circuit type? */
+  circuit_t *circ;
+  crypt_path_t *cpath_layer;
+  tor_assert(fromconn);
+  cpath_layer = fromconn->cpath_layer;
+  circ = fromconn->on_circuit;
+
+ if (fromconn->base_.marked_for_close) {
+ log_warn(LD_BUG,
+ "called on conn that's already marked for close at %s:%d.",
+ fromconn->base_.marked_for_close_file,
+ fromconn->base_.marked_for_close);
+ return 0;
+ }
+
+ if (!circ) {
+ if (fromconn->base_.type == CONN_TYPE_AP) {
+ log_info(LD_APP,"no circ. Closing conn.");
+ connection_mark_unattached_ap(EDGE_TO_ENTRY_CONN(fromconn),
+ END_STREAM_REASON_INTERNAL);
+ } else {
+ log_info(LD_EXIT,"no circ. Closing conn.");
+ fromconn->edge_has_sent_end = 1; /* no circ to send to */
+ fromconn->end_reason = END_STREAM_REASON_INTERNAL;
+ connection_mark_for_close(TO_CONN(fromconn));
+ }
+ return -1;
+ }
+
+ if (circ->marked_for_close) {
+ /* The circuit has been marked, but not freed yet. When it's freed, it
+ * will mark this connection for close. */
+ return -1;
+ }
+
+#ifdef MEASUREMENTS_21206
+ /* Keep track of the number of RELAY_DATA cells sent for directory
+ * connections. */
+ connection_t *linked_conn = TO_CONN(fromconn)->linked_conn;
+
+ if (linked_conn && linked_conn->type == CONN_TYPE_DIR) {
+ ++(TO_DIR_CONN(linked_conn)->data_cells_sent);
+ }
+#endif /* defined(MEASUREMENTS_21206) */
+
+ return relay_send_command_from_edge(fromconn->stream_id, circ,
+ relay_command, payload,
+ payload_len, cpath_layer);
+}
+
+/** How many times will I retry a stream that fails due to DNS
+ * resolve failure or misc error?
+ */
+#define MAX_RESOLVE_FAILURES 3
+
+/** Return 1 if reason is something that you should retry if you
+ * get the end cell before you've connected; else return 0. */
+static int
+edge_reason_is_retriable(int reason)
+{
+ return reason == END_STREAM_REASON_HIBERNATING ||
+ reason == END_STREAM_REASON_RESOURCELIMIT ||
+ reason == END_STREAM_REASON_EXITPOLICY ||
+ reason == END_STREAM_REASON_RESOLVEFAILED ||
+ reason == END_STREAM_REASON_MISC ||
+ reason == END_STREAM_REASON_NOROUTE;
+}
+
+/** Called when we receive an END cell on a stream that isn't open yet,
+ * from the client side.
+ * Arguments are as for connection_edge_process_relay_cell().
+ */
+static int
+connection_ap_process_end_not_open(
+ relay_header_t *rh, cell_t *cell, origin_circuit_t *circ,
+ entry_connection_t *conn, crypt_path_t *layer_hint)
+{
+ node_t *exitrouter;
+ int reason = *(cell->payload+RELAY_HEADER_SIZE);
+ int control_reason;
+ edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(conn);
+ (void) layer_hint; /* unused */
+
+ if (rh->length > 0) {
+ if (reason == END_STREAM_REASON_TORPROTOCOL ||
+ reason == END_STREAM_REASON_DESTROY) {
+ /* Both of these reasons could mean a failed tag
+ * hit the exit and it complained. Do not probe.
+ * Fail the circuit. */
+ circ->path_state = PATH_STATE_USE_FAILED;
+ return -END_CIRC_REASON_TORPROTOCOL;
+ } else if (reason == END_STREAM_REASON_INTERNAL) {
+ /* We can't infer success or failure, since older Tors report
+ * ENETUNREACH as END_STREAM_REASON_INTERNAL. */
+ } else {
+ /* Path bias: If we get a valid reason code from the exit,
+ * it wasn't due to tagging.
+ *
+ * We rely on recognized+digest being strong enough to make
+ * tags unlikely to allow us to get tagged, yet 'recognized'
+ * reason codes here. */
+ pathbias_mark_use_success(circ);
+ }
+ }
+
+ /* This end cell is now valid. */
+ circuit_read_valid_data(circ, rh->length);
+
+ if (rh->length == 0) {
+ reason = END_STREAM_REASON_MISC;
+ }
+
+ control_reason = reason | END_STREAM_REASON_FLAG_REMOTE;
+
+ if (edge_reason_is_retriable(reason) &&
+ /* avoid retry if rend */
+ !connection_edge_is_rendezvous_stream(edge_conn)) {
+ const char *chosen_exit_digest =
+ circ->build_state->chosen_exit->identity_digest;
+ log_info(LD_APP,"Address '%s' refused due to '%s'. Considering retrying.",
+ safe_str(conn->socks_request->address),
+ stream_end_reason_to_string(reason));
+ exitrouter = node_get_mutable_by_id(chosen_exit_digest);
+ switch (reason) {
+ case END_STREAM_REASON_EXITPOLICY: {
+ tor_addr_t addr;
+ tor_addr_make_unspec(&addr);
+ if (rh->length >= 5) {
+ int ttl = -1;
+ tor_addr_make_unspec(&addr);
+ if (rh->length == 5 || rh->length == 9) {
+ tor_addr_from_ipv4n(&addr,
+ get_uint32(cell->payload+RELAY_HEADER_SIZE+1));
+ if (rh->length == 9)
+ ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+5));
+ } else if (rh->length == 17 || rh->length == 21) {
+ tor_addr_from_ipv6_bytes(&addr,
+ (char*)(cell->payload+RELAY_HEADER_SIZE+1));
+ if (rh->length == 21)
+ ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+17));
+ }
+ if (tor_addr_is_null(&addr)) {
+ log_info(LD_APP,"Address '%s' resolved to 0.0.0.0. Closing,",
+ safe_str(conn->socks_request->address));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
+ return 0;
+ }
+
+ if ((tor_addr_family(&addr) == AF_INET &&
+ !conn->entry_cfg.ipv4_traffic) ||
+ (tor_addr_family(&addr) == AF_INET6 &&
+ !conn->entry_cfg.ipv6_traffic)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_APP,
+ "Got an EXITPOLICY failure on a connection with a "
+ "mismatched family. Closing.");
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
+ return 0;
+ }
+ if (get_options()->ClientDNSRejectInternalAddresses &&
+ tor_addr_is_internal(&addr, 0)) {
+ log_info(LD_APP,"Address '%s' resolved to internal. Closing,",
+ safe_str(conn->socks_request->address));
+ connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
+ return 0;
+ }
+
+ client_dns_set_addressmap(conn,
+ conn->socks_request->address, &addr,
+ conn->chosen_exit_name, ttl);
+
+ {
+ char new_addr[TOR_ADDR_BUF_LEN];
+ tor_addr_to_str(new_addr, &addr, sizeof(new_addr), 1);
+ if (strcmp(conn->socks_request->address, new_addr)) {
+ strlcpy(conn->socks_request->address, new_addr,
+ sizeof(conn->socks_request->address));
+ control_event_stream_status(conn, STREAM_EVENT_REMAP, 0);
+ }
+ }
+ }
+ /* check if the exit *ought* to have allowed it */
+
+ adjust_exit_policy_from_exitpolicy_failure(circ,
+ conn,
+ exitrouter,
+ &addr);
+
+ if (conn->chosen_exit_optional ||
+ conn->chosen_exit_retries) {
+ /* stop wanting a specific exit */
+ conn->chosen_exit_optional = 0;
+ /* A non-zero chosen_exit_retries can happen if we set a
+ * TrackHostExits for this address under a port that the exit
+ * relay allows, but then try the same address with a different
+ * port that it doesn't allow to exit. We shouldn't unregister
+ * the mapping, since it is probably still wanted on the
+ * original port. But now we give away to the exit relay that
+ * we probably have a TrackHostExits on it. So be it. */
+ conn->chosen_exit_retries = 0;
+ tor_free(conn->chosen_exit_name); /* clears it */
+ }
+ if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0)
+ return 0;
+ /* else, conn will get closed below */
+ break;
+ }
+ case END_STREAM_REASON_CONNECTREFUSED:
+ if (!conn->chosen_exit_optional)
+ break; /* break means it'll close, below */
+ /* Else fall through: expire this circuit, clear the
+ * chosen_exit_name field, and try again. */
+ /* Falls through. */
+ case END_STREAM_REASON_RESOLVEFAILED:
+ case END_STREAM_REASON_TIMEOUT:
+ case END_STREAM_REASON_MISC:
+ case END_STREAM_REASON_NOROUTE:
+ if (client_dns_incr_failures(conn->socks_request->address)
+ < MAX_RESOLVE_FAILURES) {
+ /* We haven't retried too many times; reattach the connection. */
+ circuit_log_path(LOG_INFO,LD_APP,circ);
+ /* Mark this circuit "unusable for new streams". */
+ mark_circuit_unusable_for_new_conns(circ);
+
+ if (conn->chosen_exit_optional) {
+ /* stop wanting a specific exit */
+ conn->chosen_exit_optional = 0;
+ tor_free(conn->chosen_exit_name); /* clears it */
+ }
+ if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0)
+ return 0;
+ /* else, conn will get closed below */
+ } else {
+ log_notice(LD_APP,
+ "Have tried resolving or connecting to address '%s' "
+ "at %d different places. Giving up.",
+ safe_str(conn->socks_request->address),
+ MAX_RESOLVE_FAILURES);
+ /* clear the failures, so it will have a full try next time */
+ client_dns_clear_failures(conn->socks_request->address);
+ }
+ break;
+ case END_STREAM_REASON_HIBERNATING:
+ case END_STREAM_REASON_RESOURCELIMIT:
+ if (exitrouter) {
+ policies_set_node_exitpolicy_to_reject_all(exitrouter);
+ }
+ if (conn->chosen_exit_optional) {
+ /* stop wanting a specific exit */
+ conn->chosen_exit_optional = 0;
+ tor_free(conn->chosen_exit_name); /* clears it */
+ }
+ if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0)
+ return 0;
+ /* else, will close below */
+ break;
+ } /* end switch */
+ log_info(LD_APP,"Giving up on retrying; conn can't be handled.");
+ }
+
+ log_info(LD_APP,
+ "Edge got end (%s) before we're connected. Marking for close.",
+ stream_end_reason_to_string(rh->length > 0 ? reason : -1));
+ circuit_log_path(LOG_INFO,LD_APP,circ);
+ /* need to test because of detach_retriable */
+ if (!ENTRY_TO_CONN(conn)->marked_for_close)
+ connection_mark_unattached_ap(conn, control_reason);
+ return 0;
+}
+
+/** Called when we have gotten an END_REASON_EXITPOLICY failure on <b>circ</b>
+ * for <b>conn</b>, while attempting to connect via <b>node</b>. If the node
+ * told us which address it rejected, then <b>addr</b> is that address;
+ * otherwise it is AF_UNSPEC.
+ *
+ * If we are sure the node should have allowed this address, mark the node as
+ * having a reject *:* exit policy. Otherwise, mark the circuit as unusable
+ * for this particular address.
+ **/
+static void
+adjust_exit_policy_from_exitpolicy_failure(origin_circuit_t *circ,
+ entry_connection_t *conn,
+ node_t *node,
+ const tor_addr_t *addr)
+{
+ int make_reject_all = 0;
+ const sa_family_t family = tor_addr_family(addr);
+
+ if (node) {
+ tor_addr_t tmp;
+ int asked_for_family = tor_addr_parse(&tmp, conn->socks_request->address);
+ if (family == AF_UNSPEC) {
+ make_reject_all = 1;
+ } else if (node_exit_policy_is_exact(node, family) &&
+ asked_for_family != -1 && !conn->chosen_exit_name) {
+ make_reject_all = 1;
+ }
+
+ if (make_reject_all) {
+ log_info(LD_APP,
+ "Exitrouter %s seems to be more restrictive than its exit "
+ "policy. Not using this router as exit for now.",
+ node_describe(node));
+ policies_set_node_exitpolicy_to_reject_all(node);
+ }
+ }
+
+ if (family != AF_UNSPEC)
+ addr_policy_append_reject_addr(&circ->prepend_policy, addr);
+}
+
+/** Helper: change the socks_request->address field on <b>conn</b> to the
+ * string representation of <b>new_addr</b>, and send an appropriate
+ * REMAP event. */
+static void
+remap_event_helper(entry_connection_t *conn, const tor_addr_t *new_addr)
+{
+ tor_addr_to_str(conn->socks_request->address, new_addr,
+ sizeof(conn->socks_request->address),
+ 1);
+ control_event_stream_status(conn, STREAM_EVENT_REMAP,
+ REMAP_STREAM_SOURCE_EXIT);
+}
+
+/** Extract the contents of a connected cell in <b>cell</b>, whose relay
+ * header has already been parsed into <b>rh</b>. On success, set
+ * <b>addr_out</b> to the address we're connected to, and <b>ttl_out</b> to
+ * the ttl of that address, in seconds, and return 0. On failure, return
+ * -1.
+ *
+ * Note that the resulting address can be UNSPEC if the connected cell had no
+ * address (as for a stream to an onion service or a tunneled directory
+ * connection), and that the ttl can be absent (in which case <b>ttl_out</b>
+ * is set to -1). */
+STATIC int
+connected_cell_parse(const relay_header_t *rh, const cell_t *cell,
+ tor_addr_t *addr_out, int *ttl_out)
+{
+ uint32_t bytes;
+ const uint8_t *payload = cell->payload + RELAY_HEADER_SIZE;
+
+ tor_addr_make_unspec(addr_out);
+ *ttl_out = -1;
+ if (rh->length == 0)
+ return 0;
+ if (rh->length < 4)
+ return -1;
+ bytes = ntohl(get_uint32(payload));
+
+  /* If bytes is 0, this may be a v6 address; otherwise it's a v4 address. */
+ if (bytes != 0) {
+ /* v4 address */
+ tor_addr_from_ipv4h(addr_out, bytes);
+ if (rh->length >= 8) {
+ bytes = ntohl(get_uint32(payload + 4));
+ if (bytes <= INT32_MAX)
+ *ttl_out = bytes;
+ }
+ } else {
+    if (rh->length < 25) /* 4 bytes of 0s, 1 addr type, 16 ipv6, 4 ttl. */
+ return -1;
+ if (get_uint8(payload + 4) != 6)
+ return -1;
+ tor_addr_from_ipv6_bytes(addr_out, (char*)(payload + 5));
+ bytes = ntohl(get_uint32(payload + 21));
+ if (bytes <= INT32_MAX)
+ *ttl_out = (int) bytes;
+ }
+ return 0;
+}
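+
+/* As reconstructed from the parsing logic above, the two non-empty
+ * CONNECTED payload layouts are (tor-spec.txt is authoritative):
+ *
+ *   IPv4:  ipv4 address (4 bytes, nonzero), then optionally
+ *          ttl (4 bytes)
+ *   IPv6:  zeros (4 bytes), address type == 6 (1 byte),
+ *          ipv6 address (16 bytes), ttl (4 bytes)
+ *
+ * An empty payload is also legal, and yields an UNSPEC address with no
+ * ttl. */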
+
+/** Drop all storage held by <b>addr</b>. */
+STATIC void
+address_ttl_free_(address_ttl_t *addr)
+{
+ if (!addr)
+ return;
+ tor_free(addr->hostname);
+ tor_free(addr);
+}
+
+/** Parse a resolved cell in <b>cell</b>, with parsed header in <b>rh</b>.
+ * Return -1 on parse error. On success, add one or more newly allocated
+ * address_ttl_t to <b>addresses_out</b>; set *<b>errcode_out</b> to
+ * one of 0, RESOLVED_TYPE_ERROR, or RESOLVED_TYPE_ERROR_TRANSIENT, and
+ * return 0. */
+STATIC int
+resolved_cell_parse(const cell_t *cell, const relay_header_t *rh,
+ smartlist_t *addresses_out, int *errcode_out)
+{
+ const uint8_t *cp;
+ uint8_t answer_type;
+ size_t answer_len;
+ address_ttl_t *addr;
+ size_t remaining;
+ int errcode = 0;
+ smartlist_t *addrs;
+
+ tor_assert(cell);
+ tor_assert(rh);
+ tor_assert(addresses_out);
+ tor_assert(errcode_out);
+
+ *errcode_out = 0;
+
+ if (rh->length > RELAY_PAYLOAD_SIZE)
+ return -1;
+
+ addrs = smartlist_new();
+
+ cp = cell->payload + RELAY_HEADER_SIZE;
+
+ remaining = rh->length;
+ while (remaining) {
+ const uint8_t *cp_orig = cp;
+ if (remaining < 2)
+ goto err;
+ answer_type = *cp++;
+ answer_len = *cp++;
+ if (remaining < 2 + answer_len + 4) {
+ goto err;
+ }
+ if (answer_type == RESOLVED_TYPE_IPV4) {
+ if (answer_len != 4) {
+ goto err;
+ }
+ addr = tor_malloc_zero(sizeof(*addr));
+ tor_addr_from_ipv4n(&addr->addr, get_uint32(cp));
+ cp += 4;
+ addr->ttl = ntohl(get_uint32(cp));
+ cp += 4;
+ smartlist_add(addrs, addr);
+ } else if (answer_type == RESOLVED_TYPE_IPV6) {
+ if (answer_len != 16)
+ goto err;
+ addr = tor_malloc_zero(sizeof(*addr));
+ tor_addr_from_ipv6_bytes(&addr->addr, (const char*) cp);
+ cp += 16;
+ addr->ttl = ntohl(get_uint32(cp));
+ cp += 4;
+ smartlist_add(addrs, addr);
+ } else if (answer_type == RESOLVED_TYPE_HOSTNAME) {
+ if (answer_len == 0) {
+ goto err;
+ }
+ addr = tor_malloc_zero(sizeof(*addr));
+ addr->hostname = tor_memdup_nulterm(cp, answer_len);
+ cp += answer_len;
+ addr->ttl = ntohl(get_uint32(cp));
+ cp += 4;
+ smartlist_add(addrs, addr);
+ } else if (answer_type == RESOLVED_TYPE_ERROR_TRANSIENT ||
+ answer_type == RESOLVED_TYPE_ERROR) {
+ errcode = answer_type;
+ /* Ignore the error contents */
+ cp += answer_len + 4;
+ } else {
+ cp += answer_len + 4;
+ }
+ tor_assert(((ssize_t)remaining) >= (cp - cp_orig));
+ remaining -= (cp - cp_orig);
+ }
+
+ if (errcode && smartlist_len(addrs) == 0) {
+ /* Report an error only if there were no results. */
+ *errcode_out = errcode;
+ }
+
+ smartlist_add_all(addresses_out, addrs);
+ smartlist_free(addrs);
+
+ return 0;
+
+ err:
+ /* On parse error, don't report any results */
+ SMARTLIST_FOREACH(addrs, address_ttl_t *, a, address_ttl_free(a));
+ smartlist_free(addrs);
+ return -1;
+}
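+
+/* Each answer record consumed by the loop above has the layout
+ * (as reconstructed from the parsing logic; see tor-spec.txt):
+ *
+ *   answer_type (1 byte), answer_len (1 byte),
+ *   answer (answer_len bytes), ttl (4 bytes)
+ *
+ * where answer_type is one of the RESOLVED_TYPE_* values; unrecognized
+ * types are skipped rather than treated as parse errors. */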
+
+/** Helper for connection_edge_process_resolved_cell: given an error code,
+ * an entry_connection, and a list of address_ttl_t *, report the best answer
+ * to the entry_connection. */
+static void
+connection_ap_handshake_socks_got_resolved_cell(entry_connection_t *conn,
+ int error_code,
+ smartlist_t *results)
+{
+ address_ttl_t *addr_ipv4 = NULL;
+ address_ttl_t *addr_ipv6 = NULL;
+ address_ttl_t *addr_hostname = NULL;
+ address_ttl_t *addr_best = NULL;
+
+ /* If it's an error code, that's easy. */
+ if (error_code) {
+ tor_assert(error_code == RESOLVED_TYPE_ERROR ||
+ error_code == RESOLVED_TYPE_ERROR_TRANSIENT);
+ connection_ap_handshake_socks_resolved(conn,
+ error_code,0,NULL,-1,-1);
+ return;
+ }
+
+ /* Get the first answer of each type. */
+ SMARTLIST_FOREACH_BEGIN(results, address_ttl_t *, addr) {
+ if (addr->hostname) {
+ if (!addr_hostname) {
+ addr_hostname = addr;
+ }
+ } else if (tor_addr_family(&addr->addr) == AF_INET) {
+ if (!addr_ipv4 && conn->entry_cfg.ipv4_traffic) {
+ addr_ipv4 = addr;
+ }
+ } else if (tor_addr_family(&addr->addr) == AF_INET6) {
+ if (!addr_ipv6 && conn->entry_cfg.ipv6_traffic) {
+ addr_ipv6 = addr;
+ }
+ }
+ } SMARTLIST_FOREACH_END(addr);
+
+ /* Now figure out which type we wanted to deliver. */
+ if (conn->socks_request->command == SOCKS_COMMAND_RESOLVE_PTR) {
+ if (addr_hostname) {
+ connection_ap_handshake_socks_resolved(conn,
+ RESOLVED_TYPE_HOSTNAME,
+ strlen(addr_hostname->hostname),
+ (uint8_t*)addr_hostname->hostname,
+ addr_hostname->ttl,-1);
+ } else {
+ connection_ap_handshake_socks_resolved(conn,
+ RESOLVED_TYPE_ERROR,0,NULL,-1,-1);
+ }
+ return;
+ }
+
+ if (conn->entry_cfg.prefer_ipv6) {
+ addr_best = addr_ipv6 ? addr_ipv6 : addr_ipv4;
+ } else {
+ addr_best = addr_ipv4 ? addr_ipv4 : addr_ipv6;
+ }
+
+ /* Now convert it to the ugly old interface */
+ if (! addr_best) {
+ connection_ap_handshake_socks_resolved(conn,
+ RESOLVED_TYPE_ERROR,0,NULL,-1,-1);
+ return;
+ }
+
+ connection_ap_handshake_socks_resolved_addr(conn,
+ &addr_best->addr,
+ addr_best->ttl,
+ -1);
+
+ remap_event_helper(conn, &addr_best->addr);
+}
+
+/** Handle a RELAY_COMMAND_RESOLVED cell that we received on a non-open AP
+ * stream. */
+STATIC int
+connection_edge_process_resolved_cell(edge_connection_t *conn,
+ const cell_t *cell,
+ const relay_header_t *rh)
+{
+ entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn);
+ smartlist_t *resolved_addresses = NULL;
+ int errcode = 0;
+
+ if (conn->base_.state != AP_CONN_STATE_RESOLVE_WAIT) {
+ log_fn(LOG_PROTOCOL_WARN, LD_APP, "Got a 'resolved' cell while "
+ "not in state resolve_wait. Dropping.");
+ return 0;
+ }
+ tor_assert(SOCKS_COMMAND_IS_RESOLVE(entry_conn->socks_request->command));
+
+ resolved_addresses = smartlist_new();
+ if (resolved_cell_parse(cell, rh, resolved_addresses, &errcode)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Dropping malformed 'resolved' cell");
+ connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TORPROTOCOL);
+ goto done;
+ }
+
+ if (get_options()->ClientDNSRejectInternalAddresses) {
+ int orig_len = smartlist_len(resolved_addresses);
+ SMARTLIST_FOREACH_BEGIN(resolved_addresses, address_ttl_t *, addr) {
+ if (addr->hostname == NULL && tor_addr_is_internal(&addr->addr, 0)) {
+ log_info(LD_APP, "Got a resolved cell with answer %s; dropping that "
+ "answer.",
+ safe_str_client(fmt_addr(&addr->addr)));
+ address_ttl_free(addr);
+ SMARTLIST_DEL_CURRENT(resolved_addresses, addr);
+ }
+ } SMARTLIST_FOREACH_END(addr);
+ if (orig_len && smartlist_len(resolved_addresses) == 0) {
+ log_info(LD_APP, "Got a resolved cell with only private addresses; "
+ "dropping it.");
+ connection_ap_handshake_socks_resolved(entry_conn,
+ RESOLVED_TYPE_ERROR_TRANSIENT,
+ 0, NULL, 0, TIME_MAX);
+ connection_mark_unattached_ap(entry_conn,
+ END_STREAM_REASON_TORPROTOCOL);
+ goto done;
+ }
+ }
+
+ /* This is valid data at this point. Count it */
+ if (conn->on_circuit && CIRCUIT_IS_ORIGIN(conn->on_circuit)) {
+ circuit_read_valid_data(TO_ORIGIN_CIRCUIT(conn->on_circuit),
+ rh->length);
+ }
+
+ connection_ap_handshake_socks_got_resolved_cell(entry_conn,
+ errcode,
+ resolved_addresses);
+
+ connection_mark_unattached_ap(entry_conn,
+ END_STREAM_REASON_DONE |
+ END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED);
+
+ done:
+ SMARTLIST_FOREACH(resolved_addresses, address_ttl_t *, addr,
+ address_ttl_free(addr));
+ smartlist_free(resolved_addresses);
+ return 0;
+}
+
+/** An incoming relay cell has arrived from circuit <b>circ</b> to
+ * stream <b>conn</b>.
+ *
+ * The arguments here are the same as in
+ * connection_edge_process_relay_cell() below; this function is called
+ * from there when <b>conn</b> is defined and not in an open state.
+ */
+static int
+connection_edge_process_relay_cell_not_open(
+ relay_header_t *rh, cell_t *cell, circuit_t *circ,
+ edge_connection_t *conn, crypt_path_t *layer_hint)
+{
+ if (rh->command == RELAY_COMMAND_END) {
+ if (CIRCUIT_IS_ORIGIN(circ) && conn->base_.type == CONN_TYPE_AP) {
+ return connection_ap_process_end_not_open(rh, cell,
+ TO_ORIGIN_CIRCUIT(circ),
+ EDGE_TO_ENTRY_CONN(conn),
+ layer_hint);
+ } else {
+ /* we just got an 'end', don't need to send one */
+ conn->edge_has_sent_end = 1;
+ conn->end_reason = *(cell->payload+RELAY_HEADER_SIZE) |
+ END_STREAM_REASON_FLAG_REMOTE;
+ connection_mark_for_close(TO_CONN(conn));
+ return 0;
+ }
+ }
+
+ if (conn->base_.type == CONN_TYPE_AP &&
+ rh->command == RELAY_COMMAND_CONNECTED) {
+ tor_addr_t addr;
+ int ttl;
+ entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn);
+ tor_assert(CIRCUIT_IS_ORIGIN(circ));
+ if (conn->base_.state != AP_CONN_STATE_CONNECT_WAIT) {
+ log_fn(LOG_PROTOCOL_WARN, LD_APP,
+ "Got 'connected' while not in state connect_wait. Dropping.");
+ return 0;
+ }
+ CONNECTION_AP_EXPECT_NONPENDING(entry_conn);
+ conn->base_.state = AP_CONN_STATE_OPEN;
+ log_info(LD_APP,"'connected' received for circid %u streamid %d "
+ "after %d seconds.",
+ (unsigned)circ->n_circ_id,
+ rh->stream_id,
+ (int)(time(NULL) - conn->base_.timestamp_last_read_allowed));
+ if (connected_cell_parse(rh, cell, &addr, &ttl) < 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_APP,
+ "Got a badly formatted connected cell. Closing.");
+ connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
+ connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TORPROTOCOL);
+ return 0;
+ }
+ if (tor_addr_family(&addr) != AF_UNSPEC) {
+ /* The family is not UNSPEC: so we were given an address in the
+ * connected cell. (This is normal, except for BEGINDIR and onion
+ * service streams.) */
+ const sa_family_t family = tor_addr_family(&addr);
+ if (tor_addr_is_null(&addr) ||
+ (get_options()->ClientDNSRejectInternalAddresses &&
+ tor_addr_is_internal(&addr, 0))) {
+ log_info(LD_APP, "...but it claims the IP address was %s. Closing.",
+ fmt_addr(&addr));
+ connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
+ connection_mark_unattached_ap(entry_conn,
+ END_STREAM_REASON_TORPROTOCOL);
+ return 0;
+ }
+
+ if ((family == AF_INET && ! entry_conn->entry_cfg.ipv4_traffic) ||
+ (family == AF_INET6 && ! entry_conn->entry_cfg.ipv6_traffic)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_APP,
+ "Got a connected cell to %s with unsupported address family."
+ " Closing.", fmt_addr(&addr));
+ connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
+ connection_mark_unattached_ap(entry_conn,
+ END_STREAM_REASON_TORPROTOCOL);
+ return 0;
+ }
+
+ client_dns_set_addressmap(entry_conn,
+ entry_conn->socks_request->address, &addr,
+ entry_conn->chosen_exit_name, ttl);
+
+ remap_event_helper(entry_conn, &addr);
+ }
+ circuit_log_path(LOG_INFO,LD_APP,TO_ORIGIN_CIRCUIT(circ));
+ /* don't send a socks reply to transparent conns */
+ tor_assert(entry_conn->socks_request != NULL);
+ if (!entry_conn->socks_request->has_finished) {
+ connection_ap_handshake_socks_reply(entry_conn, NULL, 0, 0);
+ }
+
+ /* Was it a linked dir conn? If so, a dir request just started to
+ * fetch something; this could be a bootstrap status milestone. */
+ log_debug(LD_APP, "considering");
+ if (TO_CONN(conn)->linked_conn &&
+ TO_CONN(conn)->linked_conn->type == CONN_TYPE_DIR) {
+ connection_t *dirconn = TO_CONN(conn)->linked_conn;
+ log_debug(LD_APP, "it is! %d", dirconn->purpose);
+ switch (dirconn->purpose) {
+ case DIR_PURPOSE_FETCH_CERTIFICATE:
+ if (consensus_is_waiting_for_certs())
+ control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_KEYS, 0);
+ break;
+ case DIR_PURPOSE_FETCH_CONSENSUS:
+ control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_STATUS, 0);
+ break;
+ case DIR_PURPOSE_FETCH_SERVERDESC:
+ case DIR_PURPOSE_FETCH_MICRODESC:
+ if (TO_DIR_CONN(dirconn)->router_purpose == ROUTER_PURPOSE_GENERAL)
+ control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_DESCRIPTORS,
+ count_loading_descriptors_progress());
+ break;
+ }
+ }
+ /* This is definitely a success, so forget about any pending data we
+ * had sent. */
+ if (entry_conn->pending_optimistic_data) {
+ buf_free(entry_conn->pending_optimistic_data);
+ entry_conn->pending_optimistic_data = NULL;
+ }
+
+ /* This is valid data at this point. Count it */
+ circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), rh->length);
+
+ /* handle anything that might have queued */
+ if (connection_edge_package_raw_inbuf(conn, 1, NULL) < 0) {
+ /* (We already sent an end cell if possible) */
+ connection_mark_for_close(TO_CONN(conn));
+ return 0;
+ }
+ return 0;
+ }
+ if (conn->base_.type == CONN_TYPE_AP &&
+ rh->command == RELAY_COMMAND_RESOLVED) {
+ return connection_edge_process_resolved_cell(conn, cell, rh);
+ }
+
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Got an unexpected relay command %d, in state %d (%s). Dropping.",
+ rh->command, conn->base_.state,
+ conn_state_to_string(conn->base_.type, conn->base_.state));
+ return 0; /* for forward compatibility, don't kill the circuit */
+// connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
+// connection_mark_for_close(conn);
+// return -1;
+}
+
+/** An incoming relay cell has arrived on circuit <b>circ</b>. If
+ * <b>conn</b> is NULL this is a control cell, else <b>cell</b> is
+ * destined for <b>conn</b>.
+ *
+ * If <b>layer_hint</b> is defined, then we're the origin of the
+ * circuit, and it specifies the hop that packaged <b>cell</b>.
+ *
+ * Return -reason if you want to warn and tear down the circuit, else 0.
+ */
+STATIC int
+connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
+ edge_connection_t *conn,
+ crypt_path_t *layer_hint)
+{
+ static int num_seen=0;
+ relay_header_t rh;
+ unsigned domain = layer_hint?LD_APP:LD_EXIT;
+ int reason;
+ int optimistic_data = 0; /* Set to 1 if we receive data on a stream
+ * that's in the EXIT_CONN_STATE_RESOLVING
+ * or EXIT_CONN_STATE_CONNECTING states. */
+
+ tor_assert(cell);
+ tor_assert(circ);
+
+ relay_header_unpack(&rh, cell->payload);
+// log_fn(LOG_DEBUG,"command %d stream %d", rh.command, rh.stream_id);
+ num_seen++;
+ log_debug(domain, "Now seen %d relay cells here (command %d, stream %d).",
+ num_seen, rh.command, rh.stream_id);
+
+ if (rh.length > RELAY_PAYLOAD_SIZE) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Relay cell length field too long. Closing circuit.");
+    return -END_CIRC_REASON_TORPROTOCOL;
+ }
+
+ if (rh.stream_id == 0) {
+ switch (rh.command) {
+ case RELAY_COMMAND_BEGIN:
+ case RELAY_COMMAND_CONNECTED:
+ case RELAY_COMMAND_END:
+ case RELAY_COMMAND_RESOLVE:
+ case RELAY_COMMAND_RESOLVED:
+ case RELAY_COMMAND_BEGIN_DIR:
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay command %d with zero "
+ "stream_id. Dropping.", (int)rh.command);
+ return 0;
+ default:
+ ;
+ }
+ }
+
+ /* either conn is NULL, in which case we've got a control cell, or else
+ * conn points to the recognized stream. */
+
+ if (conn && !connection_state_is_open(TO_CONN(conn))) {
+ if (conn->base_.type == CONN_TYPE_EXIT &&
+ (conn->base_.state == EXIT_CONN_STATE_CONNECTING ||
+ conn->base_.state == EXIT_CONN_STATE_RESOLVING) &&
+ rh.command == RELAY_COMMAND_DATA) {
+ /* Allow DATA cells to be delivered to an exit node in state
+ * EXIT_CONN_STATE_CONNECTING or EXIT_CONN_STATE_RESOLVING.
+ * This speeds up HTTP, for example. */
+ optimistic_data = 1;
+ } else if (rh.stream_id == 0 && rh.command == RELAY_COMMAND_DATA) {
+ log_warn(LD_BUG, "Somehow I had a connection that matched a "
+ "data cell with stream ID 0.");
+ } else {
+ return connection_edge_process_relay_cell_not_open(
+ &rh, cell, circ, conn, layer_hint);
+ }
+ }
+
+ switch (rh.command) {
+ case RELAY_COMMAND_DROP:
+ rep_hist_padding_count_read(PADDING_TYPE_DROP);
+// log_info(domain,"Got a relay-level padding cell. Dropping.");
+ return 0;
+ case RELAY_COMMAND_BEGIN:
+ case RELAY_COMMAND_BEGIN_DIR:
+ if (layer_hint &&
+ circ->purpose != CIRCUIT_PURPOSE_S_REND_JOINED) {
+ log_fn(LOG_PROTOCOL_WARN, LD_APP,
+ "Relay begin request unsupported at AP. Dropping.");
+ return 0;
+ }
+ if (circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED &&
+ layer_hint != TO_ORIGIN_CIRCUIT(circ)->cpath->prev) {
+ log_fn(LOG_PROTOCOL_WARN, LD_APP,
+ "Relay begin request to Hidden Service "
+ "from intermediary node. Dropping.");
+ return 0;
+ }
+ if (conn) {
+ log_fn(LOG_PROTOCOL_WARN, domain,
+ "Begin cell for known stream. Dropping.");
+ return 0;
+ }
+ if (rh.command == RELAY_COMMAND_BEGIN_DIR &&
+ circ->purpose != CIRCUIT_PURPOSE_S_REND_JOINED) {
+ /* Assign this circuit and its app-ward OR connection a unique ID,
+ * so that we can measure download times. The local edge and dir
+ * connection will be assigned the same ID when they are created
+ * and linked. */
+ static uint64_t next_id = 0;
+ circ->dirreq_id = ++next_id;
+ TO_OR_CIRCUIT(circ)->p_chan->dirreq_id = circ->dirreq_id;
+ }
+ return connection_exit_begin_conn(cell, circ);
+ case RELAY_COMMAND_DATA:
+ ++stats_n_data_cells_received;
+ if (( layer_hint && --layer_hint->deliver_window < 0) ||
+ (!layer_hint && --circ->deliver_window < 0)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "(relay data) circ deliver_window below 0. Killing.");
+ if (conn) {
+ /* XXXX Do we actually need to do this? Will killing the circuit
+ * not send an END and mark the stream for close as appropriate? */
+ connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
+ connection_mark_for_close(TO_CONN(conn));
+ }
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+ log_debug(domain,"circ deliver_window now %d.", layer_hint ?
+ layer_hint->deliver_window : circ->deliver_window);
+
+ circuit_consider_sending_sendme(circ, layer_hint);
+
+ if (rh.stream_id == 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay data cell with zero "
+ "stream_id. Dropping.");
+ return 0;
+ } else if (!conn) {
+ log_info(domain,"data cell dropped, unknown stream (streamid %d).",
+ rh.stream_id);
+ return 0;
+ }
+
+ if (--conn->deliver_window < 0) { /* is it below 0 after decrement? */
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "(relay data) conn deliver_window below 0. Killing.");
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+ /* Total all valid application bytes delivered */
+ if (CIRCUIT_IS_ORIGIN(circ) && rh.length > 0) {
+ circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), rh.length);
+ }
+
+ stats_n_data_bytes_received += rh.length;
+ connection_buf_add((char*)(cell->payload + RELAY_HEADER_SIZE),
+ rh.length, TO_CONN(conn));
+
+#ifdef MEASUREMENTS_21206
+ /* Count number of RELAY_DATA cells received on a linked directory
+ * connection. */
+ connection_t *linked_conn = TO_CONN(conn)->linked_conn;
+
+ if (linked_conn && linked_conn->type == CONN_TYPE_DIR) {
+ ++(TO_DIR_CONN(linked_conn)->data_cells_received);
+ }
+#endif /* defined(MEASUREMENTS_21206) */
+
+ if (!optimistic_data) {
+ /* Only send a SENDME if we're not getting optimistic data; otherwise
+ * a SENDME could arrive before the CONNECTED.
+ */
+ connection_edge_consider_sending_sendme(conn);
+ }
+
+ return 0;
+ case RELAY_COMMAND_END:
+ reason = rh.length > 0 ?
+ get_uint8(cell->payload+RELAY_HEADER_SIZE) : END_STREAM_REASON_MISC;
+ if (!conn) {
+ log_info(domain,"end cell (%s) dropped, unknown stream.",
+ stream_end_reason_to_string(reason));
+ return 0;
+ }
+/* XXX add to this log_fn the exit node's nickname? */
+ log_info(domain,TOR_SOCKET_T_FORMAT": end cell (%s) for stream %d. "
+ "Removing stream.",
+ conn->base_.s,
+ stream_end_reason_to_string(reason),
+ conn->stream_id);
+ if (conn->base_.type == CONN_TYPE_AP) {
+ entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn);
+ if (entry_conn->socks_request &&
+ !entry_conn->socks_request->has_finished)
+ log_warn(LD_BUG,
+ "open stream hasn't sent socks answer yet? Closing.");
+ }
+ /* We just *got* an end; no reason to send one. */
+ conn->edge_has_sent_end = 1;
+ if (!conn->end_reason)
+ conn->end_reason = reason | END_STREAM_REASON_FLAG_REMOTE;
+ if (!conn->base_.marked_for_close) {
+ /* only mark it if not already marked. it's possible to
+ * get the 'end' right around when the client hangs up on us. */
+ connection_mark_and_flush(TO_CONN(conn));
+
+ /* Total all valid application bytes delivered */
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), rh.length);
+ }
+ }
+ return 0;
+ case RELAY_COMMAND_EXTEND:
+ case RELAY_COMMAND_EXTEND2: {
+ static uint64_t total_n_extend=0, total_nonearly=0;
+ total_n_extend++;
+ if (rh.stream_id) {
+ log_fn(LOG_PROTOCOL_WARN, domain,
+ "'extend' cell received for non-zero stream. Dropping.");
+ return 0;
+ }
+ if (cell->command != CELL_RELAY_EARLY &&
+ !networkstatus_get_param(NULL,"AllowNonearlyExtend",0,0,1)) {
+#define EARLY_WARNING_INTERVAL 3600
+ static ratelim_t early_warning_limit =
+ RATELIM_INIT(EARLY_WARNING_INTERVAL);
+ char *m;
+ if (cell->command == CELL_RELAY) {
+ ++total_nonearly;
+ if ((m = rate_limit_log(&early_warning_limit, approx_time()))) {
+ double percentage = ((double)total_nonearly)/total_n_extend;
+ percentage *= 100;
+ log_fn(LOG_PROTOCOL_WARN, domain, "EXTEND cell received, "
+ "but not via RELAY_EARLY. Dropping.%s", m);
+ log_fn(LOG_PROTOCOL_WARN, domain, " (We have dropped %.02f%% of "
+ "all EXTEND cells for this reason)", percentage);
+ tor_free(m);
+ }
+ } else {
+ log_fn(LOG_WARN, domain,
+ "EXTEND cell received, in a cell with type %d! Dropping.",
+ cell->command);
+ }
+ return 0;
+ }
+ return circuit_extend(cell, circ);
+ }
+ case RELAY_COMMAND_EXTENDED:
+ case RELAY_COMMAND_EXTENDED2:
+ if (!layer_hint) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "'extended' unsupported at non-origin. Dropping.");
+ return 0;
+ }
+ log_debug(domain,"Got an extended cell! Yay.");
+ {
+ extended_cell_t extended_cell;
+ if (extended_cell_parse(&extended_cell, rh.command,
+ (const uint8_t*)cell->payload+RELAY_HEADER_SIZE,
+ rh.length)<0) {
+ log_warn(LD_PROTOCOL,
+ "Can't parse EXTENDED cell; killing circuit.");
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+ if ((reason = circuit_finish_handshake(TO_ORIGIN_CIRCUIT(circ),
+ &extended_cell.created_cell)) < 0) {
+ circuit_mark_for_close(circ, -reason);
+ return 0; /* We don't want to cause a warning, so we mark the circuit
+ * here. */
+ }
+ }
+ if ((reason=circuit_send_next_onion_skin(TO_ORIGIN_CIRCUIT(circ)))<0) {
+ log_info(domain,"circuit_send_next_onion_skin() failed.");
+ return reason;
+ }
+ /* Total all valid bytes delivered. */
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), rh.length);
+ }
+ return 0;
+ case RELAY_COMMAND_TRUNCATE:
+ if (layer_hint) {
+ log_fn(LOG_PROTOCOL_WARN, LD_APP,
+ "'truncate' unsupported at origin. Dropping.");
+ return 0;
+ }
+ if (circ->n_hop) {
+ if (circ->n_chan)
+ log_warn(LD_BUG, "n_chan and n_hop set on the same circuit!");
+ extend_info_free(circ->n_hop);
+ circ->n_hop = NULL;
+ tor_free(circ->n_chan_create_cell);
+ circuit_set_state(circ, CIRCUIT_STATE_OPEN);
+ }
+ if (circ->n_chan) {
+ uint8_t trunc_reason = get_uint8(cell->payload + RELAY_HEADER_SIZE);
+ circuit_clear_cell_queue(circ, circ->n_chan);
+ channel_send_destroy(circ->n_circ_id, circ->n_chan,
+ trunc_reason);
+ circuit_set_n_circid_chan(circ, 0, NULL);
+ }
+ log_debug(LD_EXIT, "Processed 'truncate', replying.");
+ {
+ char payload[1];
+ payload[0] = (char)END_CIRC_REASON_REQUESTED;
+ relay_send_command_from_edge(0, circ, RELAY_COMMAND_TRUNCATED,
+ payload, sizeof(payload), NULL);
+ }
+ return 0;
+ case RELAY_COMMAND_TRUNCATED:
+ if (!layer_hint) {
+ log_fn(LOG_PROTOCOL_WARN, LD_EXIT,
+ "'truncated' unsupported at non-origin. Dropping.");
+ return 0;
+ }
+ circuit_truncated(TO_ORIGIN_CIRCUIT(circ), layer_hint,
+ get_uint8(cell->payload + RELAY_HEADER_SIZE));
+ return 0;
+ case RELAY_COMMAND_CONNECTED:
+ if (conn) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "'connected' unsupported while open. Closing circ.");
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+ log_info(domain,
+ "'connected' received on circid %u for streamid %d, "
+ "no conn attached anymore. Ignoring.",
+ (unsigned)circ->n_circ_id, rh.stream_id);
+ return 0;
+ case RELAY_COMMAND_SENDME:
+ if (!rh.stream_id) {
+ if (layer_hint) {
+ if (layer_hint->package_window + CIRCWINDOW_INCREMENT >
+ CIRCWINDOW_START_MAX) {
+ static struct ratelim_t exit_warn_ratelim = RATELIM_INIT(600);
+ log_fn_ratelim(&exit_warn_ratelim, LOG_WARN, LD_PROTOCOL,
+ "Unexpected sendme cell from exit relay. "
+ "Closing circ.");
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+ layer_hint->package_window += CIRCWINDOW_INCREMENT;
+ log_debug(LD_APP,"circ-level sendme at origin, packagewindow %d.",
+ layer_hint->package_window);
+ circuit_resume_edge_reading(circ, layer_hint);
+
+ /* We count circuit-level sendme's as valid delivered data because
+ * they are rate limited.
+ */
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ),
+ rh.length);
+ }
+
+ } else {
+ if (circ->package_window + CIRCWINDOW_INCREMENT >
+ CIRCWINDOW_START_MAX) {
+ static struct ratelim_t client_warn_ratelim = RATELIM_INIT(600);
+          log_fn_ratelim(&client_warn_ratelim, LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Unexpected sendme cell from client. "
+ "Closing circ (window %d).",
+ circ->package_window);
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+ circ->package_window += CIRCWINDOW_INCREMENT;
+ log_debug(LD_APP,
+ "circ-level sendme at non-origin, packagewindow %d.",
+ circ->package_window);
+ circuit_resume_edge_reading(circ, layer_hint);
+ }
+ return 0;
+ }
+ if (!conn) {
+ log_info(domain,"sendme cell dropped, unknown stream (streamid %d).",
+ rh.stream_id);
+ return 0;
+ }
+
+ /* Don't allow the other endpoint to request more than our maximum
+ * (i.e. initial) stream SENDME window worth of data. Well-behaved
+ * stock clients will not request more than this max (as per the check
+ * in the while loop of connection_edge_consider_sending_sendme()).
+ */
+ if (conn->package_window + STREAMWINDOW_INCREMENT >
+ STREAMWINDOW_START_MAX) {
+ static struct ratelim_t stream_warn_ratelim = RATELIM_INIT(600);
+ log_fn_ratelim(&stream_warn_ratelim, LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Unexpected stream sendme cell. Closing circ (window %d).",
+ conn->package_window);
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+
+ /* At this point, the stream sendme is valid */
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ),
+ rh.length);
+ }
+
+ conn->package_window += STREAMWINDOW_INCREMENT;
+ log_debug(domain,"stream-level sendme, packagewindow now %d.",
+ conn->package_window);
+ if (circuit_queue_streams_are_blocked(circ)) {
+ /* Still waiting for queue to flush; don't touch conn */
+ return 0;
+ }
+ connection_start_reading(TO_CONN(conn));
+ /* handle whatever might still be on the inbuf */
+ if (connection_edge_package_raw_inbuf(conn, 1, NULL) < 0) {
+ /* (We already sent an end cell if possible) */
+ connection_mark_for_close(TO_CONN(conn));
+ return 0;
+ }
+ return 0;
+ case RELAY_COMMAND_RESOLVE:
+ if (layer_hint) {
+ log_fn(LOG_PROTOCOL_WARN, LD_APP,
+ "resolve request unsupported at AP; dropping.");
+ return 0;
+ } else if (conn) {
+ log_fn(LOG_PROTOCOL_WARN, domain,
+ "resolve request for known stream; dropping.");
+ return 0;
+ } else if (circ->purpose != CIRCUIT_PURPOSE_OR) {
+ log_fn(LOG_PROTOCOL_WARN, domain,
+ "resolve request on circ with purpose %d; dropping",
+ circ->purpose);
+ return 0;
+ }
+ connection_exit_begin_resolve(cell, TO_OR_CIRCUIT(circ));
+ return 0;
+ case RELAY_COMMAND_RESOLVED:
+ if (conn) {
+ log_fn(LOG_PROTOCOL_WARN, domain,
+ "'resolved' unsupported while open. Closing circ.");
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+ log_info(domain,
+ "'resolved' received, no conn attached anymore. Ignoring.");
+ return 0;
+ case RELAY_COMMAND_ESTABLISH_INTRO:
+ case RELAY_COMMAND_ESTABLISH_RENDEZVOUS:
+ case RELAY_COMMAND_INTRODUCE1:
+ case RELAY_COMMAND_INTRODUCE2:
+ case RELAY_COMMAND_INTRODUCE_ACK:
+ case RELAY_COMMAND_RENDEZVOUS1:
+ case RELAY_COMMAND_RENDEZVOUS2:
+ case RELAY_COMMAND_INTRO_ESTABLISHED:
+ case RELAY_COMMAND_RENDEZVOUS_ESTABLISHED:
+ rend_process_relay_cell(circ, layer_hint,
+ rh.command, rh.length,
+ cell->payload+RELAY_HEADER_SIZE);
+ return 0;
+ }
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Received unknown relay command %d. Perhaps the other side is using "
+ "a newer version of Tor? Dropping.",
+ rh.command);
+ return 0; /* for forward compatibility, don't kill the circuit */
+}
+
+/** How many relay_data cells have we built, ever? */
+uint64_t stats_n_data_cells_packaged = 0;
+/** How many bytes of data have we put in relay_data cells, ever? This
+ * would be RELAY_PAYLOAD_SIZE*stats_n_data_cells_packaged if every relay
+ * cell we ever sent were completely full of data. */
+uint64_t stats_n_data_bytes_packaged = 0;
+/** How many relay_data cells have we received, ever? */
+uint64_t stats_n_data_cells_received = 0;
+/** How many bytes of data have we received in relay_data cells, ever? This
+ * would be RELAY_PAYLOAD_SIZE*stats_n_data_cells_received if every relay
+ * cell we ever received were completely full of data. */
+uint64_t stats_n_data_bytes_received = 0;
+
+/** If <b>conn</b> has an entire relay payload of bytes on its inbuf (or
+ * <b>package_partial</b> is true), and the appropriate package windows aren't
+ * empty, grab a cell and send it down the circuit.
+ *
+ * If <b>max_cells</b> is non-NULL, package no more than *<b>max_cells</b>
+ * cells, and decrement *<b>max_cells</b> by the number of cells packaged.
+ *
+ * Return -1 (and send a RELAY_COMMAND_END cell if necessary) if conn should
+ * be marked for close, else return 0.
+ */
+int
+connection_edge_package_raw_inbuf(edge_connection_t *conn, int package_partial,
+ int *max_cells)
+{
+ size_t bytes_to_process, length;
+ char payload[CELL_PAYLOAD_SIZE];
+  circuit_t *circ;
+  tor_assert(conn);
+  const unsigned domain = conn->base_.type == CONN_TYPE_AP ? LD_APP : LD_EXIT;
+ int sending_from_optimistic = 0;
+ entry_connection_t *entry_conn =
+ conn->base_.type == CONN_TYPE_AP ? EDGE_TO_ENTRY_CONN(conn) : NULL;
+ const int sending_optimistically =
+ entry_conn &&
+ conn->base_.type == CONN_TYPE_AP &&
+ conn->base_.state != AP_CONN_STATE_OPEN;
+ crypt_path_t *cpath_layer = conn->cpath_layer;
+
+
+ if (conn->base_.marked_for_close) {
+ log_warn(LD_BUG,
+ "called on conn that's already marked for close at %s:%d.",
+ conn->base_.marked_for_close_file, conn->base_.marked_for_close);
+ return 0;
+ }
+
+ if (max_cells && *max_cells <= 0)
+ return 0;
+
+ repeat_connection_edge_package_raw_inbuf:
+
+ circ = circuit_get_by_edge_conn(conn);
+ if (!circ) {
+ log_info(domain,"conn has no circuit! Closing.");
+ conn->end_reason = END_STREAM_REASON_CANT_ATTACH;
+ return -1;
+ }
+
+ if (circuit_consider_stop_edge_reading(circ, cpath_layer))
+ return 0;
+
+ if (conn->package_window <= 0) {
+ log_info(domain,"called with package_window %d. Skipping.",
+ conn->package_window);
+ connection_stop_reading(TO_CONN(conn));
+ return 0;
+ }
+
+ sending_from_optimistic = entry_conn &&
+ entry_conn->sending_optimistic_data != NULL;
+
+ if (PREDICT_UNLIKELY(sending_from_optimistic)) {
+ bytes_to_process = buf_datalen(entry_conn->sending_optimistic_data);
+ if (PREDICT_UNLIKELY(!bytes_to_process)) {
+ log_warn(LD_BUG, "sending_optimistic_data was non-NULL but empty");
+ bytes_to_process = connection_get_inbuf_len(TO_CONN(conn));
+ sending_from_optimistic = 0;
+ }
+ } else {
+ bytes_to_process = connection_get_inbuf_len(TO_CONN(conn));
+ }
+
+ if (!bytes_to_process)
+ return 0;
+
+ if (!package_partial && bytes_to_process < RELAY_PAYLOAD_SIZE)
+ return 0;
+
+ if (bytes_to_process > RELAY_PAYLOAD_SIZE) {
+ length = RELAY_PAYLOAD_SIZE;
+ } else {
+ length = bytes_to_process;
+ }
+ stats_n_data_bytes_packaged += length;
+ stats_n_data_cells_packaged += 1;
+
+ if (PREDICT_UNLIKELY(sending_from_optimistic)) {
+ /* XXXX We could be more efficient here by sometimes packing
+ * previously-sent optimistic data in the same cell with data
+ * from the inbuf. */
+ buf_get_bytes(entry_conn->sending_optimistic_data, payload, length);
+ if (!buf_datalen(entry_conn->sending_optimistic_data)) {
+ buf_free(entry_conn->sending_optimistic_data);
+ entry_conn->sending_optimistic_data = NULL;
+ }
+ } else {
+ connection_buf_get_bytes(payload, length, TO_CONN(conn));
+ }
+
+ log_debug(domain,TOR_SOCKET_T_FORMAT": Packaging %d bytes (%d waiting).",
+ conn->base_.s,
+ (int)length, (int)connection_get_inbuf_len(TO_CONN(conn)));
+
+ if (sending_optimistically && !sending_from_optimistic) {
+ /* This is new optimistic data; remember it in case we need to detach and
+ retry */
+ if (!entry_conn->pending_optimistic_data)
+ entry_conn->pending_optimistic_data = buf_new();
+ buf_add(entry_conn->pending_optimistic_data, payload, length);
+ }
+
+ if (connection_edge_send_command(conn, RELAY_COMMAND_DATA,
+ payload, length) < 0 )
+ /* circuit got marked for close, don't continue, don't need to mark conn */
+ return 0;
+
+ if (!cpath_layer) { /* non-rendezvous exit */
+ tor_assert(circ->package_window > 0);
+ circ->package_window--;
+ } else { /* we're an AP, or an exit on a rendezvous circ */
+ tor_assert(cpath_layer->package_window > 0);
+ cpath_layer->package_window--;
+ }
+
+ if (--conn->package_window <= 0) { /* is it 0 after decrement? */
+ connection_stop_reading(TO_CONN(conn));
+ log_debug(domain,"conn->package_window reached 0.");
+ circuit_consider_stop_edge_reading(circ, cpath_layer);
+ return 0; /* don't process the inbuf any more */
+ }
+ log_debug(domain,"conn->package_window is now %d",conn->package_window);
+
+ if (max_cells) {
+ *max_cells -= 1;
+ if (*max_cells <= 0)
+ return 0;
+ }
+
+ /* handle more if there's more, or return 0 if there isn't */
+ goto repeat_connection_edge_package_raw_inbuf;
+}
+
+/** Called when we've just received a relay data cell, when
+ * we've just finished flushing all bytes to stream <b>conn</b>,
+ * or when we've flushed *some* bytes to the stream <b>conn</b>.
+ *
+ * If conn->outbuf is not too full, and our deliver window is
+ * low, send back a suitable number of stream-level sendme cells.
+ */
+void
+connection_edge_consider_sending_sendme(edge_connection_t *conn)
+{
+ circuit_t *circ;
+
+ if (connection_outbuf_too_full(TO_CONN(conn)))
+ return;
+
+ circ = circuit_get_by_edge_conn(conn);
+ if (!circ) {
+ /* this can legitimately happen if the destroy has already
+ * arrived and torn down the circuit */
+ log_info(LD_APP,"No circuit associated with conn. Skipping.");
+ return;
+ }
+
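+  /* A worked example, assuming the usual stream window values of
+   * STREAMWINDOW_START=500 and STREAMWINDOW_INCREMENT=50: we enter this loop
+   * once deliver_window has dropped to 450 or below, and each iteration
+   * queues one sendme and credits the window by 50, so a window of 320 sends
+   * three sendmes and leaves deliver_window at 470. */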
+ while (conn->deliver_window <= STREAMWINDOW_START - STREAMWINDOW_INCREMENT) {
+    log_debug(conn->base_.type == CONN_TYPE_AP ? LD_APP : LD_EXIT,
+ "Outbuf %d, Queuing stream sendme.",
+ (int)conn->base_.outbuf_flushlen);
+ conn->deliver_window += STREAMWINDOW_INCREMENT;
+ if (connection_edge_send_command(conn, RELAY_COMMAND_SENDME,
+ NULL, 0) < 0) {
+ log_warn(LD_APP,"connection_edge_send_command failed. Skipping.");
+ return; /* the circuit's closed, don't continue */
+ }
+ }
+}
+
+/** The circuit <b>circ</b> has received a circuit-level sendme
+ * (on hop <b>layer_hint</b>, if we're the OP). Go through all the
+ * attached streams and let them resume reading and packaging, if
+ * their stream windows allow it.
+ */
+static void
+circuit_resume_edge_reading(circuit_t *circ, crypt_path_t *layer_hint)
+{
+ if (circuit_queue_streams_are_blocked(circ)) {
+ log_debug(layer_hint?LD_APP:LD_EXIT,"Too big queue, no resuming");
+ return;
+ }
+ log_debug(layer_hint?LD_APP:LD_EXIT,"resuming");
+
+ if (CIRCUIT_IS_ORIGIN(circ))
+ circuit_resume_edge_reading_helper(TO_ORIGIN_CIRCUIT(circ)->p_streams,
+ circ, layer_hint);
+ else
+ circuit_resume_edge_reading_helper(TO_OR_CIRCUIT(circ)->n_streams,
+ circ, layer_hint);
+}
+
+void
+stream_choice_seed_weak_rng(void)
+{
+ crypto_seed_weak_rng(&stream_choice_rng);
+}
+
+/** A helper function for circuit_resume_edge_reading() above.
+ * The arguments are the same, except that <b>conn</b> is the head
+ * of a linked list of edge streams that should each be considered.
+ */
+static int
+circuit_resume_edge_reading_helper(edge_connection_t *first_conn,
+ circuit_t *circ,
+ crypt_path_t *layer_hint)
+{
+ edge_connection_t *conn;
+ int n_packaging_streams, n_streams_left;
+ int packaged_this_round;
+ int cells_on_queue;
+ int cells_per_conn;
+ edge_connection_t *chosen_stream = NULL;
+ int max_to_package;
+
+ if (first_conn == NULL) {
+ /* Don't bother to try to do the rest of this if there are no connections
+ * to resume. */
+ return 0;
+ }
+
+ /* How many cells do we have space for? It will be the minimum of
+ * the number needed to exhaust the package window, and the minimum
+ * needed to fill the cell queue. */
+ max_to_package = circ->package_window;
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ cells_on_queue = circ->n_chan_cells.n;
+ } else {
+ or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
+ cells_on_queue = or_circ->p_chan_cells.n;
+ }
+ if (CELL_QUEUE_HIGHWATER_SIZE - cells_on_queue < max_to_package)
+ max_to_package = CELL_QUEUE_HIGHWATER_SIZE - cells_on_queue;
+
+  /* We used to start listening on the streams in the order they
+ * appeared in the linked list. That leads to starvation on the
+ * streams that appeared later on the list, since the first streams
+ * would always get to read first. Instead, we just pick a random
+ * stream on the list, and enable reading for streams starting at that
+ * point (and wrapping around as if the list were circular). It would
+ * probably be better to actually remember which streams we've
+ * serviced in the past, but this is simple and effective. */
+
+ /* Select a stream uniformly at random from the linked list. We
+ * don't need cryptographic randomness here. */
+ {
+ int num_streams = 0;
+ for (conn = first_conn; conn; conn = conn->next_stream) {
+ num_streams++;
+ if (tor_weak_random_one_in_n(&stream_choice_rng, num_streams)) {
+ chosen_stream = conn;
+ }
+ /* Invariant: chosen_stream has been chosen uniformly at random from
+ * among the first num_streams streams on first_conn.
+ *
+ * (Note that we iterate over every stream on the circuit, so that after
+ * we've considered the first stream, we've chosen it with P=1; and
+ * after we consider the second stream, we've switched to it with P=1/2
+ * and stayed with the first stream with P=1/2; and after we've
+ * considered the third stream, we've switched to it with P=1/3 and
+ * remained with one of the first two streams with P=(2/3), giving each
+   * one P=(1/2)(2/3)=(1/3).) */
+ }
+ }
+
+ /* Count how many non-marked streams there are that have anything on
+ * their inbuf, and enable reading on all of the connections. */
+ n_packaging_streams = 0;
+ /* Activate reading starting from the chosen stream */
+ for (conn=chosen_stream; conn; conn = conn->next_stream) {
+ /* Start reading for the streams starting from here */
+ if (conn->base_.marked_for_close || conn->package_window <= 0)
+ continue;
+ if (!layer_hint || conn->cpath_layer == layer_hint) {
+ connection_start_reading(TO_CONN(conn));
+
+ if (connection_get_inbuf_len(TO_CONN(conn)) > 0)
+ ++n_packaging_streams;
+ }
+ }
+ /* Go back and do the ones we skipped, circular-style */
+ for (conn = first_conn; conn != chosen_stream; conn = conn->next_stream) {
+ if (conn->base_.marked_for_close || conn->package_window <= 0)
+ continue;
+ if (!layer_hint || conn->cpath_layer == layer_hint) {
+ connection_start_reading(TO_CONN(conn));
+
+ if (connection_get_inbuf_len(TO_CONN(conn)) > 0)
+ ++n_packaging_streams;
+ }
+ }
+
+ if (n_packaging_streams == 0) /* avoid divide-by-zero */
+ return 0;
+
+ again:
+
+ cells_per_conn = CEIL_DIV(max_to_package, n_packaging_streams);
+
+ packaged_this_round = 0;
+ n_streams_left = 0;
+
+ /* Iterate over all connections. Package up to cells_per_conn cells on
+ * each. Update packaged_this_round with the total number of cells
+ * packaged, and n_streams_left with the number that still have data to
+ * package.
+ */
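+  /* For example, with max_to_package of 10 and n_packaging_streams of 4,
+   * cells_per_conn is CEIL_DIV(10, 4) == 3: each stream may package up to
+   * three cells this round, and any stream with data left afterwards keeps
+   * n_streams_left nonzero so we loop again with the remaining budget. */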
+ for (conn=first_conn; conn; conn=conn->next_stream) {
+ if (conn->base_.marked_for_close || conn->package_window <= 0)
+ continue;
+ if (!layer_hint || conn->cpath_layer == layer_hint) {
+ int n = cells_per_conn, r;
+ /* handle whatever might still be on the inbuf */
+ r = connection_edge_package_raw_inbuf(conn, 1, &n);
+
+ /* Note how many we packaged */
+ packaged_this_round += (cells_per_conn-n);
+
+ if (r<0) {
+ /* Problem while packaging. (We already sent an end cell if
+ * possible) */
+ connection_mark_for_close(TO_CONN(conn));
+ continue;
+ }
+
+ /* If there's still data to read, we'll be coming back to this stream. */
+ if (connection_get_inbuf_len(TO_CONN(conn)))
+ ++n_streams_left;
+
+ /* If the circuit won't accept any more data, return without looking
+ * at any more of the streams. Any connections that should be stopped
+ * have already been stopped by connection_edge_package_raw_inbuf. */
+ if (circuit_consider_stop_edge_reading(circ, layer_hint))
+ return -1;
+ /* XXXX should we also stop immediately if we fill up the cell queue?
+ * Probably. */
+ }
+ }
+
+ /* If we made progress, and we are willing to package more, and there are
+ * any streams left that want to package stuff... try again!
+ */
+ if (packaged_this_round && packaged_this_round < max_to_package &&
+ n_streams_left) {
+ max_to_package -= packaged_this_round;
+ n_packaging_streams = n_streams_left;
+ goto again;
+ }
+
+ return 0;
+}
+
+/** Check if the package window for <b>circ</b> is empty (at
+ * hop <b>layer_hint</b> if it's defined).
+ *
+ * If yes, tell edge streams to stop reading and return 1.
+ * Else return 0.
+ */
+static int
+circuit_consider_stop_edge_reading(circuit_t *circ, crypt_path_t *layer_hint)
+{
+ edge_connection_t *conn = NULL;
+ unsigned domain = layer_hint ? LD_APP : LD_EXIT;
+
+ if (!layer_hint) {
+ or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
+ log_debug(domain,"considering circ->package_window %d",
+ circ->package_window);
+ if (circ->package_window <= 0) {
+ log_debug(domain,"yes, not-at-origin. stopped.");
+ for (conn = or_circ->n_streams; conn; conn=conn->next_stream)
+ connection_stop_reading(TO_CONN(conn));
+ return 1;
+ }
+ return 0;
+ }
+ /* else, layer hint is defined, use it */
+ log_debug(domain,"considering layer_hint->package_window %d",
+ layer_hint->package_window);
+ if (layer_hint->package_window <= 0) {
+ log_debug(domain,"yes, at-origin. stopped.");
+ for (conn = TO_ORIGIN_CIRCUIT(circ)->p_streams; conn;
+ conn=conn->next_stream) {
+ if (conn->cpath_layer == layer_hint)
+ connection_stop_reading(TO_CONN(conn));
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/** Check if the deliver_window for circuit <b>circ</b> (at hop
+ * <b>layer_hint</b> if it's defined) is low enough that we should
+ * send a circuit-level sendme back down the circuit. If so, send
+ * enough sendmes that the window would be overfull if we sent any
+ * more.
+ */
+static void
+circuit_consider_sending_sendme(circuit_t *circ, crypt_path_t *layer_hint)
+{
+// log_fn(LOG_INFO,"Considering: layer_hint is %s",
+// layer_hint ? "defined" : "null");
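+  /* A worked example, assuming the usual circuit window values of
+   * CIRCWINDOW_START=1000 and CIRCWINDOW_INCREMENT=100: we queue a sendme
+   * once the deliver window has fallen to 900 or below, and each sendme
+   * credits the window by 100 until it climbs back above 900. */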
+ while ((layer_hint ? layer_hint->deliver_window : circ->deliver_window) <=
+ CIRCWINDOW_START - CIRCWINDOW_INCREMENT) {
+ log_debug(LD_CIRC,"Queuing circuit sendme.");
+ if (layer_hint)
+ layer_hint->deliver_window += CIRCWINDOW_INCREMENT;
+ else
+ circ->deliver_window += CIRCWINDOW_INCREMENT;
+ if (relay_send_command_from_edge(0, circ, RELAY_COMMAND_SENDME,
+ NULL, 0, layer_hint) < 0) {
+ log_warn(LD_CIRC,
+ "relay_send_command_from_edge failed. Circuit's closed.");
+ return; /* the circuit's closed, don't continue */
+ }
+ }
+}
+
+/** The total number of cells we have allocated. */
+static size_t total_cells_allocated = 0;
+
+/** Release storage held by <b>cell</b>. */
+static inline void
+packed_cell_free_unchecked(packed_cell_t *cell)
+{
+ --total_cells_allocated;
+ tor_free(cell);
+}
+
+/** Allocate and return a new packed_cell_t. */
+STATIC packed_cell_t *
+packed_cell_new(void)
+{
+ ++total_cells_allocated;
+ return tor_malloc_zero(sizeof(packed_cell_t));
+}
+
+/** Free <b>cell</b>, a packed cell that the channel_t lower layer is done
+ * with. Does nothing if <b>cell</b> is NULL. */
+void
+packed_cell_free_(packed_cell_t *cell)
+{
+ if (!cell)
+ return;
+ packed_cell_free_unchecked(cell);
+}
+
+/** Log current statistics for cell pool allocation at log level
+ * <b>severity</b>. */
+void
+dump_cell_pool_usage(int severity)
+{
+ int n_circs = 0;
+ int n_cells = 0;
+ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, c) {
+ n_cells += c->n_chan_cells.n;
+ if (!CIRCUIT_IS_ORIGIN(c))
+ n_cells += TO_OR_CIRCUIT(c)->p_chan_cells.n;
+ ++n_circs;
+ }
+ SMARTLIST_FOREACH_END(c);
+ tor_log(severity, LD_MM,
+ "%d cells allocated on %d circuits. %d cells leaked.",
+ n_cells, n_circs, (int)total_cells_allocated - n_cells);
+}
+
+/** Allocate a new copy of packed <b>cell</b>. */
+static inline packed_cell_t *
+packed_cell_copy(const cell_t *cell, int wide_circ_ids)
+{
+ packed_cell_t *c = packed_cell_new();
+ cell_pack(c, cell, wide_circ_ids);
+ return c;
+}
+
+/** Append <b>cell</b> to the end of <b>queue</b>. */
+void
+cell_queue_append(cell_queue_t *queue, packed_cell_t *cell)
+{
+ TOR_SIMPLEQ_INSERT_TAIL(&queue->head, cell, next);
+ ++queue->n;
+}
+
+/** Append a newly allocated copy of <b>cell</b> to the end of the
+ * <b>exitward</b> (or app-ward) <b>queue</b> of <b>circ</b>. If
+ * <b>use_stats</b> is true, record statistics about the cell.
+ */
+void
+cell_queue_append_packed_copy(circuit_t *circ, cell_queue_t *queue,
+ int exitward, const cell_t *cell,
+ int wide_circ_ids, int use_stats)
+{
+ packed_cell_t *copy = packed_cell_copy(cell, wide_circ_ids);
+ (void)circ;
+ (void)exitward;
+ (void)use_stats;
+
+ copy->inserted_timestamp = monotime_coarse_get_stamp();
+
+ cell_queue_append(queue, copy);
+}
+
+/** Initialize <b>queue</b> as an empty cell queue. */
+void
+cell_queue_init(cell_queue_t *queue)
+{
+ memset(queue, 0, sizeof(cell_queue_t));
+ TOR_SIMPLEQ_INIT(&queue->head);
+}
+
+/** Remove and free every cell in <b>queue</b>. */
+void
+cell_queue_clear(cell_queue_t *queue)
+{
+ packed_cell_t *cell;
+ while ((cell = TOR_SIMPLEQ_FIRST(&queue->head))) {
+ TOR_SIMPLEQ_REMOVE_HEAD(&queue->head, next);
+ packed_cell_free_unchecked(cell);
+ }
+ TOR_SIMPLEQ_INIT(&queue->head);
+ queue->n = 0;
+}
+
+/** Extract and return the cell at the head of <b>queue</b>; return NULL if
+ * <b>queue</b> is empty. */
+STATIC packed_cell_t *
+cell_queue_pop(cell_queue_t *queue)
+{
+ packed_cell_t *cell = TOR_SIMPLEQ_FIRST(&queue->head);
+ if (!cell)
+ return NULL;
+ TOR_SIMPLEQ_REMOVE_HEAD(&queue->head, next);
+ --queue->n;
+ return cell;
+}
+
+/** Initialize <b>queue</b> as an empty cell queue. */
+void
+destroy_cell_queue_init(destroy_cell_queue_t *queue)
+{
+ memset(queue, 0, sizeof(destroy_cell_queue_t));
+ TOR_SIMPLEQ_INIT(&queue->head);
+}
+
+/** Remove and free every cell in <b>queue</b>. */
+void
+destroy_cell_queue_clear(destroy_cell_queue_t *queue)
+{
+ destroy_cell_t *cell;
+ while ((cell = TOR_SIMPLEQ_FIRST(&queue->head))) {
+ TOR_SIMPLEQ_REMOVE_HEAD(&queue->head, next);
+ tor_free(cell);
+ }
+ TOR_SIMPLEQ_INIT(&queue->head);
+ queue->n = 0;
+}
+
+/** Extract and return the cell at the head of <b>queue</b>; return NULL if
+ * <b>queue</b> is empty. */
+STATIC destroy_cell_t *
+destroy_cell_queue_pop(destroy_cell_queue_t *queue)
+{
+ destroy_cell_t *cell = TOR_SIMPLEQ_FIRST(&queue->head);
+ if (!cell)
+ return NULL;
+ TOR_SIMPLEQ_REMOVE_HEAD(&queue->head, next);
+ --queue->n;
+ return cell;
+}
+
+/** Append a destroy cell for <b>circid</b> to <b>queue</b>. */
+void
+destroy_cell_queue_append(destroy_cell_queue_t *queue,
+ circid_t circid,
+ uint8_t reason)
+{
+ destroy_cell_t *cell = tor_malloc_zero(sizeof(destroy_cell_t));
+ cell->circid = circid;
+ cell->reason = reason;
+ /* Not yet used, but will be required for OOM handling. */
+ cell->inserted_timestamp = monotime_coarse_get_stamp();
+
+ TOR_SIMPLEQ_INSERT_TAIL(&queue->head, cell, next);
+ ++queue->n;
+}
+
+/** Convert a destroy_cell_t to a newly allocated cell_t. Frees its input. */
+static packed_cell_t *
+destroy_cell_to_packed_cell(destroy_cell_t *inp, int wide_circ_ids)
+{
+ packed_cell_t *packed = packed_cell_new();
+ cell_t cell;
+ memset(&cell, 0, sizeof(cell));
+ cell.circ_id = inp->circid;
+ cell.command = CELL_DESTROY;
+ cell.payload[0] = inp->reason;
+ cell_pack(packed, &cell, wide_circ_ids);
+
+ tor_free(inp);
+ return packed;
+}
+
+/** Return the approximate number of bytes consumed in memory by a single
+ * packed_cell_t in a queue. */
+size_t
+packed_cell_mem_cost(void)
+{
+ return sizeof(packed_cell_t);
+}
+
+/** Return the approximate total number of bytes allocated for packed cells
+ * across all cell queues. */
+size_t
+cell_queues_get_total_allocation(void)
+{
+ return total_cells_allocated * packed_cell_mem_cost();
+}
+
+/** How long after we've been low on memory should we try to conserve it? */
+#define MEMORY_PRESSURE_INTERVAL (30*60)
+
+/** The time at which we were last low on memory. */
+static time_t last_time_under_memory_pressure = 0;
+
+/** Check whether we've got too much space used for cells. If so,
+ * call the OOM handler and return 1. Otherwise, return 0. */
+STATIC int
+cell_queues_check_size(void)
+{
+ time_t now = time(NULL);
+ size_t alloc = cell_queues_get_total_allocation();
+ alloc += buf_get_total_allocation();
+ alloc += tor_compress_get_total_allocation();
+ const size_t rend_cache_total = rend_cache_get_total_allocation();
+ alloc += rend_cache_total;
+ const size_t geoip_client_cache_total =
+ geoip_client_cache_total_allocation();
+ alloc += geoip_client_cache_total;
+ if (alloc >= get_options()->MaxMemInQueues_low_threshold) {
+ last_time_under_memory_pressure = approx_time();
+ if (alloc >= get_options()->MaxMemInQueues) {
+ /* If we're spending over 20% of the memory limit on hidden service
+ * descriptors, free them until we're down to 10%. Do the same for geoip
+ * client cache. */
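+      /* A worked example with a hypothetical MaxMemInQueues of 1 GB: once
+       * the rend cache exceeds 200 MB (a fifth of the limit), we free enough
+       * to bring it back down to 100 MB (a tenth), and likewise for the
+       * geoip client cache. */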
+ if (rend_cache_total > get_options()->MaxMemInQueues / 5) {
+ const size_t bytes_to_remove =
+ rend_cache_total - (size_t)(get_options()->MaxMemInQueues / 10);
+ alloc -= hs_cache_handle_oom(now, bytes_to_remove);
+ }
+ if (geoip_client_cache_total > get_options()->MaxMemInQueues / 5) {
+ const size_t bytes_to_remove =
+ geoip_client_cache_total -
+ (size_t)(get_options()->MaxMemInQueues / 10);
+ alloc -= geoip_client_cache_handle_oom(now, bytes_to_remove);
+ }
+ circuits_handle_oom(alloc);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/** Return true if we've been under memory pressure in the last
+ * MEMORY_PRESSURE_INTERVAL seconds. */
+int
+have_been_under_memory_pressure(void)
+{
+  return last_time_under_memory_pressure + MEMORY_PRESSURE_INTERVAL
+    >= approx_time();
+}
+
+/**
+ * Update the number of cells available on the circuit's n_chan or p_chan's
+ * circuit mux.
+ */
+void
+update_circuit_on_cmux_(circuit_t *circ, cell_direction_t direction,
+ const char *file, int lineno)
+{
+ channel_t *chan = NULL;
+ or_circuit_t *or_circ = NULL;
+ circuitmux_t *cmux = NULL;
+
+ tor_assert(circ);
+
+ /* Okay, get the channel */
+ if (direction == CELL_DIRECTION_OUT) {
+ chan = circ->n_chan;
+ } else {
+ or_circ = TO_OR_CIRCUIT(circ);
+ chan = or_circ->p_chan;
+ }
+
+ tor_assert(chan);
+ tor_assert(chan->cmux);
+
+ /* Now get the cmux */
+ cmux = chan->cmux;
+
+ /* Cmux sanity check */
+ if (! circuitmux_is_circuit_attached(cmux, circ)) {
+ log_warn(LD_BUG, "called on non-attached circuit from %s:%d",
+ file, lineno);
+ return;
+ }
+ tor_assert(circuitmux_attached_circuit_direction(cmux, circ) == direction);
+
+ /* Update the number of cells we have for the circuit mux */
+ if (direction == CELL_DIRECTION_OUT) {
+ circuitmux_set_num_cells(cmux, circ, circ->n_chan_cells.n);
+ } else {
+ circuitmux_set_num_cells(cmux, circ, or_circ->p_chan_cells.n);
+ }
+}
+
+/** Remove all circuits from the cmux on <b>chan</b>.
+ *
+ * If <b>circuits_out</b> is non-NULL, add all detached circuits to
+ * <b>circuits_out</b>.
+ **/
+void
+channel_unlink_all_circuits(channel_t *chan, smartlist_t *circuits_out)
+{
+ tor_assert(chan);
+ tor_assert(chan->cmux);
+
+ circuitmux_detach_all_circuits(chan->cmux, circuits_out);
+ chan->num_n_circuits = 0;
+ chan->num_p_circuits = 0;
+}
+
+/** Block (if <b>block</b> is true) or unblock (if <b>block</b> is false)
+ * every edge connection that is using <b>circ</b> to write to <b>chan</b>,
+ * and start or stop reading as appropriate.
+ *
+ * If <b>stream_id</b> is nonzero, block only the edge connection whose
+ * stream_id matches it.
+ *
+ * Returns the number of streams whose status we changed.
+ */
+static int
+set_streams_blocked_on_circ(circuit_t *circ, channel_t *chan,
+ int block, streamid_t stream_id)
+{
+ edge_connection_t *edge = NULL;
+ int n = 0;
+ if (circ->n_chan == chan) {
+ circ->streams_blocked_on_n_chan = block;
+ if (CIRCUIT_IS_ORIGIN(circ))
+ edge = TO_ORIGIN_CIRCUIT(circ)->p_streams;
+ } else {
+ circ->streams_blocked_on_p_chan = block;
+ tor_assert(!CIRCUIT_IS_ORIGIN(circ));
+ edge = TO_OR_CIRCUIT(circ)->n_streams;
+ }
+
+ for (; edge; edge = edge->next_stream) {
+ connection_t *conn = TO_CONN(edge);
+ if (stream_id && edge->stream_id != stream_id)
+ continue;
+
+ if (edge->edge_blocked_on_circ != block) {
+ ++n;
+ edge->edge_blocked_on_circ = block;
+ }
+
+ if (!conn->read_event) {
+ /* This connection is a placeholder for something; probably a DNS
+ * request. It can't actually stop or start reading.*/
+ continue;
+ }
+
+ if (block) {
+ if (connection_is_reading(conn))
+ connection_stop_reading(conn);
+ } else {
+ /* Is this right? */
+ if (!connection_is_reading(conn))
+ connection_start_reading(conn);
+ }
+ }
+
+ return n;
+}
+
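+/* A packed cell begins with the network-order circuit ID (two bytes for
+ * narrow circuit IDs, four bytes for wide ones), followed by the one-byte
+ * command; the two accessors below read those fields in place. */
+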
+/** Extract the command from a packed cell. */
+static uint8_t
+packed_cell_get_command(const packed_cell_t *cell, int wide_circ_ids)
+{
+ if (wide_circ_ids) {
+ return get_uint8(cell->body+4);
+ } else {
+ return get_uint8(cell->body+2);
+ }
+}
+
+/** Extract the circuit ID from a packed cell. */
+circid_t
+packed_cell_get_circid(const packed_cell_t *cell, int wide_circ_ids)
+{
+ if (wide_circ_ids) {
+ return ntohl(get_uint32(cell->body));
+ } else {
+ return ntohs(get_uint16(cell->body));
+ }
+}
+
+/** Pull as many cells as possible (but no more than <b>max</b>) from the
+ * queue of the first active circuit on <b>chan</b>, and write them to
+ * <b>chan</b>-&gt;outbuf. Return the number of cells written. Advance
+ * the active circuit pointer to the next active circuit in the ring. */
+MOCK_IMPL(int,
+channel_flush_from_first_active_circuit, (channel_t *chan, int max))
+{
+ circuitmux_t *cmux = NULL;
+ int n_flushed = 0;
+ cell_queue_t *queue;
+ destroy_cell_queue_t *destroy_queue=NULL;
+ circuit_t *circ;
+ or_circuit_t *or_circ;
+ int streams_blocked;
+ packed_cell_t *cell;
+
+ /* Get the cmux */
+ tor_assert(chan);
+ tor_assert(chan->cmux);
+ cmux = chan->cmux;
+
+ /* Main loop: pick a circuit, send a cell, update the cmux */
+ while (n_flushed < max) {
+ circ = circuitmux_get_first_active_circuit(cmux, &destroy_queue);
+ if (destroy_queue) {
+ destroy_cell_t *dcell;
+ /* this code is duplicated from some of the logic below. Ugly! XXXX */
+ /* If we are given a destroy_queue here, then it is required to be
+ * nonempty... */
+ tor_assert(destroy_queue->n > 0);
+ dcell = destroy_cell_queue_pop(destroy_queue);
+ /* ...and pop() will always yield a cell from a nonempty queue. */
+ tor_assert(dcell);
+ /* frees dcell */
+ cell = destroy_cell_to_packed_cell(dcell, chan->wide_circ_ids);
+ /* Send the DESTROY cell. It is very unlikely that this fails but just
+ * in case, get rid of the channel. */
+ if (channel_write_packed_cell(chan, cell) < 0) {
+ /* The cell has been freed. */
+ channel_mark_for_close(chan);
+ continue;
+ }
+ /* Update the cmux destroy counter */
+ circuitmux_notify_xmit_destroy(cmux);
+ cell = NULL;
+ ++n_flushed;
+ continue;
+ }
+ /* If it returns NULL, no cells left to send */
+ if (!circ) break;
+
+ if (circ->n_chan == chan) {
+ queue = &circ->n_chan_cells;
+ streams_blocked = circ->streams_blocked_on_n_chan;
+ } else {
+ or_circ = TO_OR_CIRCUIT(circ);
+ tor_assert(or_circ->p_chan == chan);
+ queue = &TO_OR_CIRCUIT(circ)->p_chan_cells;
+ streams_blocked = circ->streams_blocked_on_p_chan;
+ }
+
+ /* Circuitmux told us this was active, so it should have cells */
+ if (/*BUG(*/ queue->n == 0 /*)*/) {
+ log_warn(LD_BUG, "Found a supposedly active circuit with no cells "
+ "to send. Trying to recover.");
+ circuitmux_set_num_cells(cmux, circ, 0);
+ if (! circ->marked_for_close)
+ circuit_mark_for_close(circ, END_CIRC_REASON_INTERNAL);
+ continue;
+ }
+
+ tor_assert(queue->n > 0);
+
+ /*
+ * Get just one cell here; once we've sent it, that can change the circuit
+ * selection, so we have to loop around for another even if this circuit
+ * has more than one.
+ */
+ cell = cell_queue_pop(queue);
+
+ /* Calculate the exact time that this cell has spent in the queue. */
+ if (get_options()->CellStatistics ||
+ get_options()->TestingEnableCellStatsEvent) {
+ uint32_t timestamp_now = monotime_coarse_get_stamp();
+ uint32_t msec_waiting =
+ (uint32_t) monotime_coarse_stamp_units_to_approx_msec(
+ timestamp_now - cell->inserted_timestamp);
+
+ if (get_options()->CellStatistics && !CIRCUIT_IS_ORIGIN(circ)) {
+ or_circ = TO_OR_CIRCUIT(circ);
+ or_circ->total_cell_waiting_time += msec_waiting;
+ or_circ->processed_cells++;
+ }
+
+ if (get_options()->TestingEnableCellStatsEvent) {
+ uint8_t command = packed_cell_get_command(cell, chan->wide_circ_ids);
+
+ testing_cell_stats_entry_t *ent =
+ tor_malloc_zero(sizeof(testing_cell_stats_entry_t));
+ ent->command = command;
+ ent->waiting_time = msec_waiting / 10;
+ ent->removed = 1;
+ if (circ->n_chan == chan)
+ ent->exitward = 1;
+ if (!circ->testing_cell_stats)
+ circ->testing_cell_stats = smartlist_new();
+ smartlist_add(circ->testing_cell_stats, ent);
+ }
+ }
+
+ /* If we just flushed our queue and this circuit is used for a
+ * tunneled directory request, possibly advance its state. */
+ if (queue->n == 0 && chan->dirreq_id)
+ geoip_change_dirreq_state(chan->dirreq_id,
+ DIRREQ_TUNNELED,
+ DIRREQ_CIRC_QUEUE_FLUSHED);
+
+ /* Now send the cell. It is very unlikely that this fails but just in
+ * case, get rid of the channel. */
+ if (channel_write_packed_cell(chan, cell) < 0) {
+ /* The cell has been freed at this point. */
+ channel_mark_for_close(chan);
+ continue;
+ }
+ cell = NULL;
+
+ /*
+ * Don't packed_cell_free_unchecked(cell) here because the channel will
+ * do so when it gets out of the channel queue (probably already did, in
+ * which case that was an immediate double-free bug).
+ */
+
+ /* Update the counter */
+ ++n_flushed;
+
+ /*
+ * Now update the cmux; tell it we've just sent a cell, and how many
+ * we have left.
+ */
+ circuitmux_notify_xmit_cells(cmux, circ, 1);
+ circuitmux_set_num_cells(cmux, circ, queue->n);
+ if (queue->n == 0)
+ log_debug(LD_GENERAL, "Made a circuit inactive.");
+
+ /* Is the cell queue low enough to unblock all the streams that are waiting
+ * to write to this circuit? */
+ if (streams_blocked && queue->n <= CELL_QUEUE_LOWWATER_SIZE)
+ set_streams_blocked_on_circ(circ, chan, 0, 0); /* unblock streams */
+
+ /* If n_flushed < max still, loop around and pick another circuit */
+ }
+
+ /* Okay, we're done sending now */
+ return n_flushed;
+}
+
+/* Minimum value is the maximum circuit window size.
+ *
+ * SENDME cells let us control how many cells can be in flight on a circuit
+ * from end to end. This logic bounds how many cells any circuit cell queue
+ * can possibly hold.
+ *
+ * Because the Tor protocol allows a client to exit at any hop in a circuit,
+ * and a circuit can have at most 8 hops, the theoretical worst case is the
+ * circuit window start value times the maximum number of hops (8). Having
+ * more cells than that means something is wrong.
+ *
+ * However, because padding cells aren't counted in the package window, we
+ * set the maximum to a size large enough that in theory we never expect to
+ * reach it. And if we ever do because of future changes, we'll be able to
+ * control it with a consensus parameter.
+ *
+ * XXX: Unfortunately, END cells aren't accounted for in the circuit window
+ * either, which means that if, for instance, a client opens 8001 streams,
+ * the 8001 following END cells will queue up in the circuit, which will get
+ * closed if the max limit is 8000. That is sad, because the Tor protocol
+ * allows it. But we need an upper bound on the circuit queue in order to
+ * avoid memory-pressure DoS, so the default size is a middle ground between
+ * having no limit and having a very restrictive one. This is also why we
+ * can control it through a consensus parameter. */
+#define RELAY_CIRC_CELL_QUEUE_SIZE_MIN CIRCWINDOW_START_MAX
+/* We can't have a consensus parameter above this value. */
+#define RELAY_CIRC_CELL_QUEUE_SIZE_MAX INT32_MAX
+/* The default is set to a large value so we can properly handle padding
+ * cells, which aren't accounted for in the SENDME window. The default of
+ * 50000 allowed cells in the queue corresponds to roughly 25MB. */
+#define RELAY_CIRC_CELL_QUEUE_SIZE_DEFAULT \
+ (50 * RELAY_CIRC_CELL_QUEUE_SIZE_MIN)
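+/* Sanity arithmetic for the default above, assuming CIRCWINDOW_START_MAX is
+ * 1000 cells and a packed cell occupies a bit over 512 bytes in memory:
+ * 50 * 1000 = 50000 queued cells, or roughly 25MB. */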
+
+/* The maximum number of cells a circuit queue can contain. This is updated
+ * at every new consensus and controlled by a parameter. */
+static int32_t max_circuit_cell_queue_size =
+ RELAY_CIRC_CELL_QUEUE_SIZE_DEFAULT;
+
+/* Called when the consensus has changed. At this stage, the global consensus
+ * object has NOT been updated. It is called from
+ * notify_before_networkstatus_changes(). */
+void
+relay_consensus_has_changed(const networkstatus_t *ns)
+{
+ tor_assert(ns);
+
+ /* Update the circuit max cell queue size from the consensus. */
+ max_circuit_cell_queue_size =
+ networkstatus_get_param(ns, "circ_max_cell_queue_size",
+ RELAY_CIRC_CELL_QUEUE_SIZE_DEFAULT,
+ RELAY_CIRC_CELL_QUEUE_SIZE_MIN,
+ RELAY_CIRC_CELL_QUEUE_SIZE_MAX);
+}
+
+/** Add <b>cell</b> to the queue of <b>circ</b> writing to <b>chan</b>
+ * transmitting in <b>direction</b>.
+ *
+ * The given <b>cell</b> is copied onto the circuit queue, so the caller
+ * retains ownership and must clean up its copy.
+ *
+ * This function is part of the fast path. */
+void
+append_cell_to_circuit_queue(circuit_t *circ, channel_t *chan,
+ cell_t *cell, cell_direction_t direction,
+ streamid_t fromstream)
+{
+ or_circuit_t *orcirc = NULL;
+ cell_queue_t *queue;
+ int streams_blocked;
+ int exitward;
+ if (circ->marked_for_close)
+ return;
+
+ exitward = (direction == CELL_DIRECTION_OUT);
+ if (exitward) {
+ queue = &circ->n_chan_cells;
+ streams_blocked = circ->streams_blocked_on_n_chan;
+ } else {
+ orcirc = TO_OR_CIRCUIT(circ);
+ queue = &orcirc->p_chan_cells;
+ streams_blocked = circ->streams_blocked_on_p_chan;
+ }
+
+ if (PREDICT_UNLIKELY(queue->n >= max_circuit_cell_queue_size)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "%s circuit has %d cells in its queue, maximum allowed is %d. "
+ "Closing circuit for safety reasons.",
+ (exitward) ? "Outbound" : "Inbound", queue->n,
+ max_circuit_cell_queue_size);
+ circuit_mark_for_close(circ, END_CIRC_REASON_RESOURCELIMIT);
+ stats_n_circ_max_cell_reached++;
+ return;
+ }
+
+ /* Very important that we copy to the circuit queue because all calls to
+ * this function use the stack for the cell memory. */
+ cell_queue_append_packed_copy(circ, queue, exitward, cell,
+ chan->wide_circ_ids, 1);
+
+ /* Check and run the OOM if needed. */
+ if (PREDICT_UNLIKELY(cell_queues_check_size())) {
+ /* We ran the OOM handler which might have closed this circuit. */
+ if (circ->marked_for_close)
+ return;
+ }
+
+ /* If we have too many cells on the circuit, we should stop reading from
+ * the edge streams for a while. */
+ if (!streams_blocked && queue->n >= CELL_QUEUE_HIGHWATER_SIZE)
+ set_streams_blocked_on_circ(circ, chan, 1, 0); /* block streams */
+
+ if (streams_blocked && fromstream) {
+    /* Streams on this circuit are marked blocked, but this stream just sent
+     * us a cell, so it apparently isn't blocked yet; block it. */
+ set_streams_blocked_on_circ(circ, chan, 1, fromstream);
+ }
+
+ update_circuit_on_cmux(circ, direction);
+ if (queue->n == 1) {
+ /* This was the first cell added to the queue. We just made this
+ * circuit active. */
+ log_debug(LD_GENERAL, "Made a circuit active.");
+ }
+
+ /* New way: mark this as having waiting cells for the scheduler */
+ scheduler_channel_has_waiting_cells(chan);
+}
+
+/** Append an encoded value of <b>addr</b> to <b>payload_out</b>, which must
+ * have at least 18 bytes of free space. The encoding is, as specified in
+ * tor-spec.txt:
+ * RESOLVED_TYPE_IPV4 or RESOLVED_TYPE_IPV6 [1 byte]
+ * LENGTH [1 byte]
+ * ADDRESS [length bytes]
+ * Return the number of bytes added, or -1 on error */
+int
+append_address_to_payload(uint8_t *payload_out, const tor_addr_t *addr)
+{
+ uint32_t a;
+ switch (tor_addr_family(addr)) {
+ case AF_INET:
+ payload_out[0] = RESOLVED_TYPE_IPV4;
+ payload_out[1] = 4;
+ a = tor_addr_to_ipv4n(addr);
+ memcpy(payload_out+2, &a, 4);
+ return 6;
+ case AF_INET6:
+ payload_out[0] = RESOLVED_TYPE_IPV6;
+ payload_out[1] = 16;
+ memcpy(payload_out+2, tor_addr_to_in6_addr8(addr), 16);
+ return 18;
+ case AF_UNSPEC:
+ default:
+ return -1;
+ }
+}
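+
+/* For example, assuming RESOLVED_TYPE_IPV4 is 4 as in tor-spec.txt, the
+ * address 127.0.0.1 would be appended as the six bytes
+ * 0x04 0x04 0x7f 0x00 0x00 0x01. */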
+
+/** Given <b>payload_len</b> bytes at <b>payload</b>, starting with an address
+ * encoded as by append_address_to_payload(), try to decode the address into
+ * *<b>addr_out</b>. Return the next byte in the payload after the address on
+ * success, or NULL on failure. */
+const uint8_t *
+decode_address_from_payload(tor_addr_t *addr_out, const uint8_t *payload,
+ int payload_len)
+{
+ if (payload_len < 2)
+ return NULL;
+ if (payload_len < 2+payload[1])
+ return NULL;
+
+ switch (payload[0]) {
+ case RESOLVED_TYPE_IPV4:
+ if (payload[1] != 4)
+ return NULL;
+ tor_addr_from_ipv4n(addr_out, get_uint32(payload+2));
+ break;
+ case RESOLVED_TYPE_IPV6:
+ if (payload[1] != 16)
+ return NULL;
+ tor_addr_from_ipv6_bytes(addr_out, (char*)(payload+2));
+ break;
+ default:
+ tor_addr_make_unspec(addr_out);
+ break;
+ }
+ return payload + 2 + payload[1];
+}
+
+/** Remove all the cells queued on <b>circ</b> for <b>chan</b>. */
+void
+circuit_clear_cell_queue(circuit_t *circ, channel_t *chan)
+{
+ cell_queue_t *queue;
+ cell_direction_t direction;
+
+ if (circ->n_chan == chan) {
+ queue = &circ->n_chan_cells;
+ direction = CELL_DIRECTION_OUT;
+ } else {
+ or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
+ tor_assert(orcirc->p_chan == chan);
+ queue = &orcirc->p_chan_cells;
+ direction = CELL_DIRECTION_IN;
+ }
+
+ /* Clear the queue */
+ cell_queue_clear(queue);
+
+ /* Update the cell counter in the cmux */
+ if (chan->cmux && circuitmux_is_circuit_attached(chan->cmux, circ))
+ update_circuit_on_cmux(circ, direction);
+}
+
+/** Return 1 if we shouldn't restart reading on this circuit, even if
+ * we get a SENDME. Else return 0. */
+static int
+circuit_queue_streams_are_blocked(circuit_t *circ)
+{
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ return circ->streams_blocked_on_n_chan;
+ } else {
+ return circ->streams_blocked_on_p_chan;
+ }
+}
diff --git a/src/core/or/relay.h b/src/core/or/relay.h
new file mode 100644
index 0000000000..db7f17b96c
--- /dev/null
+++ b/src/core/or/relay.h
@@ -0,0 +1,124 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file relay.h
+ * \brief Header file for relay.c.
+ **/
+
+#ifndef TOR_RELAY_H
+#define TOR_RELAY_H
+
+extern uint64_t stats_n_relay_cells_relayed;
+extern uint64_t stats_n_relay_cells_delivered;
+extern uint64_t stats_n_circ_max_cell_reached;
+
+void relay_consensus_has_changed(const networkstatus_t *ns);
+int circuit_receive_relay_cell(cell_t *cell, circuit_t *circ,
+ cell_direction_t cell_direction);
+size_t cell_queues_get_total_allocation(void);
+
+void relay_header_pack(uint8_t *dest, const relay_header_t *src);
+void relay_header_unpack(relay_header_t *dest, const uint8_t *src);
+MOCK_DECL(int,
+relay_send_command_from_edge_,(streamid_t stream_id, circuit_t *circ,
+ uint8_t relay_command, const char *payload,
+ size_t payload_len, crypt_path_t *cpath_layer,
+ const char *filename, int lineno));
+/* Indicates to relay_send_command_from_edge() that it is a control cell. */
+#define CONTROL_CELL_ID 0
+#define relay_send_command_from_edge(stream_id, circ, relay_command, payload, \
+ payload_len, cpath_layer) \
+ relay_send_command_from_edge_((stream_id), (circ), (relay_command), \
+ (payload), (payload_len), (cpath_layer), \
+ __FILE__, __LINE__)
+int connection_edge_send_command(edge_connection_t *fromconn,
+ uint8_t relay_command, const char *payload,
+ size_t payload_len);
+int connection_edge_package_raw_inbuf(edge_connection_t *conn,
+ int package_partial,
+ int *max_cells);
+void connection_edge_consider_sending_sendme(edge_connection_t *conn);
+
+extern uint64_t stats_n_data_cells_packaged;
+extern uint64_t stats_n_data_bytes_packaged;
+extern uint64_t stats_n_data_cells_received;
+extern uint64_t stats_n_data_bytes_received;
+
+void dump_cell_pool_usage(int severity);
+size_t packed_cell_mem_cost(void);
+
+int have_been_under_memory_pressure(void);
+
+/* For channeltls.c */
+void packed_cell_free_(packed_cell_t *cell);
+#define packed_cell_free(cell) \
+ FREE_AND_NULL(packed_cell_t, packed_cell_free_, (cell))
+
+void cell_queue_init(cell_queue_t *queue);
+void cell_queue_clear(cell_queue_t *queue);
+void cell_queue_append(cell_queue_t *queue, packed_cell_t *cell);
+void cell_queue_append_packed_copy(circuit_t *circ, cell_queue_t *queue,
+ int exitward, const cell_t *cell,
+ int wide_circ_ids, int use_stats);
+
+void append_cell_to_circuit_queue(circuit_t *circ, channel_t *chan,
+ cell_t *cell, cell_direction_t direction,
+ streamid_t fromstream);
+
+void destroy_cell_queue_init(destroy_cell_queue_t *queue);
+void destroy_cell_queue_clear(destroy_cell_queue_t *queue);
+void destroy_cell_queue_append(destroy_cell_queue_t *queue,
+ circid_t circid,
+ uint8_t reason);
+
+void channel_unlink_all_circuits(channel_t *chan, smartlist_t *detached_out);
+MOCK_DECL(int, channel_flush_from_first_active_circuit,
+ (channel_t *chan, int max));
+void update_circuit_on_cmux_(circuit_t *circ, cell_direction_t direction,
+ const char *file, int lineno);
+#define update_circuit_on_cmux(circ, direction) \
+ update_circuit_on_cmux_((circ), (direction), SHORT_FILE__, __LINE__)
+
+int append_address_to_payload(uint8_t *payload_out, const tor_addr_t *addr);
+const uint8_t *decode_address_from_payload(tor_addr_t *addr_out,
+ const uint8_t *payload,
+ int payload_len);
+void circuit_clear_cell_queue(circuit_t *circ, channel_t *chan);
+
+void stream_choice_seed_weak_rng(void);
+
+circid_t packed_cell_get_circid(const packed_cell_t *cell, int wide_circ_ids);
+
+#ifdef RELAY_PRIVATE
+STATIC int connected_cell_parse(const relay_header_t *rh, const cell_t *cell,
+ tor_addr_t *addr_out, int *ttl_out);
+/** An address-and-ttl tuple as yielded by resolved_cell_parse */
+typedef struct address_ttl_s {
+ tor_addr_t addr;
+ char *hostname;
+ int ttl;
+} address_ttl_t;
+STATIC void address_ttl_free_(address_ttl_t *addr);
+#define address_ttl_free(addr) \
+ FREE_AND_NULL(address_ttl_t, address_ttl_free_, (addr))
+STATIC int resolved_cell_parse(const cell_t *cell, const relay_header_t *rh,
+ smartlist_t *addresses_out, int *errcode_out);
+STATIC int connection_edge_process_resolved_cell(edge_connection_t *conn,
+ const cell_t *cell,
+ const relay_header_t *rh);
+STATIC packed_cell_t *packed_cell_new(void);
+STATIC packed_cell_t *cell_queue_pop(cell_queue_t *queue);
+STATIC destroy_cell_t *destroy_cell_queue_pop(destroy_cell_queue_t *queue);
+STATIC int cell_queues_check_size(void);
+STATIC int connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
+ edge_connection_t *conn,
+ crypt_path_t *layer_hint);
+
+#endif /* defined(RELAY_PRIVATE) */
+
+#endif /* !defined(TOR_RELAY_H) */
+
diff --git a/src/core/or/relay_crypto_st.h b/src/core/or/relay_crypto_st.h
new file mode 100644
index 0000000000..f186e182f0
--- /dev/null
+++ b/src/core/or/relay_crypto_st.h
@@ -0,0 +1,31 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef RELAY_CRYPTO_ST_H
+#define RELAY_CRYPTO_ST_H
+
+#define crypto_cipher_t aes_cnt_cipher
+struct crypto_cipher_t;
+struct crypto_digest_t;
+
+struct relay_crypto_t {
+ /* crypto environments */
+ /** Encryption key and counter for cells heading towards the OR at this
+ * step. */
+ struct crypto_cipher_t *f_crypto;
+ /** Encryption key and counter for cells heading back from the OR at this
+ * step. */
+ struct crypto_cipher_t *b_crypto;
+
+ /** Digest state for cells heading towards the OR at this step. */
+ struct crypto_digest_t *f_digest; /* for integrity checking */
+ /** Digest state for cells heading away from the OR at this step. */
+ struct crypto_digest_t *b_digest;
+
+};
+#undef crypto_cipher_t
+
+#endif
diff --git a/src/core/or/scheduler.c b/src/core/or/scheduler.c
new file mode 100644
index 0000000000..b8eaca3dca
--- /dev/null
+++ b/src/core/or/scheduler.c
@@ -0,0 +1,768 @@
+/* Copyright (c) 2013-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#include "or/or.h"
+#include "or/config.h"
+
+#include "lib/evloop/compat_libevent.h"
+#define SCHEDULER_PRIVATE_
+#define SCHEDULER_KIST_PRIVATE
+#include "or/scheduler.h"
+#include "or/main.h"
+#include "lib/container/buffers.h"
+#define TOR_CHANNEL_INTERNAL_
+#include "or/channeltls.h"
+#include "lib/evloop/compat_libevent.h"
+
+#include "or/or_connection_st.h"
+
+/**
+ * \file scheduler.c
+ * \brief Channel scheduling system: decides which channels should send and
+ * receive when.
+ *
+ * This module implements the global/common parts of the scheduling system,
+ * which decides which channels get to send cells on their circuits, and when.
+ *
+ * Terms:
+ * - "Scheduling system": the collection of scheduler*.{h,c} files and their
+ * aggregate behavior.
+ * - "Scheduler implementation": a scheduler_t. The scheduling system has one
+ * active scheduling implementation at a time.
+ *
+ * In this file you will find state that any scheduler implementation can have
+ * access to as well as the functions the rest of Tor uses to interact with the
+ * scheduling system.
+ *
+ * The earliest versions of Tor approximated a kind of round-robin system
+ * among active connections, but only approximated it. It would only consider
+ * one connection (roughly equal to a channel in today's terms) at a time, and
+ * thus could only prioritize circuits against others on the same connection.
+ *
+ * Then in response to the KIST paper[0], Tor implemented a global
+ * circuit scheduler. It was supposed to prioritize circuits across many
+ * channels, but wasn't effective. It is preserved in scheduler_vanilla.c.
+ *
+ * [0]: http://www.robgjansen.com/publications/kist-sec2014.pdf
+ *
+ * Then we actually got around to implementing KIST for real. We decided to
+ * modularize the scheduler so new ones can be implemented. You can find KIST
+ * in scheduler_kist.c.
+ *
+ * Channels have one of four scheduling states based on whether or not they
+ * have cells to send and whether or not they are able to send.
+ *
+ * <ol>
+ * <li>
+ * Not open for writes, no cells to send.
+ * <ul><li> Not much to do here, and the channel will have scheduler_state
+ * == SCHED_CHAN_IDLE
+ * <li> Transitions from:
+ * <ul>
+ * <li>Open for writes/has cells by simultaneously draining all circuit
+ * queues and filling the output buffer.
+ * </ul>
+ * <li> Transitions to:
+ * <ul>
+ * <li> Not open for writes/has cells by arrival of cells on an attached
+ * circuit (this would be driven from append_cell_to_circuit_queue())
+ * <li> Open for writes/no cells by a channel type specific path;
+ * driven from connection_or_flushed_some() for channel_tls_t.
+ * </ul>
+ * </ul>
+ *
+ * <li> Open for writes, no cells to send
+ * <ul>
+ * <li>Not much here either; this will be the state an idle but open
+ * channel can be expected to settle in. It will have scheduler_state
+ * == SCHED_CHAN_WAITING_FOR_CELLS
+ * <li> Transitions from:
+ * <ul>
+ * <li>Not open for writes/no cells by flushing some of the output
+ * buffer.
+ * <li>Open for writes/has cells by the scheduler moving cells from
+ * circuit queues to channel output queue, but not having enough
+ * to fill the output queue.
+ * </ul>
+ * <li> Transitions to:
+ * <ul>
+ * <li>Open for writes/has cells by arrival of new cells on an attached
+ * circuit, in append_cell_to_circuit_queue()
+ * </ul>
+ * </ul>
+ *
+ * <li>Not open for writes, cells to send
+ * <ul>
+ * <li>This is the state of a busy circuit limited by output bandwidth;
+ * cells have piled up in the circuit queues waiting to be relayed.
+ * The channel will have scheduler_state == SCHED_CHAN_WAITING_TO_WRITE.
+ * <li> Transitions from:
+ * <ul>
+ * <li>Not open for writes/no cells by arrival of cells on an attached
+ * circuit
+ * <li>Open for writes/has cells by filling an output buffer without
+ * draining all cells from attached circuits
+ * </ul>
+ * <li> Transitions to:
+ * <ul>
+ * <li>Opens for writes/has cells by draining some of the output buffer
+ * via the connection_or_flushed_some() path (for channel_tls_t).
+ * </ul>
+ * </ul>
+ *
+ * <li>Open for writes, cells to send
+ * <ul>
+ * <li>This connection is ready to relay some cells and waiting for
+ * the scheduler to choose it. The channel will have scheduler_state ==
+ * SCHED_CHAN_PENDING.
+ * <li>Transitions from:
+ * <ul>
+ * <li>Not open for writes/has cells by the connection_or_flushed_some()
+ * path
+ * <li>Open for writes/no cells by the append_cell_to_circuit_queue()
+ * path
+ * </ul>
+ * <li> Transitions to:
+ * <ul>
+ * <li>Not open for writes/no cells by draining all circuit queues and
+ * simultaneously filling the output buffer.
+ * <li>Not open for writes/has cells by writing enough cells to fill the
+ * output buffer
+ * <li>Open for writes/no cells by draining all attached circuit queues
+ * without also filling the output buffer
+ * </ul>
+ * </ul>
+ * </ol>
+ *
+ * Other event-driven parts of the code move channels between these scheduling
+ * states by calling scheduler functions. The scheduling system builds up a
+ * list of channels in the SCHED_CHAN_PENDING state that the scheduler
+ * implementation should then use when it runs. Scheduling implementations need
+ * to properly update channel states during their scheduler_t->run() function
+ * as that is the only opportunity for channels to move from SCHED_CHAN_PENDING
+ * to any other state.
+ *
+ * The remainder of this file is a small amount of state that any scheduler
+ * implementation should have access to, and the functions the rest of Tor uses
+ * to interact with the scheduling system.
+ */
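+
+/* In summary, the four channel scheduler states described above are:
+ *
+ *   can't write, no cells  -> SCHED_CHAN_IDLE
+ *   can write,   no cells  -> SCHED_CHAN_WAITING_FOR_CELLS
+ *   can't write, has cells -> SCHED_CHAN_WAITING_TO_WRITE
+ *   can write,   has cells -> SCHED_CHAN_PENDING
+ */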
+
+/*****************************************************************************
+ * Scheduling system state
+ *
+ * State that can be accessed from any scheduler implementation (but not
+ * outside the scheduling system)
+ *****************************************************************************/
+
+/** The currently active scheduler implementation. */
+STATIC const scheduler_t *the_scheduler;
+
+/**
+ * We keep a list of channels that are pending - i.e., that have cells to
+ * send and are able to write them out. The enum scheduler_state in
+ * channel_t is reserved for our use.
+ *
+ * Priority queue of channels that can write and have cells (pending work)
+ */
+STATIC smartlist_t *channels_pending = NULL;
+
+/**
+ * This event runs the scheduler from its callback, and is manually
+ * activated whenever a channel enters the "open for writes / has cells to
+ * send" state.
+ */
+STATIC struct mainloop_event_t *run_sched_ev = NULL;
+
+static int have_logged_kist_suddenly_disabled = 0;
+
+/*****************************************************************************
+ * Scheduling system static function definitions
+ *
+ * Functions that can only be accessed from this file.
+ *****************************************************************************/
+
+/** Return a human readable string for the given scheduler type. */
+static const char *
+get_scheduler_type_string(scheduler_types_t type)
+{
+ switch (type) {
+ case SCHEDULER_VANILLA:
+ return "Vanilla";
+ case SCHEDULER_KIST:
+ return "KIST";
+ case SCHEDULER_KIST_LITE:
+ return "KISTLite";
+ case SCHEDULER_NONE:
+ /* fallthrough */
+ default:
+ tor_assert_unreached();
+ return "(N/A)";
+ }
+}
+
+/**
+ * Scheduler event callback; this should get triggered once per event loop
+ * iteration if any scheduling work was created during that iteration.
+ */
+static void
+scheduler_evt_callback(mainloop_event_t *event, void *arg)
+{
+ (void) event;
+ (void) arg;
+
+ log_debug(LD_SCHED, "Scheduler event callback called");
+
+ /* Run the scheduler. This is a mandatory function. */
+
+ /* We might as well assert on this. If this function doesn't exist, no cells
+ * are getting scheduled. Things are very broken. scheduler_t says the run()
+ * function is mandatory. */
+ tor_assert(the_scheduler->run);
+ the_scheduler->run();
+
+ /* Schedule itself back in if it has more work. */
+
+ /* Again, might as well assert on this mandatory scheduler_t function. If it
+ * doesn't exist, there's no way to tell libevent to run the scheduler again
+ * in the future. */
+ tor_assert(the_scheduler->schedule);
+ the_scheduler->schedule();
+}
+
+/** Using the global options, select the scheduler we should be using. */
+static void
+select_scheduler(void)
+{
+ scheduler_t *new_scheduler = NULL;
+
+#ifdef TOR_UNIT_TESTS
+  /* This is hella annoying to set in the options for every test that passes
+   * through the scheduler, and there are many; so if we don't explicitly
+   * have a list of types set, just use the vanilla one. */
+ if (get_options()->SchedulerTypes_ == NULL) {
+ the_scheduler = get_vanilla_scheduler();
+ return;
+ }
+#endif /* defined(TOR_UNIT_TESTS) */
+
+  /* This list is ordered by priority: the first entry has the highest
+   * priority. Thus, as soon as we find a scheduler type that we can use, we
+   * use it and stop. */
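+  /* For example, with a hypothetical "Schedulers KIST,Vanilla" setting, we
+   * try KIST first and fall through to Vanilla only if
+   * scheduler_can_use_kist() reports that KIST is unusable on this system. */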
+ SMARTLIST_FOREACH_BEGIN(get_options()->SchedulerTypes_, int *, type) {
+ switch (*type) {
+ case SCHEDULER_VANILLA:
+ new_scheduler = get_vanilla_scheduler();
+ goto end;
+ case SCHEDULER_KIST:
+ if (!scheduler_can_use_kist()) {
+#ifdef HAVE_KIST_SUPPORT
+ if (!have_logged_kist_suddenly_disabled) {
+ /* We should only log this once in most cases. If it was the kernel
+ * losing support for kist that caused scheduler_can_use_kist() to
+ * return false, then this flag makes sure we only log this message
+ * once. If it was the consensus that switched from "yes use kist"
+ * to "no don't use kist", then we still set the flag so we log
+ * once, but we unset the flag elsewhere if we ever can_use_kist()
+ * again.
+ */
+ have_logged_kist_suddenly_disabled = 1;
+          log_notice(LD_SCHED, "Scheduler type KIST has been disabled by "
+                     "the consensus, or the kernel lacks support for it.");
+ }
+#else /* !(defined(HAVE_KIST_SUPPORT)) */
+ log_info(LD_SCHED, "Scheduler type KIST not built in");
+#endif /* defined(HAVE_KIST_SUPPORT) */
+ continue;
+ }
+ /* This flag will only get set in one of two cases:
+ * 1 - the kernel lost support for kist. In that case, we don't expect to
+ * ever end up here
+ * 2 - the consensus went from "yes use kist" to "no don't use kist".
+ * We might end up here if the consensus changes back to "yes", in which
+ * case we might want to warn the user again if it goes back to "no"
+ * yet again. Thus we unset the flag */
+ have_logged_kist_suddenly_disabled = 0;
+ new_scheduler = get_kist_scheduler();
+ scheduler_kist_set_full_mode();
+ goto end;
+ case SCHEDULER_KIST_LITE:
+ new_scheduler = get_kist_scheduler();
+ scheduler_kist_set_lite_mode();
+ goto end;
+ case SCHEDULER_NONE:
+ /* fallthrough */
+ default:
+ /* Our option validation should have caught this. */
+ tor_assert_unreached();
+ }
+ } SMARTLIST_FOREACH_END(type);
+
+ end:
+ if (new_scheduler == NULL) {
+    log_err(LD_SCHED, "Tor was unable to select a scheduler type. Please "
+                      "make sure Schedulers is configured with scheduler "
+                      "types that this Tor supports.");
+    /* We weren't able to choose a scheduler, which means that none of the
+     * ones set in Schedulers are supported or usable. We respect the user's
+     * configured wishes rather than doing a sneaky fallback to some other
+     * scheduler. Because this can change at runtime, we have to stop Tor
+     * right now. */
+ exit(1); // XXXX bad exit
+ }
+
+ /* Set the chosen scheduler. */
+ the_scheduler = new_scheduler;
+}
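+
+/* For illustration (example configuration, not part of this change): with
+ * "Schedulers KIST,KISTLite,Vanilla" in the torrc, SchedulerTypes_ holds
+ * {SCHEDULER_KIST, SCHEDULER_KIST_LITE, SCHEDULER_VANILLA} in that order.
+ * If scheduler_can_use_kist() says no, the loop above falls through to
+ * KISTLite, which needs no kernel support; Vanilla is the last resort. */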
+
+/**
+ * Helper function called from a few different places. It changes the
+ * scheduler implementation, if necessary. And if it did, it then tells the
+ * old one to free its state and the new one to initialize.
+ */
+static void
+set_scheduler(void)
+{
+ const scheduler_t *old_scheduler = the_scheduler;
+ scheduler_types_t old_scheduler_type = SCHEDULER_NONE;
+
+ /* We keep track of the type in order to log only if the type switched. We
+ * can't just use the scheduler pointers because KIST and KISTLite share the
+ * same object. */
+ if (the_scheduler) {
+ old_scheduler_type = the_scheduler->type;
+ }
+
+ /* From the options, select the scheduler type to set. */
+ select_scheduler();
+ tor_assert(the_scheduler);
+
+  /* We compare the pointers since the old and the new scheduler may share
+   * the same scheduler object, as is the case with KIST and KISTLite. */
+ if (old_scheduler != the_scheduler) {
+ /* Allow the old scheduler to clean up, if needed. */
+ if (old_scheduler && old_scheduler->free_all) {
+ old_scheduler->free_all();
+ }
+
+ /* Initialize the new scheduler. */
+ if (the_scheduler->init) {
+ the_scheduler->init();
+ }
+ }
+
+  /* Finally, we log at notice level if we switched schedulers. We use the
+   * type in case two schedulers share a scheduler object. */
+ if (old_scheduler_type != the_scheduler->type) {
+ log_notice(LD_CONFIG, "Scheduler type %s has been enabled.",
+ get_scheduler_type_string(the_scheduler->type));
+ }
+}
+
+/*****************************************************************************
+ * Scheduling system private function definitions
+ *
+ * Functions that can only be accessed from scheduler*.c
+ *****************************************************************************/
+
+/** Return a human-readable string for the given channel scheduler state. */
+const char *
+get_scheduler_state_string(int scheduler_state)
+{
+ switch (scheduler_state) {
+ case SCHED_CHAN_IDLE:
+ return "IDLE";
+ case SCHED_CHAN_WAITING_FOR_CELLS:
+ return "WAITING_FOR_CELLS";
+ case SCHED_CHAN_WAITING_TO_WRITE:
+ return "WAITING_TO_WRITE";
+ case SCHED_CHAN_PENDING:
+ return "PENDING";
+ default:
+ return "(invalid)";
+ }
+}
+
+/** Helper that logs channel scheduler_state changes. Use this instead of
+ * setting scheduler_state directly. */
+void
+scheduler_set_channel_state(channel_t *chan, int new_state)
+{
+ log_debug(LD_SCHED, "chan %" PRIu64 " changed from scheduler state %s to %s",
+ chan->global_identifier,
+ get_scheduler_state_string(chan->scheduler_state),
+ get_scheduler_state_string(new_state));
+ chan->scheduler_state = new_state;
+}
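+
+/* As a summary of the transitions implemented below, a channel's scheduler
+ * state encodes two facts, "has cells to send" and "can write to its
+ * outbuf":
+ *
+ *                   can write            cannot write
+ *   has cells       PENDING              WAITING_TO_WRITE
+ *   no cells        WAITING_FOR_CELLS    IDLE (or untracked)
+ */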
+
+/** Return the pending channel list. */
+smartlist_t *
+get_channels_pending(void)
+{
+ return channels_pending;
+}
+
+/** Comparison function to use when sorting pending channels. */
+MOCK_IMPL(int,
+scheduler_compare_channels, (const void *c1_v, const void *c2_v))
+{
+ const channel_t *c1 = NULL, *c2 = NULL;
+ /* These are a workaround for -Wbad-function-cast throwing a fit */
+ const circuitmux_policy_t *p1, *p2;
+ uintptr_t p1_i, p2_i;
+
+ tor_assert(c1_v);
+ tor_assert(c2_v);
+
+ c1 = (const channel_t *)(c1_v);
+ c2 = (const channel_t *)(c2_v);
+
+ if (c1 != c2) {
+ if (circuitmux_get_policy(c1->cmux) ==
+ circuitmux_get_policy(c2->cmux)) {
+ /* Same cmux policy, so use the mux comparison */
+ return circuitmux_compare_muxes(c1->cmux, c2->cmux);
+ } else {
+ /*
+ * Different policies; not important to get this edge case perfect
+ * because the current code never actually gives different channels
+ * different cmux policies anyway. Just use this arbitrary but
+ * definite choice.
+ */
+ p1 = circuitmux_get_policy(c1->cmux);
+ p2 = circuitmux_get_policy(c2->cmux);
+ p1_i = (uintptr_t)p1;
+ p2_i = (uintptr_t)p2;
+
+ return (p1_i < p2_i) ? -1 : 1;
+ }
+ } else {
+ /* c1 == c2, so always equal */
+ return 0;
+ }
+}
+
+/*****************************************************************************
+ * Scheduling system global functions
+ *
+ * Functions that can be accessed from anywhere in Tor.
+ *****************************************************************************/
+
+/**
+ * This is how the scheduling system is notified of Tor's configuration
+ * changing. For example: a SIGHUP was issued.
+ */
+void
+scheduler_conf_changed(void)
+{
+ /* Let the scheduler decide what it should do. */
+ set_scheduler();
+
+ /* Then tell the (possibly new) scheduler that we have new options. */
+ if (the_scheduler->on_new_options) {
+ the_scheduler->on_new_options();
+ }
+}
+
+/**
+ * Whenever we get a new consensus, this function is called.
+ */
+void
+scheduler_notify_networkstatus_changed(void)
+{
+ /* Maybe the consensus param made us change the scheduler. */
+ set_scheduler();
+
+ /* Then tell the (possibly new) scheduler that we have a new consensus */
+ if (the_scheduler->on_new_consensus) {
+ the_scheduler->on_new_consensus();
+ }
+}
+
+/**
+ * Free everything scheduling-related from main.c. Note this is only called
+ * when Tor is shutting down, while scheduler_t->free_all() is called both when
+ * Tor is shutting down and when we are switching schedulers.
+ */
+void
+scheduler_free_all(void)
+{
+ log_debug(LD_SCHED, "Shutting down scheduler");
+
+ if (run_sched_ev) {
+ mainloop_event_free(run_sched_ev);
+ run_sched_ev = NULL;
+ }
+
+ if (channels_pending) {
+ /* We don't have ownership of the objects in this list. */
+ smartlist_free(channels_pending);
+ channels_pending = NULL;
+ }
+
+ if (the_scheduler && the_scheduler->free_all) {
+ the_scheduler->free_all();
+ }
+ the_scheduler = NULL;
+}
+
+/** Mark a channel as no longer ready to accept writes. */
+MOCK_IMPL(void,
+scheduler_channel_doesnt_want_writes,(channel_t *chan))
+{
+ IF_BUG_ONCE(!chan) {
+ return;
+ }
+ IF_BUG_ONCE(!channels_pending) {
+ return;
+ }
+
+ /* If it's already in pending, we can put it in waiting_to_write */
+ if (chan->scheduler_state == SCHED_CHAN_PENDING) {
+ /*
+ * It's in channels_pending, so it shouldn't be in any of
+ * the other lists. It can't write any more, so it goes to
+ * channels_waiting_to_write.
+ */
+ smartlist_pqueue_remove(channels_pending,
+ scheduler_compare_channels,
+ offsetof(channel_t, sched_heap_idx),
+ chan);
+ scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_TO_WRITE);
+ } else {
+ /*
+ * It's not in pending, so it can't become waiting_to_write; it's
+ * either not in any of the lists (nothing to do) or it's already in
+ * waiting_for_cells (remove it, can't write any more).
+ */
+ if (chan->scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS) {
+ scheduler_set_channel_state(chan, SCHED_CHAN_IDLE);
+ }
+ }
+}
+
+/** Mark a channel as having waiting cells. */
+MOCK_IMPL(void,
+scheduler_channel_has_waiting_cells,(channel_t *chan))
+{
+ IF_BUG_ONCE(!chan) {
+ return;
+ }
+ IF_BUG_ONCE(!channels_pending) {
+ return;
+ }
+
+ /* First, check if it's also writeable */
+ if (chan->scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS) {
+ /*
+ * It's in channels_waiting_for_cells, so it shouldn't be in any of
+ * the other lists. It has waiting cells now, so it goes to
+ * channels_pending.
+ */
+ scheduler_set_channel_state(chan, SCHED_CHAN_PENDING);
+ if (!SCHED_BUG(chan->sched_heap_idx != -1, chan)) {
+ smartlist_pqueue_add(channels_pending,
+ scheduler_compare_channels,
+ offsetof(channel_t, sched_heap_idx),
+ chan);
+ }
+ /* If we made a channel pending, we potentially have scheduling work to
+ * do. */
+ the_scheduler->schedule();
+ } else {
+ /*
+ * It's not in waiting_for_cells, so it can't become pending; it's
+ * either not in any of the lists (we add it to waiting_to_write)
+ * or it's already in waiting_to_write or pending (we do nothing)
+ */
+ if (!(chan->scheduler_state == SCHED_CHAN_WAITING_TO_WRITE ||
+ chan->scheduler_state == SCHED_CHAN_PENDING)) {
+ scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_TO_WRITE);
+ }
+ }
+}
+
+/** Add the scheduler event to the set of pending events with next_run being
+ * the longest time libevent should wait before triggering the event. */
+void
+scheduler_ev_add(const struct timeval *next_run)
+{
+ tor_assert(run_sched_ev);
+ tor_assert(next_run);
+ if (BUG(mainloop_event_schedule(run_sched_ev, next_run) < 0)) {
+ log_warn(LD_SCHED, "Adding to libevent failed. Next run time was set to: "
+ "%ld.%06ld", next_run->tv_sec, (long)next_run->tv_usec);
+ return;
+ }
+}
+
+/** Make the scheduler event active. */
+void
+scheduler_ev_active(void)
+{
+ tor_assert(run_sched_ev);
+ mainloop_event_activate(run_sched_ev);
+}
+
+/**
+ * Initialize everything scheduling-related from config.c. Note this is only
+ * called when Tor is starting up, while scheduler_t->init() is called both
+ * when Tor is starting up and when we are switching schedulers.
+ */
+void
+scheduler_init(void)
+{
+ log_debug(LD_SCHED, "Initting scheduler");
+
+ // Two '!' because we really do want to check if the pointer is non-NULL
+ IF_BUG_ONCE(!!run_sched_ev) {
+    log_warn(LD_SCHED, "We should not already have a libevent scheduler "
+             "event. I'll clean the old one up, but this is odd.");
+ mainloop_event_free(run_sched_ev);
+ run_sched_ev = NULL;
+ }
+ run_sched_ev = mainloop_event_new(scheduler_evt_callback, NULL);
+ channels_pending = smartlist_new();
+
+ set_scheduler();
+}
+
+/**
+ * If a channel is going away, this is how the scheduling system is informed
+ * so it can do any freeing necessary. This ultimately calls
+ * scheduler_t->on_channel_free() so the current scheduler can release any
+ * state specific to this channel.
+ */
+MOCK_IMPL(void,
+scheduler_release_channel,(channel_t *chan))
+{
+ IF_BUG_ONCE(!chan) {
+ return;
+ }
+ IF_BUG_ONCE(!channels_pending) {
+ return;
+ }
+
+ /* Try to remove the channel from the pending list regardless of its
+ * scheduler state. We can release a channel in many places in the tor code
+ * so we can't rely on the channel state (PENDING) to remove it from the
+ * list.
+ *
+ * For instance, the channel can change state from OPEN to CLOSING while
+ * being handled in the scheduler loop leading to the channel being in
+ * PENDING state but not in the pending list. Furthermore, we release the
+ * channel when it changes state to close and a second time when we free it.
+ * Not ideal at all but for now that is the way it is. */
+ if (chan->sched_heap_idx != -1) {
+ smartlist_pqueue_remove(channels_pending,
+ scheduler_compare_channels,
+ offsetof(channel_t, sched_heap_idx),
+ chan);
+ }
+
+ if (the_scheduler->on_channel_free) {
+ the_scheduler->on_channel_free(chan);
+ }
+ scheduler_set_channel_state(chan, SCHED_CHAN_IDLE);
+}
+
+/** Mark a channel as ready to accept writes. */
+void
+scheduler_channel_wants_writes(channel_t *chan)
+{
+ IF_BUG_ONCE(!chan) {
+ return;
+ }
+ IF_BUG_ONCE(!channels_pending) {
+ return;
+ }
+
+ /* If it's already in waiting_to_write, we can put it in pending */
+ if (chan->scheduler_state == SCHED_CHAN_WAITING_TO_WRITE) {
+ /*
+ * It can write now, so it goes to channels_pending.
+ */
+ scheduler_set_channel_state(chan, SCHED_CHAN_PENDING);
+ if (!SCHED_BUG(chan->sched_heap_idx != -1, chan)) {
+ smartlist_pqueue_add(channels_pending,
+ scheduler_compare_channels,
+ offsetof(channel_t, sched_heap_idx),
+ chan);
+ }
+ /* We just made a channel pending, we have scheduling work to do. */
+ the_scheduler->schedule();
+ } else {
+ /*
+ * It's not in SCHED_CHAN_WAITING_TO_WRITE, so it can't become pending;
+ * it's either idle and goes to WAITING_FOR_CELLS, or it's a no-op.
+ */
+ if (!(chan->scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS ||
+ chan->scheduler_state == SCHED_CHAN_PENDING)) {
+ scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_FOR_CELLS);
+ }
+ }
+}
+
+/* Log a warning about the given channel, with extra scheduler context. This
+ * is used by SCHED_BUG() in order to extract as much information as we can
+ * when we hit a bug. Channel chan can be NULL. */
+void
+scheduler_bug_occurred(const channel_t *chan)
+{
+ char buf[128];
+
+ if (chan != NULL) {
+ const size_t outbuf_len =
+ buf_datalen(TO_CONN(BASE_CHAN_TO_TLS((channel_t *) chan)->conn)->outbuf);
+ tor_snprintf(buf, sizeof(buf),
+ "Channel %" PRIu64 " in state %s and scheduler state %s."
+ " Num cells on cmux: %d. Connection outbuf len: %lu.",
+ chan->global_identifier,
+ channel_state_to_string(chan->state),
+ get_scheduler_state_string(chan->scheduler_state),
+ circuitmux_num_cells(chan->cmux),
+ (unsigned long)outbuf_len);
+ }
+
+ {
+ char *msg;
+ /* Rate limit every 60 seconds. If we start seeing this every 60 sec, we
+ * know something is stuck/wrong. It *should* be loud but not too much. */
+ static ratelim_t rlimit = RATELIM_INIT(60);
+ if ((msg = rate_limit_log(&rlimit, approx_time()))) {
+ log_warn(LD_BUG, "%s Num pending channels: %d. "
+ "Channel in pending list: %s.%s",
+ (chan != NULL) ? buf : "No channel in bug context.",
+ smartlist_len(channels_pending),
+ (smartlist_pos(channels_pending, chan) == -1) ? "no" : "yes",
+ msg);
+ tor_free(msg);
+ }
+ }
+}
+
+#ifdef TOR_UNIT_TESTS
+
+/**
+ * Notify scheduler that a channel's queue position may have changed.
+ */
+void
+scheduler_touch_channel(channel_t *chan)
+{
+ IF_BUG_ONCE(!chan) {
+ return;
+ }
+
+ if (chan->scheduler_state == SCHED_CHAN_PENDING) {
+ /* Remove and re-add it */
+ smartlist_pqueue_remove(channels_pending,
+ scheduler_compare_channels,
+ offsetof(channel_t, sched_heap_idx),
+ chan);
+ smartlist_pqueue_add(channels_pending,
+ scheduler_compare_channels,
+ offsetof(channel_t, sched_heap_idx),
+ chan);
+ }
+ /* else no-op, since it isn't in the queue */
+}
+
+#endif /* defined(TOR_UNIT_TESTS) */
diff --git a/src/core/or/scheduler.h b/src/core/or/scheduler.h
new file mode 100644
index 0000000000..856923f9a7
--- /dev/null
+++ b/src/core/or/scheduler.h
@@ -0,0 +1,218 @@
+/* Copyright (c) 2017-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file scheduler.h
+ * \brief Header file for scheduler*.c
+ **/
+
+#ifndef TOR_SCHEDULER_H
+#define TOR_SCHEDULER_H
+
+#include "or/or.h"
+#include "or/channel.h"
+#include "lib/testsupport/testsupport.h"
+
+/** Scheduler types. We build an ordered list of these values from the
+ * parsed strings in Schedulers, so that we can quickly select a scheduler
+ * at any time without re-parsing strings. */
+typedef enum {
+ SCHEDULER_NONE = -1,
+ SCHEDULER_VANILLA = 1,
+ SCHEDULER_KIST = 2,
+ SCHEDULER_KIST_LITE = 3,
+} scheduler_types_t;
+
+/**
+ * A scheduler implementation is a collection of function pointers. If you
+ * would like to add a new scheduler called foo, create scheduler_foo.c,
+ * implement at least the mandatory ones, and implement get_foo_scheduler()
+ * that returns a complete scheduler_t for your foo scheduler. See
+ * scheduler_kist.c for an example.
+ *
+ * These function pointers SHOULD NOT be used anywhere outside of the
+ * scheduling source files. The rest of Tor should communicate with the
+ * scheduling system through the functions near the bottom of this file, and
+ * those functions will call into the current scheduler implementation as
+ * necessary.
+ *
+ * If your scheduler doesn't need to implement something (for example: it
+ * doesn't create any state for itself, thus it has nothing to free when Tor
+ * is shutting down), then set that function pointer to NULL.
+ */
+typedef struct scheduler_s {
+ /* Scheduler type. This is used for logging when the scheduler is switched
+ * during runtime. */
+ scheduler_types_t type;
+
+ /* (Optional) To be called when we want to prepare a scheduler for use.
+ * Perhaps Tor just started and we are the lucky chosen scheduler, or
+ * perhaps Tor is switching to this scheduler. No matter the case, this is
+ * where we would prepare any state and initialize parameters. You might
+ * think of this as the opposite of free_all(). */
+ void (*init)(void);
+
+ /* (Optional) To be called when we want to tell the scheduler to delete all
+ * of its state (if any). Perhaps Tor is shutting down or perhaps we are
+ * switching schedulers. */
+ void (*free_all)(void);
+
+ /* (Mandatory) Libevent controls the main event loop in Tor, and this is
+ * where we register with libevent the next execution of run_sched_ev [which
+ * ultimately calls run()]. */
+ void (*schedule)(void);
+
+ /* (Mandatory) This is the heart of a scheduler! This is where the
+ * excitement happens! Here libevent has given us the chance to execute, and
+ * we should do whatever we need to do in order to move some cells from
+ * their circuit queues to output buffers in an intelligent manner. We
+ * should do this quickly. When we are done, we'll try to schedule() ourself
+ * if more work needs to be done to setup the next scheduling run. */
+ void (*run)(void);
+
+  /*
+   * Hooks for external events that are not generated by the scheduler
+   * itself but that can influence it.
+   */
+
+ /* (Optional) To be called whenever Tor finds out about a new consensus.
+ * First the scheduling system as a whole will react to the new consensus
+ * and change the scheduler if needed. After that, the current scheduler
+ * (which might be new) will call this so it has the chance to react to the
+ * new consensus too. If there's a consensus parameter that your scheduler
+ * wants to keep an eye on, this is where you should check for it. */
+ void (*on_new_consensus)(void);
+
+ /* (Optional) To be called when a channel is being freed. Sometimes channels
+ * go away (for example: the relay on the other end is shutting down). If
+ * the scheduler keeps any channel-specific state and has memory to free
+ * when channels go away, implement this and free it here. */
+ void (*on_channel_free)(const channel_t *);
+
+ /* (Optional) To be called whenever Tor is reloading configuration options.
+ * For example: SIGHUP was issued and Tor is rereading its torrc. A
+ * scheduler should use this as an opportunity to parse and cache torrc
+ * options so that it doesn't have to call get_options() all the time. */
+ void (*on_new_options)(void);
+} scheduler_t;
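+
+/* For illustration, a minimal hypothetical scheduler_foo.c (the names
+ * foo_schedule, foo_run and get_foo_scheduler are invented for this sketch
+ * and do not exist in the tree) could look like:
+ *
+ *   static void foo_schedule(void) { scheduler_ev_active(); }
+ *   static void foo_run(void) { ... move some cells ... }
+ *
+ *   static scheduler_t foo_scheduler = {
+ *     .type = SCHEDULER_VANILLA, // a real one would add its own enum value
+ *     .schedule = foo_schedule,  // mandatory
+ *     .run = foo_run,            // mandatory; unset members stay NULL
+ *   };
+ *
+ *   scheduler_t *
+ *   get_foo_scheduler(void)
+ *   {
+ *     return &foo_scheduler;
+ *   }
+ */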
+
+/*****************************************************************************
+ * Globally visible scheduler variables/values
+ *
+ * These are variables/constants that all of Tor should be able to see.
+ *****************************************************************************/
+
+/* Default interval that KIST runs (in ms). */
+#define KIST_SCHED_RUN_INTERVAL_DEFAULT 10
+/* Minimum interval that KIST runs (in ms). A value of 0 disables KIST. */
+#define KIST_SCHED_RUN_INTERVAL_MIN 0
+/* Maximum interval that KIST runs (in ms). */
+#define KIST_SCHED_RUN_INTERVAL_MAX 100
+
+/*****************************************************************************
+ * Globally visible scheduler functions
+ *
+ * These functions are how the rest of Tor communicates with the scheduling
+ * system.
+ *****************************************************************************/
+
+void scheduler_init(void);
+void scheduler_free_all(void);
+void scheduler_conf_changed(void);
+void scheduler_notify_networkstatus_changed(void);
+MOCK_DECL(void, scheduler_release_channel, (channel_t *chan));
+
+/*
+ * Ways for a channel to interact with the scheduling system. A channel only
+ * really knows (i) whether or not it has cells it wants to send, and
+ * (ii) whether or not it would like to write.
+ */
+void scheduler_channel_wants_writes(channel_t *chan);
+MOCK_DECL(void, scheduler_channel_doesnt_want_writes, (channel_t *chan));
+MOCK_DECL(void, scheduler_channel_has_waiting_cells, (channel_t *chan));
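+
+/* For illustration, a channel would typically drive the scheduler with the
+ * calls above roughly like this (sketch only):
+ *
+ *   scheduler_channel_has_waiting_cells(chan);   // a cell got queued
+ *   scheduler_channel_wants_writes(chan);        // outbuf drained; can write
+ *   scheduler_channel_doesnt_want_writes(chan);  // outbuf is full for now
+ */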
+
+/*****************************************************************************
+ * Private scheduler functions
+ *
+ * These functions are only visible to the scheduling system, the current
+ * scheduler implementation, and tests.
+ *****************************************************************************/
+#ifdef SCHEDULER_PRIVATE_
+
+/*********************************
+ * Defined in scheduler.c
+ *********************************/
+
+void scheduler_set_channel_state(channel_t *chan, int new_state);
+const char *get_scheduler_state_string(int scheduler_state);
+
+/* Triggers a BUG() and extra information with chan if available. */
+#define SCHED_BUG(cond, chan) \
+ (PREDICT_UNLIKELY(cond) ? \
+ ((BUG(cond)) ? (scheduler_bug_occurred(chan), 1) : 0) : 0)
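+
+/* Typical use, as in scheduler_kist.c:
+ *
+ *   if (SCHED_BUG(!ent, chan)) {
+ *     return; // logs the failed condition plus channel context, then bail
+ *   }
+ */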
+
+void scheduler_bug_occurred(const channel_t *chan);
+
+smartlist_t *get_channels_pending(void);
+MOCK_DECL(int, scheduler_compare_channels,
+ (const void *c1_v, const void *c2_v));
+void scheduler_ev_active(void);
+void scheduler_ev_add(const struct timeval *next_run);
+
+#ifdef TOR_UNIT_TESTS
+extern smartlist_t *channels_pending;
+extern struct mainloop_event_t *run_sched_ev;
+extern const scheduler_t *the_scheduler;
+void scheduler_touch_channel(channel_t *chan);
+#endif /* defined(TOR_UNIT_TESTS) */
+
+/*********************************
+ * Defined in scheduler_kist.c
+ *********************************/
+
+#ifdef SCHEDULER_KIST_PRIVATE
+
+/* Socket table entry which holds information of a channel's socket and kernel
+ * TCP information. Only used by KIST. */
+typedef struct socket_table_ent_s {
+ HT_ENTRY(socket_table_ent_s) node;
+ const channel_t *chan;
+ /* Amount written this scheduling run */
+ uint64_t written;
+ /* Amount that can be written this scheduling run */
+ uint64_t limit;
+ /* TCP info from the kernel */
+ uint32_t cwnd;
+ uint32_t unacked;
+ uint32_t mss;
+ uint32_t notsent;
+} socket_table_ent_t;
+
+typedef HT_HEAD(outbuf_table_s, outbuf_table_ent_s) outbuf_table_t;
+
+MOCK_DECL(int, channel_should_write_to_kernel,
+ (outbuf_table_t *table, channel_t *chan));
+MOCK_DECL(void, channel_write_to_kernel, (channel_t *chan));
+MOCK_DECL(void, update_socket_info_impl, (socket_table_ent_t *ent));
+
+int scheduler_can_use_kist(void);
+void scheduler_kist_set_full_mode(void);
+void scheduler_kist_set_lite_mode(void);
+scheduler_t *get_kist_scheduler(void);
+int kist_scheduler_run_interval(void);
+
+#ifdef TOR_UNIT_TESTS
+extern int32_t sched_run_interval;
+#endif /* defined(TOR_UNIT_TESTS) */
+
+#endif /* defined(SCHEDULER_KIST_PRIVATE) */
+
+/*********************************
+ * Defined in scheduler_vanilla.c
+ *********************************/
+
+scheduler_t *get_vanilla_scheduler(void);
+
+#endif /* defined(SCHEDULER_PRIVATE_) */
+
+#endif /* !defined(TOR_SCHEDULER_H) */
+
diff --git a/src/core/or/scheduler_kist.c b/src/core/or/scheduler_kist.c
new file mode 100644
index 0000000000..5a45ccab88
--- /dev/null
+++ b/src/core/or/scheduler_kist.c
@@ -0,0 +1,842 @@
+/* Copyright (c) 2017-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#define SCHEDULER_KIST_PRIVATE
+
+#include "or/or.h"
+#include "lib/container/buffers.h"
+#include "or/config.h"
+#include "or/connection.h"
+#include "or/networkstatus.h"
+#define TOR_CHANNEL_INTERNAL_
+#include "or/channel.h"
+#include "or/channeltls.h"
+#define SCHEDULER_PRIVATE_
+#include "or/scheduler.h"
+#include "lib/math/fp.h"
+
+#include "or/or_connection_st.h"
+
+#define TLS_PER_CELL_OVERHEAD 29
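+
+/* For the arithmetic below: KIST accounts each flushed cell as
+ * CELL_MAX_NETWORK_SIZE + TLS_PER_CELL_OVERHEAD bytes on the wire, i.e.
+ * 514 + 29 = 543 bytes with the current cell size. See socket_can_write()
+ * and update_socket_written(). */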
+
+#ifdef HAVE_SYS_IOCTL_H
+#include <sys/ioctl.h>
+#endif
+
+#ifdef HAVE_KIST_SUPPORT
+/* Kernel interface needed for KIST. */
+#include <netinet/tcp.h>
+#include <linux/sockios.h>
+#endif /* HAVE_KIST_SUPPORT */
+
+/*****************************************************************************
+ * Data structures and supporting functions
+ *****************************************************************************/
+
+/* Socket_table hash table stuff. The socket_table keeps track of per-socket
+ * limit information imposed and used by KIST. */
+
+static uint32_t
+socket_table_ent_hash(const socket_table_ent_t *ent)
+{
+ return (uint32_t)ent->chan->global_identifier;
+}
+
+static unsigned
+socket_table_ent_eq(const socket_table_ent_t *a, const socket_table_ent_t *b)
+{
+ return a->chan == b->chan;
+}
+
+typedef HT_HEAD(socket_table_s, socket_table_ent_s) socket_table_t;
+
+static socket_table_t socket_table = HT_INITIALIZER();
+
+HT_PROTOTYPE(socket_table_s, socket_table_ent_s, node, socket_table_ent_hash,
+ socket_table_ent_eq)
+HT_GENERATE2(socket_table_s, socket_table_ent_s, node, socket_table_ent_hash,
+ socket_table_ent_eq, 0.6, tor_reallocarray, tor_free_)
+
+/* outbuf_table hash table stuff. The outbuf_table keeps track of which
+ * channels have data sitting in their outbuf so the kist scheduler can force
+ * a write from outbuf to kernel periodically during a run and at the end of a
+ * run. */
+
+typedef struct outbuf_table_ent_s {
+ HT_ENTRY(outbuf_table_ent_s) node;
+ channel_t *chan;
+} outbuf_table_ent_t;
+
+static uint32_t
+outbuf_table_ent_hash(const outbuf_table_ent_t *ent)
+{
+ return (uint32_t)ent->chan->global_identifier;
+}
+
+static unsigned
+outbuf_table_ent_eq(const outbuf_table_ent_t *a, const outbuf_table_ent_t *b)
+{
+ return a->chan->global_identifier == b->chan->global_identifier;
+}
+
+HT_PROTOTYPE(outbuf_table_s, outbuf_table_ent_s, node, outbuf_table_ent_hash,
+ outbuf_table_ent_eq)
+HT_GENERATE2(outbuf_table_s, outbuf_table_ent_s, node, outbuf_table_ent_hash,
+ outbuf_table_ent_eq, 0.6, tor_reallocarray, tor_free_)
+
+/*****************************************************************************
+ * Other internal data
+ *****************************************************************************/
+
+/* Store the last time the scheduler was run so we can decide when to next run
+ * the scheduler based on it. */
+static monotime_t scheduler_last_run;
+/* This is a factor for the extra_space calculation in kist per-socket limits.
+ * It is the number of extra congestion windows we want to write to the kernel.
+ */
+static double sock_buf_size_factor = 1.0;
+/* How often the scheduler runs. */
+STATIC int sched_run_interval = KIST_SCHED_RUN_INTERVAL_DEFAULT;
+
+#ifdef HAVE_KIST_SUPPORT
+/* Indicate if KIST lite mode is on or off. We can disable it at runtime.
+ * Important to have because of the possible KISTLite -> KIST transition. */
+static unsigned int kist_lite_mode = 0;
+/* Indicate if we lack kernel support. This can happen if the kernel changed
+ * and no longer recognizes the values passed to the syscalls needed by KIST.
+ * In that case, we fall back to the naive approach. */
+static unsigned int kist_no_kernel_support = 0;
+#else /* !(defined(HAVE_KIST_SUPPORT)) */
+static unsigned int kist_lite_mode = 1;
+#endif /* defined(HAVE_KIST_SUPPORT) */
+
+/*****************************************************************************
+ * Internally called function implementations
+ *****************************************************************************/
+
+/* Little helper function to get the length of a channel's output buffer */
+static inline size_t
+channel_outbuf_length(channel_t *chan)
+{
+  /* In theory, this cannot happen because we cannot schedule a channel
+   * without a connection that has its outbuf initialized. Just in case, bug
+   * on this so we can understand a bit more why it happened. */
+ if (SCHED_BUG(BASE_CHAN_TO_TLS(chan)->conn == NULL, chan)) {
+ return 0;
+ }
+ return buf_datalen(TO_CONN(BASE_CHAN_TO_TLS(chan)->conn)->outbuf);
+}
+
+/* Little helper function for HT_FOREACH_FN. */
+static int
+each_channel_write_to_kernel(outbuf_table_ent_t *ent, void *data)
+{
+ (void) data; /* Make compiler happy. */
+ channel_write_to_kernel(ent->chan);
+ return 0; /* Returning non-zero removes the element from the table. */
+}
+
+/* Free the given outbuf table entry ent. */
+static int
+free_outbuf_info_by_ent(outbuf_table_ent_t *ent, void *data)
+{
+ (void) data; /* Make compiler happy. */
+ log_debug(LD_SCHED, "Freeing outbuf table entry from chan=%" PRIu64,
+ ent->chan->global_identifier);
+ tor_free(ent);
+ return 1; /* So HT_FOREACH_FN will remove the element */
+}
+
+/* Free the given socket table entry ent. */
+static int
+free_socket_info_by_ent(socket_table_ent_t *ent, void *data)
+{
+ (void) data; /* Make compiler happy. */
+ log_debug(LD_SCHED, "Freeing socket table entry from chan=%" PRIu64,
+ ent->chan->global_identifier);
+ tor_free(ent);
+ return 1; /* So HT_FOREACH_FN will remove the element */
+}
+
+/* Clean up socket_table, probably because the KIST sched impl is going
+ * away. */
+static void
+free_all_socket_info(void)
+{
+ HT_FOREACH_FN(socket_table_s, &socket_table, free_socket_info_by_ent, NULL);
+ HT_CLEAR(socket_table_s, &socket_table);
+}
+
+static socket_table_ent_t *
+socket_table_search(socket_table_t *table, const channel_t *chan)
+{
+ socket_table_ent_t search, *ent = NULL;
+ search.chan = chan;
+ ent = HT_FIND(socket_table_s, table, &search);
+ return ent;
+}
+
+/* Free a socket entry in table for the given chan. */
+static void
+free_socket_info_by_chan(socket_table_t *table, const channel_t *chan)
+{
+ socket_table_ent_t *ent = NULL;
+ ent = socket_table_search(table, chan);
+ if (!ent)
+ return;
+ log_debug(LD_SCHED, "scheduler free socket info for chan=%" PRIu64,
+ chan->global_identifier);
+ HT_REMOVE(socket_table_s, table, ent);
+ free_socket_info_by_ent(ent, NULL);
+}
+
+/* Perform system calls for the given socket in order to calculate kist's
+ * per-socket limit as documented in the function body. */
+MOCK_IMPL(void,
+update_socket_info_impl, (socket_table_ent_t *ent))
+{
+#ifdef HAVE_KIST_SUPPORT
+ int64_t tcp_space, extra_space;
+ const tor_socket_t sock =
+ TO_CONN(BASE_CHAN_TO_TLS((channel_t *) ent->chan)->conn)->s;
+ struct tcp_info tcp;
+ socklen_t tcp_info_len = sizeof(tcp);
+
+ if (kist_no_kernel_support || kist_lite_mode) {
+ goto fallback;
+ }
+
+ /* Gather information */
+ if (getsockopt(sock, SOL_TCP, TCP_INFO, (void *)&(tcp), &tcp_info_len) < 0) {
+ if (errno == EINVAL) {
+      /* Oops, this option is not provided by the kernel, so we'll have to
+       * disable KIST entirely. This can happen if Tor was built on a machine
+       * that had the support, or if the kernel was updated and lost the
+       * support. */
+      log_notice(LD_SCHED, "Looks like our kernel doesn't have the support "
+                 "for KIST anymore. We will fall back to the naive "
+                 "approach. Remove KIST from the Schedulers list "
+                 "to disable.");
+ kist_no_kernel_support = 1;
+ }
+ goto fallback;
+ }
+ if (ioctl(sock, SIOCOUTQNSD, &(ent->notsent)) < 0) {
+ if (errno == EINVAL) {
+      log_notice(LD_SCHED, "Looks like our kernel doesn't have the support "
+                 "for KIST anymore. We will fall back to the naive "
+                 "approach. Remove KIST from the Schedulers list "
+                 "to disable.");
+ /* Same reason as the above. */
+ kist_no_kernel_support = 1;
+ }
+ goto fallback;
+ }
+ ent->cwnd = tcp.tcpi_snd_cwnd;
+ ent->unacked = tcp.tcpi_unacked;
+ ent->mss = tcp.tcpi_snd_mss;
+
+ /* In order to reduce outbound kernel queuing delays and thus improve Tor's
+ * ability to prioritize circuits, KIST wants to set a socket write limit
+ * that is near the amount that the socket would be able to immediately send
+ * into the Internet.
+ *
+ * We first calculate how much the socket could send immediately (assuming
+ * completely full packets) according to the congestion window and the number
+ * of unacked packets.
+ *
+   * Then we add a little extra space in a controlled way. We do this so that
+ * when the kernel gets ACKs back for data currently sitting in the "TCP
+ * space", it will already have some more data to send immediately. It will
+ * not have to wait for the scheduler to run again. The amount of extra space
+ * is a factor of the current congestion window. With the suggested
+ * sock_buf_size_factor value of 1.0, we allow at most 2*cwnd bytes to sit in
+ * the kernel: 1 cwnd on the wire waiting for ACKs and 1 cwnd ready and
+ * waiting to be sent when those ACKs finally come.
+ *
+ * In the below diagram, we see some bytes in the TCP-space (denoted by '*')
+   * that have been sent onto the wire and are waiting for ACKs. We have a
+   * little more room in "TCP space" that we can fill with data that will be
+ * immediately sent. We also see the "extra space" KIST calculates. The sum
+ * of the empty "TCP space" and the "extra space" is the kist-imposed write
+ * limit for this socket.
+ *
+ * <----------------kernel-outbound-socket-queue----------------|
+ * <*********---------------------------------------------------|
+ * |----TCP-space-----|----extra-space-----|
+ * |------------------|
+ * ^ ((cwnd - unacked) * mss) bytes
+ * |--------------------|
+ * ^ ((cwnd * mss) * factor) bytes
+ */
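+
+  /* As a worked example with illustrative numbers only: cwnd = 10,
+   * unacked = 4, mss = 1448, sock_buf_size_factor = 1.0, notsent = 0:
+   *
+   *   tcp_space   = (10 - 4) * 1448       =  8688 bytes
+   *   extra_space = (10 * 1448) * 1.0 - 0 = 14480 bytes
+   *   limit       = 8688 + 14480          = 23168 bytes
+   */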
+
+  /* These values from the kernel are uint32_t, so they will always fit into
+   * an int64_t tcp_space variable, but if the congestion window cwnd is
+   * smaller than the unacked packets, the remaining TCP space is set to 0. */
+ if (ent->cwnd >= ent->unacked) {
+ tcp_space = (ent->cwnd - ent->unacked) * (int64_t)(ent->mss);
+ } else {
+ tcp_space = 0;
+ }
+
+ /* The clamp_double_to_int64 makes sure the first part fits into an int64_t.
+ * In fact, if sock_buf_size_factor is still forced to be >= 0 in config.c,
+ * then it will be positive for sure. Then we subtract a uint32_t. Getting a
+ * negative value is OK, see after how it is being handled. */
+ extra_space =
+ clamp_double_to_int64(
+ (ent->cwnd * (int64_t)ent->mss) * sock_buf_size_factor) -
+ ent->notsent;
+ if ((tcp_space + extra_space) < 0) {
+ /* This means that the "notsent" queue is just too big so we shouldn't put
+ * more in the kernel for now. */
+ ent->limit = 0;
+ } else {
+ /* The positive sum of two int64_t will always fit into an uint64_t.
+ * And we know this will always be positive, since we checked above. */
+ ent->limit = (uint64_t)tcp_space + (uint64_t)extra_space;
+ }
+ return;
+
+#else /* !(defined(HAVE_KIST_SUPPORT)) */
+ goto fallback;
+#endif /* defined(HAVE_KIST_SUPPORT) */
+
+ fallback:
+ /* If all of a sudden we don't have kist support, we just zero out all the
+ * variables for this socket since we don't know what they should be. We
+ * also allow the socket to write as much as it can from the estimated
+ * number of cells the lower layer can accept, effectively returning it to
+ * Vanilla scheduler behavior. */
+ ent->cwnd = ent->unacked = ent->mss = ent->notsent = 0;
+  /* This function calls the specialized channel object (currently channeltls)
+   * and asks how many cells it can write on the outbuf, which we then
+   * multiply by the size of the cells for this channel. The cast is because
+   * this function requires a non-const channel object, meh. */
+ ent->limit = channel_num_cells_writeable((channel_t *) ent->chan) *
+ (get_cell_network_size(ent->chan->wide_circ_ids) +
+ TLS_PER_CELL_OVERHEAD);
+}
+
+/* Given a socket that isn't in the table, add it.
+ * Given a socket that is in the table, re-initialize the values that need
+ * resetting every scheduling run. */
+static void
+init_socket_info(socket_table_t *table, const channel_t *chan)
+{
+ socket_table_ent_t *ent = NULL;
+ ent = socket_table_search(table, chan);
+ if (!ent) {
+ log_debug(LD_SCHED, "scheduler init socket info for chan=%" PRIu64,
+ chan->global_identifier);
+ ent = tor_malloc_zero(sizeof(*ent));
+ ent->chan = chan;
+ HT_INSERT(socket_table_s, table, ent);
+ }
+ ent->written = 0;
+}
+
+/* Add chan to the outbuf table if it isn't already in it. If it is, then
+ * don't do anything. */
+static void
+outbuf_table_add(outbuf_table_t *table, channel_t *chan)
+{
+ outbuf_table_ent_t search, *ent;
+ search.chan = chan;
+ ent = HT_FIND(outbuf_table_s, table, &search);
+ if (!ent) {
+ log_debug(LD_SCHED, "scheduler init outbuf info for chan=%" PRIu64,
+ chan->global_identifier);
+ ent = tor_malloc_zero(sizeof(*ent));
+ ent->chan = chan;
+ HT_INSERT(outbuf_table_s, table, ent);
+ }
+}
+
+static void
+outbuf_table_remove(outbuf_table_t *table, channel_t *chan)
+{
+ outbuf_table_ent_t search, *ent;
+ search.chan = chan;
+ ent = HT_FIND(outbuf_table_s, table, &search);
+ if (ent) {
+ HT_REMOVE(outbuf_table_s, table, ent);
+ free_outbuf_info_by_ent(ent, NULL);
+ }
+}
+
+/* Set the scheduler running interval. */
+static void
+set_scheduler_run_interval(void)
+{
+ int old_sched_run_interval = sched_run_interval;
+ sched_run_interval = kist_scheduler_run_interval();
+ if (old_sched_run_interval != sched_run_interval) {
+ log_info(LD_SCHED, "Scheduler KIST changing its running interval "
+ "from %" PRId32 " to %" PRId32,
+ old_sched_run_interval, sched_run_interval);
+ }
+}
+
+/* Return true iff the channel hasn't hit its kist-imposed write limit yet */
+static int
+socket_can_write(socket_table_t *table, const channel_t *chan)
+{
+ socket_table_ent_t *ent = NULL;
+ ent = socket_table_search(table, chan);
+ if (SCHED_BUG(!ent, chan)) {
+ return 1; // Just return true, saying that kist wouldn't limit the socket
+ }
+
+ /* We previously calculated a write limit for this socket. In the below
+ * calculation, first determine how much room is left in bytes. Then divide
+ * that by the amount of space a cell takes. If there's room for at least 1
+ * cell, then KIST will allow the socket to write. */
+ int64_t kist_limit_space =
+ (int64_t) (ent->limit - ent->written) /
+ (CELL_MAX_NETWORK_SIZE + TLS_PER_CELL_OVERHEAD);
+ return kist_limit_space > 0;
+}
+
+/* Update the channel's socket kernel information. */
+static void
+update_socket_info(socket_table_t *table, const channel_t *chan)
+{
+ socket_table_ent_t *ent = NULL;
+ ent = socket_table_search(table, chan);
+ if (SCHED_BUG(!ent, chan)) {
+ return; // Whelp. Entry didn't exist for some reason so nothing to do.
+ }
+ update_socket_info_impl(ent);
+ log_debug(LD_SCHED, "chan=%" PRIu64 " updated socket info, limit: %" PRIu64
+ ", cwnd: %" PRIu32 ", unacked: %" PRIu32
+ ", notsent: %" PRIu32 ", mss: %" PRIu32,
+ ent->chan->global_identifier, ent->limit, ent->cwnd, ent->unacked,
+ ent->notsent, ent->mss);
+}
+
+/* Increment the channel's socket written value by the number of bytes. */
+static void
+update_socket_written(socket_table_t *table, channel_t *chan, size_t bytes)
+{
+ socket_table_ent_t *ent = NULL;
+ ent = socket_table_search(table, chan);
+ if (SCHED_BUG(!ent, chan)) {
+ return; // Whelp. Entry didn't exist so nothing to do.
+ }
+
+ log_debug(LD_SCHED, "chan=%" PRIu64 " wrote %lu bytes, old was %" PRIi64,
+ chan->global_identifier, (unsigned long) bytes, ent->written);
+
+ ent->written += bytes;
+}
+
+/*
+ * A naive KIST impl would write every single cell all the way to the kernel.
+ * That would take a lot of system calls. A less bad KIST impl would write a
+ * channel's outbuf to the kernel only when we are switching to a different
+ * channel. But if we have two channels with equal priority, we end up writing
+ * one cell for each and bouncing back and forth. This KIST impl avoids that
+ * by only writing a channel's outbuf to the kernel if it has 8 cells or more
+ * in it.
+ */
+MOCK_IMPL(int, channel_should_write_to_kernel,
+ (outbuf_table_t *table, channel_t *chan))
+{
+ outbuf_table_add(table, chan);
+ /* CELL_MAX_NETWORK_SIZE * 8 because we only want to write the outbuf to the
+ * kernel if there's 8 or more cells waiting */
+ return channel_outbuf_length(chan) > (CELL_MAX_NETWORK_SIZE * 8);
+}
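+
+/* With CELL_MAX_NETWORK_SIZE currently 514 bytes, the threshold above works
+ * out to 8 * 514 = 4112 bytes of outbuf data before we write to the
+ * kernel. */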
+
+/* Little helper function to write a channel's outbuf all the way to the
+ * kernel */
+MOCK_IMPL(void, channel_write_to_kernel, (channel_t *chan))
+{
+ log_debug(LD_SCHED, "Writing %lu bytes to kernel for chan %" PRIu64,
+ (unsigned long)channel_outbuf_length(chan),
+ chan->global_identifier);
+ connection_handle_write(TO_CONN(BASE_CHAN_TO_TLS(chan)->conn), 0);
+}
+
+/* Return true iff the scheduler has work to perform. */
+static int
+have_work(void)
+{
+ smartlist_t *cp = get_channels_pending();
+ IF_BUG_ONCE(!cp) {
+ return 0; // channels_pending doesn't exist so... no work?
+ }
+ return smartlist_len(cp) > 0;
+}
+
+/* Function of the scheduler interface: free_all() */
+static void
+kist_free_all(void)
+{
+ free_all_socket_info();
+}
+
+/* Function of the scheduler interface: on_channel_free() */
+static void
+kist_on_channel_free_fn(const channel_t *chan)
+{
+ free_socket_info_by_chan(&socket_table, chan);
+}
+
+/* Function of the scheduler interface: on_new_consensus() */
+static void
+kist_scheduler_on_new_consensus(void)
+{
+ set_scheduler_run_interval();
+}
+
+/* Function of the scheduler interface: on_new_options() */
+static void
+kist_scheduler_on_new_options(void)
+{
+ sock_buf_size_factor = get_options()->KISTSockBufSizeFactor;
+
+ /* Calls kist_scheduler_run_interval which calls get_options(). */
+ set_scheduler_run_interval();
+}
+
+/* Function of the scheduler interface: init() */
+static void
+kist_scheduler_init(void)
+{
+  /* When initializing the scheduler, the last run could be 0 because the
+   * variable is static, or it could be a stale value set the last time the
+   * scheduler was used. In both cases, we want to initialize it to now so
+   * we don't risk
+ * using the value 0 which doesn't play well with our monotonic time
+ * interface.
+ *
+ * One side effect is that the first scheduler run will be at the next tick
+ * that is in now + 10 msec (KIST_SCHED_RUN_INTERVAL_DEFAULT) by default. */
+ monotime_get(&scheduler_last_run);
+
+ kist_scheduler_on_new_options();
+ IF_BUG_ONCE(sched_run_interval == 0) {
+ log_warn(LD_SCHED, "We are initing the KIST scheduler and noticed the "
+ "KISTSchedRunInterval is telling us to not use KIST. That's "
+ "weird! We'll continue using KIST, but at %" PRId32 "ms.",
+ KIST_SCHED_RUN_INTERVAL_DEFAULT);
+ sched_run_interval = KIST_SCHED_RUN_INTERVAL_DEFAULT;
+ }
+}
+
+/* Function of the scheduler interface: schedule() */
+static void
+kist_scheduler_schedule(void)
+{
+ struct monotime_t now;
+ struct timeval next_run;
+ int64_t diff;
+
+ if (!have_work()) {
+ return;
+ }
+ monotime_get(&now);
+
+  /* If time is really monotonic, now can never be smaller than the last
+   * scheduler run. The scheduler_last_run is initially set to 0.
+ * Unfortunately, not all platforms guarantee monotonic time so we log at
+ * info level but don't make it more noisy. */
+ diff = monotime_diff_msec(&scheduler_last_run, &now);
+ if (diff < 0) {
+ log_info(LD_SCHED, "Monotonic time between now and last run of scheduler "
+ "is negative: %" PRId64 ". Setting diff to 0.", diff);
+ diff = 0;
+ }
+ if (diff < sched_run_interval) {
+ next_run.tv_sec = 0;
+ /* Takes 1000 ms -> us. This will always be valid because diff can NOT be
+ * negative and can NOT be bigger than sched_run_interval so values can
+ * only go from 1000 usec (diff set to interval - 1) to 100000 usec (diff
+ * set to 0) for the maximum allowed run interval (100ms). */
+ next_run.tv_usec = (int) ((sched_run_interval - diff) * 1000);
+ /* Re-adding an event reschedules it. It does not duplicate it. */
+ scheduler_ev_add(&next_run);
+ } else {
+ scheduler_ev_active();
+ }
+}
+
+/* Function of the scheduler interface: run() */
+static void
+kist_scheduler_run(void)
+{
+ /* Define variables */
+ channel_t *chan = NULL; // current working channel
+ /* The last distinct chan served in a sched loop. */
+ channel_t *prev_chan = NULL;
+ int flush_result; // temporarily store results from flush calls
+  /* Channels to be re-added to pending at the end */
+ smartlist_t *to_readd = NULL;
+ smartlist_t *cp = get_channels_pending();
+
+ outbuf_table_t outbuf_table = HT_INITIALIZER();
+
+ /* For each pending channel, collect new kernel information */
+ SMARTLIST_FOREACH_BEGIN(cp, const channel_t *, pchan) {
+ init_socket_info(&socket_table, pchan);
+ update_socket_info(&socket_table, pchan);
+ } SMARTLIST_FOREACH_END(pchan);
+
+ log_debug(LD_SCHED, "Running the scheduler. %d channels pending",
+ smartlist_len(cp));
+
+ /* The main scheduling loop. Loop until there are no more pending channels */
+ while (smartlist_len(cp) > 0) {
+ /* get best channel */
+ chan = smartlist_pqueue_pop(cp, scheduler_compare_channels,
+ offsetof(channel_t, sched_heap_idx));
+ if (SCHED_BUG(!chan, NULL)) {
+ /* Some-freaking-how a NULL got into the channels_pending. That should
+ * never happen, but it should be harmless to ignore it and keep looping.
+ */
+ continue;
+ }
+ outbuf_table_add(&outbuf_table, chan);
+
+ /* if we have switched to a new channel, consider writing the previous
+ * channel's outbuf to the kernel. */
+ if (!prev_chan) {
+ prev_chan = chan;
+ }
+ if (prev_chan != chan) {
+ if (channel_should_write_to_kernel(&outbuf_table, prev_chan)) {
+ channel_write_to_kernel(prev_chan);
+ outbuf_table_remove(&outbuf_table, prev_chan);
+ }
+ prev_chan = chan;
+ }
+
+ /* Only flush and write if the per-socket limit hasn't been hit */
+ if (socket_can_write(&socket_table, chan)) {
+ /* flush to channel queue/outbuf */
+ flush_result = (int)channel_flush_some_cells(chan, 1); // 1 for num cells
+      /* XXX: While flushing cells, it is possible that the connection write
+       * fails, leading to the channel being closed, which triggers a release
+       * and frees its entry in the socket table. And because of an
+       * engineering design issue, the error is not propagated back, so we
+       * don't get an error at this point. So before we continue, make sure
+       * the channel is open and if not just ignore it. See #23751. */
+ if (!CHANNEL_IS_OPEN(chan)) {
+ /* Channel isn't open so we put it back in IDLE mode. It is either
+ * renegotiating its TLS session or about to be released. */
+ scheduler_set_channel_state(chan, SCHED_CHAN_IDLE);
+ continue;
+ }
+ /* flush_result has the # cells flushed */
+ if (flush_result > 0) {
+ update_socket_written(&socket_table, chan, flush_result *
+ (CELL_MAX_NETWORK_SIZE + TLS_PER_CELL_OVERHEAD));
+ } else {
+        /* XXX: This can happen because tor sometimes opportunistically
+         * flushes cells from the circuits to the outbuf, so the channel can
+         * end up here without having anything to flush nor needing to write
+         * to the kernel. Hopefully we'll fix that soon but for now we have
+         * to handle this case which happens kind of often. */
+ log_debug(LD_SCHED,
+ "We didn't flush anything on a chan that we think "
+ "can write and wants to write. The channel's state is '%s' "
+ "and in scheduler state '%s'. We're going to mark it as "
+ "waiting_for_cells (as that's most likely the issue) and "
+ "stop scheduling it this round.",
+ channel_state_to_string(chan->state),
+ get_scheduler_state_string(chan->scheduler_state));
+ scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_FOR_CELLS);
+ continue;
+ }
+ }
+
+ /* Decide what to do with the channel now */
+
+ if (!channel_more_to_flush(chan) &&
+ !socket_can_write(&socket_table, chan)) {
+
+ /* Case 1: no more cells to send, and cannot write */
+
+ /*
+ * You might think we should put the channel in SCHED_CHAN_IDLE. And
+ * you're probably correct. While implementing KIST, we found that the
+ * scheduling system would sometimes lose track of channels when we did
+ * that. We suspect it has to do with the difference between "can't
+ * write because socket/outbuf is full" and KIST's "can't write because
+ * we've arbitrarily decided that that's enough for now." Sometimes
+ * channels run out of cells at the same time they hit their
+ * kist-imposed write limit and maybe the rest of Tor doesn't put the
+ * channel back in pending when it is supposed to.
+ *
+ * This should be investigated again. It is as simple as changing
+ * SCHED_CHAN_WAITING_FOR_CELLS to SCHED_CHAN_IDLE and seeing if Tor
+ * starts having serious throughput issues. Best done in shadow/chutney.
+ */
+ scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_FOR_CELLS);
+ } else if (!channel_more_to_flush(chan)) {
+
+ /* Case 2: no more cells to send, but still open for writes */
+
+ scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_FOR_CELLS);
+ } else if (!socket_can_write(&socket_table, chan)) {
+
+ /* Case 3: cells to send, but cannot write */
+
+ /*
+ * We want to write, but can't. If we left the channel in
+ * channels_pending, we would never exit the scheduling loop. We need to
+ * add it to a temporary list of channels to be added to channels_pending
+ * after the scheduling loop is over. They can hopefully be taken care of
+ * in the next scheduling round.
+ */
+ if (!to_readd) {
+ to_readd = smartlist_new();
+ }
+ smartlist_add(to_readd, chan);
+ } else {
+
+ /* Case 4: cells to send, and still open for writes */
+
+ scheduler_set_channel_state(chan, SCHED_CHAN_PENDING);
+ if (!SCHED_BUG(chan->sched_heap_idx != -1, chan)) {
+ smartlist_pqueue_add(cp, scheduler_compare_channels,
+ offsetof(channel_t, sched_heap_idx), chan);
+ }
+ }
+ } /* End of main scheduling loop */
+
+ /* Write the outbuf of any channels that still have data */
+ HT_FOREACH_FN(outbuf_table_s, &outbuf_table, each_channel_write_to_kernel,
+ NULL);
+ /* We are done with it. */
+ HT_FOREACH_FN(outbuf_table_s, &outbuf_table, free_outbuf_info_by_ent, NULL);
+ HT_CLEAR(outbuf_table_s, &outbuf_table);
+
+ log_debug(LD_SCHED, "len pending=%d, len to_readd=%d",
+ smartlist_len(cp),
+ (to_readd ? smartlist_len(to_readd) : -1));
+
+ /* Re-add any channels we need to */
+ if (to_readd) {
+ SMARTLIST_FOREACH_BEGIN(to_readd, channel_t *, readd_chan) {
+ scheduler_set_channel_state(readd_chan, SCHED_CHAN_PENDING);
+ if (!smartlist_contains(cp, readd_chan)) {
+        if (!SCHED_BUG(readd_chan->sched_heap_idx != -1, readd_chan)) {
+ /* XXXX Note that the check above is in theory redundant with
+ * the smartlist_contains check. But let's make sure we're
+ * not messing anything up, and leave them both for now. */
+ smartlist_pqueue_add(cp, scheduler_compare_channels,
+ offsetof(channel_t, sched_heap_idx), readd_chan);
+ }
+ }
+ } SMARTLIST_FOREACH_END(readd_chan);
+ smartlist_free(to_readd);
+ }
+
+ monotime_get(&scheduler_last_run);
+}
+
+/*****************************************************************************
+ * Externally called function implementations not called through scheduler_t
+ *****************************************************************************/
+
+/* Stores the kist scheduler function pointers. */
+static scheduler_t kist_scheduler = {
+ .type = SCHEDULER_KIST,
+ .free_all = kist_free_all,
+ .on_channel_free = kist_on_channel_free_fn,
+ .init = kist_scheduler_init,
+ .on_new_consensus = kist_scheduler_on_new_consensus,
+ .schedule = kist_scheduler_schedule,
+ .run = kist_scheduler_run,
+ .on_new_options = kist_scheduler_on_new_options,
+};
+
+/* Return the KIST scheduler object. Note that this does not call its init()
+ * function. */
+scheduler_t *
+get_kist_scheduler(void)
+{
+ return &kist_scheduler;
+}
+
+/* Check the torrc (and maybe consensus) for the configured KIST scheduler run
+ * interval.
+ * - If torrc > 0, then return the positive torrc value (should use KIST, and
+ * should use the set value)
+ * - If torrc == 0, then look in the consensus for what the value should be.
+ * - If == 0, then return 0 (don't use KIST)
+ * - If > 0, then return the positive consensus value
+ * - If consensus doesn't say anything, return 10 milliseconds, default.
+ */
+int
+kist_scheduler_run_interval(void)
+{
+ int run_interval = get_options()->KISTSchedRunInterval;
+
+ if (run_interval != 0) {
+ log_debug(LD_SCHED, "Found KISTSchedRunInterval=%" PRId32 " in torrc. "
+ "Using that.", run_interval);
+ return run_interval;
+ }
+
+ log_debug(LD_SCHED, "KISTSchedRunInterval=0, turning to the consensus.");
+
+ /* Will either be the consensus value or the default. Note that 0 can be
+ * returned which means the consensus wants us to NOT use KIST. */
+ return networkstatus_get_param(NULL, "KISTSchedRunInterval",
+ KIST_SCHED_RUN_INTERVAL_DEFAULT,
+ KIST_SCHED_RUN_INTERVAL_MIN,
+ KIST_SCHED_RUN_INTERVAL_MAX);
+}
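+
+/* For illustration: if the torrc says "KISTSchedRunInterval 0", the interval
+ * comes from the "KISTSchedRunInterval" consensus parameter, clamped to
+ * [KIST_SCHED_RUN_INTERVAL_MIN, KIST_SCHED_RUN_INTERVAL_MAX]; a consensus
+ * value of 0 disables KIST, and an absent parameter yields the 10 ms
+ * default. */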
+
+/* Set KISTLite mode, that is, KIST without kernel support. */
+void
+scheduler_kist_set_lite_mode(void)
+{
+ kist_lite_mode = 1;
+ kist_scheduler.type = SCHEDULER_KIST_LITE;
+ log_info(LD_SCHED,
+ "Setting KIST scheduler without kernel support (KISTLite mode)");
+}
+
+/* Set KIST mode, that is, KIST with kernel support. */
+void
+scheduler_kist_set_full_mode(void)
+{
+ kist_lite_mode = 0;
+ kist_scheduler.type = SCHEDULER_KIST;
+ log_info(LD_SCHED,
+ "Setting KIST scheduler with kernel support (KIST mode)");
+}
+
+#ifdef HAVE_KIST_SUPPORT
+
+/* Return true iff the scheduler subsystem should use KIST. */
+int
+scheduler_can_use_kist(void)
+{
+ if (kist_no_kernel_support) {
+ /* We have no kernel support so we can't use KIST. */
+ return 0;
+ }
+
+  /* We do have the support; time to check the run interval, which the
+   * consensus can use to disable KIST. */
+ int run_interval = kist_scheduler_run_interval();
+ log_debug(LD_SCHED, "Determined KIST sched_run_interval should be "
+ "%" PRId32 ". Can%s use KIST.",
+ run_interval, (run_interval > 0 ? "" : " not"));
+ return run_interval > 0;
+}
+
+#else /* !(defined(HAVE_KIST_SUPPORT)) */
+
+int
+scheduler_can_use_kist(void)
+{
+ return 0;
+}
+
+#endif /* defined(HAVE_KIST_SUPPORT) */
diff --git a/src/core/or/scheduler_vanilla.c b/src/core/or/scheduler_vanilla.c
new file mode 100644
index 0000000000..e05bebb37c
--- /dev/null
+++ b/src/core/or/scheduler_vanilla.c
@@ -0,0 +1,175 @@
+/* Copyright (c) 2017-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#include "or/or.h"
+#include "or/config.h"
+#define TOR_CHANNEL_INTERNAL_
+#include "or/channel.h"
+#define SCHEDULER_PRIVATE_
+#include "or/scheduler.h"
+
+/*****************************************************************************
+ * Other internal data
+ *****************************************************************************/
+
+/* Maximum cells to flush in a single call to channel_flush_some_cells(). */
+#define MAX_FLUSH_CELLS 1000
+
+/*****************************************************************************
+ * Externally called function implementations
+ *****************************************************************************/
+
+/* Return true iff the scheduler has work to perform. */
+static int
+have_work(void)
+{
+ smartlist_t *cp = get_channels_pending();
+ IF_BUG_ONCE(!cp) {
+ return 0; // channels_pending doesn't exist so... no work?
+ }
+ return smartlist_len(cp) > 0;
+}
+
+/** Re-trigger the scheduler in a way that is safe to use from the
+ * callback. */
+static void
+vanilla_scheduler_schedule(void)
+{
+ if (!have_work()) {
+ return;
+ }
+
+ /* Activate our event so it can process channels. */
+ scheduler_ev_active();
+}
+
+static void
+vanilla_scheduler_run(void)
+{
+ int n_cells, n_chans_before, n_chans_after;
+ ssize_t flushed, flushed_this_time;
+ smartlist_t *cp = get_channels_pending();
+ smartlist_t *to_readd = NULL;
+ channel_t *chan = NULL;
+
+ log_debug(LD_SCHED, "We have a chance to run the scheduler");
+
+ n_chans_before = smartlist_len(cp);
+
+ while (smartlist_len(cp) > 0) {
+ /* Pop off a channel */
+ chan = smartlist_pqueue_pop(cp,
+ scheduler_compare_channels,
+ offsetof(channel_t, sched_heap_idx));
+ IF_BUG_ONCE(!chan) {
+ /* Some-freaking-how a NULL got into the channels_pending. That should
+ * never happen, but it should be harmless to ignore it and keep looping.
+ */
+ continue;
+ }
+
+ /* Figure out how many cells we can write */
+ n_cells = channel_num_cells_writeable(chan);
+ if (n_cells > 0) {
+ log_debug(LD_SCHED,
+ "Scheduler saw pending channel %"PRIu64 " at %p with "
+ "%d cells writeable",
+ (chan->global_identifier), chan, n_cells);
+
+ flushed = 0;
+ while (flushed < n_cells) {
+ flushed_this_time =
+ channel_flush_some_cells(chan,
+ MIN(MAX_FLUSH_CELLS, (size_t) n_cells - flushed));
+ if (flushed_this_time <= 0) break;
+ flushed += flushed_this_time;
+ }
+
+ if (flushed < n_cells) {
+ /* We ran out of cells to flush */
+ scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_FOR_CELLS);
+ } else {
+ /* The channel may still have some cells */
+ if (channel_more_to_flush(chan)) {
+ /* The channel goes to either pending or waiting_to_write */
+ if (channel_num_cells_writeable(chan) > 0) {
+ /* Add it back to pending later */
+ if (!to_readd) to_readd = smartlist_new();
+ smartlist_add(to_readd, chan);
+ log_debug(LD_SCHED,
+ "Channel %"PRIu64 " at %p "
+ "is still pending",
+ (chan->global_identifier),
+ chan);
+ } else {
+ /* It's waiting to be able to write more */
+ scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_TO_WRITE);
+ }
+ } else {
+ /* No cells left; it can go to idle or waiting_for_cells */
+ if (channel_num_cells_writeable(chan) > 0) {
+ /*
+ * It can still accept writes, so it goes to
+ * waiting_for_cells
+ */
+ scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_FOR_CELLS);
+ } else {
+ /*
+ * We exactly filled up the output queue with all available
+ * cells; go to idle.
+ */
+ scheduler_set_channel_state(chan, SCHED_CHAN_IDLE);
+ }
+ }
+ }
+
+ log_debug(LD_SCHED,
+ "Scheduler flushed %d cells onto pending channel "
+ "%"PRIu64 " at %p",
+ (int)flushed, (chan->global_identifier),
+ chan);
+ } else {
+ log_info(LD_SCHED,
+ "Scheduler saw pending channel %"PRIu64 " at %p with "
+ "no cells writeable",
+ (chan->global_identifier), chan);
+ /* Put it back to WAITING_TO_WRITE */
+ scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_TO_WRITE);
+ }
+ }
+
+ /* Readd any channels we need to */
+ if (to_readd) {
+ SMARTLIST_FOREACH_BEGIN(to_readd, channel_t *, readd_chan) {
+ scheduler_set_channel_state(readd_chan, SCHED_CHAN_PENDING);
+ smartlist_pqueue_add(cp,
+ scheduler_compare_channels,
+ offsetof(channel_t, sched_heap_idx),
+ readd_chan);
+ } SMARTLIST_FOREACH_END(readd_chan);
+ smartlist_free(to_readd);
+ }
+
+ n_chans_after = smartlist_len(cp);
+ log_debug(LD_SCHED, "Scheduler handled %d of %d pending channels",
+ n_chans_before - n_chans_after, n_chans_before);
+}
+
+/* Stores the vanilla scheduler function pointers. */
+static scheduler_t vanilla_scheduler = {
+ .type = SCHEDULER_VANILLA,
+ .free_all = NULL,
+ .on_channel_free = NULL,
+ .init = NULL,
+ .on_new_consensus = NULL,
+ .schedule = vanilla_scheduler_schedule,
+ .run = vanilla_scheduler_run,
+ .on_new_options = NULL,
+};
+
+scheduler_t *
+get_vanilla_scheduler(void)
+{
+ return &vanilla_scheduler;
+}
+
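The struct just above is a function-pointer vtable: callers dispatch through
whichever scheduler implementation is currently selected and skip any NULL
hooks, which is why the vanilla scheduler can leave most slots unset. A
minimal standalone sketch of that dispatch pattern follows; the toy_* names
are illustrative, not Tor's API.

#include <stdio.h>

/* Illustrative stand-in for Tor's scheduler_t vtable. */
typedef struct toy_scheduler_t {
  const char *name;
  void (*schedule)(void);
  void (*run)(void);
  void (*free_all)(void);   /* optional hook; may be NULL */
} toy_scheduler_t;

static void toy_schedule(void) { puts("scheduler event activated"); }
static void toy_run(void) { puts("running one scheduler pass"); }

static toy_scheduler_t toy_vanilla = {
  .name = "toy-vanilla",
  .schedule = toy_schedule,
  .run = toy_run,
  .free_all = NULL,
};

/* Dispatch through the vtable, skipping unset hooks. */
static void
dispatch(const toy_scheduler_t *sched)
{
  if (sched->schedule)
    sched->schedule();
  if (sched->run)
    sched->run();
  if (sched->free_all)
    sched->free_all();
}

int
main(void)
{
  dispatch(&toy_vanilla);
  return 0;
}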
diff --git a/src/core/or/server_port_cfg_st.h b/src/core/or/server_port_cfg_st.h
new file mode 100644
index 0000000000..e1a9ca496a
--- /dev/null
+++ b/src/core/or/server_port_cfg_st.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef SERVER_PORT_CFG_ST_H
+#define SERVER_PORT_CFG_ST_H
+
+struct server_port_cfg_t {
+ /* Server port types (or, dir) only: */
+ unsigned int no_advertise : 1;
+ unsigned int no_listen : 1;
+ unsigned int all_addrs : 1;
+ unsigned int bind_ipv4_only : 1;
+ unsigned int bind_ipv6_only : 1;
+};
+
+#endif
+
diff --git a/src/core/or/socks_request_st.h b/src/core/or/socks_request_st.h
new file mode 100644
index 0000000000..d7b979c3eb
--- /dev/null
+++ b/src/core/or/socks_request_st.h
@@ -0,0 +1,75 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef SOCKS_REQUEST_ST_H
+#define SOCKS_REQUEST_ST_H
+
+#define MAX_SOCKS_REPLY_LEN 1024
+
+#define SOCKS_NO_AUTH 0x00
+#define SOCKS_USER_PASS 0x02
+
+/** Please open a TCP connection to this addr:port. */
+#define SOCKS_COMMAND_CONNECT 0x01
+/** Please turn this FQDN into an IP address, privately. */
+#define SOCKS_COMMAND_RESOLVE 0xF0
+/** Please turn this IP address into an FQDN, privately. */
+#define SOCKS_COMMAND_RESOLVE_PTR 0xF1
+
+/* || 0 is for -Wparentheses-equality (-Wall?) appeasement under clang */
+#define SOCKS_COMMAND_IS_CONNECT(c) (((c)==SOCKS_COMMAND_CONNECT) || 0)
+#define SOCKS_COMMAND_IS_RESOLVE(c) ((c)==SOCKS_COMMAND_RESOLVE || \
+ (c)==SOCKS_COMMAND_RESOLVE_PTR)
+
+/** State of a SOCKS request from a user to an OP. Also used to encode other
+ * information for non-SOCKS user requests (such as those on TransPort and
+ * DNSPort). */
+struct socks_request_t {
+ /** Which version of SOCKS did the client use? One of "0, 4, 5" -- where
+ * 0 means that no socks handshake ever took place, and this is just a
+ * stub connection (e.g. see connection_ap_make_link()). */
+ uint8_t socks_version;
+ /** If using socks5 authentication, which authentication type did we
+ * negotiate? currently we support 0 (no authentication) and 2
+ * (username/password). */
+ uint8_t auth_type;
+ /** What is this stream's goal? One of the SOCKS_COMMAND_* values */
+ uint8_t command;
+ /** Which kind of listener created this stream? */
+ uint8_t listener_type;
+ size_t replylen; /**< Length of <b>reply</b>. */
+ uint8_t reply[MAX_SOCKS_REPLY_LEN]; /**< Write an entry into this string if
+ * we want to specify our own socks reply,
+ * rather than using the default socks4 or
+ * socks5 socks reply. We use this for the
+ * two-stage socks5 handshake.
+ */
+ char address[MAX_SOCKS_ADDR_LEN]; /**< What address did the client ask to
+ connect to/resolve? */
+ uint16_t port; /**< What port did the client ask to connect to? */
+ unsigned int has_finished : 1; /**< Has the SOCKS handshake finished? Used to
+ * make sure we send back a socks reply for
+ * every connection. */
+ unsigned int got_auth : 1; /**< Have we received any authentication data? */
+ /** If this is set, we will choose "no authentication" instead of
+ * "username/password" authentication if both are offered. Used as input to
+ * parse_socks. */
+ unsigned int socks_prefer_no_auth : 1;
+
+ /** Number of bytes in username; 0 if username is NULL */
+ size_t usernamelen;
+ /** Number of bytes in password; 0 if password is NULL */
+ uint8_t passwordlen;
+ /** The negotiated username value if any (for socks5), or the entire
+ * authentication string (for socks4). This value is NOT nul-terminated;
+ * see usernamelen for its length. */
+ char *username;
+ /** The negotiated password value if any (for socks5). This value is NOT
+ * nul-terminated; see passwordlen for its length. */
+ char *password;
+};
+
+#endif
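For context on the reply[] field above: a minimal SOCKS5 error reply
(RFC 1928) is 10 bytes, VER, REP, RSV, ATYP, then a 4-byte BND.ADDR and a
2-byte BND.PORT. A standalone sketch of filling such a buffer, mirroring
what socks_request_set_socks5_error() in proto_socks.c does;
build_socks5_error_reply is a hypothetical helper, not Tor's API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper: build a minimal 10-byte SOCKS5 error reply into
 * <reply> and return its length. Layout: VER, REP, RSV, ATYP, then an
 * all-zero IPv4 BND.ADDR and BND.PORT. */
static size_t
build_socks5_error_reply(uint8_t *reply, uint8_t reason)
{
  memset(reply, 0, 10);
  reply[0] = 0x05;   /* VER: SOCKS5 */
  reply[1] = reason; /* REP: e.g. 0x07 = command not supported */
  reply[3] = 0x01;   /* ATYP: IPv4 */
  return 10;
}

int
main(void)
{
  uint8_t reply[10];
  size_t len = build_socks5_error_reply(reply, 0x07);
  for (size_t i = 0; i < len; i++)
    printf("%02x ", reply[i]);
  putchar('\n');  /* prints: 05 07 00 01 00 00 00 00 00 00 */
  return 0;
}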
diff --git a/src/core/or/status.c b/src/core/or/status.c
new file mode 100644
index 0000000000..2259b3aae7
--- /dev/null
+++ b/src/core/or/status.c
@@ -0,0 +1,251 @@
+/* Copyright (c) 2010-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file status.c
+ * \brief Collect status information and log heartbeat messages.
+ *
+ * This module is responsible for implementing the heartbeat log messages,
+ * which periodically inform users and operators about basic facts to
+ * do with their Tor instance. The log_heartbeat() function, invoked from
+ * main.c, is the principal entry point. It collects data from elsewhere
+ * in Tor, and logs it in a human-readable format.
+ **/
+
+#define STATUS_PRIVATE
+
+#include "or/or.h"
+#include "or/circuituse.h"
+#include "or/config.h"
+#include "or/status.h"
+#include "or/nodelist.h"
+#include "or/relay.h"
+#include "or/router.h"
+#include "or/circuitlist.h"
+#include "or/main.h"
+#include "or/rephist.h"
+#include "or/hibernate.h"
+#include "or/statefile.h"
+#include "or/hs_stats.h"
+#include "or/hs_service.h"
+#include "or/dos.h"
+
+#include "or/or_state_st.h"
+#include "or/routerinfo_st.h"
+#include "lib/tls/tortls.h"
+#include "or/geoip.h"
+
+static void log_accounting(const time_t now, const or_options_t *options);
+
+/** Return the total number of circuits. */
+STATIC int
+count_circuits(void)
+{
+ return smartlist_len(circuit_get_global_list());
+}
+
+/** Take seconds <b>secs</b> and return a newly allocated human-readable
+ * uptime string. */
+STATIC char *
+secs_to_uptime(long secs)
+{
+ long int days = secs / 86400;
+ int hours = (int)((secs - (days * 86400)) / 3600);
+ int minutes = (int)((secs - (days * 86400) - (hours * 3600)) / 60);
+ char *uptime_string = NULL;
+
+ switch (days) {
+ case 0:
+ tor_asprintf(&uptime_string, "%d:%02d hours", hours, minutes);
+ break;
+ case 1:
+ tor_asprintf(&uptime_string, "%ld day %d:%02d hours",
+ days, hours, minutes);
+ break;
+ default:
+ tor_asprintf(&uptime_string, "%ld days %d:%02d hours",
+ days, hours, minutes);
+ break;
+ }
+
+ return uptime_string;
+}
+
+/** Take <b>bytes</b> and return a newly allocated human-readable usage
+ * string. */
+STATIC char *
+bytes_to_usage(uint64_t bytes)
+{
+ char *bw_string = NULL;
+
+ if (bytes < (1<<20)) { /* Less than a megabyte. */
+ tor_asprintf(&bw_string, "%"PRIu64" kB", (bytes>>10));
+ } else if (bytes < (1<<30)) { /* Megabytes. Let's add some precision. */
+ double bw = ((double)bytes);
+ tor_asprintf(&bw_string, "%.2f MB", bw/(1<<20));
+ } else { /* Gigabytes. */
+ double bw = ((double)bytes);
+ tor_asprintf(&bw_string, "%.2f GB", bw/(1<<30));
+ }
+
+ return bw_string;
+}
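A worked example of the two helpers above: secs_to_uptime(90061) returns
"1 day 1:01 hours", since 86400 of the seconds make the day and the
remaining 3661 split into one hour and one minute; bytes_to_usage(5242880)
returns "5.00 MB", and anything under a mebibyte is reported in whole
kilobytes via bytes>>10 (for example, 524288 bytes becomes "512 kB").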
+
+/** Log some usage info about our onion service(s). */
+static void
+log_onion_service_stats(void)
+{
+ unsigned int num_services = hs_service_get_num_services();
+
+ /* If there are no active onion services, no need to print logs */
+ if (num_services == 0) {
+ return;
+ }
+
+ log_notice(LD_HEARTBEAT,
+ "Our onion service%s received %u v2 and %u v3 INTRODUCE2 cells "
+ "and attempted to launch %d rendezvous circuits.",
+ num_services == 1 ? "" : "s",
+ hs_stats_get_n_introduce2_v2_cells(),
+ hs_stats_get_n_introduce2_v3_cells(),
+ hs_stats_get_n_rendezvous_launches());
+}
+
+/** Log a "heartbeat" message describing Tor's status and history so that the
+ * user can know that there is indeed a running Tor. Return 0 on success and
+ * -1 on failure. */
+int
+log_heartbeat(time_t now)
+{
+ char *bw_sent = NULL;
+ char *bw_rcvd = NULL;
+ char *uptime = NULL;
+ const routerinfo_t *me;
+ double r = tls_get_write_overhead_ratio();
+ const int hibernating = we_are_hibernating();
+
+ const or_options_t *options = get_options();
+
+ if (public_server_mode(options) && !hibernating) {
+ /* Let's check if we are in the current cached consensus. */
+ if (!(me = router_get_my_routerinfo()))
+ return -1; /* Something stinks, we won't even attempt this. */
+ else
+ if (!node_get_by_id(me->cache_info.identity_digest))
+ log_fn(LOG_NOTICE, LD_HEARTBEAT, "Heartbeat: It seems like we are not "
+ "in the cached consensus.");
+ }
+
+ uptime = secs_to_uptime(get_uptime());
+ bw_rcvd = bytes_to_usage(get_bytes_read());
+ bw_sent = bytes_to_usage(get_bytes_written());
+
+ log_fn(LOG_NOTICE, LD_HEARTBEAT, "Heartbeat: Tor's uptime is %s, with %d "
+ "circuits open. I've sent %s and received %s.%s",
+ uptime, count_circuits(), bw_sent, bw_rcvd,
+ hibernating?" We are currently hibernating.":"");
+
+ if (server_mode(options) && accounting_is_enabled(options) && !hibernating) {
+ log_accounting(now, options);
+ }
+
+ double fullness_pct = 100;
+ if (stats_n_data_cells_packaged && !hibernating) {
+ fullness_pct =
+ 100*(((double)stats_n_data_bytes_packaged) /
+ ((double)stats_n_data_cells_packaged*RELAY_PAYLOAD_SIZE));
+ }
+ const double overhead_pct = ( r - 1.0 ) * 100.0;
+
+#define FULLNESS_PCT_THRESHOLD 80
+#define TLS_OVERHEAD_THRESHOLD 15
+
+ const int severity = (fullness_pct < FULLNESS_PCT_THRESHOLD ||
+ overhead_pct > TLS_OVERHEAD_THRESHOLD)
+ ? LOG_NOTICE : LOG_INFO;
+
+ log_fn(severity, LD_HEARTBEAT,
+ "Average packaged cell fullness: %2.3f%%. "
+ "TLS write overhead: %.f%%", fullness_pct, overhead_pct);
+
+ if (public_server_mode(options)) {
+ rep_hist_log_circuit_handshake_stats(now);
+ rep_hist_log_link_protocol_counts();
+ dos_log_heartbeat();
+ }
+
+ circuit_log_ancient_one_hop_circuits(1800);
+
+ if (options->BridgeRelay) {
+ char *msg = NULL;
+ msg = format_client_stats_heartbeat(now);
+ if (msg)
+ log_notice(LD_HEARTBEAT, "%s", msg);
+ tor_free(msg);
+ }
+
+ if (options->MainloopStats) {
+ const uint64_t main_loop_success_count = get_main_loop_success_count();
+ const uint64_t main_loop_error_count = get_main_loop_error_count();
+ const uint64_t main_loop_idle_count = get_main_loop_idle_count();
+
+ log_fn(LOG_NOTICE, LD_HEARTBEAT, "Main event loop statistics: "
+ "%"PRIu64 " successful returns, "
+ "%"PRIu64 " erroneous returns, and "
+ "%"PRIu64 " idle returns.",
+ (main_loop_success_count),
+ (main_loop_error_count),
+ (main_loop_idle_count));
+ }
+
+  /* Now, if we are running onion services, log some stats about our usage */
+ log_onion_service_stats();
+
+ tor_free(uptime);
+ tor_free(bw_sent);
+ tor_free(bw_rcvd);
+
+ return 0;
+}
+
+static void
+log_accounting(const time_t now, const or_options_t *options)
+{
+ or_state_t *state = get_or_state();
+ char *acc_rcvd = bytes_to_usage(state->AccountingBytesReadInInterval);
+ char *acc_sent = bytes_to_usage(state->AccountingBytesWrittenInInterval);
+ char *acc_used = bytes_to_usage(get_accounting_bytes());
+ uint64_t acc_bytes = options->AccountingMax;
+ char *acc_max;
+ time_t interval_end = accounting_get_end_time();
+ char end_buf[ISO_TIME_LEN + 1];
+ char *remaining = NULL;
+ acc_max = bytes_to_usage(acc_bytes);
+ format_local_iso_time(end_buf, interval_end);
+ remaining = secs_to_uptime(interval_end - now);
+
+ const char *acc_rule;
+ switch (options->AccountingRule) {
+ case ACCT_MAX: acc_rule = "max";
+ break;
+ case ACCT_SUM: acc_rule = "sum";
+ break;
+ case ACCT_OUT: acc_rule = "out";
+ break;
+ case ACCT_IN: acc_rule = "in";
+ break;
+ default: acc_rule = "max";
+ break;
+ }
+
+ log_notice(LD_HEARTBEAT, "Heartbeat: Accounting enabled. "
+ "Sent: %s, Received: %s, Used: %s / %s, Rule: %s. The "
+ "current accounting interval ends on %s, in %s.",
+ acc_sent, acc_rcvd, acc_used, acc_max, acc_rule, end_buf, remaining);
+
+ tor_free(acc_rcvd);
+ tor_free(acc_sent);
+ tor_free(acc_used);
+ tor_free(acc_max);
+ tor_free(remaining);
+}
diff --git a/src/core/or/status.h b/src/core/or/status.h
new file mode 100644
index 0000000000..7258ed5939
--- /dev/null
+++ b/src/core/or/status.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2010-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef TOR_STATUS_H
+#define TOR_STATUS_H
+
+#include "lib/testsupport/testsupport.h"
+
+int log_heartbeat(time_t now);
+
+#ifdef STATUS_PRIVATE
+STATIC int count_circuits(void);
+STATIC char *secs_to_uptime(long secs);
+STATIC char *bytes_to_usage(uint64_t bytes);
+#endif
+
+#endif /* !defined(TOR_STATUS_H) */
+
diff --git a/src/core/or/tor_version_st.h b/src/core/or/tor_version_st.h
new file mode 100644
index 0000000000..5950c5d5c4
--- /dev/null
+++ b/src/core/or/tor_version_st.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef TOR_VERSION_ST_H
+#define TOR_VERSION_ST_H
+
+#define MAX_STATUS_TAG_LEN 32
+/** Structure to hold parsed Tor versions. This is a little messier
+ * than we would like it to be, because we changed version schemes with 0.1.0.
+ *
+ * See version-spec.txt for the whole business.
+ */
+struct tor_version_t {
+ int major;
+ int minor;
+ int micro;
+ /** Release status. For version in the post-0.1 format, this is always
+ * VER_RELEASE. */
+ enum { VER_PRE=0, VER_RC=1, VER_RELEASE=2, } status;
+ int patchlevel;
+ char status_tag[MAX_STATUS_TAG_LEN];
+ int svn_revision;
+
+ int git_tag_len;
+ char git_tag[DIGEST_LEN];
+};
+
+#endif
+
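Comparisons over tor_version_t go field by field, most significant first. A
hedged standalone sketch of that ordering follows; Tor's real comparator,
tor_version_compare(), lives elsewhere and also weighs status and
status_tag, and the toy_* names here are illustrative.

#include <stdio.h>

/* Trimmed-down stand-in for tor_version_t, enough for an ordering. */
typedef struct toy_version_t {
  int major, minor, micro, patchlevel;
} toy_version_t;

/* Compare field by field, most significant first; returns <0, 0, or >0
 * like strcmp(). */
static int
toy_version_compare(const toy_version_t *a, const toy_version_t *b)
{
  if (a->major != b->major)
    return a->major - b->major;
  if (a->minor != b->minor)
    return a->minor - b->minor;
  if (a->micro != b->micro)
    return a->micro - b->micro;
  return a->patchlevel - b->patchlevel;
}

int
main(void)
{
  toy_version_t a = { 0, 3, 4, 1 }, b = { 0, 3, 5, 0 };
  printf("%d\n", toy_version_compare(&a, &b) < 0);  /* prints 1 */
  return 0;
}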
diff --git a/src/core/or/var_cell_st.h b/src/core/or/var_cell_st.h
new file mode 100644
index 0000000000..514afc44b1
--- /dev/null
+++ b/src/core/or/var_cell_st.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef VAR_CELL_ST_H
+#define VAR_CELL_ST_H
+
+/** Parsed variable-length onion routing cell. */
+struct var_cell_t {
+ /** Type of the cell: CELL_VERSIONS, etc. */
+ uint8_t command;
+  /** Circuit which received the cell */
+ circid_t circ_id;
+ /** Number of bytes actually stored in <b>payload</b> */
+ uint16_t payload_len;
+ /** Payload of this cell */
+ uint8_t payload[FLEXIBLE_ARRAY_MEMBER];
+};
+
+#endif
+
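FLEXIBLE_ARRAY_MEMBER above is Tor's portability macro for a C99 flexible
array member, so a var_cell_t and its payload live in a single allocation. A
sketch of the allocation pattern, assuming a constructor shaped like Tor's
var_cell_new(); the toy_* names are illustrative.

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

typedef uint32_t circid_t;

struct toy_var_cell_t {
  uint8_t command;
  circid_t circ_id;
  uint16_t payload_len;
  uint8_t payload[];  /* C99 flexible array member */
};

/* One allocation covers both the header and <payload_len> payload
 * bytes, as in Tor's var_cell_new(). */
static struct toy_var_cell_t *
toy_var_cell_new(uint16_t payload_len)
{
  struct toy_var_cell_t *cell =
    calloc(1, offsetof(struct toy_var_cell_t, payload) + payload_len);
  if (cell)
    cell->payload_len = payload_len;
  return cell;
}

int
main(void)
{
  struct toy_var_cell_t *cell = toy_var_cell_new(512);
  if (cell) {
    cell->command = 7;  /* CELL_VERSIONS in Tor's numbering */
    free(cell);
  }
  return 0;
}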
diff --git a/src/core/proto/proto_cell.c b/src/core/proto/proto_cell.c
new file mode 100644
index 0000000000..41554bd1b0
--- /dev/null
+++ b/src/core/proto/proto_cell.c
@@ -0,0 +1,86 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#include "or/or.h"
+#include "lib/container/buffers.h"
+#include "or/proto_cell.h"
+
+#include "or/connection_or.h"
+
+#include "or/var_cell_st.h"
+
+/** True iff the cell command <b>command</b> is one that implies a
+ * variable-length cell in Tor link protocol <b>linkproto</b>. */
+static inline int
+cell_command_is_var_length(uint8_t command, int linkproto)
+{
+  /* If linkproto is v2 (2), CELL_VERSIONS is the only variable-length cell,
+   * as implemented here. If it's 1, there are no variable-length cells.
+   * Tor does not support other versions right now, and so can't negotiate
+   * them.
+   */
+ switch (linkproto) {
+ case 1:
+ /* Link protocol version 1 has no variable-length cells. */
+ return 0;
+ case 2:
+ /* In link protocol version 2, VERSIONS is the only variable-length cell */
+ return command == CELL_VERSIONS;
+ case 0:
+ case 3:
+ default:
+ /* In link protocol version 3 and later, and in version "unknown",
+ * commands 128 and higher indicate variable-length. VERSIONS is
+ * grandfathered in. */
+ return command == CELL_VERSIONS || command >= 128;
+ }
+}
+
+/** Check <b>buf</b> for a variable-length cell according to the rules of link
+ * protocol version <b>linkproto</b>. If one is found, pull it off the buffer
+ * and assign a newly allocated var_cell_t to *<b>out</b>, and return 1.
+ * Return 0 if whatever is on the start of buf_t is not a variable-length
+ * cell. Return 1 and set *<b>out</b> to NULL if there seems to be the start
+ * of a variable-length cell on <b>buf</b>, but the whole thing isn't there
+ * yet. */
+int
+fetch_var_cell_from_buf(buf_t *buf, var_cell_t **out, int linkproto)
+{
+ char hdr[VAR_CELL_MAX_HEADER_SIZE];
+ var_cell_t *result;
+ uint8_t command;
+ uint16_t length;
+ const int wide_circ_ids = linkproto >= MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS;
+ const int circ_id_len = get_circ_id_size(wide_circ_ids);
+ const unsigned header_len = get_var_cell_header_size(wide_circ_ids);
+ *out = NULL;
+ if (buf_datalen(buf) < header_len)
+ return 0;
+ buf_peek(buf, hdr, header_len);
+
+ command = get_uint8(hdr + circ_id_len);
+ if (!(cell_command_is_var_length(command, linkproto)))
+ return 0;
+
+ length = ntohs(get_uint16(hdr + circ_id_len + 1));
+ if (buf_datalen(buf) < (size_t)(header_len+length))
+ return 1;
+
+ result = var_cell_new(length);
+ result->command = command;
+ if (wide_circ_ids)
+ result->circ_id = ntohl(get_uint32(hdr));
+ else
+ result->circ_id = ntohs(get_uint16(hdr));
+
+ buf_drain(buf, header_len);
+ buf_peek(buf, (char*) result->payload, length);
+ buf_drain(buf, length);
+
+ *out = result;
+ return 1;
+}
+
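fetch_var_cell_from_buf() reverses a simple wire layout: a circuit ID
(2 bytes before wide circuit IDs, 4 bytes once the link protocol supports
them; MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS is 4 in current Tor), one command
byte, then a 2-byte big-endian length and the payload. A hedged standalone
encoder for the wide-circuit-ID case; toy_encode_var_cell is illustrative.

#include <arpa/inet.h>  /* htonl, htons */
#include <stdint.h>
#include <string.h>

/* Serialize a variable-length cell for link protocol 4 or later
 * (4-byte circuit IDs). Returns the number of bytes written; <out>
 * must have room for 7 + payload_len bytes. */
static size_t
toy_encode_var_cell(uint8_t *out, uint32_t circ_id, uint8_t command,
                    const uint8_t *payload, uint16_t payload_len)
{
  uint32_t id_be = htonl(circ_id);
  uint16_t len_be = htons(payload_len);
  memcpy(out, &id_be, 4);       /* CIRCID */
  out[4] = command;             /* COMMAND: >= 128, or CELL_VERSIONS */
  memcpy(out + 5, &len_be, 2);  /* LENGTH, network byte order */
  memcpy(out + 7, payload, payload_len);
  return 7 + (size_t)payload_len;
}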
diff --git a/src/core/proto/proto_cell.h b/src/core/proto/proto_cell.h
new file mode 100644
index 0000000000..b29645e41d
--- /dev/null
+++ b/src/core/proto/proto_cell.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef TOR_PROTO_CELL_H
+#define TOR_PROTO_CELL_H
+
+struct buf_t;
+struct var_cell_t;
+
+int fetch_var_cell_from_buf(struct buf_t *buf, struct var_cell_t **out,
+ int linkproto);
+
+#endif /* !defined(TOR_PROTO_CELL_H) */
+
diff --git a/src/core/proto/proto_control0.c b/src/core/proto/proto_control0.c
new file mode 100644
index 0000000000..34e7ddb8d9
--- /dev/null
+++ b/src/core/proto/proto_control0.c
@@ -0,0 +1,26 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#include "or/or.h"
+#include "lib/container/buffers.h"
+#include "or/proto_control0.h"
+
+/** Return 1 iff buf looks more like it has an (obsolete) v0 controller
+ * command on it than any valid v1 controller command. */
+int
+peek_buf_has_control0_command(buf_t *buf)
+{
+ if (buf_datalen(buf) >= 4) {
+ char header[4];
+ uint16_t cmd;
+ buf_peek(buf, header, sizeof(header));
+ cmd = ntohs(get_uint16(header+2));
+ if (cmd <= 0x14)
+ return 1; /* This is definitely not a v1 control command. */
+ }
+ return 0;
+}
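The 0x14 test works because v0 framed every message as a 2-byte body length
followed by a 2-byte command, and all v0 commands were small integers, while
a v1 command starts with an ASCII keyword whose third and fourth bytes decode
to a far larger uint16. A standalone sketch of the same check; the 0x05
command code in the example is illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Return 1 iff the first 4 bytes look like an obsolete v0 control
 * message: [bodylen:2][command:2], all v0 commands being <= 0x14. */
static int
toy_looks_like_control0(const uint8_t *header, size_t len)
{
  if (len < 4)
    return 0;
  uint16_t cmd = (uint16_t)((header[2] << 8) | header[3]);
  return cmd <= 0x14;
}

int
main(void)
{
  const uint8_t v0[4] = { 0x00, 0x00, 0x00, 0x05 };  /* small v0 code */
  const uint8_t v1[] = "AUTHENTICATE";  /* bytes 2-3: 'T','H' = 0x5448 */
  printf("%d %d\n", toy_looks_like_control0(v0, 4),
         toy_looks_like_control0(v1, sizeof(v1) - 1));  /* prints 1 0 */
  return 0;
}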
+
diff --git a/src/core/proto/proto_control0.h b/src/core/proto/proto_control0.h
new file mode 100644
index 0000000000..b80dc6c8f8
--- /dev/null
+++ b/src/core/proto/proto_control0.h
@@ -0,0 +1,14 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef TOR_PROTO_CONTROL0_H
+#define TOR_PROTO_CONTROL0_H
+
+struct buf_t;
+int peek_buf_has_control0_command(struct buf_t *buf);
+
+#endif /* !defined(TOR_PROTO_CONTROL0_H) */
+
diff --git a/src/core/proto/proto_ext_or.c b/src/core/proto/proto_ext_or.c
new file mode 100644
index 0000000000..f30d876231
--- /dev/null
+++ b/src/core/proto/proto_ext_or.c
@@ -0,0 +1,40 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#include "or/or.h"
+#include "lib/container/buffers.h"
+#include "or/ext_orport.h"
+#include "or/proto_ext_or.h"
+
+/** The size of the header of an Extended ORPort message: 2 bytes for
+ * COMMAND, 2 bytes for BODYLEN */
+#define EXT_OR_CMD_HEADER_SIZE 4
+
+/** Read <b>buf</b>, which should contain an Extended ORPort message
+ * from a transport proxy. If well-formed, create and populate
+ * <b>out</b> with the Extended ORport message. Return 0 if the
+ * buffer was incomplete, 1 if it was well-formed and -1 if we
+ * encountered an error while parsing it. */
+int
+fetch_ext_or_command_from_buf(buf_t *buf, ext_or_cmd_t **out)
+{
+ char hdr[EXT_OR_CMD_HEADER_SIZE];
+ uint16_t len;
+
+ if (buf_datalen(buf) < EXT_OR_CMD_HEADER_SIZE)
+ return 0;
+ buf_peek(buf, hdr, sizeof(hdr));
+ len = ntohs(get_uint16(hdr+2));
+ if (buf_datalen(buf) < (unsigned)len + EXT_OR_CMD_HEADER_SIZE)
+ return 0;
+ *out = ext_or_cmd_new(len);
+ (*out)->cmd = ntohs(get_uint16(hdr));
+ (*out)->len = len;
+ buf_drain(buf, EXT_OR_CMD_HEADER_SIZE);
+ buf_get_bytes(buf, (*out)->body, len);
+ return 1;
+}
+
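The framing is symmetric, so the matching encoder is equally small. A hedged
standalone sketch that writes the raw wire bytes; toy_ext_or_encode is
illustrative, not Tor's API.

#include <stdint.h>
#include <string.h>

#define TOY_EXT_OR_HEADER_SIZE 4

/* Write [COMMAND:2][BODYLEN:2][BODY] into <out>, big-endian, and
 * return the total size; <out> must hold 4 + bodylen bytes. */
static size_t
toy_ext_or_encode(uint8_t *out, uint16_t command,
                  const uint8_t *body, uint16_t bodylen)
{
  out[0] = (uint8_t)(command >> 8);
  out[1] = (uint8_t)(command & 0xff);
  out[2] = (uint8_t)(bodylen >> 8);
  out[3] = (uint8_t)(bodylen & 0xff);
  memcpy(out + TOY_EXT_OR_HEADER_SIZE, body, bodylen);
  return TOY_EXT_OR_HEADER_SIZE + (size_t)bodylen;
}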
diff --git a/src/core/proto/proto_ext_or.h b/src/core/proto/proto_ext_or.h
new file mode 100644
index 0000000000..708a45974b
--- /dev/null
+++ b/src/core/proto/proto_ext_or.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef TOR_PROTO_EXT_OR_H
+#define TOR_PROTO_EXT_OR_H
+
+struct buf_t;
+
+/** A parsed Extended ORPort message. */
+typedef struct ext_or_cmd_t {
+ uint16_t cmd; /** Command type */
+ uint16_t len; /** Body length */
+ char body[FLEXIBLE_ARRAY_MEMBER]; /** Message body */
+} ext_or_cmd_t;
+
+int fetch_ext_or_command_from_buf(struct buf_t *buf,
+ struct ext_or_cmd_t **out);
+
+#endif /* !defined(TOR_PROTO_EXT_OR_H) */
diff --git a/src/core/proto/proto_http.c b/src/core/proto/proto_http.c
new file mode 100644
index 0000000000..ecc669e3a4
--- /dev/null
+++ b/src/core/proto/proto_http.c
@@ -0,0 +1,171 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#define PROTO_HTTP_PRIVATE
+#include "or/or.h"
+#include "lib/container/buffers.h"
+#include "or/proto_http.h"
+
+/** Return true if <b>buf</b> looks like an HTTP (proxy) request. */
+int
+peek_buf_has_http_command(const buf_t *buf)
+{
+ if (buf_peek_startswith(buf, "CONNECT ") ||
+ buf_peek_startswith(buf, "DELETE ") ||
+ buf_peek_startswith(buf, "GET ") ||
+ buf_peek_startswith(buf, "POST ") ||
+ buf_peek_startswith(buf, "PUT " ))
+ return 1;
+ return 0;
+}
+
+/** There is a (possibly incomplete) http statement on <b>buf</b>, of the
+ * form "\%s\\r\\n\\r\\n\%s", headers, body. (body may contain NULs.)
+ * If a) the headers include a Content-Length field and all bytes in
+ * the body are present, or b) there's no Content-Length field and
+ * all headers are present, then:
+ *
+ * - strdup headers into <b>*headers_out</b>, and NUL-terminate it.
+ * - memdup body into <b>*body_out</b>, and NUL-terminate it.
+ * - Then remove them from <b>buf</b>, and return 1.
+ *
+ * - If headers or body is NULL, discard that part of the buf.
+ * - If the headers or body don't fit in the arg, return -1.
+ * (We ensure that the headers or body don't exceed max len,
+ * _even if_ we're planning to discard them.)
+ * - If force_complete is true, then succeed even if not all of the
+ * content has arrived.
+ *
+ * Else, change nothing and return 0.
+ */
+int
+fetch_from_buf_http(buf_t *buf,
+ char **headers_out, size_t max_headerlen,
+ char **body_out, size_t *body_used, size_t max_bodylen,
+ int force_complete)
+{
+ const char *headers;
+ size_t headerlen, bodylen, contentlen=0;
+ int crlf_offset;
+ int r;
+
+ if (buf_datalen(buf) == 0)
+ return 0;
+
+ crlf_offset = buf_find_string_offset(buf, "\r\n\r\n", 4);
+ if (crlf_offset > (int)max_headerlen ||
+ (crlf_offset < 0 && buf_datalen(buf) > max_headerlen)) {
+ log_debug(LD_HTTP,"headers too long.");
+ return -1;
+ } else if (crlf_offset < 0) {
+ log_debug(LD_HTTP,"headers not all here yet.");
+ return 0;
+ }
+ /* Okay, we have a full header. Make sure it all appears in the first
+ * chunk. */
+ headerlen = crlf_offset + 4;
+ size_t headers_in_chunk = 0;
+ buf_pullup(buf, headerlen, &headers, &headers_in_chunk);
+
+ bodylen = buf_datalen(buf) - headerlen;
+ log_debug(LD_HTTP,"headerlen %d, bodylen %d.", (int)headerlen, (int)bodylen);
+
+ if (max_headerlen <= headerlen) {
+ log_warn(LD_HTTP,"headerlen %d larger than %d. Failing.",
+ (int)headerlen, (int)max_headerlen-1);
+ return -1;
+ }
+ if (max_bodylen <= bodylen) {
+ log_warn(LD_HTTP,"bodylen %d larger than %d. Failing.",
+ (int)bodylen, (int)max_bodylen-1);
+ return -1;
+ }
+
+ r = buf_http_find_content_length(headers, headerlen, &contentlen);
+ if (r == -1) {
+ log_warn(LD_PROTOCOL, "Content-Length is bogus; maybe "
+ "someone is trying to crash us.");
+ return -1;
+ } else if (r == 1) {
+    /* We got a well-formed Content-Length; use it to bound the body. */
+ log_debug(LD_HTTP,"Got a contentlen of %d.",(int)contentlen);
+ if (bodylen < contentlen) {
+ if (!force_complete) {
+ log_debug(LD_HTTP,"body not all here yet.");
+ return 0; /* not all there yet */
+ }
+ }
+ if (bodylen > contentlen) {
+ bodylen = contentlen;
+ log_debug(LD_HTTP,"bodylen reduced to %d.",(int)bodylen);
+ }
+ } else {
+ tor_assert(r == 0);
+ /* Leave bodylen alone */
+ }
+
+ /* all happy. copy into the appropriate places, and return 1 */
+ if (headers_out) {
+ *headers_out = tor_malloc(headerlen+1);
+ buf_get_bytes(buf, *headers_out, headerlen);
+ (*headers_out)[headerlen] = 0; /* NUL terminate it */
+ }
+ if (body_out) {
+ tor_assert(body_used);
+ *body_used = bodylen;
+ *body_out = tor_malloc(bodylen+1);
+ buf_get_bytes(buf, *body_out, bodylen);
+ (*body_out)[bodylen] = 0; /* NUL terminate it */
+ }
+ return 1;
+}
+
+/**
+ * Scan the HTTP headers in the <b>headerlen</b>-byte memory range at
+ * <b>headers</b>, looking for a "Content-Length" header. Try to set
+ * *<b>result_out</b> to the numeric value of that header if possible.
+ * Return -1 if the header was malformed, 0 if it was missing, and 1 if
+ * it was present and well-formed.
+ */
+STATIC int
+buf_http_find_content_length(const char *headers, size_t headerlen,
+ size_t *result_out)
+{
+ const char *p, *newline;
+ char *len_str, *eos=NULL;
+ size_t remaining, result;
+ int ok;
+ *result_out = 0; /* The caller shouldn't look at this unless the
+ * return value is 1, but let's prevent confusion */
+
+#define CONTENT_LENGTH "\r\nContent-Length: "
+ p = (char*) tor_memstr(headers, headerlen, CONTENT_LENGTH);
+ if (p == NULL)
+ return 0;
+
+ tor_assert(p >= headers && p < headers+headerlen);
+ remaining = (headers+headerlen)-p;
+ p += strlen(CONTENT_LENGTH);
+ remaining -= strlen(CONTENT_LENGTH);
+
+ newline = memchr(p, '\n', remaining);
+ if (newline == NULL)
+ return -1;
+
+ len_str = tor_memdup_nulterm(p, newline-p);
+  /* We limit the size to INT_MAX because other parts of the buffers.c
+   * code don't like buffers to be any bigger than that. */
+ result = (size_t) tor_parse_uint64(len_str, 10, 0, INT_MAX, &ok, &eos);
+ if (eos && !tor_strisspace(eos)) {
+ ok = 0;
+ } else {
+ *result_out = result;
+ }
+ tor_free(len_str);
+
+ return ok ? 1 : -1;
+}
+
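For intuition, the same Content-Length scan can be written against a plain
NUL-terminated header string using only libc; buf_http_find_content_length()
above does the equivalent over a non-terminated memory range with
tor_memstr() and tor_parse_uint64(). A hedged standalone version; the toy_*
name is illustrative.

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return -1 on malformed, 0 on missing, 1 (and set *out) on success. */
static int
toy_find_content_length(const char *headers, size_t *out)
{
  const char *p = strstr(headers, "\r\nContent-Length: ");
  if (!p)
    return 0;
  p += strlen("\r\nContent-Length: ");
  char *end = NULL;
  errno = 0;
  unsigned long long v = strtoull(p, &end, 10);
  if (end == p || errno == ERANGE || v > INT_MAX)
    return -1;  /* no digits, overflow, or bigger than buffers allow */
  *out = (size_t)v;
  return 1;
}

int
main(void)
{
  size_t n = 0;
  int r = toy_find_content_length(
      "GET / HTTP/1.0\r\nContent-Length: 42\r\n\r\n", &n);
  printf("%d %zu\n", r, n);  /* prints "1 42" */
  return 0;
}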
diff --git a/src/core/proto/proto_http.h b/src/core/proto/proto_http.h
new file mode 100644
index 0000000000..587e435ede
--- /dev/null
+++ b/src/core/proto/proto_http.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef TOR_PROTO_HTTP_H
+#define TOR_PROTO_HTTP_H
+
+struct buf_t;
+
+int fetch_from_buf_http(struct buf_t *buf,
+ char **headers_out, size_t max_headerlen,
+ char **body_out, size_t *body_used, size_t max_bodylen,
+ int force_complete);
+int peek_buf_has_http_command(const struct buf_t *buf);
+
+#ifdef PROTO_HTTP_PRIVATE
+STATIC int buf_http_find_content_length(const char *headers, size_t headerlen,
+ size_t *result_out);
+#endif
+
+#endif /* !defined(TOR_PROTO_HTTP_H) */
+
diff --git a/src/core/proto/proto_socks.c b/src/core/proto/proto_socks.c
new file mode 100644
index 0000000000..f5e6ce581b
--- /dev/null
+++ b/src/core/proto/proto_socks.c
@@ -0,0 +1,713 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#include "or/or.h"
+#include "or/addressmap.h"
+#include "lib/container/buffers.h"
+#include "or/connection.h"
+#include "or/control.h"
+#include "or/config.h"
+#include "lib/crypt_ops/crypto_util.h"
+#include "or/ext_orport.h"
+#include "or/proto_socks.h"
+#include "or/reasons.h"
+
+#include "or/socks_request_st.h"
+
+static void socks_request_set_socks5_error(socks_request_t *req,
+ socks5_reply_status_t reason);
+
+static int parse_socks(const char *data, size_t datalen, socks_request_t *req,
+ int log_sockstype, int safe_socks, ssize_t *drain_out,
+ size_t *want_length_out);
+static int parse_socks_client(const uint8_t *data, size_t datalen,
+ int state, char **reason,
+ ssize_t *drain_out);
+/**
+ * Wait this many seconds before warning the user about using SOCKS unsafely
+ * again. */
+#define SOCKS_WARN_INTERVAL 5
+
+/** Warn that the user application has made an unsafe socks request using
+ * protocol <b>socks_protocol</b> on port <b>port</b>. Don't warn more than
+ * once per SOCKS_WARN_INTERVAL, unless <b>safe_socks</b> is set. */
+static void
+log_unsafe_socks_warning(int socks_protocol, const char *address,
+ uint16_t port, int safe_socks)
+{
+ static ratelim_t socks_ratelim = RATELIM_INIT(SOCKS_WARN_INTERVAL);
+
+ if (safe_socks) {
+ log_fn_ratelim(&socks_ratelim, LOG_WARN, LD_APP,
+ "Your application (using socks%d to port %d) is giving "
+ "Tor only an IP address. Applications that do DNS resolves "
+ "themselves may leak information. Consider using Socks4A "
+ "(e.g. via privoxy or socat) instead. For more information, "
+ "please see https://wiki.torproject.org/TheOnionRouter/"
+ "TorFAQ#SOCKSAndDNS.%s",
+ socks_protocol,
+ (int)port,
+ safe_socks ? " Rejecting." : "");
+ }
+ control_event_client_status(LOG_WARN,
+ "DANGEROUS_SOCKS PROTOCOL=SOCKS%d ADDRESS=%s:%d",
+ socks_protocol, address, (int)port);
+}
+
+/** Do not attempt to parse socks messages longer than this. This value is
+ * actually significantly higher than the longest possible socks message. */
+#define MAX_SOCKS_MESSAGE_LEN 512
+
+/** Return a new socks_request_t. */
+socks_request_t *
+socks_request_new(void)
+{
+ return tor_malloc_zero(sizeof(socks_request_t));
+}
+
+/** Free all storage held in the socks_request_t <b>req</b>. */
+void
+socks_request_free_(socks_request_t *req)
+{
+ if (!req)
+ return;
+ if (req->username) {
+ memwipe(req->username, 0x10, req->usernamelen);
+ tor_free(req->username);
+ }
+ if (req->password) {
+ memwipe(req->password, 0x04, req->passwordlen);
+ tor_free(req->password);
+ }
+ memwipe(req, 0xCC, sizeof(socks_request_t));
+ tor_free(req);
+}
+
+/** There is a (possibly incomplete) socks handshake on <b>buf</b>, of one
+ * of the forms
+ * - socks4: "socksheader username\\0"
+ * - socks4a: "socksheader username\\0 destaddr\\0"
+ * - socks5 phase one: "version #methods methods"
+ * - socks5 phase two: "version command 0 addresstype..."
+ * If it's a complete and valid handshake, and destaddr fits in
+ * MAX_SOCKS_ADDR_LEN bytes, then pull the handshake off the buf,
+ * assign to <b>req</b>, and return 1.
+ *
+ * If it's invalid or too big, return -1.
+ *
+ * Else it's not all there yet, leave buf alone and return 0.
+ *
+ * If you want to specify the socks reply, write it into <b>req->reply</b>
+ * and set <b>req->replylen</b>, else leave <b>req->replylen</b> alone.
+ *
+ * If <b>log_sockstype</b> is non-zero, then do a notice-level log of whether
+ * the connection is possibly leaking DNS requests locally or not.
+ *
+ * If <b>safe_socks</b> is true, then reject unsafe socks protocols.
+ *
+ * If returning 0 or -1, <b>req->address</b> and <b>req->port</b> are
+ * undefined.
+ */
+int
+fetch_from_buf_socks(buf_t *buf, socks_request_t *req,
+ int log_sockstype, int safe_socks)
+{
+ int res;
+ ssize_t n_drain;
+ size_t want_length = 128;
+ const char *head = NULL;
+ size_t datalen = 0;
+
+ if (buf_datalen(buf) < 2) /* version and another byte */
+ return 0;
+
+ do {
+ n_drain = 0;
+ buf_pullup(buf, want_length, &head, &datalen);
+ tor_assert(head && datalen >= 2);
+ want_length = 0;
+
+ res = parse_socks(head, datalen, req, log_sockstype,
+ safe_socks, &n_drain, &want_length);
+
+ if (n_drain < 0)
+ buf_clear(buf);
+ else if (n_drain > 0)
+ buf_drain(buf, n_drain);
+
+ } while (res == 0 && head && want_length < buf_datalen(buf) &&
+ buf_datalen(buf) >= 2);
+
+ return res;
+}
+
+/** Create a SOCKS5 reply message with <b>reason</b> in its REP field and
+ * have Tor send it as error response to <b>req</b>.
+ */
+static void
+socks_request_set_socks5_error(socks_request_t *req,
+ socks5_reply_status_t reason)
+{
+ req->replylen = 10;
+ memset(req->reply,0,10);
+
+ req->reply[0] = 0x05; // VER field.
+ req->reply[1] = reason; // REP field.
+ req->reply[3] = 0x01; // ATYP field.
+}
+
+static const char SOCKS_PROXY_IS_NOT_AN_HTTP_PROXY_MSG[] =
+ "HTTP/1.0 501 Tor is not an HTTP Proxy\r\n"
+ "Content-Type: text/html; charset=iso-8859-1\r\n\r\n"
+ "<html>\n"
+ "<head>\n"
+ "<title>This is a SOCKS Proxy, Not An HTTP Proxy</title>\n"
+ "</head>\n"
+ "<body>\n"
+ "<h1>This is a SOCKs proxy, not an HTTP proxy.</h1>\n"
+ "<p>\n"
+ "It appears you have configured your web browser to use this Tor port as\n"
+ "an HTTP proxy.\n"
+ "</p><p>\n"
+ "This is not correct: This port is configured as a SOCKS proxy, not\n"
+ "an HTTP proxy. If you need an HTTP proxy tunnel, use the HTTPTunnelPort\n"
+ "configuration option in place of, or in addition to, SOCKSPort.\n"
+ "Please configure your client accordingly.\n"
+ "</p>\n"
+ "<p>\n"
+ "See <a href=\"https://www.torproject.org/documentation.html\">"
+ "https://www.torproject.org/documentation.html</a> for more "
+ "information.\n"
+ "</p>\n"
+ "</body>\n"
+ "</html>\n";
+
+/** Implementation helper for fetch_from_*_socks. Instead of looking
+ * at a buffer's contents, we look at the <b>datalen</b> bytes of data in
+ * <b>data</b>. Instead of removing data from the buffer, we set
+ * <b>drain_out</b> to the amount of data that should be removed (or -1 if the
+ * buffer should be cleared). Instead of pulling more data into the first
+ * chunk of the buffer, we set *<b>want_length_out</b> to the number of bytes
+ * we'd like to see in the input buffer, if they're available. */
+static int
+parse_socks(const char *data, size_t datalen, socks_request_t *req,
+ int log_sockstype, int safe_socks, ssize_t *drain_out,
+ size_t *want_length_out)
+{
+ unsigned int len;
+ char tmpbuf[TOR_ADDR_BUF_LEN+1];
+ tor_addr_t destaddr;
+ uint32_t destip;
+ uint8_t socksver;
+ char *next, *startaddr;
+ unsigned char usernamelen, passlen;
+ struct in_addr in;
+
+ if (datalen < 2) {
+ /* We always need at least 2 bytes. */
+ *want_length_out = 2;
+ return 0;
+ }
+
+ if (req->socks_version == 5 && !req->got_auth) {
+ /* See if we have received authentication. Strictly speaking, we should
+ also check whether we actually negotiated username/password
+ authentication. But some broken clients will send us authentication
+ even if we negotiated SOCKS_NO_AUTH. */
+ if (*data == 1) { /* username/pass version 1 */
+ /* Format is: authversion [1 byte] == 1
+ usernamelen [1 byte]
+ username [usernamelen bytes]
+ passlen [1 byte]
+ password [passlen bytes] */
+ usernamelen = (unsigned char)*(data + 1);
+ if (datalen < 2u + usernamelen + 1u) {
+ *want_length_out = 2u + usernamelen + 1u;
+ return 0;
+ }
+ passlen = (unsigned char)*(data + 2u + usernamelen);
+ if (datalen < 2u + usernamelen + 1u + passlen) {
+ *want_length_out = 2u + usernamelen + 1u + passlen;
+ return 0;
+ }
+ req->replylen = 2; /* 2 bytes of response */
+ req->reply[0] = 1; /* authversion == 1 */
+ req->reply[1] = 0; /* authentication successful */
+ log_debug(LD_APP,
+ "socks5: Accepted username/password without checking.");
+ if (usernamelen) {
+ req->username = tor_memdup(data+2u, usernamelen);
+ req->usernamelen = usernamelen;
+ }
+ if (passlen) {
+ req->password = tor_memdup(data+3u+usernamelen, passlen);
+ req->passwordlen = passlen;
+ }
+ *drain_out = 2u + usernamelen + 1u + passlen;
+ req->got_auth = 1;
+ *want_length_out = 7; /* Minimal socks5 command. */
+ return 0;
+ } else if (req->auth_type == SOCKS_USER_PASS) {
+ /* unknown version byte */
+ log_warn(LD_APP, "Socks5 username/password version %d not recognized; "
+ "rejecting.", (int)*data);
+ return -1;
+ }
+ }
+
+ socksver = *data;
+
+ switch (socksver) { /* which version of socks? */
+ case 5: /* socks5 */
+
+ if (req->socks_version != 5) { /* we need to negotiate a method */
+ unsigned char nummethods = (unsigned char)*(data+1);
+ int have_user_pass, have_no_auth;
+ int r=0;
+ tor_assert(!req->socks_version);
+ if (datalen < 2u+nummethods) {
+ *want_length_out = 2u+nummethods;
+ return 0;
+ }
+ if (!nummethods)
+ return -1;
+ req->replylen = 2; /* 2 bytes of response */
+ req->reply[0] = 5; /* socks5 reply */
+ have_user_pass = (memchr(data+2, SOCKS_USER_PASS, nummethods) !=NULL);
+ have_no_auth = (memchr(data+2, SOCKS_NO_AUTH, nummethods) !=NULL);
+ if (have_user_pass && !(have_no_auth && req->socks_prefer_no_auth)) {
+ req->auth_type = SOCKS_USER_PASS;
+ req->reply[1] = SOCKS_USER_PASS; /* tell client to use "user/pass"
+ auth method */
+ req->socks_version = 5; /* remember we've already negotiated auth */
+ log_debug(LD_APP,"socks5: accepted method 2 (username/password)");
+ r=0;
+ } else if (have_no_auth) {
+ req->reply[1] = SOCKS_NO_AUTH; /* tell client to use "none" auth
+ method */
+ req->socks_version = 5; /* remember we've already negotiated auth */
+ log_debug(LD_APP,"socks5: accepted method 0 (no authentication)");
+ r=0;
+ } else {
+ log_warn(LD_APP,
+ "socks5: offered methods don't include 'no auth' or "
+ "username/password. Rejecting.");
+ req->reply[1] = '\xFF'; /* reject all methods */
+ r=-1;
+ }
+ /* Remove packet from buf. Some SOCKS clients will have sent extra
+ * junk at this point; let's hope it's an authentication message. */
+ *drain_out = 2u + nummethods;
+
+ return r;
+ }
+ if (req->auth_type != SOCKS_NO_AUTH && !req->got_auth) {
+ log_warn(LD_APP,
+ "socks5: negotiated authentication, but none provided");
+ return -1;
+ }
+ /* we know the method; read in the request */
+ log_debug(LD_APP,"socks5: checking request");
+ if (datalen < 7) {/* basic info plus >=1 for addr plus 2 for port */
+ *want_length_out = 7;
+ return 0; /* not yet */
+ }
+ req->command = (unsigned char) *(data+1);
+ if (req->command != SOCKS_COMMAND_CONNECT &&
+ req->command != SOCKS_COMMAND_RESOLVE &&
+ req->command != SOCKS_COMMAND_RESOLVE_PTR) {
+ /* not a connect or resolve or a resolve_ptr? we don't support it. */
+ socks_request_set_socks5_error(req,SOCKS5_COMMAND_NOT_SUPPORTED);
+
+ log_warn(LD_APP,"socks5: command %d not recognized. Rejecting.",
+ req->command);
+ return -1;
+ }
+ switch (*(data+3)) { /* address type */
+ case 1: /* IPv4 address */
+ case 4: /* IPv6 address */ {
+ const int is_v6 = *(data+3) == 4;
+ const unsigned addrlen = is_v6 ? 16 : 4;
+ log_debug(LD_APP,"socks5: ipv4 address type");
+ if (datalen < 6+addrlen) {/* ip/port there? */
+ *want_length_out = 6+addrlen;
+ return 0; /* not yet */
+ }
+
+ if (is_v6)
+ tor_addr_from_ipv6_bytes(&destaddr, data+4);
+ else
+ tor_addr_from_ipv4n(&destaddr, get_uint32(data+4));
+
+ tor_addr_to_str(tmpbuf, &destaddr, sizeof(tmpbuf), 1);
+
+ if (BUG(strlen(tmpbuf)+1 > MAX_SOCKS_ADDR_LEN)) {
+ /* LCOV_EXCL_START -- This branch is unreachable, given the
+ * size of tmpbuf and the actual value of MAX_SOCKS_ADDR_LEN */
+ socks_request_set_socks5_error(req, SOCKS5_GENERAL_ERROR);
+ log_warn(LD_APP,
+ "socks5 IP takes %d bytes, which doesn't fit in %d. "
+ "Rejecting.",
+ (int)strlen(tmpbuf)+1,(int)MAX_SOCKS_ADDR_LEN);
+ return -1;
+ /* LCOV_EXCL_STOP */
+ }
+ strlcpy(req->address,tmpbuf,sizeof(req->address));
+ req->port = ntohs(get_uint16(data+4+addrlen));
+ *drain_out = 6+addrlen;
+ if (req->command != SOCKS_COMMAND_RESOLVE_PTR &&
+ !addressmap_have_mapping(req->address,0)) {
+ log_unsafe_socks_warning(5, req->address, req->port, safe_socks);
+ if (safe_socks) {
+ socks_request_set_socks5_error(req, SOCKS5_NOT_ALLOWED);
+ return -1;
+ }
+ }
+ return 1;
+ }
+ case 3: /* fqdn */
+ log_debug(LD_APP,"socks5: fqdn address type");
+ if (req->command == SOCKS_COMMAND_RESOLVE_PTR) {
+ socks_request_set_socks5_error(req,
+ SOCKS5_ADDRESS_TYPE_NOT_SUPPORTED);
+ log_warn(LD_APP, "socks5 received RESOLVE_PTR command with "
+ "hostname type. Rejecting.");
+ return -1;
+ }
+ len = (unsigned char)*(data+4);
+ if (datalen < 7+len) { /* addr/port there? */
+ *want_length_out = 7+len;
+ return 0; /* not yet */
+ }
+ if (BUG(len+1 > MAX_SOCKS_ADDR_LEN)) {
+ /* LCOV_EXCL_START -- unreachable, since len is at most 255,
+ * and MAX_SOCKS_ADDR_LEN is 256. */
+ socks_request_set_socks5_error(req, SOCKS5_GENERAL_ERROR);
+ log_warn(LD_APP,
+ "socks5 hostname is %d bytes, which doesn't fit in "
+ "%d. Rejecting.", len+1,MAX_SOCKS_ADDR_LEN);
+ return -1;
+ /* LCOV_EXCL_STOP */
+ }
+ memcpy(req->address,data+5,len);
+ req->address[len] = 0;
+ req->port = ntohs(get_uint16(data+5+len));
+ *drain_out = 5+len+2;
+
+ if (!string_is_valid_dest(req->address)) {
+ socks_request_set_socks5_error(req, SOCKS5_GENERAL_ERROR);
+
+ log_warn(LD_PROTOCOL,
+ "Your application (using socks5 to port %d) gave Tor "
+ "a malformed hostname: %s. Rejecting the connection.",
+ req->port, escaped_safe_str_client(req->address));
+ return -1;
+ }
+ if (log_sockstype)
+ log_notice(LD_APP,
+ "Your application (using socks5 to port %d) instructed "
+ "Tor to take care of the DNS resolution itself if "
+ "necessary. This is good.", req->port);
+ return 1;
+ default: /* unsupported */
+ socks_request_set_socks5_error(req,
+ SOCKS5_ADDRESS_TYPE_NOT_SUPPORTED);
+ log_warn(LD_APP,"socks5: unsupported address type %d. Rejecting.",
+ (int) *(data+3));
+ return -1;
+ }
+ tor_assert(0);
+ break;
+ case 4: { /* socks4 */
+ enum {socks4, socks4a} socks4_prot = socks4a;
+ const char *authstart, *authend;
+ /* http://ss5.sourceforge.net/socks4.protocol.txt */
+ /* http://ss5.sourceforge.net/socks4A.protocol.txt */
+
+ req->socks_version = 4;
+ if (datalen < SOCKS4_NETWORK_LEN) {/* basic info available? */
+ *want_length_out = SOCKS4_NETWORK_LEN;
+ return 0; /* not yet */
+ }
+ // buf_pullup(buf, 1280);
+ req->command = (unsigned char) *(data+1);
+ if (req->command != SOCKS_COMMAND_CONNECT &&
+ req->command != SOCKS_COMMAND_RESOLVE) {
+ /* not a connect or resolve? we don't support it. (No resolve_ptr with
+ * socks4.) */
+ log_warn(LD_APP,"socks4: command %d not recognized. Rejecting.",
+ req->command);
+ return -1;
+ }
+
+ req->port = ntohs(get_uint16(data+2));
+ destip = ntohl(get_uint32(data+4));
+ if ((!req->port && req->command!=SOCKS_COMMAND_RESOLVE) || !destip) {
+ log_warn(LD_APP,"socks4: Port or DestIP is zero. Rejecting.");
+ return -1;
+ }
+ if (destip >> 8) {
+ log_debug(LD_APP,"socks4: destip not in form 0.0.0.x.");
+ in.s_addr = htonl(destip);
+ tor_inet_ntoa(&in,tmpbuf,sizeof(tmpbuf));
+ if (BUG(strlen(tmpbuf)+1 > MAX_SOCKS_ADDR_LEN)) {
+ /* LCOV_EXCL_START -- This branch is unreachable, given the
+ * size of tmpbuf and the actual value of MAX_SOCKS_ADDR_LEN */
+ log_debug(LD_APP,"socks4 addr (%d bytes) too long. Rejecting.",
+ (int)strlen(tmpbuf));
+ return -1;
+ /* LCOV_EXCL_STOP */
+ }
+ log_debug(LD_APP,
+ "socks4: successfully read destip (%s)",
+ safe_str_client(tmpbuf));
+ socks4_prot = socks4;
+ }
+
+ authstart = data + SOCKS4_NETWORK_LEN;
+ next = memchr(authstart, 0,
+ datalen-SOCKS4_NETWORK_LEN);
+ if (!next) {
+ if (datalen >= 1024) {
+ log_debug(LD_APP, "Socks4 user name too long; rejecting.");
+ return -1;
+ }
+ log_debug(LD_APP,"socks4: Username not here yet.");
+ *want_length_out = datalen+1024; /* More than we need, but safe */
+ return 0;
+ }
+ authend = next;
+ tor_assert(next < data+datalen);
+
+ startaddr = NULL;
+ if (socks4_prot != socks4a &&
+ !addressmap_have_mapping(tmpbuf,0)) {
+ log_unsafe_socks_warning(4, tmpbuf, req->port, safe_socks);
+
+ if (safe_socks)
+ return -1;
+ }
+ if (socks4_prot == socks4a) {
+ if (next+1 == data+datalen) {
+ log_debug(LD_APP,"socks4: No part of destaddr here yet.");
+ *want_length_out = datalen + 1024; /* More than we need, but safe */
+ return 0;
+ }
+ startaddr = next+1;
+ next = memchr(startaddr, 0, data + datalen - startaddr);
+ if (!next) {
+ if (datalen >= 1024) {
+ log_debug(LD_APP,"socks4: Destaddr too long.");
+ return -1;
+ }
+ log_debug(LD_APP,"socks4: Destaddr not all here yet.");
+ *want_length_out = datalen + 1024; /* More than we need, but safe */
+ return 0;
+ }
+ if (MAX_SOCKS_ADDR_LEN <= next-startaddr) {
+ log_warn(LD_APP,"socks4: Destaddr too long. Rejecting.");
+ return -1;
+ }
+ // tor_assert(next < buf->cur+buf_datalen(buf));
+
+ if (log_sockstype)
+ log_notice(LD_APP,
+ "Your application (using socks4a to port %d) instructed "
+ "Tor to take care of the DNS resolution itself if "
+ "necessary. This is good.", req->port);
+ }
+ log_debug(LD_APP,"socks4: Everything is here. Success.");
+ strlcpy(req->address, startaddr ? startaddr : tmpbuf,
+ sizeof(req->address));
+ if (!string_is_valid_dest(req->address)) {
+ log_warn(LD_PROTOCOL,
+ "Your application (using socks4 to port %d) gave Tor "
+ "a malformed hostname: %s. Rejecting the connection.",
+ req->port, escaped_safe_str_client(req->address));
+ return -1;
+ }
+ if (authend != authstart) {
+ req->got_auth = 1;
+ req->usernamelen = authend - authstart;
+ req->username = tor_memdup(authstart, authend - authstart);
+ }
+ /* next points to the final \0 on inbuf */
+ *drain_out = next - data + 1;
+ return 1;
+ }
+ case 'G': /* get */
+ case 'H': /* head */
+ case 'P': /* put/post */
+ case 'C': /* connect */
+ strlcpy((char*)req->reply, SOCKS_PROXY_IS_NOT_AN_HTTP_PROXY_MSG,
+ MAX_SOCKS_REPLY_LEN);
+ req->replylen = strlen((char*)req->reply)+1;
+ /* fall through */
+ default: /* version is not socks4 or socks5 */
+ log_warn(LD_APP,
+ "Socks version %d not recognized. (This port is not an "
+ "HTTP proxy; did you want to use HTTPTunnelPort?)",
+ *(data));
+ {
+ /* Tell the controller the first 8 bytes. */
+ char *tmp = tor_strndup(data, datalen < 8 ? datalen : 8);
+ control_event_client_status(LOG_WARN,
+ "SOCKS_UNKNOWN_PROTOCOL DATA=\"%s\"",
+ escaped(tmp));
+ tor_free(tmp);
+ }
+ return -1;
+ }
+}
+
+/** Inspect a reply from SOCKS server stored in <b>buf</b> according
+ * to <b>state</b>, removing the protocol data upon success. Return 0 on
+ * incomplete response, 1 on success and -1 on error, in which case
+ * <b>reason</b> is set to a descriptive message (free() when finished
+ * with it).
+ *
+ * As a special case, 2 is returned when user/pass is required
+ * during SOCKS5 handshake and user/pass is configured.
+ */
+int
+fetch_from_buf_socks_client(buf_t *buf, int state, char **reason)
+{
+ ssize_t drain = 0;
+ int r;
+ const char *head = NULL;
+ size_t datalen = 0;
+
+ if (buf_datalen(buf) < 2)
+ return 0;
+
+ buf_pullup(buf, MAX_SOCKS_MESSAGE_LEN, &head, &datalen);
+ tor_assert(head && datalen >= 2);
+
+ r = parse_socks_client((uint8_t*)head, datalen,
+ state, reason, &drain);
+ if (drain > 0)
+ buf_drain(buf, drain);
+ else if (drain < 0)
+ buf_clear(buf);
+
+ return r;
+}
+
+/** Implementation logic for fetch_from_*_socks_client. */
+static int
+parse_socks_client(const uint8_t *data, size_t datalen,
+ int state, char **reason,
+ ssize_t *drain_out)
+{
+ unsigned int addrlen;
+ *drain_out = 0;
+ if (datalen < 2)
+ return 0;
+
+ switch (state) {
+ case PROXY_SOCKS4_WANT_CONNECT_OK:
+ /* Wait for the complete response */
+ if (datalen < 8)
+ return 0;
+
+ if (data[1] != 0x5a) {
+ *reason = tor_strdup(socks4_response_code_to_string(data[1]));
+ return -1;
+ }
+
+ /* Success */
+ *drain_out = 8;
+ return 1;
+
+ case PROXY_SOCKS5_WANT_AUTH_METHOD_NONE:
+ /* we don't have any credentials */
+ if (data[1] != 0x00) {
+ *reason = tor_strdup("server doesn't support any of our "
+ "available authentication methods");
+ return -1;
+ }
+
+ log_info(LD_NET, "SOCKS 5 client: continuing without authentication");
+ *drain_out = -1;
+ return 1;
+
+ case PROXY_SOCKS5_WANT_AUTH_METHOD_RFC1929:
+ /* we have a username and password. return 1 if we can proceed without
+ * providing authentication, or 2 otherwise. */
+ switch (data[1]) {
+ case 0x00:
+ log_info(LD_NET, "SOCKS 5 client: we have auth details but server "
+ "doesn't require authentication.");
+ *drain_out = -1;
+ return 1;
+ case 0x02:
+ log_info(LD_NET, "SOCKS 5 client: need authentication.");
+ *drain_out = -1;
+ return 2;
+ }
+
+ *reason = tor_strdup("server doesn't support any of our available "
+ "authentication methods");
+ return -1;
+
+ case PROXY_SOCKS5_WANT_AUTH_RFC1929_OK:
+ /* handle server reply to rfc1929 authentication */
+ if (data[1] != 0x00) {
+ *reason = tor_strdup("authentication failed");
+ return -1;
+ }
+
+ log_info(LD_NET, "SOCKS 5 client: authentication successful.");
+ *drain_out = -1;
+ return 1;
+
+ case PROXY_SOCKS5_WANT_CONNECT_OK:
+ /* response is variable length. BND.ADDR, etc, isn't needed
+ * (don't bother with buf_pullup()), but make sure to eat all
+ * the data used */
+
+ /* wait for address type field to arrive */
+ if (datalen < 4)
+ return 0;
+
+ switch (data[3]) {
+ case 0x01: /* ip4 */
+ addrlen = 4;
+ break;
+ case 0x04: /* ip6 */
+ addrlen = 16;
+ break;
+ case 0x03: /* fqdn (can this happen here?) */
+ if (datalen < 5)
+ return 0;
+ addrlen = 1 + data[4];
+ break;
+ default:
+ *reason = tor_strdup("invalid response to connect request");
+ return -1;
+ }
+
+ /* wait for address and port */
+ if (datalen < 6 + addrlen)
+ return 0;
+
+ if (data[1] != 0x00) {
+ *reason = tor_strdup(socks5_response_code_to_string(data[1]));
+ return -1;
+ }
+
+ *drain_out = 6 + addrlen;
+ return 1;
+ }
+
+ /* LCOV_EXCL_START */
+ /* shouldn't get here if the input state is one we know about... */
+ tor_assert(0);
+
+ return -1;
+ /* LCOV_EXCL_STOP */
+}
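For reference, the connect reply that PROXY_SOCKS5_WANT_CONNECT_OK parses is
VER, REP, RSV, ATYP, then a variable-length BND.ADDR and a 2-byte BND.PORT,
which is why the code switches on data[3] before it knows the full length. A
standalone sketch of that length computation; toy_socks5_reply_len is
illustrative.

#include <stddef.h>
#include <stdint.h>

/* Given at least the first 5 bytes of a SOCKS5 connect reply, return
 * its full length, or 0 if the address type is unknown. Mirrors the
 * addrlen logic in parse_socks_client(). */
static size_t
toy_socks5_reply_len(const uint8_t *data)
{
  size_t addrlen;
  switch (data[3]) {            /* ATYP */
    case 0x01: addrlen = 4;  break;           /* IPv4 */
    case 0x04: addrlen = 16; break;           /* IPv6 */
    case 0x03: addrlen = 1 + data[4]; break;  /* FQDN: len byte + name */
    default: return 0;
  }
  return 4 + addrlen + 2;       /* header + BND.ADDR + BND.PORT */
}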
diff --git a/src/core/proto/proto_socks.h b/src/core/proto/proto_socks.h
new file mode 100644
index 0000000000..53de288f65
--- /dev/null
+++ b/src/core/proto/proto_socks.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef TOR_PROTO_SOCKS_H
+#define TOR_PROTO_SOCKS_H
+
+struct socks_request_t;
+struct buf_t;
+
+struct socks_request_t *socks_request_new(void);
+void socks_request_free_(struct socks_request_t *req);
+#define socks_request_free(req) \
+ FREE_AND_NULL(socks_request_t, socks_request_free_, (req))
+int fetch_from_buf_socks(struct buf_t *buf, struct socks_request_t *req,
+                         int log_sockstype, int safe_socks);
+int fetch_from_buf_socks_client(struct buf_t *buf, int state, char **reason);
+
+#endif /* !defined(TOR_PROTO_SOCKS_H) */
diff --git a/src/core/proto/protover.c b/src/core/proto/protover.c
new file mode 100644
index 0000000000..f63c134565
--- /dev/null
+++ b/src/core/proto/protover.c
@@ -0,0 +1,923 @@
+/* Copyright (c) 2016-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file protover.c
+ * \brief Versioning information for different pieces of the Tor protocol.
+ *
+ * Starting in version 0.2.9.3-alpha, Tor places separate version numbers on
+ * each of the different components of its protocol. Relays use these numbers
+ * to advertise what versions of the protocols they can support, and clients
+ * use them to find what they can ask a given relay to do. Authorities vote
+ * on the supported protocol versions for each relay, and also vote on
+ * which protocols you should have to support in order to be on the Tor
+ * network. All Tor instances use these required/recommended protocol versions
+ * to tell what level of support for recent protocols each relay has, and
+ * to decide whether they should be running given their current protocols.
+ *
+ * The main advantage of these protocol version numbers over using Tor
+ * version numbers is that they allow different implementations of the Tor
+ * protocols to develop independently, without having to claim compatibility
+ * with specific versions of Tor.
+ **/
+
+#define PROTOVER_PRIVATE
+
+#include "or/or.h"
+#include "or/protover.h"
+#include "or/routerparse.h"
+
+#ifndef HAVE_RUST
+
+static const smartlist_t *get_supported_protocol_list(void);
+static int protocol_list_contains(const smartlist_t *protos,
+ protocol_type_t pr, uint32_t ver);
+
+/** Mapping between protocol type string and protocol type. */
+/// C_RUST_COUPLED: src/rust/protover/protover.rs `PROTOCOL_NAMES`
+static const struct {
+ protocol_type_t protover_type;
+ const char *name;
+} PROTOCOL_NAMES[] = {
+ { PRT_LINK, "Link" },
+ { PRT_LINKAUTH, "LinkAuth" },
+ { PRT_RELAY, "Relay" },
+ { PRT_DIRCACHE, "DirCache" },
+ { PRT_HSDIR, "HSDir" },
+ { PRT_HSINTRO, "HSIntro" },
+ { PRT_HSREND, "HSRend" },
+ { PRT_DESC, "Desc" },
+ { PRT_MICRODESC, "Microdesc"},
+ { PRT_CONS, "Cons" }
+};
+
+#define N_PROTOCOL_NAMES ARRAY_LENGTH(PROTOCOL_NAMES)
+
+/* Maximum allowed length of any single subprotocol name. */
+// C_RUST_COUPLED: src/rust/protover/protover.rs
+// `MAX_PROTOCOL_NAME_LENGTH`
+static const unsigned MAX_PROTOCOL_NAME_LENGTH = 100;
+
+/**
+ * Given a protocol_type_t, return the corresponding string used in
+ * descriptors.
+ */
+STATIC const char *
+protocol_type_to_str(protocol_type_t pr)
+{
+ unsigned i;
+ for (i=0; i < N_PROTOCOL_NAMES; ++i) {
+ if (PROTOCOL_NAMES[i].protover_type == pr)
+ return PROTOCOL_NAMES[i].name;
+ }
+ /* LCOV_EXCL_START */
+ tor_assert_nonfatal_unreached_once();
+ return "UNKNOWN";
+ /* LCOV_EXCL_STOP */
+}
+
+/**
+ * Given a string, find the corresponding protocol type and store it in
+ * <b>pr_out</b>. Return 0 on success, -1 on failure.
+ */
+STATIC int
+str_to_protocol_type(const char *s, protocol_type_t *pr_out)
+{
+ if (BUG(!pr_out))
+ return -1;
+
+ unsigned i;
+ for (i=0; i < N_PROTOCOL_NAMES; ++i) {
+ if (0 == strcmp(s, PROTOCOL_NAMES[i].name)) {
+ *pr_out = PROTOCOL_NAMES[i].protover_type;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+/**
+ * Release all space held by a single proto_entry_t structure
+ */
+STATIC void
+proto_entry_free_(proto_entry_t *entry)
+{
+ if (!entry)
+ return;
+ tor_free(entry->name);
+ SMARTLIST_FOREACH(entry->ranges, proto_range_t *, r, tor_free(r));
+ smartlist_free(entry->ranges);
+ tor_free(entry);
+}
+
+/** The largest possible protocol version. */
+#define MAX_PROTOCOL_VERSION (UINT32_MAX-1)
+
+/**
+ * Given a string <b>s</b> and optional end-of-string pointer
+ * <b>end_of_range</b>, parse the protocol range and store it in
+ * <b>low_out</b> and <b>high_out</b>. A protocol range has the format U, or
+ * U-U, where U is an unsigned 32-bit integer.
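+ *
+ * For example, "5" parses to low=5, high=5, and "1-4" parses to low=1,
+ * high=4.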
+ */
+static int
+parse_version_range(const char *s, const char *end_of_range,
+ uint32_t *low_out, uint32_t *high_out)
+{
+ uint32_t low, high;
+ char *next = NULL;
+ int ok;
+
+ tor_assert(high_out);
+ tor_assert(low_out);
+
+ if (BUG(!end_of_range))
+ end_of_range = s + strlen(s); // LCOV_EXCL_LINE
+
+ /* A range must start with a digit. */
+ if (!TOR_ISDIGIT(*s)) {
+ goto error;
+ }
+
+ /* Note that this wouldn't be safe if we didn't know that we'll
+ * eventually hit a NUL. */
+ low = (uint32_t) tor_parse_ulong(s, 10, 0, MAX_PROTOCOL_VERSION, &ok, &next);
+ if (!ok)
+ goto error;
+ if (next > end_of_range)
+ goto error;
+ if (next == end_of_range) {
+ high = low;
+ goto done;
+ }
+
+ if (*next != '-')
+ goto error;
+ s = next+1;
+
+ /* As above, the high end of the range must start with a digit. */
+ if (!TOR_ISDIGIT(*s)) {
+ goto error;
+ }
+ high = (uint32_t) tor_parse_ulong(s, 10, 0,
+ MAX_PROTOCOL_VERSION, &ok, &next);
+ if (!ok)
+ goto error;
+ if (next != end_of_range)
+ goto error;
+
+ if (low > high)
+ goto error;
+
+ done:
+ *high_out = high;
+ *low_out = low;
+ return 0;
+
+ error:
+ return -1;
+}
+
+/** Parse a single protocol entry from <b>s</b> up to an optional
+ * <b>end_of_entry</b> pointer, and return that protocol entry. Return NULL
+ * on error.
+ *
+ * A protocol entry has a keyword, an = sign, and zero or more ranges. */
+static proto_entry_t *
+parse_single_entry(const char *s, const char *end_of_entry)
+{
+ proto_entry_t *out = tor_malloc_zero(sizeof(proto_entry_t));
+ const char *equals;
+
+ out->ranges = smartlist_new();
+
+ if (BUG(!end_of_entry))
+ end_of_entry = s + strlen(s); // LCOV_EXCL_LINE
+
+ /* There must be an =. */
+ equals = memchr(s, '=', end_of_entry - s);
+ if (!equals)
+ goto error;
+
+ /* The name must be nonempty */
+ if (equals == s)
+ goto error;
+
+ /* Remember the name now, so that we can log it if it is invalid.
+ * (It is freed on the error path.) */
+ out->name = tor_strndup(s, equals-s);
+
+ /* The name must not be longer than MAX_PROTOCOL_NAME_LENGTH. */
+ if (equals - s > (int)MAX_PROTOCOL_NAME_LENGTH) {
+ log_warn(LD_NET, "When parsing a protocol entry, I got a very large "
+ "protocol name. This is possibly an attack or a bug, unless "
+ "the Tor network truly supports protocol names larger than "
+ "%u characters. The offending string was: %s",
+ MAX_PROTOCOL_NAME_LENGTH, escaped(out->name));
+ goto error;
+ }
+
+ tor_assert(equals < end_of_entry);
+
+ s = equals + 1;
+ while (s < end_of_entry) {
+ const char *comma = memchr(s, ',', end_of_entry-s);
+ proto_range_t *range = tor_malloc_zero(sizeof(proto_range_t));
+ if (! comma)
+ comma = end_of_entry;
+
+ smartlist_add(out->ranges, range);
+ if (parse_version_range(s, comma, &range->low, &range->high) < 0) {
+ goto error;
+ }
+
+ s = comma;
+ while (s < end_of_entry && *s == ',')
+ ++s;
+ }
+
+ return out;
+
+ error:
+ proto_entry_free(out);
+ return NULL;
+}
+
+/**
+ * Parse the protocol list from <b>s</b> and return it as a smartlist of
+ * proto_entry_t. Return NULL if the list cannot be parsed.
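+ *
+ * For example, "Link=1-5 LinkAuth=1,3" parses to two entries: one named
+ * "Link" with the single range 1-5, and one named "LinkAuth" with the
+ * singleton ranges 1 and 3.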
+ */
+STATIC smartlist_t *
+parse_protocol_list(const char *s)
+{
+ smartlist_t *entries = smartlist_new();
+
+ while (*s) {
+ /* Find the next space or the NUL. */
+ const char *end_of_entry = strchr(s, ' ');
+ proto_entry_t *entry;
+ if (!end_of_entry)
+ end_of_entry = s + strlen(s);
+
+ entry = parse_single_entry(s, end_of_entry);
+
+ if (! entry)
+ goto error;
+
+ smartlist_add(entries, entry);
+
+ s = end_of_entry;
+ while (*s == ' ')
+ ++s;
+ }
+
+ return entries;
+
+ error:
+ SMARTLIST_FOREACH(entries, proto_entry_t *, ent, proto_entry_free(ent));
+ smartlist_free(entries);
+ return NULL;
+}
+
+/**
+ * Return true if the protocol list in <b>s</b> fails to parse (in
+ * particular, if it contains a protocol name longer than
+ * MAX_PROTOCOL_NAME_LENGTH), and false otherwise.
+ */
+bool
+protover_contains_long_protocol_names(const char *s)
+{
+ smartlist_t *list = parse_protocol_list(s);
+ if (!list)
+ return true; /* yes: unparseable, possibly a dangerously long name */
+ SMARTLIST_FOREACH(list, proto_entry_t *, ent, proto_entry_free(ent));
+ smartlist_free(list);
+ return false; /* no, looks fine */
+}
+
+/**
+ * Given a protocol type and version number, return true iff we know
+ * how to speak that protocol.
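+ *
+ * For example, with the supported protocols listed in
+ * protover_get_supported_protocols() below, PRT_LINK version 5 is
+ * supported, but PRT_LINKAUTH version 2 is not (we advertise
+ * "LinkAuth=1,3").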
+ */
+int
+protover_is_supported_here(protocol_type_t pr, uint32_t ver)
+{
+ const smartlist_t *ours = get_supported_protocol_list();
+ return protocol_list_contains(ours, pr, ver);
+}
+
+/**
+ * Return true iff "list" encodes a protocol list that includes support for
+ * the indicated protocol and version.
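+ *
+ * For example, protocol_list_supports_protocol("Link=3-5", PRT_LINK, 4)
+ * returns 1, while the same query for version 6 returns 0.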
+ */
+int
+protocol_list_supports_protocol(const char *list, protocol_type_t tp,
+ uint32_t version)
+{
+ /* NOTE: This is a pretty inefficient implementation. If it ever shows
+ * up in profiles, we should memoize it.
+ */
+ smartlist_t *protocols = parse_protocol_list(list);
+ if (!protocols) {
+ return 0;
+ }
+ int contains = protocol_list_contains(protocols, tp, version);
+
+ SMARTLIST_FOREACH(protocols, proto_entry_t *, ent, proto_entry_free(ent));
+ smartlist_free(protocols);
+ return contains;
+}
+
+/**
+ * Return true iff "list" encodes a protocol list that includes support for
+ * the indicated protocol and version, or some later version.
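+ *
+ * For example, given "Link=3-5", a query for version 2 or later returns
+ * 1 (since version 5 >= 2 is listed), but a query for version 6 or later
+ * returns 0.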
+ */
+int
+protocol_list_supports_protocol_or_later(const char *list,
+ protocol_type_t tp,
+ uint32_t version)
+{
+ /* NOTE: This is a pretty inefficient implementation. If it ever shows
+ * up in profiles, we should memoize it.
+ */
+ smartlist_t *protocols = parse_protocol_list(list);
+ if (!protocols) {
+ return 0;
+ }
+ const char *pr_name = protocol_type_to_str(tp);
+
+ int contains = 0;
+ SMARTLIST_FOREACH_BEGIN(protocols, proto_entry_t *, proto) {
+ if (strcasecmp(proto->name, pr_name))
+ continue;
+ SMARTLIST_FOREACH_BEGIN(proto->ranges, const proto_range_t *, range) {
+ if (range->high >= version) {
+ contains = 1;
+ goto found;
+ }
+ } SMARTLIST_FOREACH_END(range);
+ } SMARTLIST_FOREACH_END(proto);
+
+ found:
+ SMARTLIST_FOREACH(protocols, proto_entry_t *, ent, proto_entry_free(ent));
+ smartlist_free(protocols);
+ return contains;
+}
+
+/** Return the canonical string containing the list of protocols
+ * that we support. */
+/// C_RUST_COUPLED: src/rust/protover/protover.rs `SUPPORTED_PROTOCOLS`
+const char *
+protover_get_supported_protocols(void)
+{
+ return
+ "Cons=1-2 "
+ "Desc=1-2 "
+ "DirCache=1-2 "
+ "HSDir=1-2 "
+ "HSIntro=3-4 "
+ "HSRend=1-2 "
+ "Link=1-5 "
+ "LinkAuth=1,3 "
+ "Microdesc=1-2 "
+ "Relay=1-2";
+}
+
+/** The protocols from protover_get_supported_protocols(), as parsed into a
+ * list of proto_entry_t values. Access this via
+ * get_supported_protocol_list. */
+static smartlist_t *supported_protocol_list = NULL;
+
+/** Return a pointer to a smartlist of proto_entry_t for the protocols
+ * we support. */
+static const smartlist_t *
+get_supported_protocol_list(void)
+{
+ if (PREDICT_UNLIKELY(supported_protocol_list == NULL)) {
+ supported_protocol_list =
+ parse_protocol_list(protover_get_supported_protocols());
+ }
+ return supported_protocol_list;
+}
+
+/**
+ * Given a protocol entry, encode it at the end of the smartlist <b>chunks</b>
+ * as one or more newly allocated strings.
+ */
+static void
+proto_entry_encode_into(smartlist_t *chunks, const proto_entry_t *entry)
+{
+ smartlist_add_asprintf(chunks, "%s=", entry->name);
+
+ SMARTLIST_FOREACH_BEGIN(entry->ranges, proto_range_t *, range) {
+ const char *comma = "";
+ if (range_sl_idx != 0)
+ comma = ",";
+
+ if (range->low == range->high) {
+ smartlist_add_asprintf(chunks, "%s%lu",
+ comma, (unsigned long)range->low);
+ } else {
+ smartlist_add_asprintf(chunks, "%s%lu-%lu",
+ comma, (unsigned long)range->low,
+ (unsigned long)range->high);
+ }
+ } SMARTLIST_FOREACH_END(range);
+}
+
+/** Given a smartlist of proto_entry_t items, encode them into a newly
+ * allocated space-separated string. For example, an entry named "Link"
+ * with the ranges 1-4 and 6 encodes as "Link=1-4,6". */
+STATIC char *
+encode_protocol_list(const smartlist_t *sl)
+{
+ const char *separator = "";
+ smartlist_t *chunks = smartlist_new();
+ SMARTLIST_FOREACH_BEGIN(sl, const proto_entry_t *, ent) {
+ smartlist_add_strdup(chunks, separator);
+
+ proto_entry_encode_into(chunks, ent);
+
+ separator = " ";
+ } SMARTLIST_FOREACH_END(ent);
+
+ char *result = smartlist_join_strings(chunks, "", 0, NULL);
+
+ SMARTLIST_FOREACH(chunks, char *, cp, tor_free(cp));
+ smartlist_free(chunks);
+
+ return result;
+}
+
+/* We treat any protocol list with more than this many subprotocols in it
+ * as a DoS attempt. */
+/// C_RUST_COUPLED: src/rust/protover/protover.rs
+/// `MAX_PROTOCOLS_TO_EXPAND`
+static const int MAX_PROTOCOLS_TO_EXPAND = (1<<16);
+
+/** Voting helper: Given a list of proto_entry_t, return a newly allocated
+ * smartlist of newly allocated strings, one for each included protocol
+ * version. (So 'Foo=3,5-7' expands to a list of 'Foo=3', 'Foo=5', 'Foo=6',
+ * 'Foo=7'.)
+ *
+ * Do not list any protocol version more than once.
+ *
+ * Return NULL if the list would be too big.
+ */
+static smartlist_t *
+expand_protocol_list(const smartlist_t *protos)
+{
+ smartlist_t *expanded = smartlist_new();
+ if (!protos)
+ return expanded;
+
+ SMARTLIST_FOREACH_BEGIN(protos, const proto_entry_t *, ent) {
+ const char *name = ent->name;
+ if (strlen(name) > MAX_PROTOCOL_NAME_LENGTH) {
+ log_warn(LD_NET, "When expanding a protocol entry, I got a very large "
+ "protocol name. This is possibly an attack or a bug, unless "
+ "the Tor network truly supports protocol names larger than "
+ "%ud characters. The offending string was: %s",
+ MAX_PROTOCOL_NAME_LENGTH, escaped(name));
+ continue;
+ }
+ SMARTLIST_FOREACH_BEGIN(ent->ranges, const proto_range_t *, range) {
+ uint32_t u;
+ for (u = range->low; u <= range->high; ++u) {
+ smartlist_add_asprintf(expanded, "%s=%lu", name, (unsigned long)u);
+ if (smartlist_len(expanded) > MAX_PROTOCOLS_TO_EXPAND)
+ goto too_many;
+ }
+ } SMARTLIST_FOREACH_END(range);
+ } SMARTLIST_FOREACH_END(ent);
+
+ smartlist_sort_strings(expanded);
+ smartlist_uniq_strings(expanded); // This makes voting work; do not remove.
+ return expanded;
+
+ too_many:
+ SMARTLIST_FOREACH(expanded, char *, cp, tor_free(cp));
+ smartlist_free(expanded);
+ return NULL;
+}
+
+/** Voting helper: compare two singleton proto_entry_t items by version
+ * alone. (A singleton item is one with a single range entry where
+ * low==high.) */
+static int
+cmp_single_ent_by_version(const void **a_, const void **b_)
+{
+ const proto_entry_t *ent_a = *a_;
+ const proto_entry_t *ent_b = *b_;
+
+ tor_assert(smartlist_len(ent_a->ranges) == 1);
+ tor_assert(smartlist_len(ent_b->ranges) == 1);
+
+ const proto_range_t *a = smartlist_get(ent_a->ranges, 0);
+ const proto_range_t *b = smartlist_get(ent_b->ranges, 0);
+
+ tor_assert(a->low == a->high);
+ tor_assert(b->low == b->high);
+
+ if (a->low < b->low) {
+ return -1;
+ } else if (a->low == b->low) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+/** Voting helper: Given a list of singleton protocol strings (of the form
+ * Foo=7), return a canonical listing of all the protocol versions listed,
+ * with protocol names sorted lexically, versions in numerically increasing
+ * order, and as few range entries as possible.
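+ *
+ * For example, the inputs "Foo=7", "Foo=5", "Foo=6", and "Bar=3" contract
+ * to "Bar=3 Foo=5-7".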
+ **/
+static char *
+contract_protocol_list(const smartlist_t *proto_strings)
+{
+ if (smartlist_len(proto_strings) == 0) {
+ return tor_strdup("");
+ }
+
+ // map from name to list of single-version entries
+ strmap_t *entry_lists_by_name = strmap_new();
+ // list of protocol names
+ smartlist_t *all_names = smartlist_new();
+ // list of strings for the output we're building
+ smartlist_t *chunks = smartlist_new();
+
+ // Parse each item and file it in entry_lists_by_name. Build
+ // 'all_names' at the same time.
+ SMARTLIST_FOREACH_BEGIN(proto_strings, const char *, s) {
+ if (BUG(!s))
+ continue;// LCOV_EXCL_LINE
+ proto_entry_t *ent = parse_single_entry(s, s+strlen(s));
+ if (BUG(!ent))
+ continue; // LCOV_EXCL_LINE
+ smartlist_t *lst = strmap_get(entry_lists_by_name, ent->name);
+ if (!lst) {
+ smartlist_add(all_names, ent->name);
+ lst = smartlist_new();
+ strmap_set(entry_lists_by_name, ent->name, lst);
+ }
+ smartlist_add(lst, ent);
+ } SMARTLIST_FOREACH_END(s);
+
+ // We want to output the protocols sorted by their name.
+ smartlist_sort_strings(all_names);
+
+ SMARTLIST_FOREACH_BEGIN(all_names, const char *, name) {
+ const int first_entry = (name_sl_idx == 0);
+ smartlist_t *lst = strmap_get(entry_lists_by_name, name);
+ tor_assert(lst);
+ // Sort every entry with this name by version. They are
+ // singletons, so there can't be overlap.
+ smartlist_sort(lst, cmp_single_ent_by_version);
+
+ if (! first_entry)
+ smartlist_add_strdup(chunks, " ");
+
+ /* We're going to construct this entry from the ranges. */
+ proto_entry_t *entry = tor_malloc_zero(sizeof(proto_entry_t));
+ entry->ranges = smartlist_new();
+ entry->name = tor_strdup(name);
+
+ // Now, find all the ranges of versions start..end where
+ // all of start, start+1, start+2, ..., end are included.
+ int start_of_cur_series = 0;
+ while (start_of_cur_series < smartlist_len(lst)) {
+ const proto_entry_t *ent = smartlist_get(lst, start_of_cur_series);
+ const proto_range_t *range = smartlist_get(ent->ranges, 0);
+ const uint32_t ver_low = range->low;
+ uint32_t ver_high = ver_low;
+
+ int idx;
+ for (idx = start_of_cur_series+1; idx < smartlist_len(lst); ++idx) {
+ ent = smartlist_get(lst, idx);
+ range = smartlist_get(ent->ranges, 0);
+ if (range->low != ver_high + 1)
+ break;
+ ver_high += 1;
+ }
+
+ // Now idx is either off the end of the list, or the first sequence
+ // break in the list.
+ start_of_cur_series = idx;
+
+ proto_range_t *new_range = tor_malloc_zero(sizeof(proto_range_t));
+ new_range->low = ver_low;
+ new_range->high = ver_high;
+ smartlist_add(entry->ranges, new_range);
+ }
+ proto_entry_encode_into(chunks, entry);
+ proto_entry_free(entry);
+
+ } SMARTLIST_FOREACH_END(name);
+
+ // Build the result...
+ char *result = smartlist_join_strings(chunks, "", 0, NULL);
+
+ // And free all the stuff we allocated.
+ SMARTLIST_FOREACH_BEGIN(all_names, const char *, name) {
+ smartlist_t *lst = strmap_get(entry_lists_by_name, name);
+ tor_assert(lst);
+ SMARTLIST_FOREACH(lst, proto_entry_t *, e, proto_entry_free(e));
+ smartlist_free(lst);
+ } SMARTLIST_FOREACH_END(name);
+
+ strmap_free(entry_lists_by_name, NULL);
+ smartlist_free(all_names);
+ SMARTLIST_FOREACH(chunks, char *, cp, tor_free(cp));
+ smartlist_free(chunks);
+
+ return result;
+}
+
+/**
+ * Protocol voting implementation.
+ *
+ * Given a list of strings describing protocol versions, return a newly
+ * allocated string encoding all of the protocols that are listed by at
+ * least <b>threshold</b> of the inputs.
+ *
+ * The string is minimal and sorted according to the rules of
+ * contract_protocol_list above.
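+ *
+ * For example, with votes "Link=1-3", "Link=2-4", and "Link=3-5" and a
+ * threshold of 2, the result is "Link=2-4", since versions 2, 3, and 4
+ * each appear in at least two votes.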
+ */
+char *
+protover_compute_vote(const smartlist_t *list_of_proto_strings,
+ int threshold)
+{
+ if (smartlist_len(list_of_proto_strings) == 0) {
+ return tor_strdup("");
+ }
+
+ smartlist_t *all_entries = smartlist_new();
+
+ // First, parse the inputs and break them into singleton entries.
+ SMARTLIST_FOREACH_BEGIN(list_of_proto_strings, const char *, vote) {
+ smartlist_t *unexpanded = parse_protocol_list(vote);
+ if (! unexpanded) {
+ log_warn(LD_NET, "I failed with parsing a protocol list from "
+ "an authority. The offending string was: %s",
+ escaped(vote));
+ continue;
+ }
+ smartlist_t *this_vote = expand_protocol_list(unexpanded);
+ if (this_vote == NULL) {
+ log_warn(LD_NET, "When expanding a protocol list from an authority, I "
+ "got too many protocols. This is possibly an attack or a bug, "
+ "unless the Tor network truly has expanded to support over %d "
+ "different subprotocol versions. The offending string was: %s",
+ MAX_PROTOCOLS_TO_EXPAND, escaped(vote));
+ } else {
+ smartlist_add_all(all_entries, this_vote);
+ smartlist_free(this_vote);
+ }
+ SMARTLIST_FOREACH(unexpanded, proto_entry_t *, e, proto_entry_free(e));
+ smartlist_free(unexpanded);
+ } SMARTLIST_FOREACH_END(vote);
+
+ if (smartlist_len(all_entries) == 0) {
+ smartlist_free(all_entries);
+ return tor_strdup("");
+ }
+
+ // Now sort the singleton entries
+ smartlist_sort_strings(all_entries);
+
+ // Now find all the strings that appear at least 'threshold' times.
+ smartlist_t *include_entries = smartlist_new();
+ const char *cur_entry = smartlist_get(all_entries, 0);
+ int n_times = 0;
+ SMARTLIST_FOREACH_BEGIN(all_entries, const char *, ent) {
+ if (!strcmp(ent, cur_entry)) {
+ n_times++;
+ } else {
+ if (n_times >= threshold && cur_entry)
+ smartlist_add(include_entries, (void*)cur_entry);
+ cur_entry = ent;
+ n_times = 1;
+ }
+ } SMARTLIST_FOREACH_END(ent);
+
+ if (n_times >= threshold && cur_entry)
+ smartlist_add(include_entries, (void*)cur_entry);
+
+ // Finally, compress that list.
+ char *result = contract_protocol_list(include_entries);
+ smartlist_free(include_entries);
+ SMARTLIST_FOREACH(all_entries, char *, cp, tor_free(cp));
+ smartlist_free(all_entries);
+
+ return result;
+}
+
+/** Return true if every protocol version described in the string <b>s</b> is
+ * one that we support, and false otherwise. If <b>missing_out</b> is
+ * provided, set it to a newly allocated string describing the protocols
+ * we do not support.
+ *
+ * NOTE: This is quadratic, but we don't do it much: only a few times per
+ * consensus. Checking signatures should be way more expensive than this
+ * ever would be.
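+ *
+ * For example, given the supported list above, checking "Cons=1-99" would
+ * return false and set *<b>missing_out</b> to "Cons=3-99".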
+ **/
+int
+protover_all_supported(const char *s, char **missing_out)
+{
+ int all_supported = 1;
+ smartlist_t *missing_some;
+ smartlist_t *missing_completely;
+ smartlist_t *missing_all;
+
+ if (!s) {
+ return 1;
+ }
+
+ smartlist_t *entries = parse_protocol_list(s);
+ if (BUG(entries == NULL)) {
+ log_warn(LD_NET, "Received an unparseable protocol list %s"
+ " from the consensus", escaped(s));
+ return 1;
+ }
+
+ missing_some = smartlist_new();
+ missing_completely = smartlist_new();
+
+ SMARTLIST_FOREACH_BEGIN(entries, const proto_entry_t *, ent) {
+ protocol_type_t tp;
+ if (str_to_protocol_type(ent->name, &tp) < 0) {
+ if (smartlist_len(ent->ranges)) {
+ goto unsupported;
+ }
+ continue;
+ }
+
+ SMARTLIST_FOREACH_BEGIN(ent->ranges, const proto_range_t *, range) {
+ proto_entry_t *unsupported = tor_malloc_zero(sizeof(proto_entry_t));
+ proto_range_t *versions = tor_malloc_zero(sizeof(proto_range_t));
+ uint32_t i;
+
+ unsupported->name = tor_strdup(ent->name);
+ unsupported->ranges = smartlist_new();
+
+ for (i = range->low; i <= range->high; ++i) {
+ if (!protover_is_supported_here(tp, i)) {
+ if (versions->low == 0 && versions->high == 0) {
+ versions->low = i;
+ /* Pre-emptively add the high now, just in case we're in a single
+ * version range (e.g. "Link=999"). */
+ versions->high = i;
+ }
+ /* If the last one to be unsupported is one less than the current
+ * one, we're in a continuous range, so set the high field. */
+ if ((versions->high && versions->high == i - 1) ||
+ /* Similarly, if the last high wasn't set and we're currently
+ * one higher than the low, add current index as the highest
+ * known high. */
+ (!versions->high && versions->low == i - 1)) {
+ versions->high = i;
+ continue;
+ }
+ } else {
+ /* If we hit a supported version, and we previously had a range,
+ * we've hit a non-continuity. Copy the previous range and add it to
+ * the unsupported->ranges list and zero-out the previous range for
+ * the next iteration. */
+ if (versions->low != 0 && versions->high != 0) {
+ proto_range_t *versions_to_add = tor_malloc(sizeof(proto_range_t));
+
+ versions_to_add->low = versions->low;
+ versions_to_add->high = versions->high;
+ smartlist_add(unsupported->ranges, versions_to_add);
+
+ versions->low = 0;
+ versions->high = 0;
+ }
+ }
+ }
+ /* Once we've run out of versions to check, see if we had any unsupported
+ * ones and, if so, add them to unsupported->ranges. */
+ if (versions->low != 0 && versions->high != 0) {
+ smartlist_add(unsupported->ranges, versions);
+ }
+ /* Finally, if we had something unsupported, add it to the list of
+ * missing_some things and mark that there was something missing. */
+ if (smartlist_len(unsupported->ranges) != 0) {
+ smartlist_add(missing_some, (void*) unsupported);
+ all_supported = 0;
+ } else {
+ proto_entry_free(unsupported);
+ tor_free(versions);
+ }
+ } SMARTLIST_FOREACH_END(range);
+
+ continue;
+
+ unsupported:
+ all_supported = 0;
+ smartlist_add(missing_completely, (void*) ent);
+ } SMARTLIST_FOREACH_END(ent);
+
+ /* We keep the two smartlists separate so that we can free the
+ * proto_entry_t values we allocated for missing_some; here we join the
+ * two lists to build the output string. */
+ missing_all = smartlist_new();
+ smartlist_add_all(missing_all, missing_some);
+ smartlist_add_all(missing_all, missing_completely);
+
+ if (missing_out && !all_supported) {
+ tor_assert(smartlist_len(missing_all) != 0);
+ *missing_out = encode_protocol_list(missing_all);
+ }
+ SMARTLIST_FOREACH(missing_some, proto_entry_t *, ent, proto_entry_free(ent));
+ smartlist_free(missing_some);
+ smartlist_free(missing_completely);
+ smartlist_free(missing_all);
+
+ SMARTLIST_FOREACH(entries, proto_entry_t *, ent, proto_entry_free(ent));
+ smartlist_free(entries);
+
+ return all_supported;
+}
+
+/** Helper: Given a list of proto_entry_t, return true iff
+ * <b>pr</b>=<b>ver</b> is included in that list. */
+static int
+protocol_list_contains(const smartlist_t *protos,
+ protocol_type_t pr, uint32_t ver)
+{
+ if (BUG(protos == NULL)) {
+ return 0; // LCOV_EXCL_LINE
+ }
+ const char *pr_name = protocol_type_to_str(pr);
+ if (BUG(pr_name == NULL)) {
+ return 0; // LCOV_EXCL_LINE
+ }
+
+ SMARTLIST_FOREACH_BEGIN(protos, const proto_entry_t *, ent) {
+ if (strcasecmp(ent->name, pr_name))
+ continue;
+ /* name matches; check the ranges */
+ SMARTLIST_FOREACH_BEGIN(ent->ranges, const proto_range_t *, range) {
+ if (ver >= range->low && ver <= range->high)
+ return 1;
+ } SMARTLIST_FOREACH_END(range);
+ } SMARTLIST_FOREACH_END(ent);
+
+ return 0;
+}
+
+/** Return a string describing the protocols supported by tor version
+ * <b>version</b>, or an empty string if we cannot tell.
+ *
+ * Note that this is only used to infer protocols for Tor versions that
+ * can't declare their own.
+ **/
+/// C_RUST_COUPLED: src/rust/protover/protover.rs `compute_for_old_tor`
+const char *
+protover_compute_for_old_tor(const char *version)
+{
+ if (version == NULL) {
+ /* No known version; guess the oldest series that is still supported. */
+ version = "0.2.5.15";
+ }
+
+ if (tor_version_as_new_as(version,
+ FIRST_TOR_VERSION_TO_ADVERTISE_PROTOCOLS)) {
+ return "";
+ } else if (tor_version_as_new_as(version, "0.2.9.1-alpha")) {
+ /* 0.2.9.1-alpha HSRend=2 */
+ return "Cons=1-2 Desc=1-2 DirCache=1 HSDir=1 HSIntro=3 HSRend=1-2 "
+ "Link=1-4 LinkAuth=1 "
+ "Microdesc=1-2 Relay=1-2";
+ } else if (tor_version_as_new_as(version, "0.2.7.5")) {
+ /* 0.2.7-stable added Desc=2, Microdesc=2, Cons=2, which indicate
+ * ed25519 support. We'll call them present only in "stable" 027,
+ * though. */
+ return "Cons=1-2 Desc=1-2 DirCache=1 HSDir=1 HSIntro=3 HSRend=1 "
+ "Link=1-4 LinkAuth=1 "
+ "Microdesc=1-2 Relay=1-2";
+ } else if (tor_version_as_new_as(version, "0.2.4.19")) {
+ /* No currently supported Tor server versions are older than this, or
+ * lack these protocols. */
+ return "Cons=1 Desc=1 DirCache=1 HSDir=1 HSIntro=3 HSRend=1 "
+ "Link=1-4 LinkAuth=1 "
+ "Microdesc=1 Relay=1-2";
+ } else {
+ /* Cannot infer protocols. */
+ return "";
+ }
+}
+
+/**
+ * Release all storage held by static fields in protover.c
+ */
+void
+protover_free_all(void)
+{
+ if (supported_protocol_list) {
+ smartlist_t *entries = supported_protocol_list;
+ SMARTLIST_FOREACH(entries, proto_entry_t *, ent, proto_entry_free(ent));
+ smartlist_free(entries);
+ supported_protocol_list = NULL;
+ }
+}
+
+#endif /* !defined(HAVE_RUST) */
+
diff --git a/src/core/proto/protover.h b/src/core/proto/protover.h
new file mode 100644
index 0000000000..7319d2f8c4
--- /dev/null
+++ b/src/core/proto/protover.h
@@ -0,0 +1,97 @@
+/* Copyright (c) 2016-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file protover.h
+ * \brief Headers and type declarations for protover.c
+ **/
+
+#ifndef TOR_PROTOVER_H
+#define TOR_PROTOVER_H
+
+#include <stdbool.h>
+#include "lib/cc/torint.h"
+#include "lib/testsupport/testsupport.h"
+struct smartlist_t;
+
+/** The first version of Tor that included "proto" entries in its
+ * descriptors. Authorities should use this to decide whether to
+ * guess proto lines. */
+/* This is a guess. */
+/// C_RUST_COUPLED: src/rust/protover/protover.rs
+/// `FIRST_TOR_VERSION_TO_ADVERTISE_PROTOCOLS`
+#define FIRST_TOR_VERSION_TO_ADVERTISE_PROTOCOLS "0.2.9.3-alpha"
+
+/** The protover version number that signifies HSDir support for HSv3 */
+#define PROTOVER_HSDIR_V3 2
+/** The protover version number that signifies HSv3 intro point support */
+#define PROTOVER_HS_INTRO_V3 4
+/** The protover version number that signifies HSv3 rendezvous point support */
+#define PROTOVER_HS_RENDEZVOUS_POINT_V3 2
+
+/** List of recognized subprotocols. */
+/// C_RUST_COUPLED: src/rust/protover/ffi.rs `translate_to_rust`
+/// C_RUST_COUPLED: src/rust/protover/protover.rs `Proto`
+typedef enum protocol_type_t {
+ PRT_LINK,
+ PRT_LINKAUTH,
+ PRT_RELAY,
+ PRT_DIRCACHE,
+ PRT_HSDIR,
+ PRT_HSINTRO,
+ PRT_HSREND,
+ PRT_DESC,
+ PRT_MICRODESC,
+ PRT_CONS,
+} protocol_type_t;
+
+bool protover_contains_long_protocol_names(const char *s);
+int protover_all_supported(const char *s, char **missing);
+int protover_is_supported_here(protocol_type_t pr, uint32_t ver);
+const char *protover_get_supported_protocols(void);
+
+char *protover_compute_vote(const struct smartlist_t *list_of_proto_strings,
+ int threshold);
+const char *protover_compute_for_old_tor(const char *version);
+int protocol_list_supports_protocol(const char *list, protocol_type_t tp,
+ uint32_t version);
+int protocol_list_supports_protocol_or_later(const char *list,
+ protocol_type_t tp,
+ uint32_t version);
+
+void protover_free_all(void);
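+
+/* Example (illustrative) of checking a protocol list against what this
+ * Tor supports; "Cons=1-99" is an arbitrary input:
+ *
+ *   char *missing = NULL;
+ *   if (!protover_all_supported("Cons=1-99", &missing)) {
+ *     // With the current supported list, missing would be "Cons=3-99".
+ *     tor_free(missing);
+ *   }
+ */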
+
+#ifdef PROTOVER_PRIVATE
+/** Represents a range of versions of a single subprotocol type. All
+ * versions between <b>low</b> and <b>high</b> inclusive are included. */
+typedef struct proto_range_t {
+ uint32_t low;
+ uint32_t high;
+} proto_range_t;
+
+/** Represents a single subprotocol, with a set of supported version
+ * ranges. */
+typedef struct proto_entry_t {
+ /** The name of the protocol.
+ *
+ * (This needs to handle voting on protocols which
+ * we don't recognize yet, so it's a char* rather than a protocol_type_t.)
+ */
+ char *name;
+ /** Smartlist of proto_range_t */
+ struct smartlist_t *ranges;
+} proto_entry_t;
+
+#if !defined(HAVE_RUST) && defined(TOR_UNIT_TESTS)
+STATIC struct smartlist_t *parse_protocol_list(const char *s);
+STATIC char *encode_protocol_list(const struct smartlist_t *sl);
+STATIC const char *protocol_type_to_str(protocol_type_t pr);
+STATIC int str_to_protocol_type(const char *s, protocol_type_t *pr_out);
+STATIC void proto_entry_free_(proto_entry_t *entry);
+#endif /* !defined(HAVE_RUST) && defined(TOR_UNIT_TESTS) */
+
+#define proto_entry_free(entry) \
+ FREE_AND_NULL(proto_entry_t, proto_entry_free_, (entry))
+
+#endif /* defined(PROTOVER_PRIVATE) */
+
+#endif /* !defined(TOR_PROTOVER_H) */
diff --git a/src/core/proto/protover_rust.c b/src/core/proto/protover_rust.c
new file mode 100644
index 0000000000..bd2f88b98e
--- /dev/null
+++ b/src/core/proto/protover_rust.c
@@ -0,0 +1,34 @@
+/* Copyright (c) 2016-2018, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file protover_rust.c
+ * \brief Provide a C wrapper for functions exposed in /src/rust/protover,
+ * and safe translation/handling across the Rust/C boundary.
+ */
+
+#include "or/or.h"
+#include "or/protover.h"
+
+#ifdef HAVE_RUST
+
+/* Defined for compatibility; used in main.c. When the Rust implementation
+ * is in use, there is no C-side state to free. */
+void
+protover_free_all(void)
+{
+}
+
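+/* Implemented in Rust, in src/rust/protover; declared here for the
+ * wrapper below. */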
+int protover_contains_long_protocol_names_(const char *s);
+
+/**
+ * Return true if the unparsed protover in <b>s</b> would contain a protocol
+ * name longer than MAX_PROTOCOL_NAME_LENGTH, and false otherwise.
+ */
+bool
+protover_contains_long_protocol_names(const char *s)
+{
+ return protover_contains_long_protocol_names_(s) != 0;
+}
+
+#endif /* defined(HAVE_RUST) */
+