Diffstat (limited to 'src/core')
-rw-r--r--  src/core/core.md  |  18
-rw-r--r--  src/core/crypto/.may_include  |  10
-rw-r--r--  src/core/crypto/core_crypto.md  |  6
-rw-r--r--  src/core/crypto/hs_ntor.c  |  60
-rw-r--r--  src/core/crypto/hs_ntor.h  |  32
-rw-r--r--  src/core/crypto/include.am  |  18
-rw-r--r--  src/core/crypto/onion_crypto.c  |  2
-rw-r--r--  src/core/crypto/onion_crypto.h  |  4
-rw-r--r--  src/core/crypto/onion_fast.c  |  2
-rw-r--r--  src/core/crypto/onion_fast.h  |  2
-rw-r--r--  src/core/crypto/onion_ntor.c  |  2
-rw-r--r--  src/core/crypto/onion_ntor.h  |  7
-rw-r--r--  src/core/crypto/onion_tap.c  |  2
-rw-r--r--  src/core/crypto/onion_tap.h  |  2
-rw-r--r--  src/core/crypto/relay_crypto.c  |  55
-rw-r--r--  src/core/crypto/relay_crypto.h  |  13
-rw-r--r--  src/core/include.am  |  363
-rw-r--r--  src/core/mainloop/.may_include  |  24
-rw-r--r--  src/core/mainloop/connection.c  |  545
-rw-r--r--  src/core/mainloop/connection.h  |  205
-rw-r--r--  src/core/mainloop/core_mainloop.md  |  10
-rw-r--r--  src/core/mainloop/cpuworker.c  |  15
-rw-r--r--  src/core/mainloop/cpuworker.h  |  6
-rw-r--r--  src/core/mainloop/include.am  |  22
-rw-r--r--  src/core/mainloop/mainloop.c  |  875
-rw-r--r--  src/core/mainloop/mainloop.h  |  25
-rw-r--r--  src/core/mainloop/mainloop_pubsub.c  |  179
-rw-r--r--  src/core/mainloop/mainloop_pubsub.h  |  61
-rw-r--r--  src/core/mainloop/mainloop_state.inc  |  19
-rw-r--r--  src/core/mainloop/mainloop_state_st.h  |  23
-rw-r--r--  src/core/mainloop/mainloop_sys.c  |  90
-rw-r--r--  src/core/mainloop/mainloop_sys.h  |  17
-rw-r--r--  src/core/mainloop/netstatus.c  |  144
-rw-r--r--  src/core/mainloop/netstatus.h  |  23
-rw-r--r--  src/core/mainloop/periodic.c  |  223
-rw-r--r--  src/core/mainloop/periodic.h  |  37
-rw-r--r--  src/core/or/.may_include  |  40
-rw-r--r--  src/core/or/addr_policy_st.h  |  11
-rw-r--r--  src/core/or/address_set.c  |  4
-rw-r--r--  src/core/or/address_set.h  |  2
-rw-r--r--  src/core/or/cell_queue_st.h  |  11
-rw-r--r--  src/core/or/cell_st.h  |  10
-rw-r--r--  src/core/or/channel.c  |  154
-rw-r--r--  src/core/or/channel.h  |  89
-rw-r--r--  src/core/or/channelpadding.c  |  11
-rw-r--r--  src/core/or/channelpadding.h  |  2
-rw-r--r--  src/core/or/channeltls.c  |  156
-rw-r--r--  src/core/or/channeltls.h  |  9
-rw-r--r--  src/core/or/circuit_st.h  |  77
-rw-r--r--  src/core/or/circuitbuild.c  |  501
-rw-r--r--  src/core/or/circuitbuild.h  |  39
-rw-r--r--  src/core/or/circuitlist.c  |  219
-rw-r--r--  src/core/or/circuitlist.h  |  42
-rw-r--r--  src/core/or/circuitmux.c  |  102
-rw-r--r--  src/core/or/circuitmux.h  |  70
-rw-r--r--  src/core/or/circuitmux_ewma.c  |  119
-rw-r--r--  src/core/or/circuitmux_ewma.h  |  114
-rw-r--r--  src/core/or/circuitpadding.c  |  3101
-rw-r--r--  src/core/or/circuitpadding.h  |  813
-rw-r--r--  src/core/or/circuitpadding_machines.c  |  454
-rw-r--r--  src/core/or/circuitpadding_machines.h  |  35
-rw-r--r--  src/core/or/circuitstats.c  |  26
-rw-r--r--  src/core/or/circuitstats.h  |  4
-rw-r--r--  src/core/or/circuituse.c  |  142
-rw-r--r--  src/core/or/circuituse.h  |  11
-rw-r--r--  src/core/or/command.c  |  27
-rw-r--r--  src/core/or/command.h  |  3
-rw-r--r--  src/core/or/connection_edge.c  |  286
-rw-r--r--  src/core/or/connection_edge.h  |  36
-rw-r--r--  src/core/or/connection_or.c  |  861
-rw-r--r--  src/core/or/connection_or.h  |  59
-rw-r--r--  src/core/or/connection_st.h  |  9
-rw-r--r--  src/core/or/core_or.md  |  62
-rw-r--r--  src/core/or/cpath_build_state_st.h  |  10
-rw-r--r--  src/core/or/crypt_path.c  |  262
-rw-r--r--  src/core/or/crypt_path.h  |  46
-rw-r--r--  src/core/or/crypt_path_reference_st.h  |  10
-rw-r--r--  src/core/or/crypt_path_st.h  |  32
-rw-r--r--  src/core/or/dataflow.md  |  236
-rw-r--r--  src/core/or/destroy_cell_queue_st.h  |  14
-rw-r--r--  src/core/or/dos.c  |  15
-rw-r--r--  src/core/or/dos.h  |  6
-rw-r--r--  src/core/or/edge_connection_st.h  |  10
-rw-r--r--  src/core/or/entry_connection_st.h  |  10
-rw-r--r--  src/core/or/entry_port_cfg_st.h  |  13
-rw-r--r--  src/core/or/extend_info_st.h  |  9
-rw-r--r--  src/core/or/half_edge_st.h  |  10
-rw-r--r--  src/core/or/include.am  |  96
-rw-r--r--  src/core/or/listener_connection_st.h  |  10
-rw-r--r--  src/core/or/ocirc_event.c  |  121
-rw-r--r--  src/core/or/ocirc_event.h  |  72
-rw-r--r--  src/core/or/onion.c  |  53
-rw-r--r--  src/core/or/onion.h  |  12
-rw-r--r--  src/core/or/or.h  |  44
-rw-r--r--  src/core/or/or_circuit_st.h  |  26
-rw-r--r--  src/core/or/or_connection_st.h  |  13
-rw-r--r--  src/core/or/or_handshake_certs_st.h  |  9
-rw-r--r--  src/core/or/or_handshake_state_st.h  |  10
-rw-r--r--  src/core/or/or_periodic.c  |  67
-rw-r--r--  src/core/or/or_periodic.h  |  17
-rw-r--r--  src/core/or/or_sys.c  |  56
-rw-r--r--  src/core/or/or_sys.h  |  21
-rw-r--r--  src/core/or/orconn_event.c  |  92
-rw-r--r--  src/core/or/orconn_event.h  |  103
-rw-r--r--  src/core/or/origin_circuit_st.h  |  13
-rw-r--r--  src/core/or/policies.c  |  243
-rw-r--r--  src/core/or/policies.h  |  4
-rw-r--r--  src/core/or/port_cfg_st.h  |  10
-rw-r--r--  src/core/or/protover.c  |  16
-rw-r--r--  src/core/or/protover.h  |  26
-rw-r--r--  src/core/or/protover_rust.c  |  2
-rw-r--r--  src/core/or/reasons.c  |  6
-rw-r--r--  src/core/or/reasons.h  |  2
-rw-r--r--  src/core/or/relay.c  |  724
-rw-r--r--  src/core/or/relay.h  |  23
-rw-r--r--  src/core/or/relay_crypto_st.h  |  13
-rw-r--r--  src/core/or/scheduler.c  |  88
-rw-r--r--  src/core/or/scheduler.h  |  17
-rw-r--r--  src/core/or/scheduler_kist.c  |  50
-rw-r--r--  src/core/or/scheduler_vanilla.c  |  12
-rw-r--r--  src/core/or/sendme.c  |  710
-rw-r--r--  src/core/or/sendme.h  |  80
-rw-r--r--  src/core/or/server_port_cfg_st.h  |  10
-rw-r--r--  src/core/or/socks_request_st.h  |  16
-rw-r--r--  src/core/or/status.c  |  5
-rw-r--r--  src/core/or/status.h  |  8
-rw-r--r--  src/core/or/tor_version_st.h  |  10
-rw-r--r--  src/core/or/var_cell_st.h  |  10
-rw-r--r--  src/core/or/versions.c  |  112
-rw-r--r--  src/core/or/versions.h  |  6
-rw-r--r--  src/core/proto/.may_include  |  14
-rw-r--r--  src/core/proto/core_proto.md  |  6
-rw-r--r--  src/core/proto/include.am  |  18
-rw-r--r--  src/core/proto/proto_cell.c  |  12
-rw-r--r--  src/core/proto/proto_cell.h  |  8
-rw-r--r--  src/core/proto/proto_control0.c  |  10
-rw-r--r--  src/core/proto/proto_control0.h  |  8
-rw-r--r--  src/core/proto/proto_ext_or.c  |  10
-rw-r--r--  src/core/proto/proto_ext_or.h  |  14
-rw-r--r--  src/core/proto/proto_haproxy.c  |  45
-rw-r--r--  src/core/proto/proto_haproxy.h  |  12
-rw-r--r--  src/core/proto/proto_http.c  |  10
-rw-r--r--  src/core/proto/proto_http.h  |  8
-rw-r--r--  src/core/proto/proto_socks.c  |  24
-rw-r--r--  src/core/proto/proto_socks.h  |  7
-rw-r--r--  src/core/stA1RajU  |  0
-rw-r--r--  src/core/stiysZND  |  bin 0 -> 19083264 bytes
147 files changed, 10890 insertions, 3889 deletions
diff --git a/src/core/core.md b/src/core/core.md
new file mode 100644
index 0000000000..8ecc43eaae
--- /dev/null
+++ b/src/core/core.md
@@ -0,0 +1,18 @@
+@dir /core
+@brief core: main loop and onion routing functionality
+
+The "core" directory has the central protocols for Tor, which every
+client and relay must implement in order to perform onion routing.
+
+It is divided into three lower-level pieces:
+
+ - \refdir{core/crypto} -- Tor-specific cryptography.
+
+ - \refdir{core/proto} -- Protocol encoding/decoding.
+
+ - \refdir{core/mainloop} -- A connection-oriented asynchronous mainloop.
+
+and one high-level piece:
+
+ - \refdir{core/or} -- Implements onion routing itself.
+
diff --git a/src/core/crypto/.may_include b/src/core/crypto/.may_include
new file mode 100644
index 0000000000..5782a36797
--- /dev/null
+++ b/src/core/crypto/.may_include
@@ -0,0 +1,10 @@
+!advisory
+
+orconfig.h
+
+lib/crypt_ops/*.h
+lib/ctime/*.h
+lib/cc/*.h
+lib/log/*.h
+
+core/crypto/*.h
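
The patterns above feed Tor's include checker: a source file under src/core/crypto may only #include headers whose paths match one of these globs, and the leading "!advisory" flag marks the rules as advisory, so violations are reported rather than treated as hard failures. As an illustration (not part of this change), a directive like the following in a core/crypto source file would satisfy the rules, while an include from, say, feature/ would be flagged:

  #include "lib/crypt_ops/crypto_util.h"  /* allowed: matches lib/crypt_ops/*.h */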
diff --git a/src/core/crypto/core_crypto.md b/src/core/crypto/core_crypto.md
new file mode 100644
index 0000000000..26ade1f8f8
--- /dev/null
+++ b/src/core/crypto/core_crypto.md
@@ -0,0 +1,6 @@
+@dir /core/crypto
+@brief core/crypto: Tor-specific cryptography
+
+This module implements Tor's circuit-construction crypto and Tor's
+relay crypto.
+
diff --git a/src/core/crypto/hs_ntor.c b/src/core/crypto/hs_ntor.c
index c34073690e..07bcdc566c 100644
--- a/src/core/crypto/hs_ntor.c
+++ b/src/core/crypto/hs_ntor.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2019, The Tor Project, Inc. */
+/* Copyright (c) 2017-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/** \file hs_ntor.c
@@ -170,19 +170,18 @@ get_rendezvous1_key_material(const uint8_t *rend_secret_hs_input,
* necessary key material, and return 0. */
static void
get_introduce1_key_material(const uint8_t *secret_input,
- const uint8_t *subcredential,
+ const hs_subcredential_t *subcredential,
hs_ntor_intro_cell_keys_t *hs_ntor_intro_cell_keys_out)
{
uint8_t keystream[CIPHER256_KEY_LEN + DIGEST256_LEN];
uint8_t info_blob[INFO_BLOB_LEN];
uint8_t kdf_input[KDF_INPUT_LEN];
- crypto_xof_t *xof;
uint8_t *ptr;
/* Let's build info */
ptr = info_blob;
APPEND(ptr, M_HSEXPAND, strlen(M_HSEXPAND));
- APPEND(ptr, subcredential, DIGEST256_LEN);
+ APPEND(ptr, subcredential->subcred, SUBCRED_LEN);
tor_assert(ptr == info_blob + sizeof(info_blob));
/* Let's build the input to the KDF */
@@ -193,10 +192,8 @@ get_introduce1_key_material(const uint8_t *secret_input,
tor_assert(ptr == kdf_input + sizeof(kdf_input));
/* Now we need to run kdf_input over SHAKE-256 */
- xof = crypto_xof_new();
- crypto_xof_add_bytes(xof, kdf_input, sizeof(kdf_input));
- crypto_xof_squeeze_bytes(xof, keystream, sizeof(keystream)) ;
- crypto_xof_free(xof);
+ crypto_xof(keystream, sizeof(keystream),
+ kdf_input, sizeof(kdf_input));
{ /* Get the keys */
memcpy(&hs_ntor_intro_cell_keys_out->enc_key, keystream,CIPHER256_KEY_LEN);
@@ -320,7 +317,7 @@ hs_ntor_client_get_introduce1_keys(
const ed25519_public_key_t *intro_auth_pubkey,
const curve25519_public_key_t *intro_enc_pubkey,
const curve25519_keypair_t *client_ephemeral_enc_keypair,
- const uint8_t *subcredential,
+ const hs_subcredential_t *subcredential,
hs_ntor_intro_cell_keys_t *hs_ntor_intro_cell_keys_out)
{
int bad = 0;
@@ -453,9 +450,31 @@ hs_ntor_service_get_introduce1_keys(
const ed25519_public_key_t *intro_auth_pubkey,
const curve25519_keypair_t *intro_enc_keypair,
const curve25519_public_key_t *client_ephemeral_enc_pubkey,
- const uint8_t *subcredential,
+ const hs_subcredential_t *subcredential,
hs_ntor_intro_cell_keys_t *hs_ntor_intro_cell_keys_out)
{
+ return hs_ntor_service_get_introduce1_keys_multi(
+ intro_auth_pubkey,
+ intro_enc_keypair,
+ client_ephemeral_enc_pubkey,
+ 1,
+ subcredential,
+ hs_ntor_intro_cell_keys_out);
+}
+
+/**
+ * As hs_ntor_service_get_introduce1_keys(), but take multiple subcredentials
+ * as input, and yield multiple sets of keys as output.
+ **/
+int
+hs_ntor_service_get_introduce1_keys_multi(
+ const struct ed25519_public_key_t *intro_auth_pubkey,
+ const struct curve25519_keypair_t *intro_enc_keypair,
+ const struct curve25519_public_key_t *client_ephemeral_enc_pubkey,
+ size_t n_subcredentials,
+ const hs_subcredential_t *subcredentials,
+ hs_ntor_intro_cell_keys_t *hs_ntor_intro_cell_keys_out)
+{
int bad = 0;
uint8_t secret_input[INTRO_SECRET_HS_INPUT_LEN];
uint8_t dh_result[CURVE25519_OUTPUT_LEN];
@@ -463,7 +482,8 @@ hs_ntor_service_get_introduce1_keys(
tor_assert(intro_auth_pubkey);
tor_assert(intro_enc_keypair);
tor_assert(client_ephemeral_enc_pubkey);
- tor_assert(subcredential);
+ tor_assert(n_subcredentials >= 1);
+ tor_assert(subcredentials);
tor_assert(hs_ntor_intro_cell_keys_out);
/* Compute EXP(X, b) */
@@ -479,13 +499,16 @@ hs_ntor_service_get_introduce1_keys(
secret_input);
bad |= safe_mem_is_zero(secret_input, CURVE25519_OUTPUT_LEN);
- /* Get ENC_KEY and MAC_KEY! */
- get_introduce1_key_material(secret_input, subcredential,
- hs_ntor_intro_cell_keys_out);
+ for (unsigned i = 0; i < n_subcredentials; ++i) {
+ /* Get ENC_KEY and MAC_KEY! */
+ get_introduce1_key_material(secret_input, &subcredentials[i],
+ &hs_ntor_intro_cell_keys_out[i]);
+ }
memwipe(secret_input, 0, sizeof(secret_input));
if (bad) {
- memwipe(hs_ntor_intro_cell_keys_out, 0, sizeof(hs_ntor_intro_cell_keys_t));
+ memwipe(hs_ntor_intro_cell_keys_out, 0,
+ sizeof(hs_ntor_intro_cell_keys_t) * n_subcredentials);
}
return bad ? -1 : 0;
@@ -594,7 +617,6 @@ hs_ntor_circuit_key_expansion(const uint8_t *ntor_key_seed, size_t seed_len,
{
uint8_t *ptr;
uint8_t kdf_input[NTOR_KEY_EXPANSION_KDF_INPUT_LEN];
- crypto_xof_t *xof;
/* Sanity checks on lengths to make sure we are good */
if (BUG(seed_len != DIGEST256_LEN)) {
@@ -611,10 +633,8 @@ hs_ntor_circuit_key_expansion(const uint8_t *ntor_key_seed, size_t seed_len,
tor_assert(ptr == kdf_input + sizeof(kdf_input));
/* Generate the keys */
- xof = crypto_xof_new();
- crypto_xof_add_bytes(xof, kdf_input, sizeof(kdf_input));
- crypto_xof_squeeze_bytes(xof, keys_out, HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN);
- crypto_xof_free(xof);
+ crypto_xof(keys_out, HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN,
+ kdf_input, sizeof(kdf_input));
return 0;
}
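
Both KDF call sites in this file now use the one-shot crypto_xof() helper in place of the four-step new/add-bytes/squeeze/free sequence. A minimal before/after sketch, using only the calls visible in this diff (kdf_input and keystream stand in for the buffers declared in the surrounding functions):

  /* Multi-step form, as removed above: allocate, absorb, squeeze, free. */
  crypto_xof_t *xof = crypto_xof_new();
  crypto_xof_add_bytes(xof, kdf_input, sizeof(kdf_input));
  crypto_xof_squeeze_bytes(xof, keystream, sizeof(keystream));
  crypto_xof_free(xof);

  /* One-shot form, as added above: the output buffer and its length come
   * first, followed by the input buffer and its length. */
  crypto_xof(keystream, sizeof(keystream),
             kdf_input, sizeof(kdf_input));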
diff --git a/src/core/crypto/hs_ntor.h b/src/core/crypto/hs_ntor.h
index e5a5171915..9a975dd83f 100644
--- a/src/core/crypto/hs_ntor.h
+++ b/src/core/crypto/hs_ntor.h
@@ -1,6 +1,11 @@
-/* Copyright (c) 2017-2019, The Tor Project, Inc. */
+/* Copyright (c) 2017-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file hs_ntor.h
+ * @brief Header for hs_ntor.c
+ **/
+
#ifndef TOR_HS_NTOR_H
#define TOR_HS_NTOR_H
@@ -14,7 +19,7 @@ struct curve25519_keypair_t;
(DIGEST256_LEN*2 + CIPHER256_KEY_LEN*2)
/* Key material needed to encode/decode INTRODUCE1 cells */
-typedef struct {
+typedef struct hs_ntor_intro_cell_keys_t {
/* Key used for encryption of encrypted INTRODUCE1 blob */
uint8_t enc_key[CIPHER256_KEY_LEN];
/* MAC key used to protect encrypted INTRODUCE1 blob */
@@ -22,7 +27,7 @@ typedef struct {
} hs_ntor_intro_cell_keys_t;
/* Key material needed to encode/decode RENDEZVOUS1 cells */
-typedef struct {
+typedef struct hs_ntor_rend_cell_keys_t {
/* This is the MAC of the HANDSHAKE_INFO field */
uint8_t rend_cell_auth_mac[DIGEST256_LEN];
/* This is the key seed used to derive further rendezvous crypto keys as
@@ -30,11 +35,20 @@ typedef struct {
uint8_t ntor_key_seed[DIGEST256_LEN];
} hs_ntor_rend_cell_keys_t;
+#define SUBCRED_LEN DIGEST256_LEN
+
+/**
+ * A 'subcredential' used to prove knowledge of a hidden service.
+ **/
+typedef struct hs_subcredential_t {
+ uint8_t subcred[SUBCRED_LEN];
+} hs_subcredential_t;
+
int hs_ntor_client_get_introduce1_keys(
const struct ed25519_public_key_t *intro_auth_pubkey,
const struct curve25519_public_key_t *intro_enc_pubkey,
const struct curve25519_keypair_t *client_ephemeral_enc_keypair,
- const uint8_t *subcredential,
+ const hs_subcredential_t *subcredential,
hs_ntor_intro_cell_keys_t *hs_ntor_intro_cell_keys_out);
int hs_ntor_client_get_rendezvous1_keys(
@@ -44,11 +58,19 @@ int hs_ntor_client_get_rendezvous1_keys(
const struct curve25519_public_key_t *service_ephemeral_rend_pubkey,
hs_ntor_rend_cell_keys_t *hs_ntor_rend_cell_keys_out);
+int hs_ntor_service_get_introduce1_keys_multi(
+ const struct ed25519_public_key_t *intro_auth_pubkey,
+ const struct curve25519_keypair_t *intro_enc_keypair,
+ const struct curve25519_public_key_t *client_ephemeral_enc_pubkey,
+ size_t n_subcredentials,
+ const hs_subcredential_t *subcredentials,
+ hs_ntor_intro_cell_keys_t *hs_ntor_intro_cell_keys_out);
+
int hs_ntor_service_get_introduce1_keys(
const struct ed25519_public_key_t *intro_auth_pubkey,
const struct curve25519_keypair_t *intro_enc_keypair,
const struct curve25519_public_key_t *client_ephemeral_enc_pubkey,
- const uint8_t *subcredential,
+ const hs_subcredential_t *subcredential,
hs_ntor_intro_cell_keys_t *hs_ntor_intro_cell_keys_out);
int hs_ntor_service_get_rendezvous1_keys(
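
The new hs_ntor_service_get_introduce1_keys_multi() derives one hs_ntor_intro_cell_keys_t per subcredential from a single DH computation, and the existing single-subcredential function is now a thin wrapper that calls it with a count of 1. A hedged usage sketch based only on the declarations above (the three key/pubkey arguments are assumed to already exist in the caller, and the count of 2 is purely illustrative):

  hs_subcredential_t subcreds[2];            /* filled in by the caller */
  hs_ntor_intro_cell_keys_t intro_keys[2];   /* one output slot per subcredential */

  if (hs_ntor_service_get_introduce1_keys_multi(intro_auth_pubkey,
                                                intro_enc_keypair,
                                                client_ephemeral_enc_pubkey,
                                                2, subcreds,
                                                intro_keys) < 0) {
    /* On failure, the implementation wipes every output slot. */
    return -1;
  }
  /* On success, intro_keys[i] holds the ENC_KEY and MAC_KEY derived
   * with subcreds[i]. */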
diff --git a/src/core/crypto/include.am b/src/core/crypto/include.am
new file mode 100644
index 0000000000..28b7e22905
--- /dev/null
+++ b/src/core/crypto/include.am
@@ -0,0 +1,18 @@
+
+# ADD_C_FILE: INSERT SOURCES HERE.
+LIBTOR_APP_A_SOURCES += \
+ src/core/crypto/hs_ntor.c \
+ src/core/crypto/onion_crypto.c \
+ src/core/crypto/onion_fast.c \
+ src/core/crypto/onion_ntor.c \
+ src/core/crypto/onion_tap.c \
+ src/core/crypto/relay_crypto.c
+
+# ADD_C_FILE: INSERT HEADERS HERE.
+noinst_HEADERS += \
+ src/core/crypto/hs_ntor.h \
+ src/core/crypto/onion_crypto.h \
+ src/core/crypto/onion_fast.h \
+ src/core/crypto/onion_ntor.h \
+ src/core/crypto/onion_tap.h \
+ src/core/crypto/relay_crypto.h
diff --git a/src/core/crypto/onion_crypto.c b/src/core/crypto/onion_crypto.c
index 56b02e2996..69b4dc40aa 100644
--- a/src/core/crypto/onion_crypto.c
+++ b/src/core/crypto/onion_crypto.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
diff --git a/src/core/crypto/onion_crypto.h b/src/core/crypto/onion_crypto.h
index 1cddde3610..2665d326a3 100644
--- a/src/core/crypto/onion_crypto.h
+++ b/src/core/crypto/onion_crypto.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -44,4 +44,4 @@ void server_onion_keys_free_(server_onion_keys_t *keys);
#define server_onion_keys_free(keys) \
FREE_AND_NULL(server_onion_keys_t, server_onion_keys_free_, (keys))
-#endif
+#endif /* !defined(TOR_ONION_CRYPTO_H) */
diff --git a/src/core/crypto/onion_fast.c b/src/core/crypto/onion_fast.c
index 31bd20235f..d760549325 100644
--- a/src/core/crypto/onion_fast.c
+++ b/src/core/crypto/onion_fast.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
diff --git a/src/core/crypto/onion_fast.h b/src/core/crypto/onion_fast.h
index 0ba8cbbc35..da983a56d9 100644
--- a/src/core/crypto/onion_fast.h
+++ b/src/core/crypto/onion_fast.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
diff --git a/src/core/crypto/onion_ntor.c b/src/core/crypto/onion_ntor.c
index 7087fe1bd7..5a77230d02 100644
--- a/src/core/crypto/onion_ntor.c
+++ b/src/core/crypto/onion_ntor.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2019, The Tor Project, Inc. */
+/* Copyright (c) 2012-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
diff --git a/src/core/crypto/onion_ntor.h b/src/core/crypto/onion_ntor.h
index 51e72b4083..9473409e40 100644
--- a/src/core/crypto/onion_ntor.h
+++ b/src/core/crypto/onion_ntor.h
@@ -1,6 +1,11 @@
-/* Copyright (c) 2012-2019, The Tor Project, Inc. */
+/* Copyright (c) 2012-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file onion_ntor.h
+ * @brief Header for onion_ntor.c
+ **/
+
#ifndef TOR_ONION_NTOR_H
#define TOR_ONION_NTOR_H
diff --git a/src/core/crypto/onion_tap.c b/src/core/crypto/onion_tap.c
index 854889d88d..119f55f206 100644
--- a/src/core/crypto/onion_tap.c
+++ b/src/core/crypto/onion_tap.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
diff --git a/src/core/crypto/onion_tap.h b/src/core/crypto/onion_tap.h
index 0e43b9c8ba..78174b1fab 100644
--- a/src/core/crypto/onion_tap.h
+++ b/src/core/crypto/onion_tap.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
diff --git a/src/core/crypto/relay_crypto.c b/src/core/crypto/relay_crypto.c
index 0b83b2d0a5..3e6167e0e1 100644
--- a/src/core/crypto/relay_crypto.c
+++ b/src/core/crypto/relay_crypto.c
@@ -1,17 +1,24 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file relay_crypto.c
+ * @brief Relay-cell encryption and decryption.
+ **/
+
#include "core/or/or.h"
#include "core/or/circuitlist.h"
+#include "core/or/crypt_path.h"
#include "app/config/config.h"
#include "lib/crypt_ops/crypto_cipher.h"
#include "lib/crypt_ops/crypto_util.h"
#include "core/crypto/hs_ntor.h" // for HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN
#include "core/or/relay.h"
#include "core/crypto/relay_crypto.h"
+#include "core/or/sendme.h"
#include "core/or/cell_st.h"
#include "core/or/or_circuit_st.h"
@@ -20,7 +27,7 @@
/** Update digest from the payload of cell. Assign integrity part to
* cell.
*/
-static void
+void
relay_set_digest(crypto_digest_t *digest, cell_t *cell)
{
char integrity[4];
@@ -84,12 +91,39 @@ relay_digest_matches(crypto_digest_t *digest, cell_t *cell)
*
* Note that we use the same operation for encrypting and for decrypting.
*/
-static void
+void
relay_crypt_one_payload(crypto_cipher_t *cipher, uint8_t *in)
{
crypto_cipher_crypt_inplace(cipher, (char*) in, CELL_PAYLOAD_SIZE);
}
+/** Return the sendme_digest within the <b>crypto</b> object. */
+uint8_t *
+relay_crypto_get_sendme_digest(relay_crypto_t *crypto)
+{
+ tor_assert(crypto);
+ return crypto->sendme_digest;
+}
+
+/** Record the cell digest, taken from the forward or backward digest as
+ * selected by is_foward_digest, as the SENDME cell digest. */
+void
+relay_crypto_record_sendme_digest(relay_crypto_t *crypto,
+ bool is_foward_digest)
+{
+ struct crypto_digest_t *digest;
+
+ tor_assert(crypto);
+
+ digest = crypto->b_digest;
+ if (is_foward_digest) {
+ digest = crypto->f_digest;
+ }
+
+ crypto_digest_get_digest(digest, (char *) crypto->sendme_digest,
+ sizeof(crypto->sendme_digest));
+}
+
/** Do the appropriate en/decryptions for <b>cell</b> arriving on
* <b>circ</b> in direction <b>cell_direction</b>.
*
@@ -134,12 +168,12 @@ relay_decrypt_cell(circuit_t *circ, cell_t *cell,
tor_assert(thishop);
/* decrypt one layer */
- relay_crypt_one_payload(thishop->crypto.b_crypto, cell->payload);
+ cpath_crypt_cell(thishop, cell->payload, true);
relay_header_unpack(&rh, cell->payload);
if (rh.recognized == 0) {
/* it's possibly recognized. have to check digest to be sure. */
- if (relay_digest_matches(thishop->crypto.b_digest, cell)) {
+ if (relay_digest_matches(cpath_get_incoming_digest(thishop), cell)) {
*recognized = 1;
*layer_hint = thishop;
return 0;
@@ -187,14 +221,17 @@ relay_encrypt_cell_outbound(cell_t *cell,
crypt_path_t *layer_hint)
{
crypt_path_t *thishop; /* counter for repeated crypts */
- relay_set_digest(layer_hint->crypto.f_digest, cell);
+ cpath_set_cell_forward_digest(layer_hint, cell);
+
+ /* Record cell digest as the SENDME digest if need be. */
+ sendme_record_sending_cell_digest(TO_CIRCUIT(circ), layer_hint);
thishop = layer_hint;
/* moving from farthest to nearest hop */
do {
tor_assert(thishop);
log_debug(LD_OR,"encrypting a layer of the relay cell.");
- relay_crypt_one_payload(thishop->crypto.f_crypto, cell->payload);
+ cpath_crypt_cell(thishop, cell->payload, false);
thishop = thishop->prev;
} while (thishop != circ->cpath->prev);
@@ -212,6 +249,10 @@ relay_encrypt_cell_inbound(cell_t *cell,
or_circuit_t *or_circ)
{
relay_set_digest(or_circ->crypto.b_digest, cell);
+
+ /* Record cell digest as the SENDME digest if need be. */
+ sendme_record_sending_cell_digest(TO_CIRCUIT(or_circ), NULL);
+
/* encrypt one layer */
relay_crypt_one_payload(or_circ->crypto.b_crypto, cell->payload);
}
diff --git a/src/core/crypto/relay_crypto.h b/src/core/crypto/relay_crypto.h
index 45a21d14ab..5e36c7678c 100644
--- a/src/core/crypto/relay_crypto.h
+++ b/src/core/crypto/relay_crypto.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -27,5 +27,16 @@ void relay_crypto_clear(relay_crypto_t *crypto);
void relay_crypto_assert_ok(const relay_crypto_t *crypto);
+uint8_t *relay_crypto_get_sendme_digest(relay_crypto_t *crypto);
+
+void relay_crypto_record_sendme_digest(relay_crypto_t *crypto,
+ bool is_foward_digest);
+
+void
+relay_crypt_one_payload(crypto_cipher_t *cipher, uint8_t *in);
+
+void
+relay_set_digest(crypto_digest_t *digest, cell_t *cell);
+
#endif /* !defined(TOR_RELAY_CRYPTO_H) */
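
The two sendme accessors added above expose the running relay digests to the flow-control code: relay_crypto_record_sendme_digest() copies either the forward or the backward running digest into the crypto state's sendme_digest field, and relay_crypto_get_sendme_digest() returns a pointer to that recorded value. A minimal sketch of how they compose, inferred only from the declarations in this diff (or_circ is assumed to be an existing or_circuit_t, whose embedded relay_crypto_t is referenced elsewhere in this change as or_circ->crypto):

  relay_crypto_t *crypto = &or_circ->crypto;

  /* Record the current backward digest as the SENDME cell digest;
   * pass true to record the forward digest instead. */
  relay_crypto_record_sendme_digest(crypto, false);

  /* Later, fetch the recorded digest so that the sendme code
   * (core/or/sendme.c) can check or reuse it. */
  uint8_t *sendme_digest = relay_crypto_get_sendme_digest(crypto);
  (void)sendme_digest;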
diff --git a/src/core/include.am b/src/core/include.am
index 1b8ef2ac58..7752a7974b 100644
--- a/src/core/include.am
+++ b/src/core/include.am
@@ -1,174 +1,21 @@
-noinst_LIBRARIES += \
- src/core/libtor-app.a
-if UNITTESTS_ENABLED
-noinst_LIBRARIES += \
- src/core/libtor-app-testing.a
-endif
-
-LIBTOR_APP_A_SOURCES = \
- src/app/config/config.c \
- src/app/config/confparse.c \
- src/app/config/statefile.c \
- src/app/main/main.c \
- src/core/crypto/hs_ntor.c \
- src/core/crypto/onion_crypto.c \
- src/core/crypto/onion_fast.c \
- src/core/crypto/onion_ntor.c \
- src/core/crypto/onion_tap.c \
- src/core/crypto/relay_crypto.c \
- src/core/mainloop/connection.c \
- src/core/mainloop/cpuworker.c \
- src/core/mainloop/mainloop.c \
- src/core/mainloop/netstatus.c \
- src/core/mainloop/periodic.c \
- src/core/or/address_set.c \
- src/core/or/channel.c \
- src/core/or/channelpadding.c \
- src/core/or/channeltls.c \
- src/core/or/circuitbuild.c \
- src/core/or/circuitlist.c \
- src/core/or/circuitmux.c \
- src/core/or/circuitmux_ewma.c \
- src/core/or/circuitstats.c \
- src/core/or/circuituse.c \
- src/core/or/command.c \
- src/core/or/connection_edge.c \
- src/core/or/connection_or.c \
- src/core/or/dos.c \
- src/core/or/onion.c \
- src/core/or/policies.c \
- src/core/or/protover.c \
- src/core/or/protover_rust.c \
- src/core/or/reasons.c \
- src/core/or/relay.c \
- src/core/or/scheduler.c \
- src/core/or/scheduler_kist.c \
- src/core/or/scheduler_vanilla.c \
- src/core/or/status.c \
- src/core/or/versions.c \
- src/core/proto/proto_cell.c \
- src/core/proto/proto_control0.c \
- src/core/proto/proto_ext_or.c \
- src/core/proto/proto_http.c \
- src/core/proto/proto_socks.c \
- src/feature/api/tor_api.c \
- src/feature/client/addressmap.c \
- src/feature/client/bridges.c \
- src/feature/client/circpathbias.c \
- src/feature/client/dnsserv.c \
- src/feature/client/entrynodes.c \
- src/feature/client/transports.c \
- src/feature/control/control.c \
- src/feature/control/fmt_serverstatus.c \
- src/feature/control/getinfo_geoip.c \
- src/feature/dirauth/keypin.c \
- src/feature/dircache/conscache.c \
- src/feature/dircache/consdiffmgr.c \
- src/feature/dircache/dircache.c \
- src/feature/dircache/dirserv.c \
- src/feature/dirclient/dirclient.c \
- src/feature/dirclient/dlstatus.c \
- src/feature/dircommon/consdiff.c \
- src/feature/dircommon/directory.c \
- src/feature/dircommon/fp_pair.c \
- src/feature/dircommon/voting_schedule.c \
- src/feature/dirparse/authcert_parse.c \
- src/feature/dirparse/microdesc_parse.c \
- src/feature/dirparse/ns_parse.c \
- src/feature/dirparse/parsecommon.c \
- src/feature/dirparse/policy_parse.c \
- src/feature/dirparse/routerparse.c \
- src/feature/dirparse/sigcommon.c \
- src/feature/dirparse/signing.c \
- src/feature/dirparse/unparseable.c \
- src/feature/hibernate/hibernate.c \
- src/feature/hs/hs_cache.c \
- src/feature/hs/hs_cell.c \
- src/feature/hs/hs_circuit.c \
- src/feature/hs/hs_circuitmap.c \
- src/feature/hs/hs_client.c \
- src/feature/hs/hs_common.c \
- src/feature/hs/hs_config.c \
- src/feature/hs/hs_control.c \
- src/feature/hs/hs_descriptor.c \
- src/feature/hs/hs_ident.c \
- src/feature/hs/hs_intropoint.c \
- src/feature/hs/hs_service.c \
- src/feature/hs/hs_stats.c \
- src/feature/hs_common/replaycache.c \
- src/feature/hs_common/shared_random_client.c \
- src/feature/keymgt/loadkey.c \
- src/feature/dirauth/keypin.c \
- src/feature/nodelist/authcert.c \
- src/feature/nodelist/describe.c \
- src/feature/nodelist/dirlist.c \
- src/feature/nodelist/microdesc.c \
- src/feature/nodelist/networkstatus.c \
- src/feature/nodelist/nickname.c \
- src/feature/nodelist/nodelist.c \
- src/feature/nodelist/node_select.c \
- src/feature/nodelist/routerinfo.c \
- src/feature/nodelist/routerlist.c \
- src/feature/nodelist/routerset.c \
- src/feature/nodelist/fmt_routerstatus.c \
- src/feature/nodelist/torcert.c \
- src/feature/relay/dns.c \
- src/feature/relay/ext_orport.c \
- src/feature/relay/onion_queue.c \
- src/feature/relay/router.c \
- src/feature/relay/routerkeys.c \
- src/feature/relay/routermode.c \
- src/feature/relay/selftest.c \
- src/feature/rend/rendcache.c \
- src/feature/rend/rendclient.c \
- src/feature/rend/rendcommon.c \
- src/feature/rend/rendmid.c \
- src/feature/rend/rendparse.c \
- src/feature/rend/rendservice.c \
- src/feature/stats/geoip_stats.c \
- src/feature/stats/rephist.c \
- src/feature/stats/predict_ports.c
-
-# These should eventually move into module_dirauth_sources, but for now
-# the separation is only in the code location.
-LIBTOR_APP_A_SOURCES += \
- src/feature/dirauth/bwauth.c \
- src/feature/dirauth/dsigs_parse.c \
- src/feature/dirauth/guardfraction.c \
- src/feature/dirauth/reachability.c \
- src/feature/dirauth/recommend_pkg.c \
- src/feature/dirauth/process_descs.c \
- src/feature/dirauth/voteflags.c
-
-if BUILD_NT_SERVICES
-LIBTOR_APP_A_SOURCES += src/app/main/ntmain.c
-endif
-
#
-# Modules are conditionnally compiled in tor starting here. We add the C files
+# Modules are conditionally compiled in tor starting here. We add the C files
# only if the modules has been enabled at configure time. We always add the
# source files of every module to libtor-testing.a so we can build the unit
# tests for everything. See the UNITTESTS_ENABLED branch below.
#
LIBTOR_APP_TESTING_A_SOURCES = $(LIBTOR_APP_A_SOURCES)
-# The Directory Authority module.
-MODULE_DIRAUTH_SOURCES = \
- src/feature/dirauth/authmode.c \
- src/feature/dirauth/dircollate.c \
- src/feature/dirauth/dirvote.c \
- src/feature/dirauth/shared_random.c \
- src/feature/dirauth/shared_random_state.c
-
-if BUILD_MODULE_DIRAUTH
-LIBTOR_APP_A_SOURCES += $(MODULE_DIRAUTH_SOURCES)
-endif
+src_core_libtor_app_a_SOURCES = \
+ $(LIBTOR_APP_A_SOURCES) \
+ $(LIBTOR_APP_A_STUB_SOURCES)
-src_core_libtor_app_a_SOURCES = $(LIBTOR_APP_A_SOURCES)
if UNITTESTS_ENABLED
# Add the sources of the modules that are needed for tests to work here.
+LIBTOR_APP_TESTING_A_SOURCES += $(MODULE_RELAY_SOURCES)
+LIBTOR_APP_TESTING_A_SOURCES += $(MODULE_DIRCACHE_SOURCES)
LIBTOR_APP_TESTING_A_SOURCES += $(MODULE_DIRAUTH_SOURCES)
src_core_libtor_app_testing_a_SOURCES = $(LIBTOR_APP_TESTING_A_SOURCES)
@@ -183,204 +30,6 @@ AM_CPPFLAGS += -DSHARE_DATADIR="\"$(datadir)\"" \
src_core_libtor_app_testing_a_CPPFLAGS = $(AM_CPPFLAGS) $(TEST_CPPFLAGS)
src_core_libtor_app_testing_a_CFLAGS = $(AM_CFLAGS) $(TEST_CFLAGS)
-noinst_HEADERS += \
- src/app/config/config.h \
- src/app/config/confparse.h \
- src/app/config/or_options_st.h \
- src/app/config/or_state_st.h \
- src/app/config/statefile.h \
- src/app/main/main.h \
- src/app/main/ntmain.h \
- src/core/crypto/hs_ntor.h \
- src/core/crypto/onion_crypto.h \
- src/core/crypto/onion_fast.h \
- src/core/crypto/onion_ntor.h \
- src/core/crypto/onion_tap.h \
- src/core/crypto/relay_crypto.h \
- src/core/mainloop/connection.h \
- src/core/mainloop/cpuworker.h \
- src/core/mainloop/mainloop.h \
- src/core/mainloop/netstatus.h \
- src/core/mainloop/periodic.h \
- src/core/or/addr_policy_st.h \
- src/core/or/address_set.h \
- src/core/or/cell_queue_st.h \
- src/core/or/cell_st.h \
- src/core/or/channel.h \
- src/core/or/channelpadding.h \
- src/core/or/channeltls.h \
- src/core/or/circuit_st.h \
- src/core/or/circuitbuild.h \
- src/core/or/circuitlist.h \
- src/core/or/circuitmux.h \
- src/core/or/circuitmux_ewma.h \
- src/core/or/circuitstats.h \
- src/core/or/circuituse.h \
- src/core/or/command.h \
- src/core/or/connection_edge.h \
- src/core/or/connection_or.h \
- src/core/or/connection_st.h \
- src/core/or/cpath_build_state_st.h \
- src/core/or/crypt_path_reference_st.h \
- src/core/or/crypt_path_st.h \
- src/core/or/destroy_cell_queue_st.h \
- src/core/or/dos.h \
- src/core/or/edge_connection_st.h \
- src/core/or/half_edge_st.h \
- src/core/or/entry_connection_st.h \
- src/core/or/entry_port_cfg_st.h \
- src/core/or/extend_info_st.h \
- src/core/or/listener_connection_st.h \
- src/core/or/onion.h \
- src/core/or/or.h \
- src/core/or/or_circuit_st.h \
- src/core/or/or_connection_st.h \
- src/core/or/or_handshake_certs_st.h \
- src/core/or/or_handshake_state_st.h \
- src/core/or/origin_circuit_st.h \
- src/core/or/policies.h \
- src/core/or/port_cfg_st.h \
- src/core/or/protover.h \
- src/core/or/reasons.h \
- src/core/or/relay.h \
- src/core/or/relay_crypto_st.h \
- src/core/or/scheduler.h \
- src/core/or/server_port_cfg_st.h \
- src/core/or/socks_request_st.h \
- src/core/or/status.h \
- src/core/or/tor_version_st.h \
- src/core/or/var_cell_st.h \
- src/core/or/versions.h \
- src/core/proto/proto_cell.h \
- src/core/proto/proto_control0.h \
- src/core/proto/proto_ext_or.h \
- src/core/proto/proto_http.h \
- src/core/proto/proto_socks.h \
- src/feature/api/tor_api_internal.h \
- src/feature/client/addressmap.h \
- src/feature/client/bridges.h \
- src/feature/client/circpathbias.h \
- src/feature/client/dnsserv.h \
- src/feature/client/entrynodes.h \
- src/feature/client/transports.h \
- src/feature/control/control.h \
- src/feature/control/control_connection_st.h \
- src/feature/control/fmt_serverstatus.h \
- src/feature/control/getinfo_geoip.h \
- src/feature/dirauth/authmode.h \
- src/feature/dirauth/bwauth.h \
- src/feature/dirauth/dircollate.h \
- src/feature/dirauth/dirvote.h \
- src/feature/dirauth/dsigs_parse.h \
- src/feature/dirauth/guardfraction.h \
- src/feature/dirauth/keypin.h \
- src/feature/dirauth/ns_detached_signatures_st.h \
- src/feature/dirauth/reachability.h \
- src/feature/dirauth/recommend_pkg.h \
- src/feature/dirauth/process_descs.h \
- src/feature/dirauth/shared_random.h \
- src/feature/dirauth/shared_random_state.h \
- src/feature/dirauth/vote_microdesc_hash_st.h \
- src/feature/dirauth/voteflags.h \
- src/feature/dircache/cached_dir_st.h \
- src/feature/dircache/conscache.h \
- src/feature/dircache/consdiffmgr.h \
- src/feature/dircache/dircache.h \
- src/feature/dircache/dirserv.h \
- src/feature/dirclient/dir_server_st.h \
- src/feature/dirclient/dirclient.h \
- src/feature/dirclient/dlstatus.h \
- src/feature/dirclient/download_status_st.h \
- src/feature/dircommon/consdiff.h \
- src/feature/dircommon/dir_connection_st.h \
- src/feature/dircommon/directory.h \
- src/feature/dircommon/fp_pair.h \
- src/feature/dircommon/vote_timing_st.h \
- src/feature/dircommon/voting_schedule.h \
- src/feature/dirparse/authcert_members.i \
- src/feature/dirparse/authcert_parse.h \
- src/feature/dirparse/microdesc_parse.h \
- src/feature/dirparse/ns_parse.h \
- src/feature/dirparse/parsecommon.h \
- src/feature/dirparse/policy_parse.h \
- src/feature/dirparse/routerparse.h \
- src/feature/dirparse/sigcommon.h \
- src/feature/dirparse/signing.h \
- src/feature/dirparse/unparseable.h \
- src/feature/hibernate/hibernate.h \
- src/feature/hs/hs_cache.h \
- src/feature/hs/hs_cell.h \
- src/feature/hs/hs_circuit.h \
- src/feature/hs/hs_circuitmap.h \
- src/feature/hs/hs_client.h \
- src/feature/hs/hs_common.h \
- src/feature/hs/hs_config.h \
- src/feature/hs/hs_control.h \
- src/feature/hs/hs_descriptor.h \
- src/feature/hs/hs_ident.h \
- src/feature/hs/hs_intropoint.h \
- src/feature/hs/hs_service.h \
- src/feature/hs/hs_stats.h \
- src/feature/hs/hsdir_index_st.h \
- src/feature/hs_common/replaycache.h \
- src/feature/hs_common/shared_random_client.h \
- src/feature/keymgt/loadkey.h \
- src/feature/nodelist/authcert.h \
- src/feature/nodelist/authority_cert_st.h \
- src/feature/nodelist/describe.h \
- src/feature/nodelist/desc_store_st.h \
- src/feature/nodelist/dirlist.h \
- src/feature/nodelist/document_signature_st.h \
- src/feature/nodelist/extrainfo_st.h \
- src/feature/nodelist/microdesc.h \
- src/feature/nodelist/microdesc_st.h \
- src/feature/nodelist/networkstatus.h \
- src/feature/nodelist/networkstatus_sr_info_st.h \
- src/feature/nodelist/networkstatus_st.h \
- src/feature/nodelist/networkstatus_voter_info_st.h \
- src/feature/nodelist/nickname.h \
- src/feature/nodelist/node_st.h \
- src/feature/nodelist/nodelist.h \
- src/feature/nodelist/node_select.h \
- src/feature/nodelist/routerinfo.h \
- src/feature/nodelist/routerinfo_st.h \
- src/feature/nodelist/routerlist.h \
- src/feature/nodelist/routerlist_st.h \
- src/feature/nodelist/routerset.h \
- src/feature/nodelist/fmt_routerstatus.h \
- src/feature/nodelist/routerstatus_st.h \
- src/feature/nodelist/signed_descriptor_st.h \
- src/feature/nodelist/torcert.h \
- src/feature/nodelist/vote_routerstatus_st.h \
- src/feature/relay/dns.h \
- src/feature/relay/dns_structs.h \
- src/feature/relay/ext_orport.h \
- src/feature/relay/onion_queue.h \
- src/feature/relay/router.h \
- src/feature/relay/routerkeys.h \
- src/feature/relay/routermode.h \
- src/feature/relay/selftest.h \
- src/feature/rend/rend_authorized_client_st.h \
- src/feature/rend/rend_encoded_v2_service_descriptor_st.h \
- src/feature/rend/rend_intro_point_st.h \
- src/feature/rend/rend_service_descriptor_st.h \
- src/feature/rend/rendcache.h \
- src/feature/rend/rendclient.h \
- src/feature/rend/rendcommon.h \
- src/feature/rend/rendmid.h \
- src/feature/rend/rendparse.h \
- src/feature/rend/rendservice.h \
- src/feature/stats/geoip_stats.h \
- src/feature/stats/rephist.h \
- src/feature/stats/predict_ports.h
-
-noinst_HEADERS += \
- src/app/config/auth_dirs.inc \
- src/app/config/fallback_dirs.inc
-
-# This may someday want to be an installed file?
-noinst_HEADERS += src/feature/api/tor_api.h
-
micro-revision.i: FORCE
$(AM_V_at)rm -f micro-revision.tmp; \
if test -r "$(top_srcdir)/.git" && \
diff --git a/src/core/mainloop/.may_include b/src/core/mainloop/.may_include
new file mode 100644
index 0000000000..8e01cf910e
--- /dev/null
+++ b/src/core/mainloop/.may_include
@@ -0,0 +1,24 @@
+!advisory
+
+orconfig.h
+
+lib/conf/*.h
+lib/container/*.h
+lib/dispatch/*.h
+lib/evloop/*.h
+lib/pubsub/*.h
+lib/subsys/*.h
+lib/buf/*.h
+lib/crypt_ops/*.h
+lib/err/*.h
+lib/tls/*.h
+lib/net/*.h
+lib/evloop/*.h
+lib/geoip/*.h
+lib/sandbox/*.h
+lib/smartlist_core/*.h
+lib/compress/*.h
+lib/log/*.h
+
+core/mainloop/*.h
+core/mainloop/*.inc
\ No newline at end of file
diff --git a/src/core/mainloop/connection.c b/src/core/mainloop/connection.c
index 21d4332758..36b2c6ef63 100644
--- a/src/core/mainloop/connection.c
+++ b/src/core/mainloop/connection.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -57,7 +57,7 @@
#define CONNECTION_PRIVATE
#include "core/or/or.h"
#include "feature/client/bridges.h"
-#include "lib/container/buffers.h"
+#include "lib/buf/buffers.h"
#include "lib/tls/buffers_tls.h"
#include "lib/err/backtrace.h"
@@ -65,9 +65,9 @@
* Define this so we get channel internal functions, since we're implementing
* part of a subclass (channel_tls_t).
*/
-#define TOR_CHANNEL_INTERNAL_
-#define CONNECTION_PRIVATE
+#define CHANNEL_OBJECT_PRIVATE
#include "app/config/config.h"
+#include "app/config/resolve_addr.h"
#include "core/mainloop/connection.h"
#include "core/mainloop/mainloop.h"
#include "core/mainloop/netstatus.h"
@@ -82,13 +82,17 @@
#include "core/or/policies.h"
#include "core/or/reasons.h"
#include "core/or/relay.h"
+#include "core/or/crypt_path.h"
+#include "core/proto/proto_haproxy.h"
#include "core/proto/proto_http.h"
#include "core/proto/proto_socks.h"
#include "feature/client/dnsserv.h"
#include "feature/client/entrynodes.h"
#include "feature/client/transports.h"
#include "feature/control/control.h"
+#include "feature/control/control_events.h"
#include "feature/dirauth/authmode.h"
+#include "feature/dirauth/dirauth_config.h"
#include "feature/dircache/dirserv.h"
#include "feature/dircommon/directory.h"
#include "feature/hibernate/hibernate.h"
@@ -105,6 +109,7 @@
#include "lib/crypt_ops/crypto_util.h"
#include "lib/geoip/geoip.h"
+#include "lib/cc/ctassert.h"
#include "lib/sandbox/sandbox.h"
#include "lib/net/buffers_net.h"
#include "lib/tls/tortls.h"
@@ -641,7 +646,7 @@ connection_free_minimal(connection_t *conn)
}
}
- tor_free(conn->address);
+ tor_str_wipe_and_free(conn->address);
if (connection_speaks_cells(conn)) {
or_connection_t *or_conn = TO_OR_CONN(conn);
@@ -661,7 +666,7 @@ connection_free_minimal(connection_t *conn)
}
or_handshake_state_free(or_conn->handshake_state);
or_conn->handshake_state = NULL;
- tor_free(or_conn->nickname);
+ tor_str_wipe_and_free(or_conn->nickname);
if (or_conn->chan) {
/* Owww, this shouldn't happen, but... */
channel_t *base_chan = TLS_CHAN_TO_BASE(or_conn->chan);
@@ -681,8 +686,8 @@ connection_free_minimal(connection_t *conn)
}
if (conn->type == CONN_TYPE_AP) {
entry_connection_t *entry_conn = TO_ENTRY_CONN(conn);
- tor_free(entry_conn->chosen_exit_name);
- tor_free(entry_conn->original_dest_address);
+ tor_str_wipe_and_free(entry_conn->chosen_exit_name);
+ tor_str_wipe_and_free(entry_conn->original_dest_address);
if (entry_conn->socks_request)
socks_request_free(entry_conn->socks_request);
if (entry_conn->pending_optimistic_data) {
@@ -700,6 +705,7 @@ connection_free_minimal(connection_t *conn)
control_connection_t *control_conn = TO_CONTROL_CONN(conn);
tor_free(control_conn->safecookie_client_hash);
tor_free(control_conn->incoming_cmd);
+ tor_free(control_conn->current_cmd);
if (control_conn->ephemeral_onion_services) {
SMARTLIST_FOREACH(control_conn->ephemeral_onion_services, char *, cp, {
memwipe(cp, 0, strlen(cp));
@@ -719,11 +725,7 @@ connection_free_minimal(connection_t *conn)
tor_free(dir_conn->requested_resource);
tor_compress_free(dir_conn->compress_state);
- if (dir_conn->spool) {
- SMARTLIST_FOREACH(dir_conn->spool, spooled_resource_t *, spooled,
- spooled_resource_free(spooled));
- smartlist_free(dir_conn->spool);
- }
+ dir_conn_clear_spool(dir_conn);
rend_data_free(dir_conn->rend_data);
hs_ident_dir_conn_free(dir_conn->hs_ident);
@@ -1199,7 +1201,7 @@ make_win32_socket_exclusive(tor_socket_t sock)
return -1;
}
return 0;
-#else /* !(defined(SO_EXCLUSIVEADDRUSE)) */
+#else /* !defined(SO_EXCLUSIVEADDRUSE) */
(void) sock;
return 0;
#endif /* defined(SO_EXCLUSIVEADDRUSE) */
@@ -1470,6 +1472,20 @@ connection_listener_new(const struct sockaddr *listensockaddr,
tor_socket_strerror(tor_socket_errno(s)));
goto err;
}
+
+#ifndef __APPLE__
+ /* This code was introduced to help debug #28229. */
+ int value;
+ socklen_t len = sizeof(value);
+
+ if (!getsockopt(s, SOL_SOCKET, SO_ACCEPTCONN, &value, &len)) {
+ if (value == 0) {
+ log_err(LD_NET, "Could not listen on %s - "
+ "getsockopt(.,SO_ACCEPTCONN,.) yields 0.", address);
+ goto err;
+ }
+ }
+#endif /* !defined(__APPLE__) */
#endif /* defined(HAVE_SYS_UN_H) */
} else {
log_err(LD_BUG, "Got unexpected address family %d.",
@@ -1503,10 +1519,11 @@ connection_listener_new(const struct sockaddr *listensockaddr,
}
}
+ /* Force IPv4 and IPv6 traffic on for non-SOCKSPorts.
+ * Forcing options on isn't a good idea, see #32994 and #33607. */
if (type != CONN_TYPE_AP_LISTENER) {
lis_conn->entry_cfg.ipv4_traffic = 1;
lis_conn->entry_cfg.ipv6_traffic = 1;
- lis_conn->entry_cfg.prefer_ipv6 = 0;
}
if (connection_add(conn) < 0) { /* no space, forget it */
@@ -1648,7 +1665,7 @@ check_sockaddr(const struct sockaddr *sa, int len, int level)
len,(int)sizeof(struct sockaddr_in6));
ok = 0;
}
- if (tor_mem_is_zero((void*)sin6->sin6_addr.s6_addr, 16) ||
+ if (fast_mem_is_zero((void*)sin6->sin6_addr.s6_addr, 16) ||
sin6->sin6_port == 0) {
log_fn(level, LD_NET,
"Address for new connection has address/port equal to zero.");
@@ -1871,7 +1888,7 @@ connection_init_accepted_conn(connection_t *conn,
/* Initiate Extended ORPort authentication. */
return connection_ext_or_start_auth(TO_OR_CONN(conn));
case CONN_TYPE_OR:
- control_event_or_conn_status(TO_OR_CONN(conn), OR_CONN_EVENT_NEW, 0);
+ connection_or_event_status(TO_OR_CONN(conn), OR_CONN_EVENT_NEW, 0);
rv = connection_tls_start_handshake(TO_OR_CONN(conn), 1);
if (rv < 0) {
connection_or_close_for_error(TO_OR_CONN(conn), 0);
@@ -1884,11 +1901,16 @@ connection_init_accepted_conn(connection_t *conn,
TO_ENTRY_CONN(conn)->nym_epoch = get_signewnym_epoch();
TO_ENTRY_CONN(conn)->socks_request->listener_type = listener->base_.type;
+ /* Any incoming connection on an entry port counts as user activity. */
+ note_user_activity(approx_time());
+
switch (TO_CONN(listener)->type) {
case CONN_TYPE_AP_LISTENER:
conn->state = AP_CONN_STATE_SOCKS_WAIT;
TO_ENTRY_CONN(conn)->socks_request->socks_prefer_no_auth =
listener->entry_cfg.socks_prefer_no_auth;
+ TO_ENTRY_CONN(conn)->socks_request->socks_use_extended_errors =
+ listener->entry_cfg.extended_socks5_codes;
break;
case CONN_TYPE_AP_TRANS_LISTENER:
TO_ENTRY_CONN(conn)->is_transparent_ap = 1;
@@ -2083,6 +2105,11 @@ connection_connect_log_client_use_ip_version(const connection_t *conn)
return;
}
+ if (fascist_firewall_use_ipv6(options)) {
+ log_info(LD_NET, "Our outgoing connection is using IPv%d.",
+ tor_addr_family(&real_addr) == AF_INET6 ? 6 : 4);
+ }
+
/* Check if we couldn't satisfy an address family preference */
if ((!pref_ipv6 && tor_addr_family(&real_addr) == AF_INET6)
|| (pref_ipv6 && tor_addr_family(&real_addr) == AF_INET)) {
@@ -2261,9 +2288,12 @@ connection_proxy_state_to_string(int state)
"PROXY_SOCKS5_WANT_AUTH_METHOD_RFC1929",
"PROXY_SOCKS5_WANT_AUTH_RFC1929_OK",
"PROXY_SOCKS5_WANT_CONNECT_OK",
+ "PROXY_HAPROXY_WAIT_FOR_FLUSH",
"PROXY_CONNECTED",
};
+ CTASSERT(ARRAY_LENGTH(states) == PROXY_CONNECTED+1);
+
if (state < PROXY_NONE || state > PROXY_CONNECTED)
return unknown;
@@ -2296,7 +2326,11 @@ conn_get_proxy_type(const connection_t *conn)
return PROXY_SOCKS4;
else if (options->Socks5Proxy)
return PROXY_SOCKS5;
- else
+ else if (options->TCPProxy) {
+ /* The only supported protocol in TCPProxy is haproxy. */
+ tor_assert(options->TCPProxyProtocol == TCP_PROXY_PROTOCOL_HAPROXY);
+ return PROXY_HAPROXY;
+ } else
return PROXY_NONE;
}
@@ -2305,165 +2339,245 @@ conn_get_proxy_type(const connection_t *conn)
username NUL: */
#define SOCKS4_STANDARD_BUFFER_SIZE (1 + 1 + 2 + 4 + 1)
-/** Write a proxy request of <b>type</b> (socks4, socks5, https) to conn
- * for conn->addr:conn->port, authenticating with the auth details given
- * in the configuration (if available). SOCKS 5 and HTTP CONNECT proxies
- * support authentication.
+/** Write a proxy request of https to conn for conn->addr:conn->port,
+ * authenticating with the auth details given in the configuration
+ * (if available).
*
* Returns -1 if conn->addr is incompatible with the proxy protocol, and
* 0 otherwise.
- *
- * Use connection_read_proxy_handshake() to complete the handshake.
*/
-int
-connection_proxy_connect(connection_t *conn, int type)
+static int
+connection_https_proxy_connect(connection_t *conn)
{
- const or_options_t *options;
+ tor_assert(conn);
+
+ const or_options_t *options = get_options();
+ char buf[1024];
+ char *base64_authenticator = NULL;
+ const char *authenticator = options->HTTPSProxyAuthenticator;
+
+ /* Send HTTP CONNECT and authentication (if available) in
+ * one request */
+
+ if (authenticator) {
+ base64_authenticator = alloc_http_authenticator(authenticator);
+ if (!base64_authenticator)
+ log_warn(LD_OR, "Encoding https authenticator failed");
+ }
+
+ if (base64_authenticator) {
+ const char *addrport = fmt_addrport(&conn->addr, conn->port);
+ tor_snprintf(buf, sizeof(buf), "CONNECT %s HTTP/1.1\r\n"
+ "Host: %s\r\n"
+ "Proxy-Authorization: Basic %s\r\n\r\n",
+ addrport,
+ addrport,
+ base64_authenticator);
+ tor_free(base64_authenticator);
+ } else {
+ tor_snprintf(buf, sizeof(buf), "CONNECT %s HTTP/1.0\r\n\r\n",
+ fmt_addrport(&conn->addr, conn->port));
+ }
+
+ connection_buf_add(buf, strlen(buf), conn);
+ conn->proxy_state = PROXY_HTTPS_WANT_CONNECT_OK;
+
+ return 0;
+}
+/** Write a proxy request of socks4 to conn for conn->addr:conn->port.
+ *
+ * Returns -1 if conn->addr is incompatible with the proxy protocol, and
+ * 0 otherwise.
+ */
+static int
+connection_socks4_proxy_connect(connection_t *conn)
+{
tor_assert(conn);
- options = get_options();
+ unsigned char *buf;
+ uint16_t portn;
+ uint32_t ip4addr;
+ size_t buf_size = 0;
+ char *socks_args_string = NULL;
- switch (type) {
- case PROXY_CONNECT: {
- char buf[1024];
- char *base64_authenticator=NULL;
- const char *authenticator = options->HTTPSProxyAuthenticator;
-
- /* Send HTTP CONNECT and authentication (if available) in
- * one request */
-
- if (authenticator) {
- base64_authenticator = alloc_http_authenticator(authenticator);
- if (!base64_authenticator)
- log_warn(LD_OR, "Encoding https authenticator failed");
- }
+ /* Send a SOCKS4 connect request */
- if (base64_authenticator) {
- const char *addrport = fmt_addrport(&conn->addr, conn->port);
- tor_snprintf(buf, sizeof(buf), "CONNECT %s HTTP/1.1\r\n"
- "Host: %s\r\n"
- "Proxy-Authorization: Basic %s\r\n\r\n",
- addrport,
- addrport,
- base64_authenticator);
- tor_free(base64_authenticator);
- } else {
- tor_snprintf(buf, sizeof(buf), "CONNECT %s HTTP/1.0\r\n\r\n",
- fmt_addrport(&conn->addr, conn->port));
- }
+ if (tor_addr_family(&conn->addr) != AF_INET) {
+ log_warn(LD_NET, "SOCKS4 client is incompatible with IPv6");
+ return -1;
+ }
- connection_buf_add(buf, strlen(buf), conn);
- conn->proxy_state = PROXY_HTTPS_WANT_CONNECT_OK;
- break;
+ { /* If we are here because we are trying to connect to a
+ pluggable transport proxy, check if we have any SOCKS
+ arguments to transmit. If we do, compress all arguments to
+ a single string in 'socks_args_string': */
+
+ if (conn_get_proxy_type(conn) == PROXY_PLUGGABLE) {
+ socks_args_string =
+ pt_get_socks_args_for_proxy_addrport(&conn->addr, conn->port);
+ if (socks_args_string)
+ log_debug(LD_NET, "Sending out '%s' as our SOCKS argument string.",
+ socks_args_string);
}
+ }
- case PROXY_SOCKS4: {
- unsigned char *buf;
- uint16_t portn;
- uint32_t ip4addr;
- size_t buf_size = 0;
- char *socks_args_string = NULL;
+ { /* Figure out the buffer size we need for the SOCKS message: */
- /* Send a SOCKS4 connect request */
+ buf_size = SOCKS4_STANDARD_BUFFER_SIZE;
- if (tor_addr_family(&conn->addr) != AF_INET) {
- log_warn(LD_NET, "SOCKS4 client is incompatible with IPv6");
- return -1;
- }
+ /* If we have a SOCKS argument string, consider its size when
+ calculating the buffer size: */
+ if (socks_args_string)
+ buf_size += strlen(socks_args_string);
+ }
- { /* If we are here because we are trying to connect to a
- pluggable transport proxy, check if we have any SOCKS
- arguments to transmit. If we do, compress all arguments to
- a single string in 'socks_args_string': */
+ buf = tor_malloc_zero(buf_size);
- if (conn_get_proxy_type(conn) == PROXY_PLUGGABLE) {
- socks_args_string =
- pt_get_socks_args_for_proxy_addrport(&conn->addr, conn->port);
- if (socks_args_string)
- log_debug(LD_NET, "Sending out '%s' as our SOCKS argument string.",
- socks_args_string);
- }
- }
+ ip4addr = tor_addr_to_ipv4n(&conn->addr);
+ portn = htons(conn->port);
- { /* Figure out the buffer size we need for the SOCKS message: */
+ buf[0] = 4; /* version */
+ buf[1] = SOCKS_COMMAND_CONNECT; /* command */
+ memcpy(buf + 2, &portn, 2); /* port */
+ memcpy(buf + 4, &ip4addr, 4); /* addr */
+
+ /* Next packet field is the userid. If we have pluggable
+ transport SOCKS arguments, we have to embed them
+ there. Otherwise, we use an empty userid. */
+ if (socks_args_string) { /* place the SOCKS args string: */
+ tor_assert(strlen(socks_args_string) > 0);
+ tor_assert(buf_size >=
+ SOCKS4_STANDARD_BUFFER_SIZE + strlen(socks_args_string));
+ strlcpy((char *)buf + 8, socks_args_string, buf_size - 8);
+ tor_free(socks_args_string);
+ } else {
+ buf[8] = 0; /* no userid */
+ }
- buf_size = SOCKS4_STANDARD_BUFFER_SIZE;
+ connection_buf_add((char *)buf, buf_size, conn);
+ tor_free(buf);
- /* If we have a SOCKS argument string, consider its size when
- calculating the buffer size: */
- if (socks_args_string)
- buf_size += strlen(socks_args_string);
- }
+ conn->proxy_state = PROXY_SOCKS4_WANT_CONNECT_OK;
+ return 0;
+}
- buf = tor_malloc_zero(buf_size);
-
- ip4addr = tor_addr_to_ipv4n(&conn->addr);
- portn = htons(conn->port);
-
- buf[0] = 4; /* version */
- buf[1] = SOCKS_COMMAND_CONNECT; /* command */
- memcpy(buf + 2, &portn, 2); /* port */
- memcpy(buf + 4, &ip4addr, 4); /* addr */
-
- /* Next packet field is the userid. If we have pluggable
- transport SOCKS arguments, we have to embed them
- there. Otherwise, we use an empty userid. */
- if (socks_args_string) { /* place the SOCKS args string: */
- tor_assert(strlen(socks_args_string) > 0);
- tor_assert(buf_size >=
- SOCKS4_STANDARD_BUFFER_SIZE + strlen(socks_args_string));
- strlcpy((char *)buf + 8, socks_args_string, buf_size - 8);
- tor_free(socks_args_string);
- } else {
- buf[8] = 0; /* no userid */
- }
+/** Write a proxy request of socks5 to conn for conn->addr:conn->port,
+ * authenticating with the auth details given in the configuration
+ * (if available).
+ *
+ * Returns -1 if conn->addr is incompatible with the proxy protocol, and
+ * 0 otherwise.
+ */
+static int
+connection_socks5_proxy_connect(connection_t *conn)
+{
+ tor_assert(conn);
- connection_buf_add((char *)buf, buf_size, conn);
- tor_free(buf);
+ const or_options_t *options = get_options();
+ unsigned char buf[4]; /* fields: vers, num methods, method list */
- conn->proxy_state = PROXY_SOCKS4_WANT_CONNECT_OK;
- break;
- }
+ /* Send a SOCKS5 greeting (connect request must wait) */
- case PROXY_SOCKS5: {
- unsigned char buf[4]; /* fields: vers, num methods, method list */
+ buf[0] = 5; /* version */
- /* Send a SOCKS5 greeting (connect request must wait) */
+ /* We have to use SOCKS5 authentication, if we have a
+ Socks5ProxyUsername or if we want to pass arguments to our
+ pluggable transport proxy: */
+ if ((options->Socks5ProxyUsername) ||
+ (conn_get_proxy_type(conn) == PROXY_PLUGGABLE &&
+ (get_socks_args_by_bridge_addrport(&conn->addr, conn->port)))) {
+ /* number of auth methods */
+ buf[1] = 2;
+ buf[2] = 0x00; /* no authentication */
+ buf[3] = 0x02; /* rfc1929 Username/Passwd auth */
+ conn->proxy_state = PROXY_SOCKS5_WANT_AUTH_METHOD_RFC1929;
+ } else {
+ buf[1] = 1;
+ buf[2] = 0x00; /* no authentication */
+ conn->proxy_state = PROXY_SOCKS5_WANT_AUTH_METHOD_NONE;
+ }
- buf[0] = 5; /* version */
+ connection_buf_add((char *)buf, 2 + buf[1], conn);
+ return 0;
+}
- /* We have to use SOCKS5 authentication, if we have a
- Socks5ProxyUsername or if we want to pass arguments to our
- pluggable transport proxy: */
- if ((options->Socks5ProxyUsername) ||
- (conn_get_proxy_type(conn) == PROXY_PLUGGABLE &&
- (get_socks_args_by_bridge_addrport(&conn->addr, conn->port)))) {
- /* number of auth methods */
- buf[1] = 2;
- buf[2] = 0x00; /* no authentication */
- buf[3] = 0x02; /* rfc1929 Username/Passwd auth */
- conn->proxy_state = PROXY_SOCKS5_WANT_AUTH_METHOD_RFC1929;
- } else {
- buf[1] = 1;
- buf[2] = 0x00; /* no authentication */
- conn->proxy_state = PROXY_SOCKS5_WANT_AUTH_METHOD_NONE;
- }
+/** Write a proxy request of haproxy to conn for conn->addr:conn->port.
+ *
+ * Returns -1 if conn->addr is incompatible with the proxy protocol, and
+ * 0 otherwise.
+ */
+static int
+connection_haproxy_proxy_connect(connection_t *conn)
+{
+ int ret = 0;
+ tor_addr_port_t *addr_port = tor_addr_port_new(&conn->addr, conn->port);
+ char *buf = haproxy_format_proxy_header_line(addr_port);
+
+ if (buf == NULL) {
+ ret = -1;
+ goto done;
+ }
+
+ connection_buf_add(buf, strlen(buf), conn);
+  /* With the haproxy protocol there is no response to wait for; we only
+   * wait for our header to be flushed, so use PROXY_HAPROXY_WAIT_FOR_FLUSH. */
+ conn->proxy_state = PROXY_HAPROXY_WAIT_FOR_FLUSH;
+
+ ret = 0;
+ done:
+ tor_free(buf);
+ tor_free(addr_port);
+ return ret;
+}
+
+/** Write a proxy request of <b>type</b> (socks4, socks5, https, haproxy)
+ * to conn for conn->addr:conn->port, authenticating with the auth details
+ * given in the configuration (if available). SOCKS 5 and HTTP CONNECT
+ * proxies support authentication.
+ *
+ * Returns -1 if conn->addr is incompatible with the proxy protocol, and
+ * 0 otherwise.
+ *
+ * Use connection_read_proxy_handshake() to complete the handshake.
+ */
+int
+connection_proxy_connect(connection_t *conn, int type)
+{
+ int ret = 0;
+
+ tor_assert(conn);
+
+ switch (type) {
+ case PROXY_CONNECT:
+ ret = connection_https_proxy_connect(conn);
+ break;
- connection_buf_add((char *)buf, 2 + buf[1], conn);
+ case PROXY_SOCKS4:
+ ret = connection_socks4_proxy_connect(conn);
+ break;
+
+ case PROXY_SOCKS5:
+ ret = connection_socks5_proxy_connect(conn);
+ break;
+
+ case PROXY_HAPROXY:
+ ret = connection_haproxy_proxy_connect(conn);
break;
- }
default:
log_err(LD_BUG, "Invalid proxy protocol, %d", type);
tor_fragile_assert();
- return -1;
+ ret = -1;
+ break;
}
- log_debug(LD_NET, "set state %s",
- connection_proxy_state_to_string(conn->proxy_state));
+ if (ret == 0) {
+ log_debug(LD_NET, "set state %s",
+ connection_proxy_state_to_string(conn->proxy_state));
+ }
- return 0;
+ return ret;
}
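As the comment above says, connection_proxy_connect() only writes the first message of the handshake; connection_read_proxy_handshake() drives the rest as replies arrive. A rough caller-side sketch, with invented variable names and trimmed error handling (not code from this patch):

    /* Hypothetical caller: start the proxy handshake once the TCP
     * connection to the proxy itself is up. */
    if (connection_proxy_connect(conn, proxy_type) < 0) {
      connection_mark_for_close(conn); /* addr/protocol mismatch */
      return -1;
    }
    /* ...later, whenever the proxy sends us data and conn->proxy_state is
     * not yet PROXY_CONNECTED, advance the handshake: */
    if (connection_read_proxy_handshake(conn) < 0) {
      connection_mark_for_close(conn); /* proxy refused or misbehaved */
      return -1;
    }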
/** Read conn's inbuf. If the http response from the proxy is all
@@ -2841,7 +2955,7 @@ retry_listener_ports(smartlist_t *old_conns,
SMARTLIST_DEL_CURRENT(old_conns, conn);
break;
}
-#endif
+#endif /* defined(ENABLE_LISTENER_REBIND) */
}
} SMARTLIST_FOREACH_END(wanted);
@@ -2905,6 +3019,10 @@ retry_all_listeners(smartlist_t *new_conns, int close_all_noncontrol)
retval = -1;
#ifdef ENABLE_LISTENER_REBIND
+ if (smartlist_len(replacements))
+ log_debug(LD_NET, "%d replacements - starting rebinding loop.",
+ smartlist_len(replacements));
+
SMARTLIST_FOREACH_BEGIN(replacements, listener_replacement_t *, r) {
int addr_in_use = 0;
int skip = 0;
@@ -2916,8 +3034,11 @@ retry_all_listeners(smartlist_t *new_conns, int close_all_noncontrol)
connection_listener_new_for_port(r->new_port, &skip, &addr_in_use);
connection_t *old_conn = r->old_conn;
- if (skip)
+ if (skip) {
+ log_debug(LD_NET, "Skipping creating new listener for %s:%d",
+ old_conn->address, old_conn->port);
continue;
+ }
connection_close_immediate(old_conn);
connection_mark_for_close(old_conn);
@@ -2943,7 +3064,7 @@ retry_all_listeners(smartlist_t *new_conns, int close_all_noncontrol)
conn_type_to_string(old_conn->type), old_conn->address,
old_conn->port, new_conn->address, new_conn->port);
} SMARTLIST_FOREACH_END(r);
-#endif
+#endif /* defined(ENABLE_LISTENER_REBIND) */
/* Any members that were still in 'listeners' don't correspond to
* any configured port. Kill 'em. */
@@ -3030,7 +3151,7 @@ connection_mark_all_noncontrol_connections(void)
* uses pluggable transports, since we should then limit it even if it
* comes from an internal IP address. */
static int
-connection_is_rate_limited(connection_t *conn)
+connection_is_rate_limited(const connection_t *conn)
{
const or_options_t *options = get_options();
if (conn->linked)
@@ -3166,14 +3287,14 @@ connection_bucket_write_limit(connection_t *conn, time_t now)
global_bucket_val, conn_bucket);
}
-/** Return 1 if the global write buckets are low enough that we
+/** Return true iff the global write buckets are low enough that we
* shouldn't send <b>attempt</b> bytes of low-priority directory stuff
- * out to <b>conn</b>. Else return 0.
-
- * Priority was 1 for v1 requests (directories and running-routers),
- * and 2 for v2 requests and later (statuses and descriptors).
+ * out to <b>conn</b>.
+ *
+ * If we are a directory authority, we generally answer dir requests
+ * regardless of load, so false is returned (unless we are configured to
+ * reject requests under load and the requester is not a known relay).
*
- * There are a lot of parameters we could use here:
+ * Note: There are a lot of parameters we could use here:
* - global_relayed_write_bucket. Low is bad.
* - global_write_bucket. Low is bad.
* - bandwidthrate. Low is bad.
@@ -3185,39 +3306,40 @@ connection_bucket_write_limit(connection_t *conn, time_t now)
* mean is "total directory bytes added to outbufs recently", but
* that's harder to quantify and harder to keep track of.
*/
-int
-global_write_bucket_low(connection_t *conn, size_t attempt, int priority)
+bool
+connection_dir_is_global_write_low(const connection_t *conn, size_t attempt)
{
size_t smaller_bucket =
MIN(token_bucket_rw_get_write(&global_bucket),
token_bucket_rw_get_write(&global_relayed_bucket));
- if (authdir_mode(get_options()) && priority>1)
- return 0; /* there's always room to answer v2 if we're an auth dir */
+
+ /* Special case for authorities (directory only). */
+ if (authdir_mode_v3(get_options())) {
+ /* Are we configured to possibly reject requests under load? */
+ if (!dirauth_should_reject_requests_under_load()) {
+ /* Answer request no matter what. */
+ return false;
+ }
+    /* Always answer requests from a known relay, which includes the other
+     * authorities. The following looks up the addresses of relays whose
+     * descriptors we have, as well as of any configured trusted
+     * directories. */
+ if (nodelist_probably_contains_address(&conn->addr)) {
+ return false;
+ }
+ }
if (!connection_is_rate_limited(conn))
- return 0; /* local conns don't get limited */
+ return false; /* local conns don't get limited */
if (smaller_bucket < attempt)
- return 1; /* not enough space no matter the priority */
+ return true; /* not enough space. */
{
const time_t diff = approx_time() - write_buckets_last_empty_at;
if (diff <= 1)
- return 1; /* we're already hitting our limits, no more please */
+ return true; /* we're already hitting our limits, no more please */
}
-
- if (priority == 1) { /* old-style v1 query */
- /* Could we handle *two* of these requests within the next two seconds? */
- const or_options_t *options = get_options();
- size_t can_write = (size_t) (smaller_bucket
- + 2*(options->RelayBandwidthRate ? options->RelayBandwidthRate :
- options->BandwidthRate));
- if (can_write < 2*attempt)
- return 1;
- } else { /* v2 query */
- /* no further constraints yet */
- }
- return 0;
+ return false;
}
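With the priority argument gone, a caller now passes only the number of bytes it would like to write. A hypothetical caller-side sketch (the dir_conn/response names are illustrative, not from this patch):

    /* Hypothetical: skip a low-priority directory write when the global
     * write buckets are running low. */
    if (connection_dir_is_global_write_low(TO_CONN(dir_conn), response_len)) {
      return; /* too little global write capacity: refuse or defer */
    }
    connection_buf_add(response, response_len, TO_CONN(dir_conn));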
/** When did we last tell the accounting subsystem about transmitted
@@ -3239,8 +3361,17 @@ record_num_bytes_transferred_impl(connection_t *conn,
rep_hist_note_dir_bytes_written(num_written, now);
}
+ /* Linked connections and internal IPs aren't counted for statistics or
+ * accounting:
+ * - counting linked connections would double-count BEGINDIR bytes, because
+ * they are sent as Dir bytes on the linked connection, and OR bytes on
+ * the OR connection;
+ * - relays and clients don't connect to internal IPs, unless specifically
+ * configured to do so. If they are configured that way, we don't count
+ * internal bytes.
+ */
if (!connection_is_rate_limited(conn))
- return; /* local IPs are free */
+ return;
if (conn->type == CONN_TYPE_OR)
rep_hist_note_or_conn_bytes(conn->global_identifier, num_read,
@@ -3696,6 +3827,12 @@ connection_buf_read_from_socket(connection_t *conn, ssize_t *max_to_read,
at_most = connection_bucket_read_limit(conn, approx_time());
}
+ /* Do not allow inbuf to grow past BUF_MAX_LEN. */
+ const ssize_t maximum = BUF_MAX_LEN - buf_datalen(conn->inbuf);
+ if (at_most > maximum) {
+ at_most = maximum;
+ }
+
slack_in_buf = buf_slack(conn->inbuf);
again:
if ((size_t)at_most > slack_in_buf && slack_in_buf >= 1024) {
@@ -3943,9 +4080,9 @@ update_send_buffer_size(tor_socket_t sock)
&isb, sizeof(isb), &bytesReturned, NULL, NULL)) {
setsockopt(sock, SOL_SOCKET, SO_SNDBUF, (const char*)&isb, sizeof(isb));
}
-#else
+#else /* !defined(_WIN32) */
(void) sock;
-#endif
+#endif /* defined(_WIN32) */
}
/** Try to flush more bytes onto <b>conn</b>-\>s.
@@ -4080,6 +4217,7 @@ connection_handle_write_impl(connection_t *conn, int force)
switch (result) {
CASE_TOR_TLS_ERROR_ANY:
case TOR_TLS_CLOSE:
+ or_conn->tls_error = result;
log_info(LD_NET, result != TOR_TLS_CLOSE ?
"tls error. breaking.":"TLS connection closed on flush");
/* Don't flush; connection is dead. */
@@ -4343,6 +4481,23 @@ connection_write_to_buf_impl_,(const char *string, size_t len,
connection_write_to_buf_commit(conn, written);
}
+/**
+ * Write <b>string</b> (of size <b>len</b>) to the directory connection
+ * <b>dir_conn</b>. Apply compression if the connection is configured to use
+ * it, and finalize the compressed stream if <b>done</b> is true.
+ */
+void
+connection_dir_buf_add(const char *string, size_t len,
+ dir_connection_t *dir_conn, int done)
+{
+ if (dir_conn->compress_state != NULL) {
+ connection_buf_add_compress(string, len, dir_conn, done);
+ return;
+ }
+
+ connection_buf_add(string, len, TO_CONN(dir_conn));
+}
+
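The done flag matters because a compressed directory stream must be finalized exactly once, on the last chunk. A short hypothetical sketch of streaming a reply in pieces (the chunk names are illustrative):

    /* Hypothetical: write a directory reply in two pieces; compression, if
     * configured on dir_conn, is applied transparently and finalized by the
     * final done=1 call. */
    connection_dir_buf_add(chunk1, strlen(chunk1), dir_conn, 0);
    connection_dir_buf_add(chunk2, strlen(chunk2), dir_conn, 1);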
void
connection_buf_add_compress(const char *string, size_t len,
dir_connection_t *conn, int done)
@@ -4457,6 +4612,16 @@ connection_get_by_type_state(int type, int state)
CONN_GET_TEMPLATE(conn, conn->type == type && conn->state == state);
}
+/**
+ * Return a connection of type <b>type</b> that is not an internally linked
+ * connection, and is not marked for close.
+ **/
+MOCK_IMPL(connection_t *,
+connection_get_by_type_nonlinked,(int type))
+{
+ CONN_GET_TEMPLATE(conn, conn->type == type && !conn->linked);
+}
+
/** Return a connection of type <b>type</b> that has rendquery equal
* to <b>rendquery</b>, and that is not marked for close. If state
* is non-zero, conn must be of that state too.
@@ -4837,10 +5002,10 @@ connection_finished_flushing(connection_t *conn)
}
}
-/** Called when our attempt to connect() to another server has just
- * succeeded.
+/** Called when our attempt to connect() to a server has just succeeded.
*
- * This function just passes conn to the connection-specific
+ * This function checks if the interface address has changed (clients only),
+ * and then passes conn to the connection-specific
* connection_*_finished_connecting() function.
*/
static int
@@ -5299,7 +5464,7 @@ assert_connection_ok(connection_t *conn, time_t now)
tor_assert(entry_conn->socks_request->has_finished);
if (!conn->marked_for_close) {
tor_assert(ENTRY_TO_EDGE_CONN(entry_conn)->cpath_layer);
- assert_cpath_layer_ok(ENTRY_TO_EDGE_CONN(entry_conn)->cpath_layer);
+ cpath_assert_layer_ok(ENTRY_TO_EDGE_CONN(entry_conn)->cpath_layer);
}
}
}
@@ -5353,17 +5518,20 @@ assert_connection_ok(connection_t *conn, time_t now)
}
/** Fills <b>addr</b> and <b>port</b> with the details of the global
- * proxy server we are using.
- * <b>conn</b> contains the connection we are using the proxy for.
+ * proxy server we are using. Store 1 in the int pointed to by
+ * <b>is_pt_out</b> if the connection is using a pluggable
+ * transport; store 0 otherwise. <b>conn</b> contains the connection
+ * we are using the proxy for.
*
* Return 0 on success, -1 on failure.
*/
int
get_proxy_addrport(tor_addr_t *addr, uint16_t *port, int *proxy_type,
- const connection_t *conn)
+ int *is_pt_out, const connection_t *conn)
{
const or_options_t *options = get_options();
+ *is_pt_out = 0;
/* Client Transport Plugins can use another proxy, but that should be hidden
* from the rest of tor (as the plugin is responsible for dealing with the
* proxy), check it first, then check the rest of the proxy types to allow
@@ -5379,6 +5547,7 @@ get_proxy_addrport(tor_addr_t *addr, uint16_t *port, int *proxy_type,
tor_addr_copy(addr, &transport->addr);
*port = transport->port;
*proxy_type = transport->socks_version;
+ *is_pt_out = 1;
return 0;
}
@@ -5400,6 +5569,13 @@ get_proxy_addrport(tor_addr_t *addr, uint16_t *port, int *proxy_type,
*port = options->Socks5ProxyPort;
*proxy_type = PROXY_SOCKS5;
return 0;
+ } else if (options->TCPProxy) {
+ tor_addr_copy(addr, &options->TCPProxyAddr);
+ *port = options->TCPProxyPort;
+ /* The only supported protocol in TCPProxy is haproxy. */
+ tor_assert(options->TCPProxyProtocol == TCP_PROXY_PROTOCOL_HAPROXY);
+ *proxy_type = PROXY_HAPROXY;
+ return 0;
}
tor_addr_make_unspec(addr);
@@ -5415,11 +5591,13 @@ log_failed_proxy_connection(connection_t *conn)
{
tor_addr_t proxy_addr;
uint16_t proxy_port;
- int proxy_type;
+ int proxy_type, is_pt;
- if (get_proxy_addrport(&proxy_addr, &proxy_port, &proxy_type, conn) != 0)
+ if (get_proxy_addrport(&proxy_addr, &proxy_port, &proxy_type, &is_pt,
+ conn) != 0)
return; /* if we have no proxy set up, leave this function. */
+ (void)is_pt;
log_warn(LD_NET,
"The connection to the %s proxy server at %s just failed. "
"Make sure that the proxy server is up and running.",
@@ -5435,6 +5613,7 @@ proxy_type_to_string(int proxy_type)
case PROXY_CONNECT: return "HTTP";
case PROXY_SOCKS4: return "SOCKS4";
case PROXY_SOCKS5: return "SOCKS5";
+ case PROXY_HAPROXY: return "HAPROXY";
case PROXY_PLUGGABLE: return "pluggable transports SOCKS";
case PROXY_NONE: return "NULL";
default: tor_assert(0);
diff --git a/src/core/mainloop/connection.h b/src/core/mainloop/connection.h
index 8ecdd6f06f..bcd3d590a5 100644
--- a/src/core/mainloop/connection.h
+++ b/src/core/mainloop/connection.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -12,7 +12,25 @@
#ifndef TOR_CONNECTION_H
#define TOR_CONNECTION_H
-listener_connection_t *TO_LISTENER_CONN(connection_t *);
+#include "lib/smartlist_core/smartlist_core.h"
+#include "lib/log/log.h"
+
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+
+struct listener_connection_t;
+struct connection_t;
+struct dir_connection_t;
+struct or_connection_t;
+struct edge_connection_t;
+struct entry_connection_t;
+struct control_connection_t;
+struct port_cfg_t;
+struct tor_addr_t;
+struct or_options_t;
+
+struct listener_connection_t *TO_LISTENER_CONN(struct connection_t *);
struct buf_t;
@@ -56,7 +74,7 @@ struct buf_t;
#define CONN_TYPE_MAX_ 19
/* !!!! If _CONN_TYPE_MAX is ever over 31, we must grow the type field in
- * connection_t. */
+ * struct connection_t. */
/* Proxy client handshake states */
/* We use a proxy but we haven't even connected to it yet. */
@@ -75,8 +93,10 @@ struct buf_t;
#define PROXY_SOCKS5_WANT_AUTH_RFC1929_OK 6
/* We use a SOCKS5 proxy and we just sent our CONNECT command. */
#define PROXY_SOCKS5_WANT_CONNECT_OK 7
+/* We use an HAPROXY proxy and we just sent the proxy header. */
+#define PROXY_HAPROXY_WAIT_FOR_FLUSH 8
/* We use a proxy and we CONNECTed successfully!. */
-#define PROXY_CONNECTED 8
+#define PROXY_CONNECTED 9
/** State for any listener connection. */
#define LISTENER_STATE_READY 0
@@ -88,34 +108,36 @@ struct buf_t;
*/
typedef struct
{
- connection_t *old_conn; /* Old listener connection to be replaced */
- const port_cfg_t *new_port; /* New port configuration */
+ struct connection_t *old_conn; /* Old listener connection to be replaced */
+ const struct port_cfg_t *new_port; /* New port configuration */
} listener_replacement_t;
const char *conn_type_to_string(int type);
const char *conn_state_to_string(int type, int state);
int conn_listener_type_supports_af_unix(int type);
-dir_connection_t *dir_connection_new(int socket_family);
-or_connection_t *or_connection_new(int type, int socket_family);
-edge_connection_t *edge_connection_new(int type, int socket_family);
-entry_connection_t *entry_connection_new(int type, int socket_family);
-control_connection_t *control_connection_new(int socket_family);
-listener_connection_t *listener_connection_new(int type, int socket_family);
-connection_t *connection_new(int type, int socket_family);
-int connection_init_accepted_conn(connection_t *conn,
- const listener_connection_t *listener);
-void connection_link_connections(connection_t *conn_a, connection_t *conn_b);
-MOCK_DECL(void,connection_free_,(connection_t *conn));
+struct dir_connection_t *dir_connection_new(int socket_family);
+struct or_connection_t *or_connection_new(int type, int socket_family);
+struct edge_connection_t *edge_connection_new(int type, int socket_family);
+struct entry_connection_t *entry_connection_new(int type, int socket_family);
+struct control_connection_t *control_connection_new(int socket_family);
+struct listener_connection_t *listener_connection_new(int type,
+ int socket_family);
+struct connection_t *connection_new(int type, int socket_family);
+int connection_init_accepted_conn(struct connection_t *conn,
+ const struct listener_connection_t *listener);
+void connection_link_connections(struct connection_t *conn_a,
+ struct connection_t *conn_b);
+MOCK_DECL(void,connection_free_,(struct connection_t *conn));
#define connection_free(conn) \
- FREE_AND_NULL(connection_t, connection_free_, (conn))
+ FREE_AND_NULL(struct connection_t, connection_free_, (conn))
void connection_free_all(void);
-void connection_about_to_close_connection(connection_t *conn);
-void connection_close_immediate(connection_t *conn);
-void connection_mark_for_close_(connection_t *conn,
+void connection_about_to_close_connection(struct connection_t *conn);
+void connection_close_immediate(struct connection_t *conn);
+void connection_mark_for_close_(struct connection_t *conn,
int line, const char *file);
MOCK_DECL(void, connection_mark_for_close_internal_,
- (connection_t *conn, int line, const char *file));
+ (struct connection_t *conn, int line, const char *file));
#define connection_mark_for_close(c) \
connection_mark_for_close_((c), __LINE__, SHORT_FILE__)
@@ -130,11 +152,11 @@ MOCK_DECL(void, connection_mark_for_close_internal_,
* connection_or_notify_error()), or you actually are the
* connection_or_close_for_error() or connection_or_close_normally function.
* For all other cases, use connection_mark_and_flush() instead, which
- * checks for or_connection_t properly, instead. See below.
+ * checks for struct or_connection_t properly, instead. See below.
*/
#define connection_mark_and_flush_internal_(c,line,file) \
do { \
- connection_t *tmp_conn__ = (c); \
+ struct connection_t *tmp_conn__ = (c); \
connection_mark_for_close_internal_(tmp_conn__, (line), (file)); \
tmp_conn__->hold_open_until_flushed = 1; \
} while (0)
@@ -147,7 +169,7 @@ MOCK_DECL(void, connection_mark_for_close_internal_,
*/
#define connection_mark_and_flush_(c,line,file) \
do { \
- connection_t *tmp_conn_ = (c); \
+ struct connection_t *tmp_conn_ = (c); \
if (tmp_conn_->type == CONN_TYPE_OR) { \
log_warn(LD_CHANNEL | LD_BUG, \
"Something tried to close (and flush) an or_connection_t" \
@@ -164,13 +186,13 @@ MOCK_DECL(void, connection_mark_for_close_internal_,
void connection_expire_held_open(void);
-int connection_connect(connection_t *conn, const char *address,
- const tor_addr_t *addr,
+int connection_connect(struct connection_t *conn, const char *address,
+ const struct tor_addr_t *addr,
uint16_t port, int *socket_error);
#ifdef HAVE_SYS_UN_H
-int connection_connect_unix(connection_t *conn, const char *socket_path,
+int connection_connect_unix(struct connection_t *conn, const char *socket_path,
int *socket_error);
#endif /* defined(HAVE_SYS_UN_H) */
@@ -183,75 +205,86 @@ int connection_connect_unix(connection_t *conn, const char *socket_path,
username and password fields. */
#define MAX_SOCKS5_AUTH_SIZE_TOTAL 2*MAX_SOCKS5_AUTH_FIELD_SIZE
-int connection_proxy_connect(connection_t *conn, int type);
-int connection_read_proxy_handshake(connection_t *conn);
-void log_failed_proxy_connection(connection_t *conn);
-int get_proxy_addrport(tor_addr_t *addr, uint16_t *port, int *proxy_type,
- const connection_t *conn);
+int connection_proxy_connect(struct connection_t *conn, int type);
+int connection_read_proxy_handshake(struct connection_t *conn);
+void log_failed_proxy_connection(struct connection_t *conn);
+int get_proxy_addrport(struct tor_addr_t *addr, uint16_t *port,
+ int *proxy_type,
+ int *is_pt_out, const struct connection_t *conn);
-int retry_all_listeners(smartlist_t *new_conns,
+int retry_all_listeners(struct smartlist_t *new_conns,
int close_all_noncontrol);
void connection_mark_all_noncontrol_listeners(void);
void connection_mark_all_noncontrol_connections(void);
-ssize_t connection_bucket_write_limit(connection_t *conn, time_t now);
-int global_write_bucket_low(connection_t *conn, size_t attempt, int priority);
+ssize_t connection_bucket_write_limit(struct connection_t *conn, time_t now);
+bool connection_dir_is_global_write_low(const struct connection_t *conn,
+ size_t attempt);
void connection_bucket_init(void);
-void connection_bucket_adjust(const or_options_t *options);
+void connection_bucket_adjust(const struct or_options_t *options);
void connection_bucket_refill_all(time_t now,
uint32_t now_ts);
-void connection_read_bw_exhausted(connection_t *conn, bool is_global_bw);
-void connection_write_bw_exhausted(connection_t *conn, bool is_global_bw);
-void connection_consider_empty_read_buckets(connection_t *conn);
-void connection_consider_empty_write_buckets(connection_t *conn);
-
-int connection_handle_read(connection_t *conn);
-
-int connection_buf_get_bytes(char *string, size_t len, connection_t *conn);
-int connection_buf_get_line(connection_t *conn, char *data,
- size_t *data_len);
-int connection_fetch_from_buf_http(connection_t *conn,
+void connection_read_bw_exhausted(struct connection_t *conn,
+ bool is_global_bw);
+void connection_write_bw_exhausted(struct connection_t *conn,
+ bool is_global_bw);
+void connection_consider_empty_read_buckets(struct connection_t *conn);
+void connection_consider_empty_write_buckets(struct connection_t *conn);
+
+int connection_handle_read(struct connection_t *conn);
+
+int connection_buf_get_bytes(char *string, size_t len,
+ struct connection_t *conn);
+int connection_buf_get_line(struct connection_t *conn, char *data,
+ size_t *data_len);
+int connection_fetch_from_buf_http(struct connection_t *conn,
char **headers_out, size_t max_headerlen,
char **body_out, size_t *body_used,
size_t max_bodylen, int force_complete);
-int connection_wants_to_flush(connection_t *conn);
-int connection_outbuf_too_full(connection_t *conn);
-int connection_handle_write(connection_t *conn, int force);
-int connection_flush(connection_t *conn);
+int connection_wants_to_flush(struct connection_t *conn);
+int connection_outbuf_too_full(struct connection_t *conn);
+int connection_handle_write(struct connection_t *conn, int force);
+int connection_flush(struct connection_t *conn);
MOCK_DECL(void, connection_write_to_buf_impl_,
- (const char *string, size_t len, connection_t *conn, int zlib));
+ (const char *string, size_t len, struct connection_t *conn,
+ int zlib));
/* DOCDOC connection_write_to_buf */
static void connection_buf_add(const char *string, size_t len,
- connection_t *conn);
+ struct connection_t *conn);
+void connection_dir_buf_add(const char *string, size_t len,
+ struct dir_connection_t *dir_conn, int done);
static inline void
-connection_buf_add(const char *string, size_t len, connection_t *conn)
+connection_buf_add(const char *string, size_t len, struct connection_t *conn)
{
connection_write_to_buf_impl_(string, len, conn, 0);
}
void connection_buf_add_compress(const char *string, size_t len,
- dir_connection_t *conn, int done);
-void connection_buf_add_buf(connection_t *conn, struct buf_t *buf);
-
-size_t connection_get_inbuf_len(connection_t *conn);
-size_t connection_get_outbuf_len(connection_t *conn);
-connection_t *connection_get_by_global_id(uint64_t id);
-
-connection_t *connection_get_by_type(int type);
-MOCK_DECL(connection_t *,connection_get_by_type_addr_port_purpose,(int type,
- const tor_addr_t *addr,
- uint16_t port, int purpose));
-connection_t *connection_get_by_type_state(int type, int state);
-connection_t *connection_get_by_type_state_rendquery(int type, int state,
+ struct dir_connection_t *conn, int done);
+void connection_buf_add_buf(struct connection_t *conn, struct buf_t *buf);
+
+size_t connection_get_inbuf_len(struct connection_t *conn);
+size_t connection_get_outbuf_len(struct connection_t *conn);
+struct connection_t *connection_get_by_global_id(uint64_t id);
+
+struct connection_t *connection_get_by_type(int type);
+MOCK_DECL(struct connection_t *,connection_get_by_type_nonlinked,(int type));
+MOCK_DECL(struct connection_t *,connection_get_by_type_addr_port_purpose,
+ (int type,
+ const struct tor_addr_t *addr,
+ uint16_t port, int purpose));
+struct connection_t *connection_get_by_type_state(int type, int state);
+struct connection_t *connection_get_by_type_state_rendquery(
+ int type, int state,
const char *rendquery);
-smartlist_t *connection_list_by_type_state(int type, int state);
-smartlist_t *connection_list_by_type_purpose(int type, int purpose);
-smartlist_t *connection_dir_list_by_purpose_and_resource(
+struct smartlist_t *connection_list_by_type_state(int type, int state);
+struct smartlist_t *connection_list_by_type_purpose(int type, int purpose);
+struct smartlist_t *connection_dir_list_by_purpose_and_resource(
int purpose,
const char *resource);
-smartlist_t *connection_dir_list_by_purpose_resource_and_state(
+struct smartlist_t *connection_dir_list_by_purpose_resource_and_state(
int purpose,
const char *resource,
int state);
@@ -270,7 +303,7 @@ connection_dir_count_by_purpose_and_resource(
int purpose,
const char *resource)
{
- smartlist_t *conns = connection_dir_list_by_purpose_and_resource(
+ struct smartlist_t *conns = connection_dir_list_by_purpose_and_resource(
purpose,
resource);
CONN_LEN_AND_FREE_TEMPLATE(conns);
@@ -284,7 +317,7 @@ connection_dir_count_by_purpose_resource_and_state(
const char *resource,
int state)
{
- smartlist_t *conns =
+ struct smartlist_t *conns =
connection_dir_list_by_purpose_resource_and_state(
purpose,
resource,
@@ -294,26 +327,26 @@ connection_dir_count_by_purpose_resource_and_state(
#undef CONN_LEN_AND_FREE_TEMPLATE
-int any_other_active_or_conns(const or_connection_t *this_conn);
+int any_other_active_or_conns(const struct or_connection_t *this_conn);
/* || 0 is for -Wparentheses-equality (-Wall?) appeasement under clang */
#define connection_speaks_cells(conn) (((conn)->type == CONN_TYPE_OR) || 0)
-int connection_is_listener(connection_t *conn);
-int connection_state_is_open(connection_t *conn);
-int connection_state_is_connecting(connection_t *conn);
+int connection_is_listener(struct connection_t *conn);
+int connection_state_is_open(struct connection_t *conn);
+int connection_state_is_connecting(struct connection_t *conn);
char *alloc_http_authenticator(const char *authenticator);
-void assert_connection_ok(connection_t *conn, time_t now);
-int connection_or_nonopen_was_started_here(or_connection_t *conn);
+void assert_connection_ok(struct connection_t *conn, time_t now);
+int connection_or_nonopen_was_started_here(struct or_connection_t *conn);
void connection_dump_buffer_mem_stats(int severity);
MOCK_DECL(void, clock_skew_warning,
- (const connection_t *conn, long apparent_skew, int trusted,
+ (const struct connection_t *conn, long apparent_skew, int trusted,
log_domain_mask_t domain, const char *received,
const char *source));
-int connection_is_moribund(connection_t *conn);
+int connection_is_moribund(struct connection_t *conn);
void connection_check_oos(int n_socks, int failed);
/** Execute the statement <b>stmt</b>, which may log events concerning the
@@ -335,18 +368,18 @@ void connection_check_oos(int n_socks, int failed);
STMT_END
#ifdef CONNECTION_PRIVATE
-STATIC void connection_free_minimal(connection_t *conn);
+STATIC void connection_free_minimal(struct connection_t *conn);
/* Used only by connection.c and test*.c */
MOCK_DECL(STATIC int,connection_connect_sockaddr,
- (connection_t *conn,
+ (struct connection_t *conn,
const struct sockaddr *sa,
socklen_t sa_len,
const struct sockaddr *bindaddr,
socklen_t bindaddr_len,
int *socket_error));
-MOCK_DECL(STATIC void, kill_conn_list_for_oos, (smartlist_t *conns));
-MOCK_DECL(STATIC smartlist_t *, pick_oos_victims, (int n));
+MOCK_DECL(STATIC void, kill_conn_list_for_oos, (struct smartlist_t *conns));
+MOCK_DECL(STATIC struct smartlist_t *, pick_oos_victims, (int n));
#endif /* defined(CONNECTION_PRIVATE) */
diff --git a/src/core/mainloop/core_mainloop.md b/src/core/mainloop/core_mainloop.md
new file mode 100644
index 0000000000..fee8a8179c
--- /dev/null
+++ b/src/core/mainloop/core_mainloop.md
@@ -0,0 +1,10 @@
+@dir /core/mainloop
+@brief core/mainloop: Non-onion-routing mainloop functionality
+
+This module uses the event-loop code of \refdir{lib/evloop} to implement an
+asynchronous connection-oriented protocol handler.
+
+The layering here is imperfect: the code here was split from \refdir{core/or}
+without refactoring how the two modules call one another. Probably many
+functions should be moved and refactored.
+
diff --git a/src/core/mainloop/cpuworker.c b/src/core/mainloop/cpuworker.c
index e704d55642..485ddb9741 100644
--- a/src/core/mainloop/cpuworker.c
+++ b/src/core/mainloop/cpuworker.c
@@ -1,6 +1,6 @@
/* Copyright (c) 2003-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -19,7 +19,6 @@
**/
#include "core/or/or.h"
#include "core/or/channel.h"
-#include "core/or/circuitbuild.h"
#include "core/or/circuitlist.h"
#include "core/or/connection_or.h"
#include "app/config/config.h"
@@ -27,6 +26,7 @@
#include "lib/crypt_ops/crypto_rand.h"
#include "lib/crypt_ops/crypto_util.h"
#include "core/or/onion.h"
+#include "feature/relay/circuitbuild_relay.h"
#include "feature/relay/onion_queue.h"
#include "feature/stats/rephist.h"
#include "feature/relay/router.h"
@@ -34,11 +34,10 @@
#include "core/crypto/onion_crypto.h"
#include "core/or/or_circuit_st.h"
-#include "lib/intmath/weakrng.h"
static void queue_pending_tasks(void);
-typedef struct worker_state_s {
+typedef struct worker_state_t {
int generation;
server_onion_keys_t *onion_keys;
} worker_state_t;
@@ -74,8 +73,6 @@ worker_state_free_void(void *arg)
static replyqueue_t *replyqueue = NULL;
static threadpool_t *threadpool = NULL;
-static tor_weak_rng_t request_sample_rng = TOR_WEAK_RNG_INIT;
-
static int total_pending_tasks = 0;
static int max_pending_tasks = 128;
@@ -109,7 +106,6 @@ cpu_init(void)
/* Total voodoo. Can we make this more sensible? */
max_pending_tasks = get_num_cpus(get_options()) * 64;
- crypto_seed_weak_rng(&request_sample_rng);
}
/** Magic numbers to make sure our cpuworker_requests don't grow any
@@ -164,7 +160,7 @@ typedef struct cpuworker_reply_t {
uint8_t rend_auth_material[DIGEST_LEN];
} cpuworker_reply_t;
-typedef struct cpuworker_job_u {
+typedef struct cpuworker_job_u_t {
or_circuit_t *circ;
union {
cpuworker_request_t request;
@@ -235,9 +231,10 @@ should_time_request(uint16_t onionskin_type)
* sample */
if (onionskins_n_processed[onionskin_type] < 4096)
return 1;
+
/** Otherwise, measure with P=1/128. We avoid doing this for every
* handshake, since the measurement itself can take a little time. */
- return tor_weak_random_one_in_n(&request_sample_rng, 128);
+ return crypto_fast_rng_one_in_n(get_thread_fast_rng(), 128);
}
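The replacement call keeps the same 1-in-128 sampling rate; it just draws from the calling thread's fast PRNG instead of the removed weak RNG. The idiom in isolation (a sketch, not additional code from this patch):

    /* Sample roughly one request out of every 128 for timing. */
    if (crypto_fast_rng_one_in_n(get_thread_fast_rng(), 128)) {
      /* ...record a timing measurement for this request... */
    }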
/** Return an estimate of how many microseconds we will need for a single
diff --git a/src/core/mainloop/cpuworker.h b/src/core/mainloop/cpuworker.h
index 77e2c42508..7e71961750 100644
--- a/src/core/mainloop/cpuworker.h
+++ b/src/core/mainloop/cpuworker.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -14,10 +14,10 @@
void cpu_init(void);
void cpuworkers_rotate_keyinfo(void);
-struct workqueue_entry_s;
+struct workqueue_entry_t;
enum workqueue_reply_t;
enum workqueue_priority_t;
-MOCK_DECL(struct workqueue_entry_s *, cpuworker_queue_work, (
+MOCK_DECL(struct workqueue_entry_t *, cpuworker_queue_work, (
enum workqueue_priority_t priority,
enum workqueue_reply_t (*fn)(void *, void *),
void (*reply_fn)(void *),
diff --git a/src/core/mainloop/include.am b/src/core/mainloop/include.am
new file mode 100644
index 0000000000..63643127f3
--- /dev/null
+++ b/src/core/mainloop/include.am
@@ -0,0 +1,22 @@
+
+# ADD_C_FILE: INSERT SOURCES HERE.
+LIBTOR_APP_A_SOURCES += \
+ src/core/mainloop/connection.c \
+ src/core/mainloop/cpuworker.c \
+ src/core/mainloop/mainloop.c \
+ src/core/mainloop/mainloop_pubsub.c \
+ src/core/mainloop/mainloop_sys.c \
+ src/core/mainloop/netstatus.c \
+ src/core/mainloop/periodic.c
+
+# ADD_C_FILE: INSERT HEADERS HERE.
+noinst_HEADERS += \
+ src/core/mainloop/connection.h \
+ src/core/mainloop/cpuworker.h \
+ src/core/mainloop/mainloop.h \
+ src/core/mainloop/mainloop_pubsub.h \
+ src/core/mainloop/mainloop_state.inc \
+ src/core/mainloop/mainloop_state_st.h \
+ src/core/mainloop/mainloop_sys.h \
+ src/core/mainloop/netstatus.h \
+ src/core/mainloop/periodic.h
diff --git a/src/core/mainloop/mainloop.c b/src/core/mainloop/mainloop.c
index e67e1299b2..b4dbedbfe4 100644
--- a/src/core/mainloop/mainloop.c
+++ b/src/core/mainloop/mainloop.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -71,12 +71,13 @@
#include "feature/client/bridges.h"
#include "feature/client/dnsserv.h"
#include "feature/client/entrynodes.h"
+#include "feature/client/proxymode.h"
#include "feature/client/transports.h"
#include "feature/control/control.h"
+#include "feature/control/control_events.h"
#include "feature/dirauth/authmode.h"
-#include "feature/dirauth/reachability.h"
#include "feature/dircache/consdiffmgr.h"
-#include "feature/dircache/dirserv.h"
+#include "feature/dirclient/dirclient_modes.h"
#include "feature/dircommon/directory.h"
#include "feature/hibernate/hibernate.h"
#include "feature/hs/hs_cache.h"
@@ -95,7 +96,7 @@
#include "feature/stats/geoip_stats.h"
#include "feature/stats/predict_ports.h"
#include "feature/stats/rephist.h"
-#include "lib/container/buffers.h"
+#include "lib/buf/buffers.h"
#include "lib/crypt_ops/crypto_rand.h"
#include "lib/err/backtrace.h"
#include "lib/tls/buffers_tls.h"
@@ -105,9 +106,6 @@
#include <event2/event.h>
-#include "feature/dirauth/dirvote.h"
-#include "feature/dirauth/authmode.h"
-
#include "core/or/cell_st.h"
#include "core/or/entry_connection_st.h"
#include "feature/nodelist/networkstatus_st.h"
@@ -200,12 +198,10 @@ static int can_complete_circuits = 0;
#define LAZY_DESCRIPTOR_RETRY_INTERVAL (60)
static int conn_close_if_marked(int i);
-static int run_main_loop_until_done(void);
static void connection_start_reading_from_linked_conn(connection_t *conn);
static int connection_should_read_from_linked_conn(connection_t *conn);
static void conn_read_callback(evutil_socket_t fd, short event, void *_conn);
static void conn_write_callback(evutil_socket_t fd, short event, void *_conn);
-static void second_elapsed_callback(periodic_timer_t *timer, void *args);
static void shutdown_did_not_work_callback(evutil_socket_t fd, short event,
void *arg) ATTR_NORETURN;
@@ -759,7 +755,7 @@ tor_shutdown_event_loop_for_restart_cb(
tor_event_free(tor_shutdown_event_loop_for_restart_event);
tor_shutdown_event_loop_and_exit(0);
}
-#endif
+#endif /* defined(ENABLE_RESTART_DEBUGGING) */
/**
* After finishing the current callback (if any), shut down the main loop,
@@ -970,7 +966,6 @@ conn_close_if_marked(int i)
return 0; /* nothing to see here, move along */
now = time(NULL);
assert_connection_ok(conn, now);
- /* assert_all_pending_dns_resolves_ok(); */
log_debug(LD_NET,"Cleaning up connection (fd "TOR_SOCKET_T_FORMAT").",
conn->s);
@@ -1137,14 +1132,14 @@ directory_info_has_arrived(time_t now, int from_cache, int suppress_logs)
if (!router_have_minimum_dir_info()) {
int quiet = suppress_logs || from_cache ||
- directory_too_idle_to_fetch_descriptors(options, now);
+ dirclient_too_idle_to_fetch_descriptors(options, now);
tor_log(quiet ? LOG_INFO : LOG_NOTICE, LD_DIR,
"I learned some more directory information, but not enough to "
"build a circuit: %s", get_dir_info_status_string());
update_all_descriptor_downloads(now);
return;
} else {
- if (directory_fetches_from_authorities(options)) {
+ if (dirclient_fetches_from_authorities(options)) {
update_all_descriptor_downloads(now);
}
@@ -1357,123 +1352,101 @@ get_signewnym_epoch(void)
static int periodic_events_initialized = 0;
/* Declare all the timer callback functions... */
+#ifndef COCCI
#undef CALLBACK
#define CALLBACK(name) \
static int name ## _callback(time_t, const or_options_t *)
+
CALLBACK(add_entropy);
-CALLBACK(check_authority_cert);
-CALLBACK(check_canonical_channels);
-CALLBACK(check_descriptor);
-CALLBACK(check_dns_honesty);
-CALLBACK(check_ed_keys);
CALLBACK(check_expired_networkstatus);
-CALLBACK(check_for_reachability_bw);
-CALLBACK(check_onion_keys_expiry_time);
CALLBACK(clean_caches);
CALLBACK(clean_consdiffmgr);
-CALLBACK(dirvote);
-CALLBACK(downrate_stability);
-CALLBACK(expire_old_ciruits_serverside);
CALLBACK(fetch_networkstatus);
CALLBACK(heartbeat);
CALLBACK(hs_service);
CALLBACK(launch_descriptor_fetches);
-CALLBACK(launch_reachability_tests);
-CALLBACK(reachability_warnings);
+CALLBACK(prune_old_routers);
CALLBACK(record_bridge_stats);
CALLBACK(rend_cache_failure_clean);
CALLBACK(reset_padding_counts);
-CALLBACK(retry_dns);
CALLBACK(retry_listeners);
-CALLBACK(rotate_onion_key);
CALLBACK(rotate_x509_certificate);
-CALLBACK(save_stability);
CALLBACK(save_state);
-CALLBACK(write_bridge_ns);
CALLBACK(write_stats_file);
+CALLBACK(control_per_second_events);
+CALLBACK(second_elapsed);
#undef CALLBACK
/* Now we declare an array of periodic_event_item_t for each periodic event */
-#define CALLBACK(name, r, f) PERIODIC_EVENT(name, r, f)
-
-STATIC periodic_event_item_t periodic_events[] = {
- /* Everyone needs to run those. */
- CALLBACK(add_entropy, PERIODIC_EVENT_ROLE_ALL, 0),
- CALLBACK(check_expired_networkstatus, PERIODIC_EVENT_ROLE_ALL, 0),
- CALLBACK(clean_caches, PERIODIC_EVENT_ROLE_ALL, 0),
- CALLBACK(fetch_networkstatus, PERIODIC_EVENT_ROLE_ALL,
- PERIODIC_EVENT_FLAG_NEED_NET),
- CALLBACK(heartbeat, PERIODIC_EVENT_ROLE_ALL, 0),
- CALLBACK(launch_descriptor_fetches, PERIODIC_EVENT_ROLE_ALL,
- PERIODIC_EVENT_FLAG_NEED_NET),
- CALLBACK(reset_padding_counts, PERIODIC_EVENT_ROLE_ALL, 0),
- CALLBACK(retry_listeners, PERIODIC_EVENT_ROLE_ALL,
- PERIODIC_EVENT_FLAG_NEED_NET),
- CALLBACK(save_state, PERIODIC_EVENT_ROLE_ALL, 0),
- CALLBACK(rotate_x509_certificate, PERIODIC_EVENT_ROLE_ALL, 0),
- CALLBACK(write_stats_file, PERIODIC_EVENT_ROLE_ALL, 0),
-
- /* Routers (bridge and relay) only. */
- CALLBACK(check_descriptor, PERIODIC_EVENT_ROLE_ROUTER,
- PERIODIC_EVENT_FLAG_NEED_NET),
- CALLBACK(check_ed_keys, PERIODIC_EVENT_ROLE_ROUTER, 0),
- CALLBACK(check_for_reachability_bw, PERIODIC_EVENT_ROLE_ROUTER,
- PERIODIC_EVENT_FLAG_NEED_NET),
- CALLBACK(check_onion_keys_expiry_time, PERIODIC_EVENT_ROLE_ROUTER, 0),
- CALLBACK(expire_old_ciruits_serverside, PERIODIC_EVENT_ROLE_ROUTER,
- PERIODIC_EVENT_FLAG_NEED_NET),
- CALLBACK(reachability_warnings, PERIODIC_EVENT_ROLE_ROUTER,
- PERIODIC_EVENT_FLAG_NEED_NET),
- CALLBACK(retry_dns, PERIODIC_EVENT_ROLE_ROUTER, 0),
- CALLBACK(rotate_onion_key, PERIODIC_EVENT_ROLE_ROUTER, 0),
-
- /* Authorities (bridge and directory) only. */
- CALLBACK(downrate_stability, PERIODIC_EVENT_ROLE_AUTHORITIES, 0),
- CALLBACK(launch_reachability_tests, PERIODIC_EVENT_ROLE_AUTHORITIES,
- PERIODIC_EVENT_FLAG_NEED_NET),
- CALLBACK(save_stability, PERIODIC_EVENT_ROLE_AUTHORITIES, 0),
-
- /* Directory authority only. */
- CALLBACK(check_authority_cert, PERIODIC_EVENT_ROLE_DIRAUTH, 0),
- CALLBACK(dirvote, PERIODIC_EVENT_ROLE_DIRAUTH, PERIODIC_EVENT_FLAG_NEED_NET),
-
- /* Relay only. */
- CALLBACK(check_canonical_channels, PERIODIC_EVENT_ROLE_RELAY,
- PERIODIC_EVENT_FLAG_NEED_NET),
- CALLBACK(check_dns_honesty, PERIODIC_EVENT_ROLE_RELAY,
- PERIODIC_EVENT_FLAG_NEED_NET),
+#define CALLBACK(name, r, f) \
+ PERIODIC_EVENT(name, PERIODIC_EVENT_ROLE_ ## r, f)
+#define FL(name) (PERIODIC_EVENT_FLAG_ ## name)
+#endif /* !defined(COCCI) */
+
+STATIC periodic_event_item_t mainloop_periodic_events[] = {
+
+ /* Everyone needs to run these. They need to have very long timeouts for
+ * that to be safe. */
+ CALLBACK(add_entropy, ALL, 0),
+ CALLBACK(heartbeat, ALL, 0),
+ CALLBACK(reset_padding_counts, ALL, 0),
+
+ /* This is a legacy catch-all callback that runs once per second if
+ * we are online and active. */
+ CALLBACK(second_elapsed, NET_PARTICIPANT,
+ FL(RUN_ON_DISABLE)),
+
+ /* XXXX Do we have a reason to do this on a callback? Does it do any good at
+ * all? For now, if we're dormant, we can let our listeners decay. */
+ CALLBACK(retry_listeners, NET_PARTICIPANT, FL(NEED_NET)),
+
+ /* We need to do these if we're participating in the Tor network. */
+ CALLBACK(check_expired_networkstatus, NET_PARTICIPANT, 0),
+ CALLBACK(fetch_networkstatus, NET_PARTICIPANT, 0),
+ CALLBACK(launch_descriptor_fetches, NET_PARTICIPANT, FL(NEED_NET)),
+ CALLBACK(rotate_x509_certificate, NET_PARTICIPANT, 0),
+ CALLBACK(check_network_participation, NET_PARTICIPANT, 0),
+
+ /* We need to do these if we're participating in the Tor network, and
+ * immediately before we stop. */
+ CALLBACK(clean_caches, NET_PARTICIPANT, FL(RUN_ON_DISABLE)),
+ CALLBACK(save_state, NET_PARTICIPANT, FL(RUN_ON_DISABLE)),
+ CALLBACK(write_stats_file, NET_PARTICIPANT, FL(RUN_ON_DISABLE)),
+ CALLBACK(prune_old_routers, NET_PARTICIPANT, FL(RUN_ON_DISABLE)),
/* Hidden Service service only. */
- CALLBACK(hs_service, PERIODIC_EVENT_ROLE_HS_SERVICE,
- PERIODIC_EVENT_FLAG_NEED_NET),
+ CALLBACK(hs_service, HS_SERVICE, FL(NEED_NET)), // XXXX break this down more
/* Bridge only. */
- CALLBACK(record_bridge_stats, PERIODIC_EVENT_ROLE_BRIDGE, 0),
+ CALLBACK(record_bridge_stats, BRIDGE, 0),
/* Client only. */
- CALLBACK(rend_cache_failure_clean, PERIODIC_EVENT_ROLE_CLIENT, 0),
-
- /* Bridge Authority only. */
- CALLBACK(write_bridge_ns, PERIODIC_EVENT_ROLE_BRIDGEAUTH, 0),
+ /* XXXX this could be restricted to CLIENT+NET_PARTICIPANT */
+ CALLBACK(rend_cache_failure_clean, NET_PARTICIPANT, FL(RUN_ON_DISABLE)),
/* Directory server only. */
- CALLBACK(clean_consdiffmgr, PERIODIC_EVENT_ROLE_DIRSERVER, 0),
+ CALLBACK(clean_consdiffmgr, DIRSERVER, 0),
+
+ /* Controller with per-second events only. */
+ CALLBACK(control_per_second_events, CONTROLEV, 0),
END_OF_PERIODIC_EVENTS
};
+#ifndef COCCI
#undef CALLBACK
+#undef FL
+#endif
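To make the new table format concrete, here is a hypothetical sketch of what registering one more mainloop-managed periodic event would look like under this scheme; the event name and body are invented for illustration and are not part of this patch:

    /* 1) Declare the callback, using the declaration form of CALLBACK(): */
    CALLBACK(frob_widgets);

    /* 2) Add a row to mainloop_periodic_events[], picking a role and flags: */
    CALLBACK(frob_widgets, NET_PARTICIPANT, FL(NEED_NET)),

    /* 3) Define the callback; it returns the number of seconds until its
     *    next run, or PERIODIC_EVENT_NO_UPDATE to keep the current schedule. */
    static int
    frob_widgets_callback(time_t now, const or_options_t *options)
    {
      (void)now;
      (void)options;
      /* ...do the periodic work here... */
      return 600;
    }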
/* These are pointers to members of periodic_events[] that are used to
* implement particular callbacks. We keep them separate here so that we
* can access them by name. We also keep them inside periodic_events[]
* so that we can implement "reset all timers" in a reasonable way. */
-static periodic_event_item_t *check_descriptor_event=NULL;
-static periodic_event_item_t *dirvote_event=NULL;
static periodic_event_item_t *fetch_networkstatus_event=NULL;
static periodic_event_item_t *launch_descriptor_fetches_event=NULL;
static periodic_event_item_t *check_dns_honesty_event=NULL;
static periodic_event_item_t *save_state_event=NULL;
+static periodic_event_item_t *prune_old_routers_event=NULL;
/** Reset all the periodic events so we'll do all our actions again as if we
* just started up.
@@ -1483,24 +1456,7 @@ static periodic_event_item_t *save_state_event=NULL;
void
reset_all_main_loop_timers(void)
{
- int i;
- for (i = 0; periodic_events[i].name; ++i) {
- periodic_event_reschedule(&periodic_events[i]);
- }
-}
-
-/** Return the member of periodic_events[] whose name is <b>name</b>.
- * Return NULL if no such event is found.
- */
-static periodic_event_item_t *
-find_periodic_event(const char *name)
-{
- int i;
- for (i = 0; periodic_events[i].name; ++i) {
- if (strcmp(name, periodic_events[i].name) == 0)
- return &periodic_events[i];
- }
- return NULL;
+ periodic_events_reset_all();
}
/** Return a bitmask of the roles this tor instance is configured for using
@@ -1510,7 +1466,7 @@ get_my_roles(const or_options_t *options)
{
tor_assert(options);
- int roles = 0;
+ int roles = PERIODIC_EVENT_ROLE_ALL;
int is_bridge = options->BridgeRelay;
int is_relay = server_mode(options);
int is_dirauth = authdir_mode_v3(options);
@@ -1518,6 +1474,8 @@ get_my_roles(const or_options_t *options)
int is_hidden_service = !!hs_service_get_num_services() ||
!!rend_num_services();
int is_dirserver = dir_server_mode(options);
+ int sending_control_events = control_any_per_second_event_enabled();
+
/* We also consider tor to have the role of a client if the ControlPort is
* set because a lot of things can be done over the control port which
* requires tor to have basic functionnalities. */
@@ -1525,6 +1483,9 @@ get_my_roles(const or_options_t *options)
options->ControlPort_set ||
options->OwningControllerFD != UINT64_MAX;
+ int is_net_participant = is_participating_on_network() ||
+ is_relay || is_hidden_service;
+
if (is_bridge) roles |= PERIODIC_EVENT_ROLE_BRIDGE;
if (is_client) roles |= PERIODIC_EVENT_ROLE_CLIENT;
if (is_relay) roles |= PERIODIC_EVENT_ROLE_RELAY;
@@ -1532,6 +1493,8 @@ get_my_roles(const or_options_t *options)
if (is_bridgeauth) roles |= PERIODIC_EVENT_ROLE_BRIDGEAUTH;
if (is_hidden_service) roles |= PERIODIC_EVENT_ROLE_HS_SERVICE;
if (is_dirserver) roles |= PERIODIC_EVENT_ROLE_DIRSERVER;
+ if (is_net_participant) roles |= PERIODIC_EVENT_ROLE_NET_PARTICIPANT;
+ if (sending_control_events) roles |= PERIODIC_EVENT_ROLE_CONTROLEV;
return roles;
}
@@ -1556,9 +1519,9 @@ initialize_periodic_events_cb(evutil_socket_t fd, short events, void *data)
rescan_periodic_events(get_options());
}
-/** Set up all the members of periodic_events[], and configure them all to be
- * launched from a callback. */
-STATIC void
+/** Set up all the members of mainloop_periodic_events[], and configure them
+ * all to be launched from a callback. */
+void
initialize_periodic_events(void)
{
if (periodic_events_initialized)
@@ -1566,39 +1529,60 @@ initialize_periodic_events(void)
periodic_events_initialized = 1;
- /* Set up all periodic events. We'll launch them by roles. */
- int i;
- for (i = 0; periodic_events[i].name; ++i) {
- periodic_event_setup(&periodic_events[i]);
+ for (int i = 0; mainloop_periodic_events[i].name; ++i) {
+ periodic_events_register(&mainloop_periodic_events[i]);
}
+ /* Set up all periodic events. We'll launch them by roles. */
+
+#ifndef COCCI
#define NAMED_CALLBACK(name) \
- STMT_BEGIN name ## _event = find_periodic_event( #name ); STMT_END
+ STMT_BEGIN name ## _event = periodic_events_find( #name ); STMT_END
+#endif
- NAMED_CALLBACK(check_descriptor);
- NAMED_CALLBACK(dirvote);
+ NAMED_CALLBACK(prune_old_routers);
NAMED_CALLBACK(fetch_networkstatus);
NAMED_CALLBACK(launch_descriptor_fetches);
NAMED_CALLBACK(check_dns_honesty);
NAMED_CALLBACK(save_state);
-
- struct timeval one_second = { 1, 0 };
- initialize_periodic_events_event = tor_evtimer_new(
- tor_libevent_get_base(),
- initialize_periodic_events_cb, NULL);
- event_add(initialize_periodic_events_event, &one_second);
}
STATIC void
teardown_periodic_events(void)
{
- int i;
- for (i = 0; periodic_events[i].name; ++i) {
- periodic_event_destroy(&periodic_events[i]);
- }
+ periodic_events_disconnect_all();
+ fetch_networkstatus_event = NULL;
+ launch_descriptor_fetches_event = NULL;
+ check_dns_honesty_event = NULL;
+ save_state_event = NULL;
+ prune_old_routers_event = NULL;
periodic_events_initialized = 0;
}
+static mainloop_event_t *rescan_periodic_events_ev = NULL;
+
+/** Callback: rescan the periodic event list. */
+static void
+rescan_periodic_events_cb(mainloop_event_t *event, void *arg)
+{
+ (void)event;
+ (void)arg;
+ rescan_periodic_events(get_options());
+}
+
+/**
+ * Schedule an event that will rescan which periodic events should run.
+ **/
+MOCK_IMPL(void,
+schedule_rescan_periodic_events,(void))
+{
+ if (!rescan_periodic_events_ev) {
+ rescan_periodic_events_ev =
+ mainloop_event_new(rescan_periodic_events_cb, NULL);
+ }
+ mainloop_event_activate(rescan_periodic_events_ev);
+}
+
/** Do a pass at all our periodic events, disable those we don't need anymore
* and enable those we need now using the given options. */
void
@@ -1606,36 +1590,7 @@ rescan_periodic_events(const or_options_t *options)
{
tor_assert(options);
- /* Avoid scanning the event list if we haven't initialized it yet. This is
- * particularly useful for unit tests in order to avoid initializing main
- * loop events everytime. */
- if (!periodic_events_initialized) {
- return;
- }
-
- int roles = get_my_roles(options);
-
- for (int i = 0; periodic_events[i].name; ++i) {
- periodic_event_item_t *item = &periodic_events[i];
-
- int enable = !!(item->roles & roles);
-
- /* Handle the event flags. */
- if (net_is_disabled() &&
- (item->flags & PERIODIC_EVENT_FLAG_NEED_NET)) {
- enable = 0;
- }
-
- /* Enable the event if needed. It is safe to enable an event that was
- * already enabled. Same goes for disabling it. */
- if (enable) {
- log_debug(LD_GENERAL, "Launching periodic event %s", item->name);
- periodic_event_enable(item);
- } else {
- log_debug(LD_GENERAL, "Disabling periodic event %s", item->name);
- periodic_event_disable(item);
- }
- }
+ periodic_events_rescan_by_roles(get_my_roles(options), net_is_disabled());
}
/* We just got new options globally set, see if we need to enabled or disable
@@ -1643,26 +1598,7 @@ rescan_periodic_events(const or_options_t *options)
void
periodic_events_on_new_options(const or_options_t *options)
{
- /* Only if we've already initialized the events, rescan the list which will
- * enable or disable events depending on our roles. This will be called at
- * bootup and we don't want this function to initialize the events because
- * they aren't set up at this stage. */
- if (periodic_events_initialized) {
- rescan_periodic_events(options);
- }
-}
-
-/**
- * Update our schedule so that we'll check whether we need to update our
- * descriptor immediately, rather than after up to CHECK_DESCRIPTOR_INTERVAL
- * seconds.
- */
-void
-reschedule_descriptor_update_check(void)
-{
- if (check_descriptor_event) {
- periodic_event_reschedule(check_descriptor_event);
- }
+ rescan_periodic_events(options);
}
/**
@@ -1708,40 +1644,41 @@ mainloop_schedule_postloop_cleanup(void)
mainloop_event_activate(postloop_cleanup_ev);
}
-#define LONGEST_TIMER_PERIOD (30 * 86400)
-/** Helper: Return the number of seconds between <b>now</b> and <b>next</b>,
- * clipped to the range [1 second, LONGEST_TIMER_PERIOD]. */
-static inline int
-safe_timer_diff(time_t now, time_t next)
-{
- if (next > now) {
- /* There were no computers at signed TIME_MIN (1902 on 32-bit systems),
- * and nothing that could run Tor. It's a bug if 'next' is around then.
- * On 64-bit systems with signed TIME_MIN, TIME_MIN is before the Big
- * Bang. We cannot extrapolate past a singularity, but there was probably
- * nothing that could run Tor then, either.
- **/
- tor_assert(next > TIME_MIN + LONGEST_TIMER_PERIOD);
-
- if (next - LONGEST_TIMER_PERIOD > now)
- return LONGEST_TIMER_PERIOD;
- return (int)(next - now);
- } else {
- return 1;
+/** Event to run 'scheduled_shutdown_cb' */
+static mainloop_event_t *scheduled_shutdown_ev=NULL;
+
+/** Callback: run a scheduled shutdown */
+static void
+scheduled_shutdown_cb(mainloop_event_t *ev, void *arg)
+{
+ (void)ev;
+ (void)arg;
+ log_notice(LD_GENERAL, "Clean shutdown finished. Exiting.");
+ tor_shutdown_event_loop_and_exit(0);
+}
+
+/** Schedule the mainloop to exit after <b>delay_sec</b> seconds. */
+void
+mainloop_schedule_shutdown(int delay_sec)
+{
+ const struct timeval delay_tv = { delay_sec, 0 };
+ if (! scheduled_shutdown_ev) {
+ scheduled_shutdown_ev = mainloop_event_new(scheduled_shutdown_cb, NULL);
}
+ mainloop_event_schedule(scheduled_shutdown_ev, &delay_tv);
}
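A hypothetical caller that wants a clean exit in about half a minute would simply do the following (the delay is an arbitrary example value):

    /* Hypothetical: ask the mainloop to finish up and exit in ~30 seconds. */
    mainloop_schedule_shutdown(30);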
/** Perform regular maintenance tasks. This function gets run once per
- * second by second_elapsed_callback().
+ * second.
*/
-static void
-run_scheduled_events(time_t now)
+static int
+second_elapsed_callback(time_t now, const or_options_t *options)
{
- const or_options_t *options = get_options();
-
- /* 0. See if we've been asked to shut down and our timeout has
- * expired; or if our bandwidth limits are exhausted and we
- * should hibernate; or if it's time to wake up from hibernation.
+ /* 0. See if our bandwidth limits are exhausted and we should hibernate
+ *
+ * Note: we have redundant mechanisms to handle the case where it's
+ * time to wake up from hibernation; or where we have a scheduled
+ * shutdown and it's time to run it, but this will also handle those.
*/
consider_hibernation(now);
@@ -1751,10 +1688,13 @@ run_scheduled_events(time_t now)
if (options->UseBridges && !net_is_disabled()) {
/* Note: this check uses net_is_disabled(), not should_delay_dir_fetches()
* -- the latter is only for fetching consensus-derived directory info. */
+ // TODO: client
+ // Also, schedule this rather than probing 1x / sec
fetch_bridge_descriptors(options, now);
}
if (accounting_is_enabled(options)) {
+ // TODO: refactor or rewrite?
accounting_run_housekeeping(now);
}
@@ -1765,6 +1705,7 @@ run_scheduled_events(time_t now)
*/
/* (If our circuit build timeout can ever become lower than a second (which
* it can't, currently), we should do this more often.) */
+ // TODO: All expire stuff can become NET_PARTICIPANT, RUN_ON_DISABLE
circuit_expire_building();
circuit_expire_waiting_for_better_guard();
@@ -1798,80 +1739,8 @@ run_scheduled_events(time_t now)
run_connection_housekeeping(i, now);
}
- /* 11b. check pending unconfigured managed proxies */
- if (!net_is_disabled() && pt_proxies_configuration_pending())
- pt_configure_remaining_proxies();
-}
-
-/* Periodic callback: rotate the onion keys after the period defined by the
- * "onion-key-rotation-days" consensus parameter, shut down and restart all
- * cpuworkers, and update our descriptor if necessary.
- */
-static int
-rotate_onion_key_callback(time_t now, const or_options_t *options)
-{
- if (server_mode(options)) {
- int onion_key_lifetime = get_onion_key_lifetime();
- time_t rotation_time = get_onion_key_set_at()+onion_key_lifetime;
- if (rotation_time > now) {
- return ONION_KEY_CONSENSUS_CHECK_INTERVAL;
- }
-
- log_info(LD_GENERAL,"Rotating onion key.");
- rotate_onion_key();
- cpuworkers_rotate_keyinfo();
- if (router_rebuild_descriptor(1)<0) {
- log_info(LD_CONFIG, "Couldn't rebuild router descriptor");
- }
- if (advertised_server_mode() && !net_is_disabled())
- router_upload_dir_desc_to_dirservers(0);
- return ONION_KEY_CONSENSUS_CHECK_INTERVAL;
- }
- return PERIODIC_EVENT_NO_UPDATE;
-}
-
-/* Period callback: Check if our old onion keys are still valid after the
- * period of time defined by the consensus parameter
- * "onion-key-grace-period-days", otherwise expire them by setting them to
- * NULL.
- */
-static int
-check_onion_keys_expiry_time_callback(time_t now, const or_options_t *options)
-{
- if (server_mode(options)) {
- int onion_key_grace_period = get_onion_key_grace_period();
- time_t expiry_time = get_onion_key_set_at()+onion_key_grace_period;
- if (expiry_time > now) {
- return ONION_KEY_CONSENSUS_CHECK_INTERVAL;
- }
-
- log_info(LD_GENERAL, "Expiring old onion keys.");
- expire_old_onion_keys();
- cpuworkers_rotate_keyinfo();
- return ONION_KEY_CONSENSUS_CHECK_INTERVAL;
- }
-
- return PERIODIC_EVENT_NO_UPDATE;
-}
-
-/* Periodic callback: Every 30 seconds, check whether it's time to make new
- * Ed25519 subkeys.
- */
-static int
-check_ed_keys_callback(time_t now, const or_options_t *options)
-{
- if (server_mode(options)) {
- if (should_make_new_ed_keys(options, now)) {
- int new_signing_key = load_ed_keys(options, now);
- if (new_signing_key < 0 ||
- generate_ed_link_cert(options, now, new_signing_key > 0)) {
- log_err(LD_OR, "Unable to update Ed25519 keys! Exiting.");
- tor_shutdown_event_loop_and_exit(1);
- }
- }
- return 30;
- }
- return PERIODIC_EVENT_NO_UPDATE;
+ /* Run again in a second. */
+ return 1;
}
/**
@@ -1947,100 +1816,53 @@ add_entropy_callback(time_t now, const or_options_t *options)
return ENTROPY_INTERVAL;
}
-/**
- * Periodic callback: if we're an authority, make sure we test
- * the routers on the network for reachability.
- */
-static int
-launch_reachability_tests_callback(time_t now, const or_options_t *options)
+/** Periodic callback: if there has been no network usage in a while,
+ * enter a dormant state. */
+STATIC int
+check_network_participation_callback(time_t now, const or_options_t *options)
{
- if (authdir_mode_tests_reachability(options) &&
- !net_is_disabled()) {
- /* try to determine reachability of the other Tor relays */
- dirserv_test_reachability(now);
+ /* If we're a server, we can't become dormant. */
+ if (server_mode(options)) {
+ goto found_activity;
}
- return REACHABILITY_TEST_INTERVAL;
-}
-/**
- * Periodic callback: if we're an authority, discount the stability
- * information (and other rephist information) that's older.
- */
-static int
-downrate_stability_callback(time_t now, const or_options_t *options)
-{
- (void)options;
- /* 1d. Periodically, we discount older stability information so that new
- * stability info counts more, and save the stability information to disk as
- * appropriate. */
- time_t next = rep_hist_downrate_old_runs(now);
- return safe_timer_diff(now, next);
-}
+ /* If we're running an onion service, we can't become dormant. */
+  /* XXXX It would be nice to change this, so that we can be dormant with a
+   * service. */
+ if (hs_service_get_num_services() || rend_num_services()) {
+ goto found_activity;
+ }
-/**
- * Periodic callback: if we're an authority, record our measured stability
- * information from rephist in an mtbf file.
- */
-static int
-save_stability_callback(time_t now, const or_options_t *options)
-{
- if (authdir_mode_tests_reachability(options)) {
- if (rep_hist_record_mtbf_data(now, 1)<0) {
- log_warn(LD_GENERAL, "Couldn't store mtbf data.");
+ /* If we have any currently open entry streams other than "linked"
+ * connections used for directory requests, those count as user activity.
+ */
+ if (options->DormantTimeoutDisabledByIdleStreams) {
+ if (connection_get_by_type_nonlinked(CONN_TYPE_AP) != NULL) {
+ goto found_activity;
}
}
-#define SAVE_STABILITY_INTERVAL (30*60)
- return SAVE_STABILITY_INTERVAL;
-}
-/**
- * Periodic callback: if we're an authority, check on our authority
- * certificate (the one that authenticates our authority signing key).
- */
-static int
-check_authority_cert_callback(time_t now, const or_options_t *options)
-{
- (void)now;
- (void)options;
- /* 1e. Periodically, if we're a v3 authority, we check whether our cert is
- * close to expiring and warn the admin if it is. */
- v3_authority_check_key_expiry();
-#define CHECK_V3_CERTIFICATE_INTERVAL (5*60)
- return CHECK_V3_CERTIFICATE_INTERVAL;
-}
+ /* XXXX Make this configurable? */
+/** How often do we check whether we have had network activity? */
+#define CHECK_PARTICIPATION_INTERVAL (5*60)
-/**
- * Scheduled callback: Run directory-authority voting functionality.
- *
- * The schedule is a bit complicated here, so dirvote_act() manages the
- * schedule itself.
- **/
-static int
-dirvote_callback(time_t now, const or_options_t *options)
-{
- if (!authdir_mode_v3(options)) {
- tor_assert_nonfatal_unreached();
- return 3600;
+ /* Become dormant if there has been no user activity in a long time.
+ * (The funny checks below are in order to prevent overflow.) */
+ time_t time_since_last_activity = 0;
+ if (get_last_user_activity_time() < now)
+ time_since_last_activity = now - get_last_user_activity_time();
+ if (time_since_last_activity >= options->DormantClientTimeout) {
+ log_notice(LD_GENERAL, "No user activity in a long time: becoming"
+ " dormant.");
+ set_network_participation(false);
+ rescan_periodic_events(options);
}
- time_t next = dirvote_act(options, now);
- if (BUG(next == TIME_MAX)) {
- /* This shouldn't be returned unless we called dirvote_act() without
- * being an authority. If it happens, maybe our configuration will
- * fix itself in an hour or so? */
- return 3600;
- }
- return safe_timer_diff(now, next);
-}
+ return CHECK_PARTICIPATION_INTERVAL;
-/** Reschedule the directory-authority voting event. Run this whenever the
- * schedule has changed. */
-void
-reschedule_dirvote(const or_options_t *options)
-{
- if (periodic_events_initialized && authdir_mode_v3(options)) {
- periodic_event_reschedule(dirvote_event);
- }
+ found_activity:
+ note_user_activity(now);
+ return CHECK_PARTICIPATION_INTERVAL;
}
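
For the timeout above to matter, code that handles genuine user requests must refresh the activity timestamp. A minimal sketch of such a call site follows; the handler function is invented for illustration, and only note_user_activity() and approx_time() are existing interfaces.

#include "core/mainloop/netstatus.h"
#include "lib/wallclock/approx_time.h"

/* Illustrative only: a handler for some user-initiated request. */
static void
handle_new_client_request(void)
{
  /* Refresh the "last user activity" timestamp. If we had gone dormant,
   * note_user_activity() also flips us back to participating and
   * reschedules the periodic events. */
  note_user_activity(approx_time());

  /* ... go on to service the request ... */
}
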
/**
@@ -2053,11 +1875,9 @@ check_expired_networkstatus_callback(time_t now, const or_options_t *options)
(void)options;
/* Check whether our networkstatus has expired. */
networkstatus_t *ns = networkstatus_get_latest_consensus();
- /*XXXX RD: This value needs to be the same as REASONABLY_LIVE_TIME in
- * networkstatus_get_reasonably_live_consensus(), but that value is way
- * way too high. Arma: is the bridge issue there resolved yet? -NM */
-#define NS_EXPIRY_SLOP (24*60*60)
- if (ns && ns->valid_until < (now - NS_EXPIRY_SLOP) &&
+ /* Use reasonably live consensuses until they are no longer reasonably live.
+ */
+ if (ns && !networkstatus_consensus_reasonably_live(ns, now) &&
router_have_minimum_dir_info()) {
router_dir_info_changed();
}
@@ -2143,17 +1963,6 @@ write_stats_file_callback(time_t now, const or_options_t *options)
return safe_timer_diff(now, next_time_to_write_stats_files);
}
-#define CHANNEL_CHECK_INTERVAL (60*60)
-static int
-check_canonical_channels_callback(time_t now, const or_options_t *options)
-{
- (void)now;
- if (public_server_mode(options))
- channel_check_for_duplicates();
-
- return CHANNEL_CHECK_INTERVAL;
-}
-
static int
reset_padding_counts_callback(time_t now, const or_options_t *options)
{
@@ -2228,87 +2037,24 @@ rend_cache_failure_clean_callback(time_t now, const or_options_t *options)
}
/**
- * Periodic callback: If we're a server and initializing dns failed, retry.
+ * Periodic callback: prune the routerlist of old information about the
+ * Tor network.
*/
static int
-retry_dns_callback(time_t now, const or_options_t *options)
+prune_old_routers_callback(time_t now, const or_options_t *options)
{
+#define ROUTERLIST_PRUNING_INTERVAL (60*60) // 1 hour.
(void)now;
-#define RETRY_DNS_INTERVAL (10*60)
- if (server_mode(options) && has_dns_init_failed())
- dns_init();
- return RETRY_DNS_INTERVAL;
-}
-
-/** Periodic callback: consider rebuilding or and re-uploading our descriptor
- * (if we've passed our internal checks). */
-static int
-check_descriptor_callback(time_t now, const or_options_t *options)
-{
-/** How often do we check whether part of our router info has changed in a
- * way that would require an upload? That includes checking whether our IP
- * address has changed. */
-#define CHECK_DESCRIPTOR_INTERVAL (60)
-
(void)options;
- /* 2b. Once per minute, regenerate and upload the descriptor if the old
- * one is inaccurate. */
if (!net_is_disabled()) {
- check_descriptor_bandwidth_changed(now);
- check_descriptor_ipaddress_changed(now);
- mark_my_descriptor_dirty_if_too_old(now);
- consider_publishable_server(0);
/* If any networkstatus documents are no longer recent, we need to
* update all the descriptors' running status. */
/* Remove dead routers. */
- /* XXXX This doesn't belong here, but it was here in the pre-
- * XXXX refactoring code. */
+ log_debug(LD_GENERAL, "Pruning routerlist...");
routerlist_remove_old_routers();
}
- return CHECK_DESCRIPTOR_INTERVAL;
-}
-
-/**
- * Periodic callback: check whether we're reachable (as a relay), and
- * whether our bandwidth has changed enough that we need to
- * publish a new descriptor.
- */
-static int
-check_for_reachability_bw_callback(time_t now, const or_options_t *options)
-{
- /* XXXX This whole thing was stuck in the middle of what is now
- * XXXX check_descriptor_callback. I'm not sure it's right. */
-
- static int dirport_reachability_count = 0;
- /* also, check religiously for reachability, if it's within the first
- * 20 minutes of our uptime. */
- if (server_mode(options) &&
- (have_completed_a_circuit() || !any_predicted_circuits(now)) &&
- !net_is_disabled()) {
- if (get_uptime() < TIMEOUT_UNTIL_UNREACHABILITY_COMPLAINT) {
- router_do_reachability_checks(1, dirport_reachability_count==0);
- if (++dirport_reachability_count > 5)
- dirport_reachability_count = 0;
- return 1;
- } else {
- /* If we haven't checked for 12 hours and our bandwidth estimate is
- * low, do another bandwidth test. This is especially important for
- * bridges, since they might go long periods without much use. */
- const routerinfo_t *me = router_get_my_routerinfo();
- static int first_time = 1;
- if (!first_time && me &&
- me->bandwidthcapacity < me->bandwidthrate &&
- me->bandwidthcapacity < 51200) {
- reset_bandwidth_test();
- }
- first_time = 0;
-#define BANDWIDTH_RECHECK_INTERVAL (12*60*60)
- return BANDWIDTH_RECHECK_INTERVAL;
- }
- }
- return CHECK_DESCRIPTOR_INTERVAL;
+ return ROUTERLIST_PRUNING_INTERVAL;
}
/**
@@ -2322,7 +2068,7 @@ fetch_networkstatus_callback(time_t now, const or_options_t *options)
* documents? */
const int we_are_bootstrapping = networkstatus_consensus_is_bootstrapping(
now);
- const int prefer_mirrors = !directory_fetches_from_authorities(
+ const int prefer_mirrors = !dirclient_fetches_from_authorities(
get_options());
int networkstatus_dl_check_interval = 60;
/* check more often when testing, or when bootstrapping from mirrors
@@ -2353,109 +2099,6 @@ retry_listeners_callback(time_t now, const or_options_t *options)
return PERIODIC_EVENT_NO_UPDATE;
}
-/**
- * Periodic callback: as a server, see if we have any old unused circuits
- * that should be expired */
-static int
-expire_old_ciruits_serverside_callback(time_t now, const or_options_t *options)
-{
- (void)options;
- /* every 11 seconds, so not usually the same second as other such events */
- circuit_expire_old_circuits_serverside(now);
- return 11;
-}
-
-/**
- * Callback: Send warnings if Tor doesn't find its ports reachable.
- */
-static int
-reachability_warnings_callback(time_t now, const or_options_t *options)
-{
- (void) now;
-
- if (get_uptime() < TIMEOUT_UNTIL_UNREACHABILITY_COMPLAINT) {
- return (int)(TIMEOUT_UNTIL_UNREACHABILITY_COMPLAINT - get_uptime());
- }
-
- if (server_mode(options) &&
- !net_is_disabled() &&
- have_completed_a_circuit()) {
- /* every 20 minutes, check and complain if necessary */
- const routerinfo_t *me = router_get_my_routerinfo();
- if (me && !check_whether_orport_reachable(options)) {
- char *address = tor_dup_ip(me->addr);
- log_warn(LD_CONFIG,"Your server (%s:%d) has not managed to confirm that "
- "its ORPort is reachable. Relays do not publish descriptors "
- "until their ORPort and DirPort are reachable. Please check "
- "your firewalls, ports, address, /etc/hosts file, etc.",
- address, me->or_port);
- control_event_server_status(LOG_WARN,
- "REACHABILITY_FAILED ORADDRESS=%s:%d",
- address, me->or_port);
- tor_free(address);
- }
-
- if (me && !check_whether_dirport_reachable(options)) {
- char *address = tor_dup_ip(me->addr);
- log_warn(LD_CONFIG,
- "Your server (%s:%d) has not managed to confirm that its "
- "DirPort is reachable. Relays do not publish descriptors "
- "until their ORPort and DirPort are reachable. Please check "
- "your firewalls, ports, address, /etc/hosts file, etc.",
- address, me->dir_port);
- control_event_server_status(LOG_WARN,
- "REACHABILITY_FAILED DIRADDRESS=%s:%d",
- address, me->dir_port);
- tor_free(address);
- }
- }
-
- return TIMEOUT_UNTIL_UNREACHABILITY_COMPLAINT;
-}
-
-static int dns_honesty_first_time = 1;
-
-/**
- * Periodic event: if we're an exit, see if our DNS server is telling us
- * obvious lies.
- */
-static int
-check_dns_honesty_callback(time_t now, const or_options_t *options)
-{
- (void)now;
- /* 9. and if we're an exit node, check whether our DNS is telling stories
- * to us. */
- if (net_is_disabled() ||
- ! public_server_mode(options) ||
- router_my_exit_policy_is_reject_star())
- return PERIODIC_EVENT_NO_UPDATE;
-
- if (dns_honesty_first_time) {
- /* Don't launch right when we start */
- dns_honesty_first_time = 0;
- return crypto_rand_int_range(60, 180);
- }
-
- dns_launch_correctness_checks();
- return 12*3600 + crypto_rand_int(12*3600);
-}
-
-/**
- * Periodic callback: if we're the bridge authority, write a networkstatus
- * file to disk.
- */
-static int
-write_bridge_ns_callback(time_t now, const or_options_t *options)
-{
- /* 10. write bridge networkstatus file to disk */
- if (options->BridgeAuthoritativeDir) {
- networkstatus_dump_bridge_status_to_file(now);
-#define BRIDGE_STATUSFILE_INTERVAL (30*60)
- return BRIDGE_STATUSFILE_INTERVAL;
- }
- return PERIODIC_EVENT_NO_UPDATE;
-}
-
static int heartbeat_callback_first_time = 1;
/**
@@ -2523,36 +2166,19 @@ hs_service_callback(time_t now, const or_options_t *options)
return 1;
}
-/** Timer: used to invoke second_elapsed_callback() once per second. */
-static periodic_timer_t *second_timer = NULL;
-
-/**
- * Enable or disable the per-second timer as appropriate, creating it if
- * necessary.
+/**
+ * Periodic callback: Send once-per-second events to the controller(s).
+ * This is called every second.
*/
-void
-reschedule_per_second_timer(void)
+static int
+control_per_second_events_callback(time_t now, const or_options_t *options)
{
- struct timeval one_second;
- one_second.tv_sec = 1;
- one_second.tv_usec = 0;
-
- if (! second_timer) {
- second_timer = periodic_timer_new(tor_libevent_get_base(),
- &one_second,
- second_elapsed_callback,
- NULL);
- tor_assert(second_timer);
- }
+ (void) options;
+ (void) now;
- const bool run_per_second_events =
- control_any_per_second_event_enabled() || ! net_is_completely_disabled();
+ control_per_second_events();
- if (run_per_second_events) {
- periodic_timer_launch(second_timer, &one_second);
- } else {
- periodic_timer_disable(second_timer);
- }
+ return 1;
}
/** Last time that update_current_time was called. */
@@ -2582,6 +2208,17 @@ update_current_time(time_t now)
memcpy(&last_updated, &current_second_last_changed, sizeof(last_updated));
monotime_coarse_get(&current_second_last_changed);
+  /** How much clock jumping do we tolerate before adjusting our idea of
+   * when to go dormant? */
+#define NUM_JUMPED_SECONDS_BEFORE_NETSTATUS_UPDATE 20
+
+ /* Don't go dormant early or late just because we jumped in time. */
+ if (ABS(seconds_elapsed) >= NUM_JUMPED_SECONDS_BEFORE_NETSTATUS_UPDATE) {
+ if (is_participating_on_network()) {
+ netstatus_note_clock_jumped(seconds_elapsed);
+ }
+ }
+
/** How much clock jumping do we tolerate? */
#define NUM_JUMPED_SECONDS_BEFORE_WARN 100
@@ -2591,6 +2228,7 @@ update_current_time(time_t now)
if (seconds_elapsed < -NUM_JUMPED_SECONDS_BEFORE_WARN) {
// moving back in time is always a bad sign.
circuit_note_clock_jumped(seconds_elapsed, false);
+
} else if (seconds_elapsed >= NUM_JUMPED_SECONDS_BEFORE_WARN) {
/* Compare the monotonic clock to the result of time(). */
const int32_t monotime_msec_passed =
@@ -2620,31 +2258,6 @@ update_current_time(time_t now)
current_second = now;
}
-/** Libevent callback: invoked once every second. */
-static void
-second_elapsed_callback(periodic_timer_t *timer, void *arg)
-{
- /* XXXX This could be sensibly refactored into multiple callbacks, and we
- * could use Libevent's timers for this rather than checking the current
- * time against a bunch of timeouts every second. */
- time_t now;
- (void)timer;
- (void)arg;
-
- now = time(NULL);
-
- /* We don't need to do this once-per-second any more: time-updating is
- * only in this callback _because it is a callback_. It should be fine
- * to disable this callback, and the time will still get updated.
- */
- update_current_time(now);
-
- /* Maybe some controller events are ready to fire */
- control_per_second_events();
-
- run_scheduled_events(now);
-}
-
#ifdef HAVE_SYSTEMD_209
static periodic_timer_t *systemd_watchdog_timer = NULL;
@@ -2660,18 +2273,23 @@ systemd_watchdog_callback(periodic_timer_t *timer, void *arg)
#define UPTIME_CUTOFF_FOR_NEW_BANDWIDTH_TEST (6*60*60)
-/** Called when our IP address seems to have changed. <b>at_interface</b>
- * should be true if we detected a change in our interface, and false if we
- * detected a change in our published address. */
+/** Called when our IP address seems to have changed. <b>on_client_conn</b>
+ * should be true if:
+ * - we detected a change in our interface address, using an outbound
+ * connection, and therefore
+ * - our client TLS keys need to be rotated.
+ * Otherwise, it should be false, and:
+ * - we detected a change in our published address
+ * (using some other method), and therefore
+ * - the published addresses in our descriptor need to change.
+ */
void
-ip_address_changed(int at_interface)
+ip_address_changed(int on_client_conn)
{
const or_options_t *options = get_options();
int server = server_mode(options);
- int exit_reject_interfaces = (server && options->ExitRelay
- && options->ExitPolicyRejectLocalInterfaces);
- if (at_interface) {
+ if (on_client_conn) {
if (! server) {
/* Okay, change our keys. */
if (init_keys_client() < 0)
@@ -2683,15 +2301,14 @@ ip_address_changed(int at_interface)
reset_bandwidth_test();
reset_uptime();
router_reset_reachability();
+ /* All relays include their IP addresses as their ORPort addresses in
+ * their descriptor.
+ * Exit relays also incorporate interface addresses in their exit
+ * policies, when ExitPolicyRejectLocalInterfaces is set. */
+ mark_my_descriptor_dirty("IP address changed");
}
}
- /* Exit relays incorporate interface addresses in their exit policies when
- * ExitPolicyRejectLocalInterfaces is set */
- if (exit_reject_interfaces || (server && !at_interface)) {
- mark_my_descriptor_dirty("IP address changed");
- }
-
dns_servers_relaunch_checks();
}
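
With the new parameter meaning, callers distinguish the two cases roughly as in this sketch; the wrapper function is invented for illustration, and ip_address_changed() is the real entry point.

#include "core/mainloop/mainloop.h"

/* Illustrative only: the two ways callers report an address change. */
static void
example_report_address_change(bool learned_from_outbound_conn)
{
  if (learned_from_outbound_conn) {
    /* Our interface address changed, as seen on an outbound client
     * connection: rotate the client TLS keys. */
    ip_address_changed(1);
  } else {
    /* Our published address changed (detected some other way): a relay
     * marks its descriptor dirty and republishes it. */
    ip_address_changed(0);
  }
}
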
@@ -2702,8 +2319,7 @@ dns_servers_relaunch_checks(void)
{
if (server_mode(get_options())) {
dns_reset_correctness_checks();
- if (periodic_events_initialized) {
- tor_assert(check_dns_honesty_event);
+ if (check_dns_honesty_event) {
periodic_event_reschedule(check_dns_honesty_event);
}
}
@@ -2713,8 +2329,6 @@ dns_servers_relaunch_checks(void)
void
initialize_mainloop_events(void)
{
- initialize_periodic_events();
-
if (!schedule_active_linked_connections_event) {
schedule_active_linked_connections_event =
mainloop_event_postloop_new(schedule_active_linked_connections_cb, NULL);
@@ -2732,11 +2346,16 @@ do_main_loop(void)
/* initialize the periodic events first, so that code that depends on the
* events being present does not assert.
*/
- initialize_periodic_events();
+ tor_assert(periodic_events_initialized);
initialize_mainloop_events();
- /* set up once-a-second callback. */
- reschedule_per_second_timer();
+ periodic_events_connect_all();
+
+ struct timeval one_second = { 1, 0 };
+ initialize_periodic_events_event = tor_evtimer_new(
+ tor_libevent_get_base(),
+ initialize_periodic_events_cb, NULL);
+ event_add(initialize_periodic_events_event, &one_second);
#ifdef HAVE_SYSTEMD_209
uint64_t watchdog_delay;
@@ -2759,10 +2378,6 @@ do_main_loop(void)
}
}
#endif /* defined(HAVE_SYSTEMD_209) */
-
- main_loop_should_exit = 0;
- main_loop_exit_value = 0;
-
#ifdef ENABLE_RESTART_DEBUGGING
{
static int first_time = 1;
@@ -2786,7 +2401,7 @@ do_main_loop(void)
event_add(tor_shutdown_event_loop_for_restart_event, &restart_after);
}
}
-#endif
+#endif /* defined(ENABLE_RESTART_DEBUGGING) */
return run_main_loop_until_done();
}
@@ -2888,10 +2503,14 @@ run_main_loop_once(void)
*
* Shadow won't invoke this function, so don't fill it up with things.
*/
-static int
+STATIC int
run_main_loop_until_done(void)
{
int loop_result = 1;
+
+ main_loop_should_exit = 0;
+ main_loop_exit_value = 0;
+
do {
loop_result = run_main_loop_once();
} while (loop_result == 1);
@@ -2922,7 +2541,6 @@ tor_mainloop_free_all(void)
smartlist_free(connection_array);
smartlist_free(closeable_connection_lst);
smartlist_free(active_linked_connection_lst);
- periodic_timer_free(second_timer);
teardown_periodic_events();
tor_event_free(shutdown_did_not_work_event);
tor_event_free(initialize_periodic_events_event);
@@ -2930,6 +2548,8 @@ tor_mainloop_free_all(void)
mainloop_event_free(schedule_active_linked_connections_event);
mainloop_event_free(postloop_cleanup_ev);
mainloop_event_free(handle_deferred_signewnym_ev);
+ mainloop_event_free(scheduled_shutdown_ev);
+ mainloop_event_free(rescan_periodic_events_ev);
#ifdef HAVE_SYSTEMD_209
periodic_timer_free(systemd_watchdog_timer);
@@ -2949,7 +2569,6 @@ tor_mainloop_free_all(void)
can_complete_circuits = 0;
quiet_level = 0;
should_init_bridge_stats = 1;
- dns_honesty_first_time = 1;
heartbeat_callback_first_time = 1;
current_second = 0;
memset(&current_second_last_changed, 0,
diff --git a/src/core/mainloop/mainloop.h b/src/core/mainloop/mainloop.h
index c5669fc4e0..1ddfec2162 100644
--- a/src/core/mainloop/mainloop.h
+++ b/src/core/mainloop/mainloop.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -56,15 +56,14 @@ MOCK_DECL(int, connection_count_moribund, (void));
void directory_all_unreachable(time_t now);
void directory_info_has_arrived(time_t now, int from_cache, int suppress_logs);
-void ip_address_changed(int at_interface);
+void ip_address_changed(int on_client_conn);
void dns_servers_relaunch_checks(void);
void reset_all_main_loop_timers(void);
-void reschedule_descriptor_update_check(void);
void reschedule_directory_downloads(void);
void reschedule_or_state_save(void);
-void reschedule_dirvote(const or_options_t *options);
void mainloop_schedule_postloop_cleanup(void);
void rescan_periodic_events(const or_options_t *options);
+MOCK_DECL(void, schedule_rescan_periodic_events,(void));
void update_current_time(time_t now);
@@ -81,34 +80,38 @@ uint64_t get_main_loop_error_count(void);
uint64_t get_main_loop_idle_count(void);
void periodic_events_on_new_options(const or_options_t *options);
-void reschedule_per_second_timer(void);
void do_signewnym(time_t);
time_t get_last_signewnym_time(void);
+void mainloop_schedule_shutdown(int delay_sec);
+
void tor_init_connection_lists(void);
void initialize_mainloop_events(void);
+void initialize_periodic_events(void);
void tor_mainloop_free_all(void);
struct token_bucket_rw_t;
extern time_t time_of_process_start;
-extern int quiet_level;
extern struct token_bucket_rw_t global_bucket;
extern struct token_bucket_rw_t global_relayed_bucket;
#ifdef MAINLOOP_PRIVATE
+STATIC int run_main_loop_until_done(void);
STATIC void close_closeable_connections(void);
-STATIC void initialize_periodic_events(void);
STATIC void teardown_periodic_events(void);
STATIC int get_my_roles(const or_options_t *);
+STATIC int check_network_participation_callback(time_t now,
+ const or_options_t *options);
+
#ifdef TOR_UNIT_TESTS
extern smartlist_t *connection_array;
/* We need the periodic_event_item_t definition. */
#include "core/mainloop/periodic.h"
-extern periodic_event_item_t periodic_events[];
-#endif
-#endif /* defined(MAIN_PRIVATE) */
+extern periodic_event_item_t mainloop_periodic_events[];
+#endif /* defined(TOR_UNIT_TESTS) */
+#endif /* defined(MAINLOOP_PRIVATE) */
-#endif
+#endif /* !defined(TOR_MAINLOOP_H) */
diff --git a/src/core/mainloop/mainloop_pubsub.c b/src/core/mainloop/mainloop_pubsub.c
new file mode 100644
index 0000000000..0e982d4c40
--- /dev/null
+++ b/src/core/mainloop/mainloop_pubsub.c
@@ -0,0 +1,179 @@
+/* Copyright (c) 2001, Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * @file mainloop_pubsub.c
+ * @brief Connect the publish-subscribe code to the main-loop.
+ *
+ * This module is responsible for instantiating all the channels used by the
+ * publish-subscribe code, and making sure that each one's messages are
+ * processed when appropriate.
+ **/
+
+#include "orconfig.h"
+
+#include "core/or/or.h"
+#include "core/mainloop/mainloop.h"
+#include "core/mainloop/mainloop_pubsub.h"
+
+#include "lib/container/smartlist.h"
+#include "lib/dispatch/dispatch.h"
+#include "lib/dispatch/dispatch_naming.h"
+#include "lib/evloop/compat_libevent.h"
+#include "lib/pubsub/pubsub.h"
+#include "lib/pubsub/pubsub_build.h"
+
+/**
+ * Dispatcher to use for delivering messages.
+ **/
+static dispatch_t *the_dispatcher = NULL;
+static pubsub_items_t *the_pubsub_items = NULL;
+/**
+ * A list of mainloop_event_t, indexed by channel ID, to flush the messages
+ * on a channel.
+ **/
+static smartlist_t *alert_events = NULL;
+
+/**
+ * Mainloop event callback: flush all the messages in a channel.
+ *
+ * The channel is encoded as a pointer, and passed via arg.
+ **/
+static void
+flush_channel_event(mainloop_event_t *ev, void *arg)
+{
+ (void)ev;
+ if (!the_dispatcher)
+ return;
+
+ channel_id_t chan = (channel_id_t)(uintptr_t)(arg);
+ dispatch_flush(the_dispatcher, chan, INT_MAX);
+}
+
+/**
+ * Construct our global pubsub object from <b>builder</b>. Return 0 on
+ * success, -1 on failure. */
+int
+tor_mainloop_connect_pubsub(struct pubsub_builder_t *builder)
+{
+ int rv = -1;
+ tor_mainloop_disconnect_pubsub();
+
+ the_dispatcher = pubsub_builder_finalize(builder, &the_pubsub_items);
+ if (! the_dispatcher)
+ goto err;
+
+ rv = 0;
+ goto done;
+ err:
+ tor_mainloop_disconnect_pubsub();
+ done:
+ return rv;
+}
+
+/**
+ * Install libevent events for all of the pubsub channels.
+ *
+ * Invoke this after tor_mainloop_connect_pubsub, and after libevent has been
+ * initialized.
+ */
+void
+tor_mainloop_connect_pubsub_events(void)
+{
+ tor_assert(the_dispatcher);
+ tor_assert(! alert_events);
+
+ const size_t num_channels = get_num_channel_ids();
+ alert_events = smartlist_new();
+ for (size_t i = 0; i < num_channels; ++i) {
+ smartlist_add(alert_events,
+ mainloop_event_postloop_new(flush_channel_event,
+ (void*)(uintptr_t)(i)));
+ }
+}
+
+/**
+ * Dispatch alertfn callback: do nothing. Implements DELIV_NEVER.
+ **/
+static void
+alertfn_never(dispatch_t *d, channel_id_t chan, void *arg)
+{
+ (void)d;
+ (void)chan;
+ (void)arg;
+}
+
+/**
+ * Dispatch alertfn callback: activate a mainloop event. Implements
+ * DELIV_PROMPT.
+ **/
+static void
+alertfn_prompt(dispatch_t *d, channel_id_t chan, void *arg)
+{
+ (void)d;
+ (void)chan;
+ mainloop_event_t *event = arg;
+ mainloop_event_activate(event);
+}
+
+/**
+ * Dispatch alertfn callback: flush all messages right now. Implements
+ * DELIV_IMMEDIATE.
+ **/
+static void
+alertfn_immediate(dispatch_t *d, channel_id_t chan, void *arg)
+{
+ (void) arg;
+ dispatch_flush(d, chan, INT_MAX);
+}
+
+/**
+ * Set the strategy to be used for delivering messages on the named channel.
+ *
+ * This function needs to be called once globally for each channel, to
+ * set up how messages are delivered.
+ **/
+int
+tor_mainloop_set_delivery_strategy(const char *msg_channel_name,
+ deliv_strategy_t strategy)
+{
+ channel_id_t chan = get_channel_id(msg_channel_name);
+ if (BUG(chan == ERROR_ID) ||
+ BUG(chan >= smartlist_len(alert_events)))
+ return -1;
+
+ switch (strategy) {
+ case DELIV_NEVER:
+ dispatch_set_alert_fn(the_dispatcher, chan, alertfn_never, NULL);
+ break;
+ case DELIV_PROMPT:
+ dispatch_set_alert_fn(the_dispatcher, chan, alertfn_prompt,
+ smartlist_get(alert_events, chan));
+ break;
+ case DELIV_IMMEDIATE:
+ dispatch_set_alert_fn(the_dispatcher, chan, alertfn_immediate, NULL);
+ break;
+ }
+ return 0;
+}
+
+/**
+ * Remove all pubsub dispatchers and events from the mainloop.
+ **/
+void
+tor_mainloop_disconnect_pubsub(void)
+{
+ if (the_pubsub_items) {
+ pubsub_items_clear_bindings(the_pubsub_items);
+ pubsub_items_free(the_pubsub_items);
+ }
+ if (alert_events) {
+ SMARTLIST_FOREACH(alert_events, mainloop_event_t *, ev,
+ mainloop_event_free(ev));
+ smartlist_free(alert_events);
+ }
+ dispatch_free(the_dispatcher);
+}
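
Taken together, the intended call sequence for this file looks roughly like the sketch below. The builder setup and the channel name "orconn" are placeholders; only the three tor_mainloop_*() functions are the ones defined above.

#include "core/mainloop/mainloop_pubsub.h"
#include "lib/pubsub/pubsub_build.h"

/* Illustrative only: wiring the pubsub system into the main loop. */
static void
example_setup_pubsub(struct pubsub_builder_t *builder)
{
  /* Finalize the builder into the global dispatcher. */
  if (tor_mainloop_connect_pubsub(builder) < 0)
    return; /* handle the error */

  /* Requires libevent to be initialized already. */
  tor_mainloop_connect_pubsub_events();

  /* Choose a delivery strategy per channel; "orconn" is a placeholder
   * channel name. */
  tor_mainloop_set_delivery_strategy("orconn", DELIV_PROMPT);
}
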
diff --git a/src/core/mainloop/mainloop_pubsub.h b/src/core/mainloop/mainloop_pubsub.h
new file mode 100644
index 0000000000..3698fd8d03
--- /dev/null
+++ b/src/core/mainloop/mainloop_pubsub.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2001, Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * @file mainloop_pubsub.h
+ * @brief Header for mainloop_pubsub.c
+ **/
+
+#ifndef TOR_MAINLOOP_PUBSUB_H
+#define TOR_MAINLOOP_PUBSUB_H
+
+struct pubsub_builder_t;
+
+/**
+ * Describe when and how messages are delivered on a message channel.
+ *
+ * Every message channel must be associated with one of these strategies.
+ **/
+typedef enum {
+ /**
+ * Never deliver messages automatically.
+ *
+   * If a message channel uses this strategy, then no matter how many
+   * messages are published on it, they are not delivered until something
+   * manually calls dispatch_flush() for that channel.
+ **/
+ DELIV_NEVER=0,
+ /**
+ * Deliver messages promptly, via the event loop.
+ *
+   * If a message channel uses this strategy, then publishing a message on
+   * that channel activates an event that causes messages to be handled
+ * later in the mainloop. The messages will be processed at some point
+ * very soon, delaying only for pending IO events and the like.
+ *
+ * Generally this is the best choice for a delivery strategy, since
+ * it avoids stack explosion.
+ **/
+ DELIV_PROMPT,
+ /**
+ * Deliver messages immediately, skipping the event loop.
+ *
+ * Every event on this channel is flushed immediately after it is queued,
+ * using the stack.
+ *
+ * This delivery type should be used with caution, since it can cause
+ * unexpected call chains, resource starvation, and the like.
+ **/
+ DELIV_IMMEDIATE,
+} deliv_strategy_t;
+
+int tor_mainloop_connect_pubsub(struct pubsub_builder_t *builder);
+void tor_mainloop_connect_pubsub_events(void);
+int tor_mainloop_set_delivery_strategy(const char *msg_channel_name,
+ deliv_strategy_t strategy);
+void tor_mainloop_disconnect_pubsub(void);
+
+#endif /* !defined(TOR_MAINLOOP_PUBSUB_H) */
diff --git a/src/core/mainloop/mainloop_state.inc b/src/core/mainloop/mainloop_state.inc
new file mode 100644
index 0000000000..34a37caaa2
--- /dev/null
+++ b/src/core/mainloop/mainloop_state.inc
@@ -0,0 +1,19 @@
+
+/**
+ * @file mainloop_state.inc
+ * @brief Declare state variables for the mainloop module.
+ **/
+
+/** Holds state for the mainloop, corresponding to part of the state
+ * file in Tor's DataDirectory. */
+BEGIN_CONF_STRUCT(mainloop_state_t)
+
+/** Number of minutes since the last user-initiated request (as defined by
+ * the dormant net-status system). Set to zero if we are dormant. */
+CONF_VAR(MinutesSinceUserActivity, POSINT, 0, NULL)
+
+/** True if we were dormant when we last wrote the file; false if we
+ * weren't. "auto" on initial startup. */
+CONF_VAR(Dormant, AUTOBOOL, 0, "auto")
+
+END_CONF_STRUCT(mainloop_state_t)
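
This .inc file is expanded twice elsewhere in this patch: with CONF_CONTEXT set to STRUCT (in mainloop_state_st.h) to declare the structure, and with CONF_CONTEXT set to TABLE (in mainloop_sys.c) to build the variable table. As a rough sketch only, the STRUCT expansion amounts to something like the following; the exact field types come from lib/conf/confdecl.h, not from this file.

/* Approximately what the CONF_CONTEXT==STRUCT expansion produces. */
struct mainloop_state_t {
  uint32_t magic;               /* checked against MAINLOOP_STATE_MAGIC */
  int MinutesSinceUserActivity; /* POSINT */
  int Dormant;                  /* AUTOBOOL: 1, 0, or -1 for "auto" */
};
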
diff --git a/src/core/mainloop/mainloop_state_st.h b/src/core/mainloop/mainloop_state_st.h
new file mode 100644
index 0000000000..5649b536f9
--- /dev/null
+++ b/src/core/mainloop/mainloop_state_st.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * @file mainloop_state_st.h
+ * @brief Declare a state structure for mainloop-relevant fields
+ **/
+
+#ifndef TOR_CORE_MAINLOOP_MAINLOOP_STATE_ST_H
+#define TOR_CORE_MAINLOOP_MAINLOOP_STATE_ST_H
+
+#include "lib/conf/confdecl.h"
+
+#define CONF_CONTEXT STRUCT
+#include "core/mainloop/mainloop_state.inc"
+#undef CONF_CONTEXT
+
+typedef struct mainloop_state_t mainloop_state_t;
+
+#endif /* !defined(TOR_CORE_MAINLOOP_MAINLOOP_STATE_ST_H) */
diff --git a/src/core/mainloop/mainloop_sys.c b/src/core/mainloop/mainloop_sys.c
new file mode 100644
index 0000000000..884bae1c59
--- /dev/null
+++ b/src/core/mainloop/mainloop_sys.c
@@ -0,0 +1,90 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * @file mainloop_sys.c
+ * @brief Declare the "mainloop" subsystem.
+ **/
+
+#include "core/or/or.h"
+#include "core/mainloop/mainloop_sys.h"
+#include "core/mainloop/mainloop.h"
+#include "core/mainloop/mainloop_state_st.h"
+#include "core/mainloop/netstatus.h"
+#include "lib/conf/conftypes.h"
+#include "lib/conf/confdecl.h"
+
+#include "lib/subsys/subsys.h"
+
+static int
+subsys_mainloop_initialize(void)
+{
+ initialize_periodic_events();
+ return 0;
+}
+
+static void
+subsys_mainloop_shutdown(void)
+{
+ tor_mainloop_free_all();
+}
+
+/** Declare a list of state variables for mainloop state. */
+#define CONF_CONTEXT TABLE
+#include "core/mainloop/mainloop_state.inc"
+#undef CONF_CONTEXT
+
+/** Magic number for mainloop state objects */
+#define MAINLOOP_STATE_MAGIC 0x59455449
+
+/**
+ * Format object for mainloop state.
+ **/
+static config_format_t mainloop_state_fmt = {
+ .size = sizeof(mainloop_state_t),
+ .magic = { "mainloop_state",
+ MAINLOOP_STATE_MAGIC,
+ offsetof(mainloop_state_t, magic)
+ },
+ .vars = mainloop_state_t_vars,
+};
+
+/**
+ * Subsystem hook: restore our network-participation state from the mainloop
+ * state object in <b>arg</b>.
+ */
+static int
+mainloop_set_state(void *arg)
+{
+ const mainloop_state_t *state = arg;
+ tor_assert(state->magic == MAINLOOP_STATE_MAGIC);
+
+ netstatus_load_from_state(state, approx_time());
+
+ return 0;
+}
+
+/**
+ * Subsystem hook: flush our network-participation state into the mainloop
+ * state object in <b>arg</b>.
+ */
+static int
+mainloop_flush_state(void *arg)
+{
+ mainloop_state_t *state = arg;
+ tor_assert(state->magic == MAINLOOP_STATE_MAGIC);
+
+ netstatus_flush_to_state(state, approx_time());
+
+ return 0;
+}
+
+const struct subsys_fns_t sys_mainloop = {
+ .name = "mainloop",
+ SUBSYS_DECLARE_LOCATION(),
+ .supported = true,
+ .level = 5,
+ .initialize = subsys_mainloop_initialize,
+ .shutdown = subsys_mainloop_shutdown,
+
+ .state_format = &mainloop_state_fmt,
+ .set_state = mainloop_set_state,
+ .flush_state = mainloop_flush_state,
+};
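
The structure above is the general pattern for hooking a subsystem into startup, shutdown, and state persistence. Below is a compressed, hypothetical sketch of another subsystem following the same shape; every name is invented for illustration, and the state hooks are simply omitted.

#include "lib/subsys/subsys.h"

/* Hypothetical subsystem: all names are illustrative. */
static int
subsys_example_initialize(void)
{
  return 0;
}

static void
subsys_example_shutdown(void)
{
}

const struct subsys_fns_t sys_example = {
  .name = "example",
  SUBSYS_DECLARE_LOCATION(),
  .supported = true,
  .level = 10,
  .initialize = subsys_example_initialize,
  .shutdown = subsys_example_shutdown,
  /* .state_format, .set_state, and .flush_state are optional: omit them
   * if the subsystem has nothing to persist. */
};
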
diff --git a/src/core/mainloop/mainloop_sys.h b/src/core/mainloop/mainloop_sys.h
new file mode 100644
index 0000000000..b3ade33cd1
--- /dev/null
+++ b/src/core/mainloop/mainloop_sys.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * @file mainloop_sys.h
+ * @brief Header for mainloop_sys.c
+ **/
+
+#ifndef MAINLOOP_SYS_H
+#define MAINLOOP_SYS_H
+
+extern const struct subsys_fns_t sys_mainloop;
+
+#endif /* !defined(MAINLOOP_SYS_H) */
diff --git a/src/core/mainloop/netstatus.c b/src/core/mainloop/netstatus.c
index 1444ca5db2..61a3469eaa 100644
--- a/src/core/mainloop/netstatus.c
+++ b/src/core/mainloop/netstatus.c
@@ -1,14 +1,23 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file netstatus.c
+ * @brief Track whether the network is disabled, dormant, etc.
+ **/
+
#include "core/or/or.h"
#include "core/mainloop/netstatus.h"
+#include "core/mainloop/mainloop.h"
+#include "core/mainloop/mainloop_state_st.h"
#include "app/config/config.h"
#include "feature/hibernate/hibernate.h"
+#include "app/config/or_state_st.h"
+
/** Return true iff our network is in some sense disabled or shutting down:
* either we're hibernating, entering hibernation, or the network is turned
* off with DisableNetwork. */
@@ -26,3 +35,136 @@ net_is_completely_disabled(void)
{
return get_options()->DisableNetwork || we_are_fully_hibernating();
}
+
+/**
+ * The time at which we've last seen "user activity" -- that is, any activity
+ * that should keep us as a participant on the network.
+ *
+ * This is not actually the true time. We will adjust this forward if
+ * our clock jumps, or if Tor is shut down for a while, so that the time
+ * since our last activity remains as it was before the jump or shutdown.
+ */
+static time_t last_user_activity_seen = 0;
+
+/**
+ * True iff we are currently a "network participant" -- that is, we
+ * are building circuits, fetching directory information, and so on.
+ **/
+static bool participating_on_network = false;
+
+/**
+ * Record the fact that we have seen "user activity" at the time now. Move
+ * "last activity seen" time forwards, but never backwards.
+ *
+ * If we were previously not participating on the network, set our
+ * participation status to true, and launch periodic events as appropriate.
+ **/
+void
+note_user_activity(time_t now)
+{
+ last_user_activity_seen = MAX(now, last_user_activity_seen);
+
+ if (! participating_on_network) {
+ log_notice(LD_GENERAL, "Tor is no longer dormant.");
+ set_network_participation(true);
+ schedule_rescan_periodic_events();
+ }
+}
+
+/**
+ * Change the time at which "user activitiy" was last seen to <b>now</b>.
+ *
+ * Unlike note_user_actity, this function sets the time without checking
+ * whether it is in the past, and without causing any rescan of periodic events
+ * or change in participation status.
+ */
+void
+reset_user_activity(time_t now)
+{
+ last_user_activity_seen = now;
+}
+
+/**
+ * Return the most recent time at which we recorded "user activity".
+ **/
+time_t
+get_last_user_activity_time(void)
+{
+ return last_user_activity_seen;
+}
+
+/**
+ * Set the field that remembers whether we are currently participating on the
+ * network. Does not schedule or un-schedule periodic events.
+ **/
+void
+set_network_participation(bool participation)
+{
+ participating_on_network = participation;
+}
+
+/**
+ * Return true iff we are currently participating on the network.
+ **/
+bool
+is_participating_on_network(void)
+{
+ return participating_on_network;
+}
+
+/**
+ * Update 'state' with the last time at which we were active on the network.
+ **/
+void
+netstatus_flush_to_state(mainloop_state_t *state, time_t now)
+{
+ state->Dormant = ! participating_on_network;
+ if (participating_on_network) {
+ time_t sec_since_activity = MAX(0, now - last_user_activity_seen);
+ state->MinutesSinceUserActivity = (int)(sec_since_activity / 60);
+ } else {
+ state->MinutesSinceUserActivity = 0;
+ }
+}
+
+/**
+ * Update our current view of network participation from a
+ * mainloop_state_t object.
+ **/
+void
+netstatus_load_from_state(const mainloop_state_t *state, time_t now)
+{
+ time_t last_activity;
+ if (state->Dormant == -1) { // Initial setup.
+ if (get_options()->DormantOnFirstStartup) {
+ last_activity = 0;
+ participating_on_network = false;
+ } else {
+ // Start up as active, treat activity as happening now.
+ last_activity = now;
+ participating_on_network = true;
+ }
+ } else if (state->Dormant) {
+ last_activity = 0;
+ participating_on_network = false;
+ } else {
+ last_activity = now - 60 * state->MinutesSinceUserActivity;
+ participating_on_network = true;
+ }
+ if (get_options()->DormantCanceledByStartup) {
+ last_activity = now;
+ participating_on_network = true;
+ }
+ reset_user_activity(last_activity);
+}
+
+/**
+ * Adjust the time at which the user was last active by <b>seconds_diff</b>
+ * in response to a clock jump.
+ */
+void
+netstatus_note_clock_jumped(time_t seconds_diff)
+{
+ time_t last_active = get_last_user_activity_time();
+ if (last_active)
+ reset_user_activity(last_active + seconds_diff);
+}
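
The flush/load pair above is what lets dormancy survive a restart. A small sketch of the round trip, assuming a zero-initialized mainloop_state_t stands in for a freshly parsed state file and that default options are in effect.

#include "core/or/or.h"
#include "core/mainloop/netstatus.h"
#include "core/mainloop/mainloop_state_st.h"

/* Illustrative only: persist and restore the dormancy state. */
static void
example_dormancy_round_trip(time_t now)
{
  mainloop_state_t state;
  memset(&state, 0, sizeof(state));

  /* Say the user was last active ten minutes ago. */
  set_network_participation(true);
  reset_user_activity(now - 10*60);

  /* Writing out yields Dormant=0 and MinutesSinceUserActivity=10. */
  netstatus_flush_to_state(&state, now);

  /* Reading it back (e.g. after a restart) restores the same view. */
  netstatus_load_from_state(&state, now);
}
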
diff --git a/src/core/mainloop/netstatus.h b/src/core/mainloop/netstatus.h
index ae8547b30d..5f54e54553 100644
--- a/src/core/mainloop/netstatus.h
+++ b/src/core/mainloop/netstatus.h
@@ -1,13 +1,32 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file netstatus.h
+ * @brief Header for netstatus.c
+ **/
+
#ifndef TOR_NETSTATUS_H
#define TOR_NETSTATUS_H
int net_is_disabled(void);
int net_is_completely_disabled(void);
-#endif
+void note_user_activity(time_t now);
+void reset_user_activity(time_t now);
+time_t get_last_user_activity_time(void);
+
+void set_network_participation(bool participation);
+bool is_participating_on_network(void);
+
+struct mainloop_state_t;
+
+void netstatus_flush_to_state(struct mainloop_state_t *state, time_t now);
+void netstatus_load_from_state(const struct mainloop_state_t *state,
+ time_t now);
+void netstatus_note_clock_jumped(time_t seconds_diff);
+
+#endif /* !defined(TOR_NETSTATUS_H) */
diff --git a/src/core/mainloop/periodic.c b/src/core/mainloop/periodic.c
index 2651bbbc89..b5fd8fab61 100644
--- a/src/core/mainloop/periodic.c
+++ b/src/core/mainloop/periodic.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2019, The Tor Project, Inc. */
+/* Copyright (c) 2015-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -6,9 +6,22 @@
*
* \brief Generic backend for handling periodic events.
*
- * The events in this module are used by main.c to track items that need
+ * The events in this module are used to track items that need
* to fire once every N seconds, possibly picking a new interval each time
- * that they fire. See periodic_events[] in main.c for examples.
+ * that they fire. See mainloop_periodic_events[] in mainloop.c for examples.
+ *
+ * This module manages a global list of periodic_event_item_t objects,
+ * each corresponding to a single event. To register an event, pass it to
+ * periodic_events_register() when initializing your subsystem.
+ *
+ * Registering an event makes the periodic event subsystem aware of it, but
+ * doesn't cause the event to get created immediately. Before the event can
+ * be started, periodic_events_connect_all() must be called by mainloop.c to
+ * connect all the events to Libevent.
+ *
+ * We expect that periodic_event_item_t objects will be statically allocated;
+ * we set them up and tear them down here, but we don't take ownership of
+ * them.
*/
#include "core/or/or.h"
@@ -16,7 +29,6 @@
#include "app/config/config.h"
#include "core/mainloop/mainloop.h"
#include "core/mainloop/periodic.h"
-#include "lib/evloop/compat_libevent.h"
/** We disable any interval greater than this number of seconds, on the
* grounds that it is probably an absolute time mistakenly passed in as a
@@ -24,6 +36,12 @@
*/
static const int MAX_INTERVAL = 10 * 365 * 86400;
+/**
+ * Global list of periodic events that have been registered with
+ * <b>periodic_events_register</b>.
+ **/
+static smartlist_t *the_periodic_events = NULL;
+
/** Set the event <b>event</b> to run in <b>next_interval</b> seconds from
* now. */
static void
@@ -45,10 +63,6 @@ periodic_event_dispatch(mainloop_event_t *ev, void *data)
periodic_event_item_t *event = data;
tor_assert(ev == event->ev);
- if (BUG(!periodic_event_is_enabled(event))) {
- return;
- }
-
time_t now = time(NULL);
update_current_time(now);
const or_options_t *options = get_options();
@@ -57,7 +71,7 @@ periodic_event_dispatch(mainloop_event_t *ev, void *data)
int next_interval = 0;
if (!periodic_event_is_enabled(event)) {
- /* The event got disabled from inside its callback; no need to
+ /* The event got disabled from inside its callback, or before: no need to
* reschedule. */
return;
}
@@ -91,15 +105,16 @@ periodic_event_dispatch(mainloop_event_t *ev, void *data)
void
periodic_event_reschedule(periodic_event_item_t *event)
{
- /* Don't reschedule a disabled event. */
- if (periodic_event_is_enabled(event)) {
+ /* Don't reschedule a disabled or uninitialized event. */
+ if (event->ev && periodic_event_is_enabled(event)) {
periodic_event_set_interval(event, 1);
}
}
-/** Initializes the libevent backend for a periodic event. */
+/** Connects a periodic event to the Libevent backend. Does not launch the
+ * event immediately. */
void
-periodic_event_setup(periodic_event_item_t *event)
+periodic_event_connect(periodic_event_item_t *event)
{
if (event->ev) { /* Already setup? This is a bug */
log_err(LD_BUG, "Initial dispatch should only be done once.");
@@ -117,7 +132,7 @@ void
periodic_event_launch(periodic_event_item_t *event)
{
if (! event->ev) { /* Not setup? This is a bug */
- log_err(LD_BUG, "periodic_event_launch without periodic_event_setup");
+ log_err(LD_BUG, "periodic_event_launch without periodic_event_connect");
tor_assert(0);
}
/* Event already enabled? This is a bug */
@@ -131,9 +146,9 @@ periodic_event_launch(periodic_event_item_t *event)
periodic_event_dispatch(event->ev, event);
}
-/** Release all storage associated with <b>event</b> */
-void
-periodic_event_destroy(periodic_event_item_t *event)
+/** Disconnect and unregister the periodic event in <b>event</b> */
+static void
+periodic_event_disconnect(periodic_event_item_t *event)
{
if (!event)
return;
@@ -177,3 +192,177 @@ periodic_event_disable(periodic_event_item_t *event)
mainloop_event_cancel(event->ev);
event->enabled = 0;
}
+
+/**
+ * Disable an event, then schedule it to run once.
+ * Do nothing if the event was already disabled.
+ */
+void
+periodic_event_schedule_and_disable(periodic_event_item_t *event)
+{
+ tor_assert(event);
+ if (!periodic_event_is_enabled(event))
+ return;
+
+ periodic_event_disable(event);
+
+ mainloop_event_activate(event->ev);
+}
+
+/**
+ * Add <b>item</b> to the list of periodic events.
+ *
+ * Note that <b>item</b> should be statically allocated: we do not
+ * take ownership of it.
+ **/
+void
+periodic_events_register(periodic_event_item_t *item)
+{
+ if (!the_periodic_events)
+ the_periodic_events = smartlist_new();
+
+ if (BUG(smartlist_contains(the_periodic_events, item)))
+ return;
+
+ smartlist_add(the_periodic_events, item);
+}
+
+/**
+ * Make all registered periodic events connect to the libevent backend.
+ */
+void
+periodic_events_connect_all(void)
+{
+ if (! the_periodic_events)
+ return;
+
+ SMARTLIST_FOREACH_BEGIN(the_periodic_events, periodic_event_item_t *, item) {
+ if (item->ev)
+ continue;
+ periodic_event_connect(item);
+ } SMARTLIST_FOREACH_END(item);
+}
+
+/**
+ * Reset all the registered periodic events so we'll do all our actions again
+ * as if we just started up.
+ *
+ * Useful if our clock has just jumped back after running far in the future,
+ * so that we don't wait for that future time to arrive again before acting.
+ */
+void
+periodic_events_reset_all(void)
+{
+ if (! the_periodic_events)
+ return;
+
+ SMARTLIST_FOREACH_BEGIN(the_periodic_events, periodic_event_item_t *, item) {
+ if (!item->ev)
+ continue;
+
+ periodic_event_reschedule(item);
+ } SMARTLIST_FOREACH_END(item);
+}
+
+/**
+ * Return the registered periodic event whose name is <b>name</b>.
+ * Return NULL if no such event is found.
+ */
+periodic_event_item_t *
+periodic_events_find(const char *name)
+{
+ if (! the_periodic_events)
+ return NULL;
+
+ SMARTLIST_FOREACH_BEGIN(the_periodic_events, periodic_event_item_t *, item) {
+ if (strcmp(name, item->name) == 0)
+ return item;
+ } SMARTLIST_FOREACH_END(item);
+ return NULL;
+}
+
+/**
+ * Start or stop registered periodic events, depending on our current set of
+ * roles.
+ *
+ * Invoked when our list of roles or the net_disabled flag has changed.
+ **/
+void
+periodic_events_rescan_by_roles(int roles, bool net_disabled)
+{
+ if (! the_periodic_events)
+ return;
+
+ SMARTLIST_FOREACH_BEGIN(the_periodic_events, periodic_event_item_t *, item) {
+ if (!item->ev)
+ continue;
+
+ int enable = !!(item->roles & roles);
+
+ /* Handle the event flags. */
+ if (net_disabled &&
+ (item->flags & PERIODIC_EVENT_FLAG_NEED_NET)) {
+ enable = 0;
+ }
+
+ /* Enable the event if needed. It is safe to enable an event that was
+ * already enabled. Same goes for disabling it. */
+ if (enable) {
+ log_debug(LD_GENERAL, "Launching periodic event %s", item->name);
+ periodic_event_enable(item);
+ } else {
+ log_debug(LD_GENERAL, "Disabling periodic event %s", item->name);
+ if (item->flags & PERIODIC_EVENT_FLAG_RUN_ON_DISABLE) {
+ periodic_event_schedule_and_disable(item);
+ } else {
+ periodic_event_disable(item);
+ }
+ }
+ } SMARTLIST_FOREACH_END(item);
+}
+
+/**
+ * Invoked at shutdown: disconnect and unregister all periodic events.
+ *
+ * Does not free the periodic_event_item_t objects themselves, because we do
+ * not own them.
+ */
+void
+periodic_events_disconnect_all(void)
+{
+ if (! the_periodic_events)
+ return;
+
+ SMARTLIST_FOREACH_BEGIN(the_periodic_events, periodic_event_item_t *, item) {
+ periodic_event_disconnect(item);
+ } SMARTLIST_FOREACH_END(item);
+
+ smartlist_free(the_periodic_events);
+}
+
+#define LONGEST_TIMER_PERIOD (30 * 86400)
+/** Helper: Return the number of seconds between <b>now</b> and <b>next</b>,
+ * clipped to the range [1 second, LONGEST_TIMER_PERIOD].
+ *
+ * We use this to answer the question, "how many seconds is it from now until
+ * next" in periodic timer callbacks. Don't use it for other purposes
+ **/
+int
+safe_timer_diff(time_t now, time_t next)
+{
+ if (next > now) {
+ /* There were no computers at signed TIME_MIN (1902 on 32-bit systems),
+ * and nothing that could run Tor. It's a bug if 'next' is around then.
+ * On 64-bit systems with signed TIME_MIN, TIME_MIN is before the Big
+ * Bang. We cannot extrapolate past a singularity, but there was probably
+ * nothing that could run Tor then, either.
+ **/
+ tor_assert(next > TIME_MIN + LONGEST_TIMER_PERIOD);
+
+ if (next - LONGEST_TIMER_PERIOD > now)
+ return LONGEST_TIMER_PERIOD;
+ return (int)(next - now);
+ } else {
+ return 1;
+ }
+}
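
Putting the new register/connect lifecycle together: a subsystem now declares its event statically, registers it at initialization time, and lets mainloop.c connect everything in one pass. A hedged sketch follows; the callback and its interval are invented for illustration, while PERIODIC_EVENT() and the role/flag constants come from periodic.h.

#include "core/or/or.h"
#include "core/mainloop/periodic.h"

/* Illustrative callback: it must be named <event>_callback to match the
 * PERIODIC_EVENT() macro used below. */
static int
example_cleanup_callback(time_t now, const or_options_t *options)
{
  (void) options;
  time_t next = now + 600; /* pretend we computed a deadline */
  /* Return how long to wait, clipped to a sane range. */
  return safe_timer_diff(now, next);
}

/* Statically allocated, as the module comment above requires. */
static periodic_event_item_t example_cleanup_event =
  PERIODIC_EVENT(example_cleanup, PERIODIC_EVENT_ROLE_ALL,
                 PERIODIC_EVENT_FLAG_NEED_NET);

static void
example_subsystem_init(void)
{
  /* Make the periodic-event subsystem aware of the event; it is not
   * connected to libevent until periodic_events_connect_all() runs. */
  periodic_events_register(&example_cleanup_event);
}
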
diff --git a/src/core/mainloop/periodic.h b/src/core/mainloop/periodic.h
index e49fafd174..de556a6bdb 100644
--- a/src/core/mainloop/periodic.h
+++ b/src/core/mainloop/periodic.h
@@ -1,6 +1,11 @@
-/* Copyright (c) 2015-2019, The Tor Project, Inc. */
+/* Copyright (c) 2015-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file periodic.h
+ * @brief Header for periodic.c
+ **/
+
#ifndef TOR_PERIODIC_H
#define TOR_PERIODIC_H
@@ -15,6 +20,10 @@
#define PERIODIC_EVENT_ROLE_BRIDGEAUTH (1U << 4)
#define PERIODIC_EVENT_ROLE_HS_SERVICE (1U << 5)
#define PERIODIC_EVENT_ROLE_DIRSERVER (1U << 6)
+#define PERIODIC_EVENT_ROLE_CONTROLEV (1U << 7)
+
+#define PERIODIC_EVENT_ROLE_NET_PARTICIPANT (1U << 8)
+#define PERIODIC_EVENT_ROLE_ALL (1U << 9)
/* Helper macro to make it a bit less annoying to defined groups of roles that
* are often used. */
@@ -25,10 +34,6 @@
/* Authorities that is both bridge and directory. */
#define PERIODIC_EVENT_ROLE_AUTHORITIES \
(PERIODIC_EVENT_ROLE_BRIDGEAUTH | PERIODIC_EVENT_ROLE_DIRAUTH)
-/* All roles. */
-#define PERIODIC_EVENT_ROLE_ALL \
- (PERIODIC_EVENT_ROLE_AUTHORITIES | PERIODIC_EVENT_ROLE_CLIENT | \
- PERIODIC_EVENT_ROLE_HS_SERVICE | PERIODIC_EVENT_ROLE_ROUTER)
/*
* Event flags which can change the behavior of an event.
@@ -39,6 +44,11 @@
* the net_is_disabled() check. */
#define PERIODIC_EVENT_FLAG_NEED_NET (1U << 0)
+/* Indicate that if the event is enabled, it needs to be run once before
+ * it becomes disabled.
+ */
+#define PERIODIC_EVENT_FLAG_RUN_ON_DISABLE (1U << 1)
+
/** Callback function for a periodic event to take action. The return value
* influences the next time the function will get called. Return
* PERIODIC_EVENT_NO_UPDATE to not update <b>last_action_time</b> and be polled
@@ -66,8 +76,10 @@ typedef struct periodic_event_item_t {
} periodic_event_item_t;
/** events will get their interval from first execution */
+#ifndef COCCI
#define PERIODIC_EVENT(fn, r, f) { fn##_callback, 0, NULL, #fn, r, f, 0 }
#define END_OF_PERIODIC_EVENTS { NULL, 0, NULL, NULL, 0, 0, 0 }
+#endif
/* Return true iff the given event was setup before thus is enabled to be
* scheduled. */
@@ -78,11 +90,20 @@ periodic_event_is_enabled(const periodic_event_item_t *item)
}
void periodic_event_launch(periodic_event_item_t *event);
-void periodic_event_setup(periodic_event_item_t *event);
-void periodic_event_destroy(periodic_event_item_t *event);
+void periodic_event_connect(periodic_event_item_t *event);
+//void periodic_event_disconnect(periodic_event_item_t *event);
void periodic_event_reschedule(periodic_event_item_t *event);
void periodic_event_enable(periodic_event_item_t *event);
void periodic_event_disable(periodic_event_item_t *event);
+void periodic_event_schedule_and_disable(periodic_event_item_t *event);
-#endif /* !defined(TOR_PERIODIC_H) */
+void periodic_events_register(periodic_event_item_t *item);
+void periodic_events_connect_all(void);
+void periodic_events_reset_all(void);
+periodic_event_item_t *periodic_events_find(const char *name);
+void periodic_events_rescan_by_roles(int roles, bool net_disabled);
+void periodic_events_disconnect_all(void);
+
+int safe_timer_diff(time_t now, time_t next);
+#endif /* !defined(TOR_PERIODIC_H) */
diff --git a/src/core/or/.may_include b/src/core/or/.may_include
new file mode 100644
index 0000000000..beb12f155d
--- /dev/null
+++ b/src/core/or/.may_include
@@ -0,0 +1,40 @@
+!advisory
+
+orconfig.h
+
+lib/arch/*.h
+lib/buf/*.h
+lib/cc/*.h
+lib/compress/*.h
+lib/container/*.h
+lib/crypt_ops/*.h
+lib/ctime/*.h
+lib/defs/*.h
+lib/encoding/*.h
+lib/err/*.h
+lib/evloop/*.h
+lib/fs/*.h
+lib/geoip/*.h
+lib/intmath/*.h
+lib/log/*.h
+lib/malloc/*.h
+lib/math/*.h
+lib/net/*.h
+lib/pubsub/*.h
+lib/string/*.h
+lib/subsys/*.h
+lib/test/*.h
+lib/testsupport/*.h
+lib/thread/*.h
+lib/time/*.h
+lib/tls/*.h
+lib/wallclock/*.h
+
+trunnel/*.h
+
+core/mainloop/*.h
+core/proto/*.h
+core/crypto/*.h
+core/or/*.h
+
+ext/*.h
diff --git a/src/core/or/addr_policy_st.h b/src/core/or/addr_policy_st.h
index a75f1a731d..08d16ee616 100644
--- a/src/core/or/addr_policy_st.h
+++ b/src/core/or/addr_policy_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file addr_policy_st.h
+ * @brief Address policy structures.
+ **/
+
#ifndef TOR_ADDR_POLICY_ST_H
#define TOR_ADDR_POLICY_ST_H
@@ -33,7 +38,7 @@ struct addr_policy_t {
/** Base address to accept or reject.
*
* Note that wildcards are treated
- * differntly depending on address family. An AF_UNSPEC address means
+ * differently depending on address family. An AF_UNSPEC address means
* "All addresses, IPv4 or IPv6." An AF_INET address with maskbits==0 means
* "All IPv4 addresses" and an AF_INET6 address with maskbits == 0 means
* "All IPv6 addresses".
@@ -43,4 +48,4 @@ struct addr_policy_t {
uint16_t prt_max; /**< Highest port number to accept/reject. */
};
-#endif
+#endif /* !defined(TOR_ADDR_POLICY_ST_H) */
diff --git a/src/core/or/address_set.c b/src/core/or/address_set.c
index 7ada4446c4..9bd3cc0f2d 100644
--- a/src/core/or/address_set.c
+++ b/src/core/or/address_set.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019, The Tor Project, Inc. */
+/* Copyright (c) 2018-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -16,7 +16,7 @@
#include "lib/container/bloomfilt.h"
#include "lib/crypt_ops/crypto_rand.h"
-/* Wrap our hash function to have the signature that the bloom filter
+/** Wrap our hash function to have the signature that the bloom filter
* needs. */
static uint64_t
bloomfilt_addr_hash(const struct sipkey *key,
diff --git a/src/core/or/address_set.h b/src/core/or/address_set.h
index a505d31628..b4d94b65a9 100644
--- a/src/core/or/address_set.h
+++ b/src/core/or/address_set.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019, The Tor Project, Inc. */
+/* Copyright (c) 2018-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
diff --git a/src/core/or/cell_queue_st.h b/src/core/or/cell_queue_st.h
index 130b95a011..0681dba1b8 100644
--- a/src/core/or/cell_queue_st.h
+++ b/src/core/or/cell_queue_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file cell_queue_st.h
+ * @brief Cell queue structures
+ **/
+
#ifndef PACKED_CELL_ST_H
#define PACKED_CELL_ST_H
@@ -22,8 +27,8 @@ struct packed_cell_t {
* or_connection_t's outbuf. */
struct cell_queue_t {
/** Linked list of packed_cell_t*/
- TOR_SIMPLEQ_HEAD(cell_simpleq, packed_cell_t) head;
+ TOR_SIMPLEQ_HEAD(cell_simpleq_t, packed_cell_t) head;
int n; /**< The number of cells in the queue. */
};
-#endif
+#endif /* !defined(PACKED_CELL_ST_H) */
diff --git a/src/core/or/cell_st.h b/src/core/or/cell_st.h
index 7ab7eceb50..a640d6a456 100644
--- a/src/core/or/cell_st.h
+++ b/src/core/or/cell_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file cell_st.h
+ * @brief Fixed-size cell structure.
+ **/
+
#ifndef CELL_ST_H
#define CELL_ST_H
@@ -16,5 +21,4 @@ struct cell_t {
uint8_t payload[CELL_PAYLOAD_SIZE]; /**< Cell body. */
};
-#endif
-
+#endif /* !defined(CELL_ST_H) */
diff --git a/src/core/or/channel.c b/src/core/or/channel.c
index 9649bdf278..50c03de846 100644
--- a/src/core/or/channel.c
+++ b/src/core/or/channel.c
@@ -1,5 +1,4 @@
-
-/* * Copyright (c) 2012-2019, The Tor Project, Inc. */
+/* * Copyright (c) 2012-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -52,10 +51,10 @@
* Define this so channel.h gives us things only channel_t subclasses
* should touch.
*/
-#define TOR_CHANNEL_INTERNAL_
+#define CHANNEL_OBJECT_PRIVATE
/* This one's for stuff only channel.c and the test suite should see */
-#define CHANNEL_PRIVATE_
+#define CHANNEL_FILE_PRIVATE
#include "core/or/or.h"
#include "app/config/config.h"
@@ -85,6 +84,13 @@
#include "core/or/cell_queue_st.h"
+/* Static function prototypes */
+
+static bool channel_matches_target_addr_for_extend(
+ channel_t *chan,
+ const tor_addr_t *target_ipv4_addr,
+ const tor_addr_t *target_ipv6_addr);
+
/* Global lists of channels */
/* All channel_t instances */
@@ -107,7 +113,7 @@ static smartlist_t *finished_listeners = NULL;
/** Map from channel->global_identifier to channel. Contains the same
* elements as all_channels. */
-static HT_HEAD(channel_gid_map, channel_s) channel_gid_map = HT_INITIALIZER();
+static HT_HEAD(channel_gid_map, channel_t) channel_gid_map = HT_INITIALIZER();
static unsigned
channel_id_hash(const channel_t *chan)
@@ -119,13 +125,13 @@ channel_id_eq(const channel_t *a, const channel_t *b)
{
return a->global_identifier == b->global_identifier;
}
-HT_PROTOTYPE(channel_gid_map, channel_s, gidmap_node,
- channel_id_hash, channel_id_eq)
-HT_GENERATE2(channel_gid_map, channel_s, gidmap_node,
+HT_PROTOTYPE(channel_gid_map, channel_t, gidmap_node,
+ channel_id_hash, channel_id_eq);
+HT_GENERATE2(channel_gid_map, channel_t, gidmap_node,
channel_id_hash, channel_id_eq,
- 0.6, tor_reallocarray_, tor_free_)
+ 0.6, tor_reallocarray_, tor_free_);
-HANDLE_IMPL(channel, channel_s,)
+HANDLE_IMPL(channel, channel_t,)
/* Counter for ID numbers */
static uint64_t n_channels_allocated = 0;
@@ -138,13 +144,13 @@ static uint64_t n_channels_allocated = 0;
* If more than one channel exists, follow the next_with_same_id pointer
* as a linked list.
*/
-static HT_HEAD(channel_idmap, channel_idmap_entry_s) channel_identity_map =
+static HT_HEAD(channel_idmap, channel_idmap_entry_t) channel_identity_map =
HT_INITIALIZER();
-typedef struct channel_idmap_entry_s {
- HT_ENTRY(channel_idmap_entry_s) node;
+typedef struct channel_idmap_entry_t {
+ HT_ENTRY(channel_idmap_entry_t) node;
uint8_t digest[DIGEST_LEN];
- TOR_LIST_HEAD(channel_list_s, channel_s) channel_list;
+ TOR_LIST_HEAD(channel_list_t, channel_t) channel_list;
} channel_idmap_entry_t;
static inline unsigned
@@ -160,10 +166,10 @@ channel_idmap_eq(const channel_idmap_entry_t *a,
return tor_memeq(a->digest, b->digest, DIGEST_LEN);
}
-HT_PROTOTYPE(channel_idmap, channel_idmap_entry_s, node, channel_idmap_hash,
- channel_idmap_eq)
-HT_GENERATE2(channel_idmap, channel_idmap_entry_s, node, channel_idmap_hash,
- channel_idmap_eq, 0.5, tor_reallocarray_, tor_free_)
+HT_PROTOTYPE(channel_idmap, channel_idmap_entry_t, node, channel_idmap_hash,
+ channel_idmap_eq);
+HT_GENERATE2(channel_idmap, channel_idmap_entry_t, node, channel_idmap_hash,
+ channel_idmap_eq, 0.5, tor_reallocarray_, tor_free_);
/* Functions to maintain the digest map */
static void channel_remove_from_digest_map(channel_t *chan);
@@ -1092,23 +1098,6 @@ channel_get_cell_handler(channel_t *chan)
}
/**
- * Return the variable-length cell handler for a channel.
- *
- * This function gets the handler for incoming variable-length cells
- * installed on a channel.
- */
-channel_var_cell_handler_fn_ptr
-channel_get_var_cell_handler(channel_t *chan)
-{
- tor_assert(chan);
-
- if (CHANNEL_CAN_HANDLE_CELLS(chan))
- return chan->var_cell_handler;
-
- return NULL;
-}
-
-/**
* Set both cell handlers for a channel.
*
* This function sets both the fixed-length and variable length cell handlers
@@ -1116,9 +1105,7 @@ channel_get_var_cell_handler(channel_t *chan)
*/
void
channel_set_cell_handlers(channel_t *chan,
- channel_cell_handler_fn_ptr cell_handler,
- channel_var_cell_handler_fn_ptr
- var_cell_handler)
+ channel_cell_handler_fn_ptr cell_handler)
{
tor_assert(chan);
tor_assert(CHANNEL_CAN_HANDLE_CELLS(chan));
@@ -1126,13 +1113,9 @@ channel_set_cell_handlers(channel_t *chan,
log_debug(LD_CHANNEL,
"Setting cell_handler callback for channel %p to %p",
chan, cell_handler);
- log_debug(LD_CHANNEL,
- "Setting var_cell_handler callback for channel %p to %p",
- chan, var_cell_handler);
/* Change them */
chan->cell_handler = cell_handler;
- chan->var_cell_handler = var_cell_handler;
}
/*
@@ -1443,6 +1426,7 @@ write_packed_cell(channel_t *chan, packed_cell_t *cell)
{
int ret = -1;
size_t cell_bytes;
+ uint8_t command = packed_cell_get_command(cell, chan->wide_circ_ids);
tor_assert(chan);
tor_assert(cell);
@@ -1477,6 +1461,16 @@ write_packed_cell(channel_t *chan, packed_cell_t *cell)
/* Successfully sent the cell. */
ret = 0;
+ /* Update padding statistics for the packed codepath. */
+ rep_hist_padding_count_write(PADDING_TYPE_TOTAL);
+ if (command == CELL_PADDING)
+ rep_hist_padding_count_write(PADDING_TYPE_CELL);
+ if (chan->padding_enabled) {
+ rep_hist_padding_count_write(PADDING_TYPE_ENABLED_TOTAL);
+ if (command == CELL_PADDING)
+ rep_hist_padding_count_write(PADDING_TYPE_ENABLED_CELL);
+ }
+
done:
return ret;
}
@@ -1893,11 +1887,11 @@ channel_do_open_actions(channel_t *chan)
geoip_note_client_seen(GEOIP_CLIENT_CONNECT,
&remote_addr, transport_name,
now);
- tor_free(transport_name);
/* Notify the DoS subsystem of a new client. */
if (tlschan && tlschan->conn) {
dos_new_client_conn(tlschan->conn, transport_name);
}
+ tor_free(transport_name);
}
/* Otherwise the underlying transport can't tell us this, so skip it */
}
@@ -2372,7 +2366,7 @@ channel_is_better(channel_t *a, channel_t *b)
if (!a->is_canonical_to_peer && b->is_canonical_to_peer) return 0;
/*
- * Okay, if we're here they tied on canonicity, the prefer the older
+ * Okay, if we're here they tied on canonicity. Prefer the older
* connection, so that the adversary can't create a new connection
* and try to switch us over to it (which will leak information
* about long-lived circuits). Additionally, switching connections
@@ -2397,19 +2391,23 @@ channel_is_better(channel_t *a, channel_t *b)
/**
* Get a channel to extend a circuit.
*
- * Pick a suitable channel to extend a circuit to given the desired digest
- * the address we believe is correct for that digest; this tries to see
- * if we already have one for the requested endpoint, but if there is no good
- * channel, set *msg_out to a message describing the channel's state
- * and our next action, and set *launch_out to a boolean indicated whether
- * the caller should try to launch a new channel with channel_connect().
+ * Given the desired relay identity, pick a suitable channel to extend a
+ * circuit to the target IPv4 or IPv6 address requested by the client. Search
+ * for an existing channel for the requested endpoint. Make sure the channel
+ * is usable for new circuits, and matches one of the target addresses.
+ *
+ * Try to return the best channel. But if there is no good channel, set
+ * *msg_out to a message describing the channel's state and our next action,
+ * and set *launch_out to a boolean indicating whether the caller should try to
+ * launch a new channel with channel_connect().
*/
-channel_t *
-channel_get_for_extend(const char *rsa_id_digest,
- const ed25519_public_key_t *ed_id,
- const tor_addr_t *target_addr,
- const char **msg_out,
- int *launch_out)
+MOCK_IMPL(channel_t *,
+channel_get_for_extend,(const char *rsa_id_digest,
+ const ed25519_public_key_t *ed_id,
+ const tor_addr_t *target_ipv4_addr,
+ const tor_addr_t *target_ipv6_addr,
+ const char **msg_out,
+ int *launch_out))
{
channel_t *chan, *best = NULL;
int n_inprogress_goodaddr = 0, n_old = 0;
@@ -2420,9 +2418,7 @@ channel_get_for_extend(const char *rsa_id_digest,
chan = channel_find_by_remote_identity(rsa_id_digest, ed_id);
- /* Walk the list, unrefing the old one and refing the new at each
- * iteration.
- */
+ /* Walk the list of channels */
for (; chan; chan = channel_next_with_rsa_identity(chan)) {
tor_assert(tor_memeq(chan->identity_digest,
rsa_id_digest, DIGEST_LEN));
@@ -2441,11 +2437,15 @@ channel_get_for_extend(const char *rsa_id_digest,
continue;
}
+ const bool matches_target =
+ channel_matches_target_addr_for_extend(chan,
+ target_ipv4_addr,
+ target_ipv6_addr);
/* Never return a non-open connection. */
if (!CHANNEL_IS_OPEN(chan)) {
/* If the address matches, don't launch a new connection for this
* circuit. */
- if (channel_matches_target_addr_for_extend(chan, target_addr))
+ if (matches_target)
++n_inprogress_goodaddr;
continue;
}
@@ -2458,8 +2458,7 @@ channel_get_for_extend(const char *rsa_id_digest,
/* Only return canonical connections or connections where the address
* is the address we wanted. */
- if (!channel_is_canonical(chan) &&
- !channel_matches_target_addr_for_extend(chan, target_addr)) {
+ if (!channel_is_canonical(chan) && !matches_target) {
++n_noncanonical;
continue;
}
@@ -2841,8 +2840,8 @@ channel_get_actual_remote_address(channel_t *chan)
* Subsequent calls to channel_get_{actual,canonical}_remote_{address,descr}
* may invalidate the return value from this function.
*/
-const char *
-channel_get_canonical_remote_descr(channel_t *chan)
+MOCK_IMPL(const char *,
+channel_get_canonical_remote_descr,(channel_t *chan))
{
tor_assert(chan);
tor_assert(chan->get_remote_descr);
@@ -3303,20 +3302,33 @@ channel_matches_extend_info(channel_t *chan, extend_info_t *extend_info)
}
/**
- * Check if a channel matches a given target address; return true iff we do.
+ * Check if a channel matches the given target IPv4 or IPv6 addresses.
+ * If either address matches, return true. If neither address matches,
+ * return false.
+ *
+ * The two addresses can't both be NULL.
*
* This function calls into the lower layer and asks if this channel thinks
- * it matches a given target address for circuit extension purposes.
+ * it matches the target addresses for circuit extension purposes.
*/
-int
+static bool
channel_matches_target_addr_for_extend(channel_t *chan,
- const tor_addr_t *target)
+ const tor_addr_t *target_ipv4_addr,
+ const tor_addr_t *target_ipv6_addr)
{
tor_assert(chan);
tor_assert(chan->matches_target);
- tor_assert(target);
- return chan->matches_target(chan, target);
+ IF_BUG_ONCE(!target_ipv4_addr && !target_ipv6_addr)
+ return false;
+
+ if (target_ipv4_addr && chan->matches_target(chan, target_ipv4_addr))
+ return true;
+
+ if (target_ipv6_addr && chan->matches_target(chan, target_ipv6_addr))
+ return true;
+
+ return false;
}
/**
@@ -3389,7 +3401,7 @@ channel_sort_by_ed25519_identity(const void **a_, const void **b_)
* all of which MUST have the same RSA ID. (They MAY have different
* Ed25519 IDs.) */
static void
-channel_rsa_id_group_set_badness(struct channel_list_s *lst, int force)
+channel_rsa_id_group_set_badness(struct channel_list_t *lst, int force)
{
/*XXXX This function should really be about channels. 15056 */
channel_t *chan = TOR_LIST_FIRST(lst);
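The doc comment for channel_get_for_extend() above describes two out-parameters: *msg_out explains why no channel was returned, and *launch_out tells the caller whether it should open a new connection itself. Below is a minimal caller sketch of that contract, assuming the new signature in this diff; the helper name get_or_launch_channel_for_extend() is hypothetical, and real callers such as circuit_handle_first_hop() (later in this diff) do more bookkeeping.

static channel_t *
get_or_launch_channel_for_extend(const char *rsa_id_digest,
                                 const ed25519_public_key_t *ed_id,
                                 const tor_addr_t *ipv4_addr,
                                 const tor_addr_t *ipv6_addr,
                                 uint16_t port)
{
  const char *msg = NULL;
  int should_launch = 0;

  /* One of ipv4_addr/ipv6_addr may be NULL, but not both. */
  channel_t *chan = channel_get_for_extend(rsa_id_digest, ed_id,
                                           ipv4_addr, ipv6_addr,
                                           &msg, &should_launch);
  if (chan)
    return chan; /* An existing open channel that is canonical or matches. */

  log_debug(LD_CIRC, "No usable channel: %s", msg ? msg : "(no message)");

  if (should_launch) {
    /* Nothing suitable and nothing in progress: open a fresh connection. */
    const tor_addr_t *addr = ipv4_addr ? ipv4_addr : ipv6_addr;
    return channel_connect(addr, port, rsa_id_digest, ed_id);
  }

  /* Otherwise a connection attempt is already pending; the caller should
   * wait for it rather than launching another. */
  return NULL;
}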
diff --git a/src/core/or/channel.h b/src/core/or/channel.h
index d41f0d70bb..fa4ce4f703 100644
--- a/src/core/or/channel.h
+++ b/src/core/or/channel.h
@@ -1,4 +1,4 @@
-/* * Copyright (c) 2012-2019, The Tor Project, Inc. */
+/* * Copyright (c) 2012-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -14,6 +14,7 @@
#include "lib/container/handles.h"
#include "lib/crypt_ops/crypto_ed25519.h"
+#include "ext/ht.h"
#include "tor_queue.h"
#define tor_timer_t timeout
@@ -22,7 +23,6 @@ struct tor_timer_t;
/* Channel handler function pointer typedefs */
typedef void (*channel_listener_fn_ptr)(channel_listener_t *, channel_t *);
typedef void (*channel_cell_handler_fn_ptr)(channel_t *, cell_t *);
-typedef void (*channel_var_cell_handler_fn_ptr)(channel_t *, var_cell_t *);
/**
* This enum is used by channelpadding to decide when to pad channels.
@@ -48,7 +48,7 @@ typedef enum {
/* channel states for channel_t */
typedef enum {
- /*
+ /**
* Closed state - channel is inactive
*
* Permitted transitions from:
@@ -57,7 +57,7 @@ typedef enum {
* - CHANNEL_STATE_OPENING
*/
CHANNEL_STATE_CLOSED = 0,
- /*
+ /**
* Opening state - channel is trying to connect
*
* Permitted transitions from:
@@ -68,7 +68,7 @@ typedef enum {
* - CHANNEL_STATE_OPEN
*/
CHANNEL_STATE_OPENING,
- /*
+ /**
* Open state - channel is active and ready for use
*
* Permitted transitions from:
@@ -80,7 +80,7 @@ typedef enum {
* - CHANNEL_STATE_MAINT
*/
CHANNEL_STATE_OPEN,
- /*
+ /**
* Maintenance state - channel is temporarily offline for subclass specific
* maintenance activities such as TLS renegotiation.
*
@@ -92,7 +92,7 @@ typedef enum {
* - CHANNEL_STATE_OPEN
*/
CHANNEL_STATE_MAINT,
- /*
+ /**
* Closing state - channel is shutting down
*
* Permitted transitions from:
@@ -103,7 +103,7 @@ typedef enum {
* - CHANNEL_STATE_ERROR
*/
CHANNEL_STATE_CLOSING,
- /*
+ /**
* Error state - channel has experienced a permanent error
*
* Permitted transitions from:
@@ -115,7 +115,7 @@ typedef enum {
* - None
*/
CHANNEL_STATE_ERROR,
- /*
+ /**
* Placeholder for maximum state value
*/
CHANNEL_STATE_LAST
@@ -124,7 +124,7 @@ typedef enum {
/* channel listener states for channel_listener_t */
typedef enum {
- /*
+ /**
* Closed state - channel listener is inactive
*
* Permitted transitions from:
@@ -133,7 +133,7 @@ typedef enum {
* - CHANNEL_LISTENER_STATE_LISTENING
*/
CHANNEL_LISTENER_STATE_CLOSED = 0,
- /*
+ /**
* Listening state - channel listener is listening for incoming
* connections
*
@@ -144,7 +144,7 @@ typedef enum {
* - CHANNEL_LISTENER_STATE_ERROR
*/
CHANNEL_LISTENER_STATE_LISTENING,
- /*
+ /**
* Closing state - channel listener is shutting down
*
* Permitted transitions from:
@@ -154,7 +154,7 @@ typedef enum {
* - CHANNEL_LISTENER_STATE_ERROR
*/
CHANNEL_LISTENER_STATE_CLOSING,
- /*
+ /**
* Error state - channel listener has experienced a permanent error
*
* Permitted transitions from:
@@ -164,7 +164,7 @@ typedef enum {
* - None
*/
CHANNEL_LISTENER_STATE_ERROR,
- /*
+ /**
* Placeholder for maximum state value
*/
CHANNEL_LISTENER_STATE_LAST
@@ -178,15 +178,15 @@ typedef enum {
* to a particular node, and once constructed support the abstract operations
* defined below.
*/
-struct channel_s {
+struct channel_t {
/** Magic number for type-checking cast macros */
uint32_t magic;
/** List entry for hashtable for global-identifier lookup. */
- HT_ENTRY(channel_s) gidmap_node;
+ HT_ENTRY(channel_t) gidmap_node;
/** Handle entry for handle-based lookup */
- HANDLE_ENTRY(channel, channel_s);
+ HANDLE_ENTRY(channel, channel_t);
/** Current channel state */
channel_state_t state;
@@ -267,21 +267,21 @@ struct channel_s {
/** State variable for use by the scheduler */
enum {
- /*
+ /**
* The channel is not open, or it has a full output buffer but no queued
* cells.
*/
SCHED_CHAN_IDLE = 0,
- /*
+ /**
* The channel has space on its output buffer to write, but no queued
* cells.
*/
SCHED_CHAN_WAITING_FOR_CELLS,
- /*
+ /**
* The scheduler has queued cells but no output buffer space to write.
*/
SCHED_CHAN_WAITING_TO_WRITE,
- /*
+ /**
* The scheduler has both queued cells and output buffer space, and is
* eligible for the scheduler loop.
*/
@@ -320,7 +320,6 @@ struct channel_s {
/** Registered handlers for incoming cells */
channel_cell_handler_fn_ptr cell_handler;
- channel_var_cell_handler_fn_ptr var_cell_handler;
/* Methods implemented by the lower layer */
@@ -395,7 +394,7 @@ struct channel_s {
* Linked list of channels with the same RSA identity digest, for use with
* the digest->channel map
*/
- TOR_LIST_ENTRY(channel_s) next_with_same_id;
+ TOR_LIST_ENTRY(channel_t) next_with_same_id;
/** Circuit mux for circuits sending on this channel */
circuitmux_t *cmux;
@@ -442,9 +441,9 @@ struct channel_s {
ratelim_t last_warned_circ_ids_exhausted;
/** Channel timestamps for cell channels */
- time_t timestamp_client; /* Client used this, according to relay.c */
- time_t timestamp_recv; /* Cell received from lower layer */
- time_t timestamp_xmit; /* Cell sent to lower layer */
+ time_t timestamp_client; /**< Client used this, according to relay.c */
+ time_t timestamp_recv; /**< Cell received from lower layer */
+ time_t timestamp_xmit; /**< Cell sent to lower layer */
/** Timestamp for run_connection_housekeeping(). We update this once a
* second when we run housekeeping and find a circuit on this channel, and
@@ -456,16 +455,17 @@ struct channel_s {
* distinct namespace. */
uint64_t dirreq_id;
- /** Channel counters for cell channels */
+ /** Channel counters for cells and bytes we have received. */
uint64_t n_cells_recved, n_bytes_recved;
+ /** Channel counters for cells and bytes we have sent. */
uint64_t n_cells_xmitted, n_bytes_xmitted;
};
-struct channel_listener_s {
- /* Current channel listener state */
+struct channel_listener_t {
+ /** Current channel listener state */
channel_listener_state_t state;
- /* Globally unique ID number for a channel over the lifetime of a Tor
+ /** Globally unique ID number for a channel over the lifetime of a Tor
* process.
*/
uint64_t global_identifier;
@@ -539,13 +539,8 @@ void channel_listener_set_listener_fn(channel_listener_t *chan,
/* Incoming cell callbacks */
channel_cell_handler_fn_ptr channel_get_cell_handler(channel_t *chan);
-channel_var_cell_handler_fn_ptr
-channel_get_var_cell_handler(channel_t *chan);
-
void channel_set_cell_handlers(channel_t *chan,
- channel_cell_handler_fn_ptr cell_handler,
- channel_var_cell_handler_fn_ptr
- var_cell_handler);
+ channel_cell_handler_fn_ptr cell_handler);
/* Clean up closed channels and channel listeners periodically; these are
* called from run_scheduled_events() in main.c.
@@ -560,13 +555,13 @@ void channel_free_all(void);
void channel_dumpstats(int severity);
void channel_listener_dumpstats(int severity);
-#ifdef TOR_CHANNEL_INTERNAL_
+#ifdef CHANNEL_OBJECT_PRIVATE
-#ifdef CHANNEL_PRIVATE_
+#ifdef CHANNEL_FILE_PRIVATE
STATIC void channel_add_to_digest_map(channel_t *chan);
-#endif /* defined(CHANNEL_PRIVATE_) */
+#endif /* defined(CHANNEL_FILE_PRIVATE) */
/* Channel operations for subclasses and internal use only */
@@ -645,7 +640,7 @@ void channel_notify_flushed(channel_t *chan);
/* Handle stuff we need to do on open like notifying circuits */
void channel_do_open_actions(channel_t *chan);
-#endif /* defined(TOR_CHANNEL_INTERNAL_) */
+#endif /* defined(CHANNEL_OBJECT_PRIVATE) */
/* Helper functions to perform operations on channels */
@@ -661,11 +656,13 @@ channel_t * channel_connect(const tor_addr_t *addr, uint16_t port,
const char *rsa_id_digest,
const struct ed25519_public_key_t *ed_id);
-channel_t * channel_get_for_extend(const char *rsa_id_digest,
+MOCK_DECL(channel_t *, channel_get_for_extend,(
+ const char *rsa_id_digest,
const struct ed25519_public_key_t *ed_id,
- const tor_addr_t *target_addr,
+ const tor_addr_t *target_ipv4_addr,
+ const tor_addr_t *target_ipv6_addr,
const char **msg_out,
- int *launch_out);
+ int *launch_out));
/* Ask which of two channels is better for circuit-extension purposes */
int channel_is_better(channel_t *a, channel_t *b);
@@ -726,7 +723,7 @@ const char * channel_get_actual_remote_descr(channel_t *chan);
const char * channel_get_actual_remote_address(channel_t *chan);
MOCK_DECL(int, channel_get_addr_if_possible, (channel_t *chan,
tor_addr_t *addr_out));
-const char * channel_get_canonical_remote_descr(channel_t *chan);
+MOCK_DECL(const char *, channel_get_canonical_remote_descr,(channel_t *chan));
int channel_has_queued_writes(channel_t *chan);
int channel_is_bad_for_new_circs(channel_t *chan);
void channel_mark_bad_for_new_circs(channel_t *chan);
@@ -741,8 +738,6 @@ int channel_matches_extend_info(channel_t *chan, extend_info_t *extend_info);
int channel_remote_identity_matches(const channel_t *chan,
const char *rsa_id_digest,
const ed25519_public_key_t *ed_id);
-int channel_matches_target_addr_for_extend(channel_t *chan,
- const tor_addr_t *target);
unsigned int channel_num_circuits(channel_t *chan);
MOCK_DECL(void,channel_set_circid_type,(channel_t *chan,
crypto_pk_t *identity_rcvd,
@@ -772,7 +767,7 @@ int packed_cell_is_destroy(channel_t *chan,
circid_t *circid_out);
/* Declare the handle helpers */
-HANDLE_DECL(channel, channel_s,)
+HANDLE_DECL(channel, channel_t,)
#define channel_handle_free(h) \
FREE_AND_NULL(channel_handle_t, channel_handle_free_, (h))
#undef tor_timer_t
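With variable-length cells no longer dispatched through the channel layer, channel_set_cell_handlers() takes a single fixed-length-cell callback of type channel_cell_handler_fn_ptr. A minimal registration sketch follows; the names handle_incoming_cell() and setup_channel_handlers() are hypothetical (the real handler is installed by the command-processing code).

/* Hypothetical fixed-length cell handler. */
static void
handle_incoming_cell(channel_t *chan, cell_t *cell)
{
  (void)chan;
  /* Dispatch on cell->command (CREATE, RELAY, DESTROY, ...) here. */
  (void)cell;
}

static void
setup_channel_handlers(channel_t *chan)
{
  /* Variable-length cells (VERSIONS, CERTS, ...) bypass channel_t entirely
   * and go through channel_tls_handle_var_cell() instead. */
  channel_set_cell_handlers(chan, handle_incoming_cell);
}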
diff --git a/src/core/or/channelpadding.c b/src/core/or/channelpadding.c
index 4a0f0e00da..be2ce78a17 100644
--- a/src/core/or/channelpadding.c
+++ b/src/core/or/channelpadding.c
@@ -1,12 +1,17 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
-/* TOR_CHANNEL_INTERNAL_ define needed for an O(1) implementation of
+/**
+ * @file channelpadding.c
+ * @brief Link-level padding code.
+ **/
+
+/* CHANNEL_OBJECT_PRIVATE define needed for an O(1) implementation of
* channelpadding_channel_to_channelinfo() */
-#define TOR_CHANNEL_INTERNAL_
+#define CHANNEL_OBJECT_PRIVATE
#include "core/or/or.h"
#include "core/or/channel.h"
diff --git a/src/core/or/channelpadding.h b/src/core/or/channelpadding.h
index 48002eedb7..d1c7192ffd 100644
--- a/src/core/or/channelpadding.h
+++ b/src/core/or/channelpadding.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
diff --git a/src/core/or/channeltls.c b/src/core/or/channeltls.c
index 18025ff73a..f9eb67c399 100644
--- a/src/core/or/channeltls.c
+++ b/src/core/or/channeltls.c
@@ -1,4 +1,4 @@
-/* * Copyright (c) 2012-2019, The Tor Project, Inc. */
+/* * Copyright (c) 2012-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -34,7 +34,7 @@
* Define this so channel.h gives us things only channel_t subclasses
* should touch.
*/
-#define TOR_CHANNEL_INTERNAL_
+#define CHANNEL_OBJECT_PRIVATE
#define CHANNELTLS_PRIVATE
@@ -45,8 +45,10 @@
#include "core/or/circuitmux_ewma.h"
#include "core/or/command.h"
#include "app/config/config.h"
+#include "app/config/resolve_addr.h"
#include "core/mainloop/connection.h"
#include "core/or/connection_or.h"
+#include "feature/relay/relay_handshake.h"
#include "feature/control/control.h"
#include "feature/client/entrynodes.h"
#include "trunnel/link_handshake.h"
@@ -59,6 +61,7 @@
#include "feature/nodelist/torcert.h"
#include "feature/nodelist/networkstatus.h"
#include "trunnel/channelpadding_negotiation.h"
+#include "trunnel/netinfo.h"
#include "core/or/channelpadding.h"
#include "core/or/cell_st.h"
@@ -562,9 +565,7 @@ channel_tls_get_transport_name_method(channel_t *chan, char **transport_out)
static const char *
channel_tls_get_remote_descr_method(channel_t *chan, int flags)
{
-#define MAX_DESCR_LEN 32
-
- static char buf[MAX_DESCR_LEN + 1];
+ static char buf[TOR_ADDRPORT_BUF_LEN];
channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
connection_t *conn;
const char *answer = NULL;
@@ -577,15 +578,14 @@ channel_tls_get_remote_descr_method(channel_t *chan, int flags)
switch (flags) {
case 0:
/* Canonical address with port*/
- tor_snprintf(buf, MAX_DESCR_LEN + 1,
+ tor_snprintf(buf, TOR_ADDRPORT_BUF_LEN,
"%s:%u", conn->address, conn->port);
answer = buf;
break;
case GRD_FLAG_ORIGINAL:
/* Actual address with port */
addr_str = tor_addr_to_str_dup(&(tlschan->conn->real_addr));
- tor_snprintf(buf, MAX_DESCR_LEN + 1,
- "%s:%u", addr_str, conn->port);
+ tor_snprintf(buf, TOR_ADDRPORT_BUF_LEN, "%s:%u", addr_str, conn->port);
tor_free(addr_str);
answer = buf;
break;
@@ -724,10 +724,13 @@ channel_tls_matches_target_method(channel_t *chan,
* base_.addr is updated by connection_or_init_conn_from_address()
* to be the address in the descriptor. It may be tempting to
* allow either address to be allowed, but if we did so, it would
- * enable someone who steals a relay's keys to impersonate/MITM it
+ * enable someone who steals a relay's keys to covertly impersonate/MITM it
* from anywhere on the Internet! (Because they could make long-lived
* TLS connections from anywhere to all relays, and wait for them to
* be used for extends).
+ *
+ * An adversary who has stolen a relay's keys could also post a fake relay
+ * descriptor, but that attack is easier to detect.
*/
return tor_addr_eq(&(tlschan->conn->real_addr), target);
}
@@ -937,7 +940,6 @@ channel_tls_listener_describe_transport_method(channel_listener_t *chan_l)
void
channel_tls_handle_state_change_on_orconn(channel_tls_t *chan,
or_connection_t *conn,
- uint8_t old_state,
uint8_t state)
{
channel_t *base_chan;
@@ -946,8 +948,6 @@ channel_tls_handle_state_change_on_orconn(channel_tls_t *chan,
tor_assert(conn);
tor_assert(conn->chan == chan);
tor_assert(chan->conn == conn);
- /* Shut the compiler up without triggering -Wtautological-compare */
- (void)old_state;
base_chan = TLS_CHAN_TO_BASE(chan);
@@ -1017,6 +1017,16 @@ channel_tls_time_process_cell(cell_t *cell, channel_tls_t *chan, int *time,
}
#endif /* defined(KEEP_TIMING_STATS) */
+#ifdef KEEP_TIMING_STATS
+#define PROCESS_CELL(tp, cl, cn) STMT_BEGIN { \
+ ++num ## tp; \
+ channel_tls_time_process_cell(cl, cn, & tp ## time , \
+ channel_tls_process_ ## tp ## _cell); \
+ } STMT_END
+#else /* !defined(KEEP_TIMING_STATS) */
+#define PROCESS_CELL(tp, cl, cn) channel_tls_process_ ## tp ## _cell(cl, cn)
+#endif /* defined(KEEP_TIMING_STATS) */
+
/**
* Handle an incoming cell on a channel_tls_t.
*
@@ -1036,16 +1046,6 @@ channel_tls_handle_cell(cell_t *cell, or_connection_t *conn)
channel_tls_t *chan;
int handshaking;
-#ifdef KEEP_TIMING_STATS
-#define PROCESS_CELL(tp, cl, cn) STMT_BEGIN { \
- ++num ## tp; \
- channel_tls_time_process_cell(cl, cn, & tp ## time , \
- channel_tls_process_ ## tp ## _cell); \
- } STMT_END
-#else /* !(defined(KEEP_TIMING_STATS)) */
-#define PROCESS_CELL(tp, cl, cn) channel_tls_process_ ## tp ## _cell(cl, cn)
-#endif /* defined(KEEP_TIMING_STATS) */
-
tor_assert(cell);
tor_assert(conn);
@@ -1063,7 +1063,8 @@ channel_tls_handle_cell(cell_t *cell, or_connection_t *conn)
return;
/* Reject all but VERSIONS and NETINFO when handshaking. */
- /* (VERSIONS should actually be impossible; it's variable-length.) */
+ /* (VERSIONS actually indicates a protocol warning: it's variable-length,
+ * so if it reaches this function, we're on a v1 connection.) */
if (handshaking && cell->command != CELL_VERSIONS &&
cell->command != CELL_NETINFO) {
log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
@@ -1084,13 +1085,13 @@ channel_tls_handle_cell(cell_t *cell, or_connection_t *conn)
entry_guards_note_internet_connectivity(get_guard_selection_info());
rep_hist_padding_count_read(PADDING_TYPE_TOTAL);
- if (TLS_CHAN_TO_BASE(chan)->currently_padding)
+ if (TLS_CHAN_TO_BASE(chan)->padding_enabled)
rep_hist_padding_count_read(PADDING_TYPE_ENABLED_TOTAL);
switch (cell->command) {
case CELL_PADDING:
rep_hist_padding_count_read(PADDING_TYPE_CELL);
- if (TLS_CHAN_TO_BASE(chan)->currently_padding)
+ if (TLS_CHAN_TO_BASE(chan)->padding_enabled)
rep_hist_padding_count_read(PADDING_TYPE_ENABLED_CELL);
++stats_n_padding_cells_processed;
/* do nothing */
@@ -1317,6 +1318,8 @@ channel_tls_handle_var_cell(var_cell_t *var_cell, or_connection_t *conn)
}
}
+#undef PROCESS_CELL
+
/**
* Update channel marks after connection_or.c has changed an address.
*
@@ -1632,6 +1635,35 @@ channel_tls_process_padding_negotiate_cell(cell_t *cell, channel_tls_t *chan)
}
/**
+ * Convert <b>netinfo_addr</b> into corresponding <b>tor_addr</b>.
+ * Return 0 on success; on failure, return -1 and log a warning.
+ */
+static int
+tor_addr_from_netinfo_addr(tor_addr_t *tor_addr,
+ const netinfo_addr_t *netinfo_addr)
+{
+ tor_assert(tor_addr);
+ tor_assert(netinfo_addr);
+
+ uint8_t type = netinfo_addr_get_addr_type(netinfo_addr);
+ uint8_t len = netinfo_addr_get_len(netinfo_addr);
+
+ if (type == NETINFO_ADDR_TYPE_IPV4 && len == 4) {
+ uint32_t ipv4 = netinfo_addr_get_addr_ipv4(netinfo_addr);
+ tor_addr_from_ipv4h(tor_addr, ipv4);
+ } else if (type == NETINFO_ADDR_TYPE_IPV6 && len == 16) {
+ const uint8_t *ipv6_bytes = netinfo_addr_getconstarray_addr_ipv6(
+ netinfo_addr);
+ tor_addr_from_ipv6_bytes(tor_addr, ipv6_bytes);
+ } else {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR, "Cannot read address from NETINFO "
+ "- wrong type/length.");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
* Helper: compute the absolute value of a time_t.
*
* (we need this because labs() doesn't always work for time_t, since
@@ -1655,8 +1687,6 @@ channel_tls_process_netinfo_cell(cell_t *cell, channel_tls_t *chan)
time_t timestamp;
uint8_t my_addr_type;
uint8_t my_addr_len;
- const uint8_t *my_addr_ptr;
- const uint8_t *cp, *end;
uint8_t n_other_addrs;
time_t now = time(NULL);
const routerinfo_t *me = router_get_my_routerinfo();
@@ -1704,7 +1734,7 @@ channel_tls_process_netinfo_cell(cell_t *cell, channel_tls_t *chan)
tor_assert(tor_digest_is_zero(
(const char*)(chan->conn->handshake_state->
authenticated_rsa_peer_id)));
- tor_assert(tor_mem_is_zero(
+ tor_assert(fast_mem_is_zero(
(const char*)(chan->conn->handshake_state->
authenticated_ed25519_peer_id.pubkey), 32));
/* If the client never authenticated, it's a tor client or bridge
@@ -1727,38 +1757,48 @@ channel_tls_process_netinfo_cell(cell_t *cell, channel_tls_t *chan)
}
/* Decode the cell. */
- timestamp = ntohl(get_uint32(cell->payload));
- const time_t sent_versions_at =
- chan->conn->handshake_state->sent_versions_at;
- if (now > sent_versions_at && (now - sent_versions_at) < 180) {
- /* If we have gotten the NETINFO cell reasonably soon after having
- * sent our VERSIONS cell, maybe we can learn skew information from it. */
- apparent_skew = now - timestamp;
+ netinfo_cell_t *netinfo_cell = NULL;
+
+ ssize_t parsed = netinfo_cell_parse(&netinfo_cell, cell->payload,
+ CELL_PAYLOAD_SIZE);
+
+ if (parsed < 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_OR,
+ "Failed to parse NETINFO cell - closing connection.");
+ connection_or_close_for_error(chan->conn, 0);
+ return;
}
- my_addr_type = (uint8_t) cell->payload[4];
- my_addr_len = (uint8_t) cell->payload[5];
- my_addr_ptr = (uint8_t*) cell->payload + 6;
- end = cell->payload + CELL_PAYLOAD_SIZE;
- cp = cell->payload + 6 + my_addr_len;
+ timestamp = netinfo_cell_get_timestamp(netinfo_cell);
+ const netinfo_addr_t *my_addr =
+ netinfo_cell_getconst_other_addr(netinfo_cell);
+
+ my_addr_type = netinfo_addr_get_addr_type(my_addr);
+ my_addr_len = netinfo_addr_get_len(my_addr);
+
+ if ((now - chan->conn->handshake_state->sent_versions_at) < 180) {
+ apparent_skew = now - timestamp;
+ }
/* We used to check:
* if (my_addr_len >= CELL_PAYLOAD_SIZE - 6) {
*
* This is actually never going to happen, since my_addr_len is at most 255,
* and CELL_PAYLOAD_LEN - 6 is 503. So we know that cp is < end. */
- if (my_addr_type == RESOLVED_TYPE_IPV4 && my_addr_len == 4) {
- tor_addr_from_ipv4n(&my_apparent_addr, get_uint32(my_addr_ptr));
+ if (tor_addr_from_netinfo_addr(&my_apparent_addr, my_addr) == -1) {
+ connection_or_close_for_error(chan->conn, 0);
+ netinfo_cell_free(netinfo_cell);
+ return;
+ }
+ if (my_addr_type == NETINFO_ADDR_TYPE_IPV4 && my_addr_len == 4) {
if (!get_options()->BridgeRelay && me &&
- get_uint32(my_addr_ptr) == htonl(me->addr)) {
+ tor_addr_eq_ipv4h(&my_apparent_addr, me->addr)) {
TLS_CHAN_TO_BASE(chan)->is_canonical_to_peer = 1;
}
-
- } else if (my_addr_type == RESOLVED_TYPE_IPV6 && my_addr_len == 16) {
- tor_addr_from_ipv6_bytes(&my_apparent_addr, (const char *) my_addr_ptr);
-
+ } else if (my_addr_type == NETINFO_ADDR_TYPE_IPV6 &&
+ my_addr_len == 16) {
if (!get_options()->BridgeRelay && me &&
!tor_addr_is_null(&me->ipv6_addr) &&
tor_addr_eq(&my_apparent_addr, &me->ipv6_addr)) {
@@ -1766,18 +1806,20 @@ channel_tls_process_netinfo_cell(cell_t *cell, channel_tls_t *chan)
}
}
- n_other_addrs = (uint8_t) *cp++;
- while (n_other_addrs && cp < end-2) {
+ n_other_addrs = netinfo_cell_get_n_my_addrs(netinfo_cell);
+ for (uint8_t i = 0; i < n_other_addrs; i++) {
/* Consider all the other addresses; if any matches, this connection is
* "canonical." */
+
+ const netinfo_addr_t *netinfo_addr =
+ netinfo_cell_getconst_my_addrs(netinfo_cell, i);
+
tor_addr_t addr;
- const uint8_t *next =
- decode_address_from_payload(&addr, cp, (int)(end-cp));
- if (next == NULL) {
+
+ if (tor_addr_from_netinfo_addr(&addr, netinfo_addr) == -1) {
log_fn(LOG_PROTOCOL_WARN, LD_OR,
- "Bad address in netinfo cell; closing connection.");
- connection_or_close_for_error(chan->conn, 0);
- return;
+ "Bad address in netinfo cell; Skipping.");
+ continue;
}
/* A relay can connect from anywhere and be canonical, so
* long as it tells you from where it came. This may sound a bit
@@ -1790,10 +1832,10 @@ channel_tls_process_netinfo_cell(cell_t *cell, channel_tls_t *chan)
connection_or_set_canonical(chan->conn, 1);
break;
}
- cp = next;
- --n_other_addrs;
}
+ netinfo_cell_free(netinfo_cell);
+
if (me && !TLS_CHAN_TO_BASE(chan)->is_canonical_to_peer &&
channel_is_canonical(TLS_CHAN_TO_BASE(chan))) {
const char *descr =
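The NETINFO handling above now goes through the trunnel-generated parser in trunnel/netinfo.h instead of hand-rolled pointer arithmetic. Here is a compact sketch of the same parse/inspect/free pattern, using only accessors that appear in this diff; extract_netinfo_other_addr() is a hypothetical helper name.

static int
extract_netinfo_other_addr(const cell_t *cell, tor_addr_t *addr_out,
                           time_t *timestamp_out)
{
  netinfo_cell_t *ni = NULL;

  /* Parse the fixed-size payload; a negative return means a malformed cell. */
  if (netinfo_cell_parse(&ni, cell->payload, CELL_PAYLOAD_SIZE) < 0)
    return -1;

  *timestamp_out = (time_t) netinfo_cell_get_timestamp(ni);

  /* "other_addr" is the address the peer saw us connect from. */
  const netinfo_addr_t *other = netinfo_cell_getconst_other_addr(ni);
  int rv = -1;
  if (netinfo_addr_get_addr_type(other) == NETINFO_ADDR_TYPE_IPV4 &&
      netinfo_addr_get_len(other) == 4) {
    tor_addr_from_ipv4h(addr_out, netinfo_addr_get_addr_ipv4(other));
    rv = 0;
  } else if (netinfo_addr_get_addr_type(other) == NETINFO_ADDR_TYPE_IPV6 &&
             netinfo_addr_get_len(other) == 16) {
    tor_addr_from_ipv6_bytes(addr_out,
                             netinfo_addr_getconstarray_addr_ipv6(other));
    rv = 0;
  }

  netinfo_cell_free(ni);
  return rv;
}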
diff --git a/src/core/or/channeltls.h b/src/core/or/channeltls.h
index a2648ff537..f04ce0fa9c 100644
--- a/src/core/or/channeltls.h
+++ b/src/core/or/channeltls.h
@@ -1,4 +1,4 @@
-/* * Copyright (c) 2012-2019, The Tor Project, Inc. */
+/* * Copyright (c) 2012-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -22,16 +22,16 @@ struct curve25519_public_key_t;
#define TLS_CHAN_MAGIC 0x8a192427U
-#ifdef TOR_CHANNEL_INTERNAL_
+#ifdef CHANNEL_OBJECT_PRIVATE
-struct channel_tls_s {
+struct channel_tls_t {
/* Base channel_t struct */
channel_t base_;
/* or_connection_t pointer */
or_connection_t *conn;
};
-#endif /* defined(TOR_CHANNEL_INTERNAL_) */
+#endif /* defined(CHANNEL_OBJECT_PRIVATE) */
channel_t * channel_tls_connect(const tor_addr_t *addr, uint16_t port,
const char *id_digest,
@@ -49,7 +49,6 @@ channel_tls_t * channel_tls_from_base(channel_t *chan);
void channel_tls_handle_cell(cell_t *cell, or_connection_t *conn);
void channel_tls_handle_state_change_on_orconn(channel_tls_t *chan,
or_connection_t *conn,
- uint8_t old_state,
uint8_t state);
void channel_tls_handle_var_cell(var_cell_t *var_cell,
or_connection_t *conn);
diff --git a/src/core/or/circuit_st.h b/src/core/or/circuit_st.h
index d4339ff50d..4baafb1848 100644
--- a/src/core/or/circuit_st.h
+++ b/src/core/or/circuit_st.h
@@ -1,17 +1,30 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file circuit_st.h
+ * @brief Base circuit structure.
+ **/
+
#ifndef CIRCUIT_ST_H
#define CIRCUIT_ST_H
#include "core/or/or.h"
+#include "lib/container/handles.h"
+
#include "core/or/cell_queue_st.h"
+#include "ext/ht.h"
struct hs_token_t;
+struct circpad_machine_spec_t;
+struct circpad_machine_runtime_t;
+
+/** Number of padding state machines on a circuit. */
+#define CIRCPAD_MAX_MACHINES (2)
/** "magic" value for an origin_circuit_t */
#define ORIGIN_CIRCUIT_MAGIC 0x35315243u
@@ -49,6 +62,9 @@ struct circuit_t {
uint32_t magic; /**< For memory and type debugging: must equal
* ORIGIN_CIRCUIT_MAGIC or OR_CIRCUIT_MAGIC. */
+ /** Handle entry for handle-based lookup */
+ HANDLE_ENTRY(circuit, circuit_t);
+
/** The channel that is next in this circuit. */
channel_t *n_chan;
@@ -61,12 +77,6 @@ struct circuit_t {
*/
circid_t n_circ_id;
- /**
- * Circuit mux associated with n_chan to which this circuit is attached;
- * NULL if we have no n_chan.
- */
- circuitmux_t *n_mux;
-
/** Queue of cells waiting to be transmitted on n_chan */
cell_queue_t n_chan_cells;
@@ -93,6 +103,10 @@ struct circuit_t {
/** True iff this circuit has received a DESTROY cell in either direction */
unsigned int received_destroy : 1;
+ /** True iff we have sent a sufficiently random data cell since last
+ * we reset send_randomness_after_n_cells. */
+ unsigned int have_sent_sufficiently_random_cell : 1;
+
uint8_t state; /**< Current status of this circuit. */
uint8_t purpose; /**< Why are we creating this circuit? */
@@ -105,6 +119,32 @@ struct circuit_t {
* circuit-level sendme cells to indicate that we're willing to accept
* more. */
int deliver_window;
+ /**
+ * How many cells do we have until we need to send one that contains
+ * sufficient randomness? Used to ensure that authenticated SENDME cells
+ * will reflect some unpredictable information.
+ **/
+ uint16_t send_randomness_after_n_cells;
+
+ /** FIFO containing the digest of the cells that are just before a SENDME is
+ * sent by the client. It is done at the last cell before our package_window
+ * goes down to 0 which is when we expect a SENDME.
+ *
+ * Our current circuit package window is capped to 1000
+ * (CIRCWINDOW_START_MAX) which is also the start value. The increment is
+ * set to 100 (CIRCWINDOW_INCREMENT) which means we don't allow more than
+ * 1000/100 = 10 outstanding SENDME cells worth of data. Meaning that this
+ * list can not contain more than 10 digests of DIGEST_LEN bytes (20).
+ *
+ * At position i in the list, the digest corresponds to the
+ * (CIRCWINDOW_INCREMENT * i)-nth cell received since we expect a SENDME to
+ * be received containing that cell digest.
+ *
+ * For example, position 2 (starting at 0) means that we've received 300
+ * cells so the 300th cell digest is kept at index 2.
+ *
+ * At maximum, this list contains 200 bytes plus the smartlist overhead. */
+ smartlist_t *sendme_last_digests;
/** Temporary field used during circuits_handle_oom. */
uint32_t age_tmp;
@@ -177,6 +217,27 @@ struct circuit_t {
/** Hashtable node: used to look up the circuit by its HS token using the HS
circuitmap. */
HT_ENTRY(circuit_t) hs_circuitmap_node;
+
+ /** Adaptive Padding state machines: these are immutable. The state machines
+ * that come from the consensus are saved to a global structure, to avoid
+ * per-circuit allocations. This merely points to the global copy in
+ * origin_padding_machines or relay_padding_machines that should never
+ * change or get deallocated.
+ *
+ * Each element of this array corresponds to a different padding machine,
+ * and we can have up to CIRCPAD_MAX_MACHINES such machines. */
+ const struct circpad_machine_spec_t *padding_machine[CIRCPAD_MAX_MACHINES];
+
+ /** Adaptive Padding machine runtime info for above machines. This is
+ * the per-circuit mutable information, such as the current state and
+ * histogram token counts. Some of it is optional (aka NULL).
+ * If a machine is being shut down, these indexes can be NULL
+ * without the corresponding padding_machine being NULL, while we
+ * wait for the other end to respond to our shutdown request.
+ *
+ * Each element of this array corresponds to a different padding machine,
+ * and we can have up to CIRCPAD_MAX_MACHINES such machines. */
+ struct circpad_machine_runtime_t *padding_info[CIRCPAD_MAX_MACHINES];
};
-#endif
+#endif /* !defined(CIRCUIT_ST_H) */
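The sendme_last_digests comment above relies on some arithmetic worth spelling out: with the package window capped at CIRCWINDOW_START_MAX (1000) and a SENDME expected every CIRCWINDOW_INCREMENT (100) cells, at most ten SENDMEs can be outstanding, so the FIFO never holds more than ten DIGEST_LEN (20-byte) digests. A back-of-the-envelope helper using those existing constants; the function name is hypothetical.

static size_t
sendme_digest_fifo_max_bytes(void)
{
  /* 1000 / 100 = at most 10 outstanding SENDMEs... */
  size_t max_entries = CIRCWINDOW_START_MAX / CIRCWINDOW_INCREMENT;
  /* ...each remembered as one 20-byte digest: 10 * 20 = 200 bytes. */
  return max_entries * DIGEST_LEN;
}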
diff --git a/src/core/or/circuitbuild.c b/src/core/or/circuitbuild.c
index 70b5d8215a..ec61b4a455 100644
--- a/src/core/or/circuitbuild.c
+++ b/src/core/or/circuitbuild.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -21,20 +21,19 @@
* cells arrive, the client will invoke circuit_send_next_onion_skin() to send
* CREATE or RELAY_EXTEND cells.
*
- * On the server side, this module also handles the logic of responding to
- * RELAY_EXTEND requests, using circuit_extend().
+ * The server side is handled in feature/relay/circuitbuild_relay.c.
**/
#define CIRCUITBUILD_PRIVATE
+#define OCIRC_EVENT_PRIVATE
#include "core/or/or.h"
#include "app/config/config.h"
-#include "app/config/confparse.h"
+#include "lib/confmgt/confmgt.h"
#include "core/crypto/hs_ntor.h"
#include "core/crypto/onion_crypto.h"
#include "core/crypto/onion_fast.h"
#include "core/crypto/onion_tap.h"
-#include "core/crypto/relay_crypto.h"
#include "core/mainloop/connection.h"
#include "core/mainloop/mainloop.h"
#include "core/or/channel.h"
@@ -42,17 +41,20 @@
#include "core/or/circuitlist.h"
#include "core/or/circuitstats.h"
#include "core/or/circuituse.h"
+#include "core/or/circuitpadding.h"
#include "core/or/command.h"
#include "core/or/connection_edge.h"
#include "core/or/connection_or.h"
#include "core/or/onion.h"
+#include "core/or/ocirc_event.h"
#include "core/or/policies.h"
#include "core/or/relay.h"
+#include "core/or/crypt_path.h"
#include "feature/client/bridges.h"
#include "feature/client/circpathbias.h"
#include "feature/client/entrynodes.h"
#include "feature/client/transports.h"
-#include "feature/control/control.h"
+#include "feature/control/control_events.h"
#include "feature/dircommon/directory.h"
#include "feature/nodelist/describe.h"
#include "feature/nodelist/microdesc.h"
@@ -80,15 +82,6 @@
#include "feature/nodelist/routerinfo_st.h"
#include "feature/nodelist/routerstatus_st.h"
-static channel_t * channel_connect_for_circuit(const tor_addr_t *addr,
- uint16_t port,
- const char *id_digest,
- const ed25519_public_key_t *ed_id);
-static int circuit_deliver_create_cell(circuit_t *circ,
- const create_cell_t *create_cell,
- int relayed);
-static crypt_path_t *onion_next_hop_in_cpath(crypt_path_t *cpath);
-STATIC int onion_append_hop(crypt_path_t **head_ptr, extend_info_t *choice);
static int circuit_send_first_onion_skin(origin_circuit_t *circ);
static int circuit_build_no_more_hops(origin_circuit_t *circ);
static int circuit_send_intermediate_onion_skin(origin_circuit_t *circ,
@@ -102,10 +95,10 @@ static const node_t *choose_good_middle_server(uint8_t purpose,
* and then calls command_setup_channel() to give it the right
* callbacks.
*/
-static channel_t *
-channel_connect_for_circuit(const tor_addr_t *addr, uint16_t port,
- const char *id_digest,
- const ed25519_public_key_t *ed_id)
+MOCK_IMPL(channel_t *,
+channel_connect_for_circuit,(const tor_addr_t *addr, uint16_t port,
+ const char *id_digest,
+ const struct ed25519_public_key_t *ed_id))
{
channel_t *chan;
@@ -492,7 +485,7 @@ circuit_establish_circuit(uint8_t purpose, extend_info_t *exit_ei, int flags)
return NULL;
}
- control_event_circuit_status(circ, CIRC_EVENT_LAUNCHED, 0);
+ circuit_event_status(circ, CIRC_EVENT_LAUNCHED, 0);
if ((err_reason = circuit_handle_first_hop(circ)) < 0) {
circuit_mark_for_close(TO_CIRCUIT(circ), -err_reason);
@@ -508,6 +501,27 @@ origin_circuit_get_guard_state(origin_circuit_t *circ)
return circ->guard_state;
}
+/**
+ * Helper function to publish a channel association message
+ *
+ * circuit_handle_first_hop() calls this to notify subscribers about a
+ * channel launch event, which associates a circuit with a channel.
+ * This doesn't always correspond to an assignment of the circuit's
+ * n_chan field, because that seems to be only for fully-open
+ * channels.
+ **/
+static void
+circuit_chan_publish(const origin_circuit_t *circ, const channel_t *chan)
+{
+ ocirc_chan_msg_t *msg = tor_malloc(sizeof(*msg));
+
+ msg->gid = circ->global_identifier;
+ msg->chan = chan->global_identifier;
+ msg->onehop = circ->build_state->onehop_tunnel;
+
+ ocirc_chan_publish(msg);
+}
+
/** Start establishing the first hop of our circuit. Figure out what
* OR we should connect to, and if necessary start the connection to
* it. If we're already connected, then send the 'create' cell.
@@ -522,7 +536,7 @@ circuit_handle_first_hop(origin_circuit_t *circ)
int should_launch = 0;
const or_options_t *options = get_options();
- firsthop = onion_next_hop_in_cpath(circ->cpath);
+ firsthop = cpath_get_next_non_open_hop(circ->cpath);
tor_assert(firsthop);
tor_assert(firsthop->extend_info);
@@ -545,11 +559,17 @@ circuit_handle_first_hop(origin_circuit_t *circ)
fmt_addrport(&firsthop->extend_info->addr,
firsthop->extend_info->port));
- n_chan = channel_get_for_extend(firsthop->extend_info->identity_digest,
- &firsthop->extend_info->ed_identity,
- &firsthop->extend_info->addr,
- &msg,
- &should_launch);
+ /* We'll clean up this code in #33220, when we add an IPv6 address to
+ * extend_info_t. */
+ const bool addr_is_ipv4 =
+ (tor_addr_family(&firsthop->extend_info->addr) == AF_INET);
+ n_chan = channel_get_for_extend(
+ firsthop->extend_info->identity_digest,
+ &firsthop->extend_info->ed_identity,
+ addr_is_ipv4 ? &firsthop->extend_info->addr : NULL,
+ addr_is_ipv4 ? NULL : &firsthop->extend_info->addr,
+ &msg,
+ &should_launch);
if (!n_chan) {
/* not currently connected in a useful way. */
@@ -559,8 +579,6 @@ circuit_handle_first_hop(origin_circuit_t *circ)
circ->base_.n_hop = extend_info_dup(firsthop->extend_info);
if (should_launch) {
- if (circ->build_state->onehop_tunnel)
- control_event_bootstrap(BOOTSTRAP_STATUS_CONN_DIR, 0);
n_chan = channel_connect_for_circuit(
&firsthop->extend_info->addr,
firsthop->extend_info->port,
@@ -570,6 +588,7 @@ circuit_handle_first_hop(origin_circuit_t *circ)
log_info(LD_CIRC,"connect to firsthop failed. Closing.");
return -END_CIRC_REASON_CONNECTFAILED;
}
+ circuit_chan_publish(circ, n_chan);
}
log_debug(LD_CIRC,"connecting in progress (or finished). Good.");
@@ -581,6 +600,7 @@ circuit_handle_first_hop(origin_circuit_t *circ)
} else { /* it's already open. use it. */
tor_assert(!circ->base_.n_hop);
circ->base_.n_chan = n_chan;
+ circuit_chan_publish(circ, n_chan);
log_debug(LD_CIRC,"Conn open. Delivering first onion skin.");
if ((err_reason = circuit_send_next_onion_skin(circ)) < 0) {
log_info(LD_CIRC,"circuit_send_next_onion_skin failed.");
@@ -700,9 +720,10 @@ circuit_n_chan_done(channel_t *chan, int status, int close_origin_circuits)
* gave us via an EXTEND cell, so we shouldn't worry if we don't understand
* it. Return -1 if we failed to find a suitable circid, else return 0.
*/
-static int
-circuit_deliver_create_cell(circuit_t *circ, const create_cell_t *create_cell,
- int relayed)
+MOCK_IMPL(int,
+circuit_deliver_create_cell,(circuit_t *circ,
+ const struct create_cell_t *create_cell,
+ int relayed))
{
cell_t cell;
circid_t id;
@@ -762,40 +783,6 @@ circuit_deliver_create_cell(circuit_t *circ, const create_cell_t *create_cell,
return -1;
}
-/** We've decided to start our reachability testing. If all
- * is set, log this to the user. Return 1 if we did, or 0 if
- * we chose not to log anything. */
-int
-inform_testing_reachability(void)
-{
- char dirbuf[128];
- char *address;
- const routerinfo_t *me = router_get_my_routerinfo();
- if (!me)
- return 0;
- address = tor_dup_ip(me->addr);
- control_event_server_status(LOG_NOTICE,
- "CHECKING_REACHABILITY ORADDRESS=%s:%d",
- address, me->or_port);
- if (me->dir_port) {
- tor_snprintf(dirbuf, sizeof(dirbuf), " and DirPort %s:%d",
- address, me->dir_port);
- control_event_server_status(LOG_NOTICE,
- "CHECKING_REACHABILITY DIRADDRESS=%s:%d",
- address, me->dir_port);
- }
- log_notice(LD_OR, "Now checking whether ORPort %s:%d%s %s reachable... "
- "(this may take up to %d minutes -- look for log "
- "messages indicating success)",
- address, me->or_port,
- me->dir_port ? dirbuf : "",
- me->dir_port ? "are" : "is",
- TIMEOUT_UNTIL_UNREACHABILITY_COMPLAINT/60);
-
- tor_free(address);
- return 1;
-}
-
/** Return true iff we should send a create_fast cell to start building a given
* circuit */
static inline int
@@ -941,15 +928,18 @@ circuit_send_next_onion_skin(origin_circuit_t *circ)
tor_assert(circ->cpath->state == CPATH_STATE_OPEN);
tor_assert(circ->base_.state == CIRCUIT_STATE_BUILDING);
- crypt_path_t *hop = onion_next_hop_in_cpath(circ->cpath);
+ crypt_path_t *hop = cpath_get_next_non_open_hop(circ->cpath);
circuit_build_times_handle_completed_hop(circ);
+ circpad_machine_event_circ_added_hop(circ);
+
if (hop) {
/* Case two: we're on a hop after the first. */
return circuit_send_intermediate_onion_skin(circ, hop);
}
/* Case three: the circuit is finished. Do housekeeping tasks on it. */
+ circpad_machine_event_circ_built(circ);
return circuit_build_no_more_hops(circ);
}
@@ -1192,192 +1182,6 @@ circuit_note_clock_jumped(int64_t seconds_elapsed, bool was_idle)
}
}
-/** Take the 'extend' <b>cell</b>, pull out addr/port plus the onion
- * skin and identity digest for the next hop. If we're already connected,
- * pass the onion skin to the next hop using a create cell; otherwise
- * launch a new OR connection, and <b>circ</b> will notice when the
- * connection succeeds or fails.
- *
- * Return -1 if we want to warn and tear down the circuit, else return 0.
- */
-int
-circuit_extend(cell_t *cell, circuit_t *circ)
-{
- channel_t *n_chan;
- relay_header_t rh;
- extend_cell_t ec;
- const char *msg = NULL;
- int should_launch = 0;
-
- if (circ->n_chan) {
- log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
- "n_chan already set. Bug/attack. Closing.");
- return -1;
- }
- if (circ->n_hop) {
- log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
- "conn to next hop already launched. Bug/attack. Closing.");
- return -1;
- }
-
- if (!server_mode(get_options())) {
- log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
- "Got an extend cell, but running as a client. Closing.");
- return -1;
- }
-
- relay_header_unpack(&rh, cell->payload);
-
- if (extend_cell_parse(&ec, rh.command,
- cell->payload+RELAY_HEADER_SIZE,
- rh.length) < 0) {
- log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
- "Can't parse extend cell. Closing circuit.");
- return -1;
- }
-
- if (!ec.orport_ipv4.port || tor_addr_is_null(&ec.orport_ipv4.addr)) {
- log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
- "Client asked me to extend to zero destination port or addr.");
- return -1;
- }
-
- if (tor_addr_is_internal(&ec.orport_ipv4.addr, 0) &&
- !get_options()->ExtendAllowPrivateAddresses) {
- log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
- "Client asked me to extend to a private address");
- return -1;
- }
-
- /* Check if they asked us for 0000..0000. We support using
- * an empty fingerprint for the first hop (e.g. for a bridge relay),
- * but we don't want to let clients send us extend cells for empty
- * fingerprints -- a) because it opens the user up to a mitm attack,
- * and b) because it lets an attacker force the relay to hold open a
- * new TLS connection for each extend request. */
- if (tor_digest_is_zero((const char*)ec.node_id)) {
- log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
- "Client asked me to extend without specifying an id_digest.");
- return -1;
- }
-
- /* Fill in ed_pubkey if it was not provided and we can infer it from
- * our networkstatus */
- if (ed25519_public_key_is_zero(&ec.ed_pubkey)) {
- const node_t *node = node_get_by_id((const char*)ec.node_id);
- const ed25519_public_key_t *node_ed_id = NULL;
- if (node &&
- node_supports_ed25519_link_authentication(node, 1) &&
- (node_ed_id = node_get_ed25519_id(node))) {
- ed25519_pubkey_copy(&ec.ed_pubkey, node_ed_id);
- }
- }
-
- /* Next, check if we're being asked to connect to the hop that the
- * extend cell came from. There isn't any reason for that, and it can
- * assist circular-path attacks. */
- if (tor_memeq(ec.node_id,
- TO_OR_CIRCUIT(circ)->p_chan->identity_digest,
- DIGEST_LEN)) {
- log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
- "Client asked me to extend back to the previous hop.");
- return -1;
- }
-
- /* Check the previous hop Ed25519 ID too */
- if (! ed25519_public_key_is_zero(&ec.ed_pubkey) &&
- ed25519_pubkey_eq(&ec.ed_pubkey,
- &TO_OR_CIRCUIT(circ)->p_chan->ed25519_identity)) {
- log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
- "Client asked me to extend back to the previous hop "
- "(by Ed25519 ID).");
- return -1;
- }
-
- n_chan = channel_get_for_extend((const char*)ec.node_id,
- &ec.ed_pubkey,
- &ec.orport_ipv4.addr,
- &msg,
- &should_launch);
-
- if (!n_chan) {
- log_debug(LD_CIRC|LD_OR,"Next router (%s): %s",
- fmt_addrport(&ec.orport_ipv4.addr,ec.orport_ipv4.port),
- msg?msg:"????");
-
- circ->n_hop = extend_info_new(NULL /*nickname*/,
- (const char*)ec.node_id,
- &ec.ed_pubkey,
- NULL, /*onion_key*/
- NULL, /*curve25519_key*/
- &ec.orport_ipv4.addr,
- ec.orport_ipv4.port);
-
- circ->n_chan_create_cell = tor_memdup(&ec.create_cell,
- sizeof(ec.create_cell));
-
- circuit_set_state(circ, CIRCUIT_STATE_CHAN_WAIT);
-
- if (should_launch) {
- /* we should try to open a connection */
- n_chan = channel_connect_for_circuit(&ec.orport_ipv4.addr,
- ec.orport_ipv4.port,
- (const char*)ec.node_id,
- &ec.ed_pubkey);
- if (!n_chan) {
- log_info(LD_CIRC,"Launching n_chan failed. Closing circuit.");
- circuit_mark_for_close(circ, END_CIRC_REASON_CONNECTFAILED);
- return 0;
- }
- log_debug(LD_CIRC,"connecting in progress (or finished). Good.");
- }
- /* return success. The onion/circuit/etc will be taken care of
- * automatically (may already have been) whenever n_chan reaches
- * OR_CONN_STATE_OPEN.
- */
- return 0;
- }
-
- tor_assert(!circ->n_hop); /* Connection is already established. */
- circ->n_chan = n_chan;
- log_debug(LD_CIRC,
- "n_chan is %s",
- channel_get_canonical_remote_descr(n_chan));
-
- if (circuit_deliver_create_cell(circ, &ec.create_cell, 1) < 0)
- return -1;
-
- return 0;
-}
-
-/** Initialize cpath-\>{f|b}_{crypto|digest} from the key material in key_data.
- *
- * If <b>is_hs_v3</b> is set, this cpath will be used for next gen hidden
- * service circuits and <b>key_data</b> must be at least
- * HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN bytes in length.
- *
- * If <b>is_hs_v3</b> is not set, key_data must contain CPATH_KEY_MATERIAL_LEN
- * bytes, which are used as follows:
- * - 20 to initialize f_digest
- * - 20 to initialize b_digest
- * - 16 to key f_crypto
- * - 16 to key b_crypto
- *
- * (If 'reverse' is true, then f_XX and b_XX are swapped.)
- *
- * Return 0 if init was successful, else -1 if it failed.
- */
-int
-circuit_init_cpath_crypto(crypt_path_t *cpath,
- const char *key_data, size_t key_data_len,
- int reverse, int is_hs_v3)
-{
-
- tor_assert(cpath);
- return relay_crypto_init(&cpath->crypto, key_data, key_data_len, reverse,
- is_hs_v3);
-}
-
/** A "created" cell <b>reply</b> came back to us on circuit <b>circ</b>.
* (The body of <b>reply</b> varies depending on what sort of handshake
* this is.)
@@ -1403,7 +1207,7 @@ circuit_finish_handshake(origin_circuit_t *circ,
if (circ->cpath->state == CPATH_STATE_AWAITING_KEYS) {
hop = circ->cpath;
} else {
- hop = onion_next_hop_in_cpath(circ->cpath);
+ hop = cpath_get_next_non_open_hop(circ->cpath);
if (!hop) { /* got an extended when we're all done? */
log_warn(LD_PROTOCOL,"got extended when circ already built? Closing.");
return - END_CIRC_REASON_TORPROTOCOL;
@@ -1427,14 +1231,14 @@ circuit_finish_handshake(origin_circuit_t *circ,
onion_handshake_state_release(&hop->handshake_state);
- if (circuit_init_cpath_crypto(hop, keys, sizeof(keys), 0, 0)<0) {
+ if (cpath_init_circuit_crypto(hop, keys, sizeof(keys), 0, 0)<0) {
return -END_CIRC_REASON_TORPROTOCOL;
}
hop->state = CPATH_STATE_OPEN;
log_info(LD_CIRC,"Finished building circuit hop:");
circuit_log_path(LOG_INFO,LD_CIRC,circ);
- control_event_circuit_status(circ, CIRC_EVENT_EXTENDED, 0);
+ circuit_event_status(circ, CIRC_EVENT_EXTENDED, 0);
return 0;
}
@@ -1479,7 +1283,7 @@ circuit_truncated(origin_circuit_t *circ, int reason)
}
layer->next = victim->next;
- circuit_free_cpath_node(victim);
+ cpath_free(victim);
}
log_info(LD_CIRC, "finished");
@@ -1487,61 +1291,6 @@ circuit_truncated(origin_circuit_t *circ, int reason)
#endif /* 0 */
}
-/** Given a response payload and keys, initialize, then send a created
- * cell back.
- */
-int
-onionskin_answer(or_circuit_t *circ,
- const created_cell_t *created_cell,
- const char *keys, size_t keys_len,
- const uint8_t *rend_circ_nonce)
-{
- cell_t cell;
-
- tor_assert(keys_len == CPATH_KEY_MATERIAL_LEN);
-
- if (created_cell_format(&cell, created_cell) < 0) {
- log_warn(LD_BUG,"couldn't format created cell (type=%d, len=%d)",
- (int)created_cell->cell_type, (int)created_cell->handshake_len);
- return -1;
- }
- cell.circ_id = circ->p_circ_id;
-
- circuit_set_state(TO_CIRCUIT(circ), CIRCUIT_STATE_OPEN);
-
- log_debug(LD_CIRC,"init digest forward 0x%.8x, backward 0x%.8x.",
- (unsigned int)get_uint32(keys),
- (unsigned int)get_uint32(keys+20));
- if (relay_crypto_init(&circ->crypto, keys, keys_len, 0, 0)<0) {
- log_warn(LD_BUG,"Circuit initialization failed");
- return -1;
- }
-
- memcpy(circ->rend_circ_nonce, rend_circ_nonce, DIGEST_LEN);
-
- int used_create_fast = (created_cell->cell_type == CELL_CREATED_FAST);
-
- append_cell_to_circuit_queue(TO_CIRCUIT(circ),
- circ->p_chan, &cell, CELL_DIRECTION_IN, 0);
- log_debug(LD_CIRC,"Finished sending '%s' cell.",
- used_create_fast ? "created_fast" : "created");
-
- /* Ignore the local bit when ExtendAllowPrivateAddresses is set:
- * it violates the assumption that private addresses are local.
- * Also, many test networks run on local addresses, and
- * TestingTorNetwork sets ExtendAllowPrivateAddresses. */
- if ((!channel_is_local(circ->p_chan)
- || get_options()->ExtendAllowPrivateAddresses)
- && !channel_is_outgoing(circ->p_chan)) {
- /* record that we could process create cells from a non-local conn
- * that we didn't initiate; presumably this means that create cells
- * can reach us too. */
- router_orport_found_reachable();
- }
-
- return 0;
-}
-
/** Helper for new_route_len(). Choose a circuit length for purpose
* <b>purpose</b>: DEFAULT_ROUTE_LEN (+ 1 if someone else chose the
* exit). If someone else chose the exit, they could be colluding
@@ -1673,24 +1422,28 @@ route_len_for_purpose(uint8_t purpose, extend_info_t *exit_ei)
* to handle the desired path length, return -1.
*/
STATIC int
-new_route_len(uint8_t purpose, extend_info_t *exit_ei, smartlist_t *nodes)
+new_route_len(uint8_t purpose, extend_info_t *exit_ei,
+ const smartlist_t *nodes)
{
- int num_acceptable_routers;
int routelen;
tor_assert(nodes);
routelen = route_len_for_purpose(purpose, exit_ei);
- num_acceptable_routers = count_acceptable_nodes(nodes);
+ int num_acceptable_direct = count_acceptable_nodes(nodes, 1);
+ int num_acceptable_indirect = count_acceptable_nodes(nodes, 0);
- log_debug(LD_CIRC,"Chosen route length %d (%d/%d routers suitable).",
- routelen, num_acceptable_routers, smartlist_len(nodes));
+ log_debug(LD_CIRC,"Chosen route length %d (%d direct and %d indirect "
+ "routers suitable).", routelen, num_acceptable_direct,
+ num_acceptable_indirect);
- if (num_acceptable_routers < routelen) {
+ if (num_acceptable_direct < 1 || num_acceptable_indirect < routelen - 1) {
log_info(LD_CIRC,
- "Not enough acceptable routers (%d/%d). Discarding this circuit.",
- num_acceptable_routers, routelen);
+ "Not enough acceptable routers (%d/%d direct and %d/%d "
+ "indirect routers suitable). Discarding this circuit.",
+ num_acceptable_direct, routelen,
+ num_acceptable_indirect, routelen);
return -1;
}
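The reworked check above splits node counting by reachability: the first hop needs at least one node we can connect to directly, while the remaining routelen - 1 hops only need nodes reachable through the circuit. A small standalone sketch of that predicate, with illustrative counts:

#include <stdio.h>

int
main(void)
{
  int routelen = 3;                 /* typical default route length */
  int num_acceptable_direct = 1;    /* e.g. one usable guard */
  int num_acceptable_indirect = 5;  /* e.g. five usable middles/exits */

  int enough = (num_acceptable_direct >= 1 &&
                num_acceptable_indirect >= routelen - 1);
  printf("enough routers: %s\n", enough ? "yes" : "no"); /* prints "yes" */
  return 0;
}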
@@ -2294,7 +2047,7 @@ circuit_append_new_exit(origin_circuit_t *circ, extend_info_t *exit_ei)
state->chosen_exit = extend_info_dup(exit_ei);
++circ->build_state->desired_path_len;
- onion_append_hop(&circ->cpath, exit_ei);
+ cpath_append_hop(&circ->cpath, exit_ei);
return 0;
}
@@ -2332,7 +2085,7 @@ circuit_extend_to_new_exit(origin_circuit_t *circ, extend_info_t *exit_ei)
* particular router. See bug #25885.)
*/
MOCK_IMPL(STATIC int,
-count_acceptable_nodes, (smartlist_t *nodes))
+count_acceptable_nodes, (const smartlist_t *nodes, int direct))
{
int num=0;
@@ -2346,7 +2099,7 @@ count_acceptable_nodes, (smartlist_t *nodes))
if (! node->is_valid)
// log_debug(LD_CIRC,"Nope, the directory says %d is not valid.",i);
continue;
- if (! node_has_any_descriptor(node))
+ if (! node_has_preferred_descriptor(node, direct))
continue;
/* The node has a descriptor, so we can just check the ntor key directly */
if (!node_has_curve25519_onion_key(node))
@@ -2359,47 +2112,6 @@ count_acceptable_nodes, (smartlist_t *nodes))
return num;
}
-/** Add <b>new_hop</b> to the end of the doubly-linked-list <b>head_ptr</b>.
- * This function is used to extend cpath by another hop.
- */
-void
-onion_append_to_cpath(crypt_path_t **head_ptr, crypt_path_t *new_hop)
-{
- if (*head_ptr) {
- new_hop->next = (*head_ptr);
- new_hop->prev = (*head_ptr)->prev;
- (*head_ptr)->prev->next = new_hop;
- (*head_ptr)->prev = new_hop;
- } else {
- *head_ptr = new_hop;
- new_hop->prev = new_hop->next = new_hop;
- }
-}
-
-#ifdef TOR_UNIT_TESTS
-
-/** Unittest helper function: Count number of hops in cpath linked list. */
-unsigned int
-cpath_get_n_hops(crypt_path_t **head_ptr)
-{
- unsigned int n_hops = 0;
- crypt_path_t *tmp;
-
- if (!*head_ptr) {
- return 0;
- }
-
- tmp = *head_ptr;
- do {
- n_hops++;
- tmp = tmp->next;
- } while (tmp != *head_ptr);
-
- return n_hops;
-}
-
-#endif /* defined(TOR_UNIT_TESTS) */
-
/**
* Build the exclude list for vanguard circuits.
*
@@ -2597,7 +2309,24 @@ choose_good_middle_server(uint8_t purpose,
return choice;
}
- choice = router_choose_random_node(excluded, options->ExcludeNodes, flags);
+ if (options->MiddleNodes) {
+ smartlist_t *sl = smartlist_new();
+ routerset_get_all_nodes(sl, options->MiddleNodes,
+ options->ExcludeNodes, 1);
+
+ smartlist_subtract(sl, excluded);
+
+ choice = node_sl_choose_by_bandwidth(sl, WEIGHT_FOR_MID);
+ smartlist_free(sl);
+ if (choice) {
+ log_fn(LOG_INFO, LD_CIRC, "Chose fixed middle node: %s",
+ hex_str(choice->identity, DIGEST_LEN));
+ } else {
+ log_fn(LOG_NOTICE, LD_CIRC, "Restricted middle not available");
+ }
+ } else {
+ choice = router_choose_random_node(excluded, options->ExcludeNodes, flags);
+ }
smartlist_free(excluded);
return choice;
}
@@ -2657,20 +2386,6 @@ choose_good_entry_server(uint8_t purpose, cpath_build_state_t *state,
return choice;
}
-/** Return the first non-open hop in cpath, or return NULL if all
- * hops are open. */
-static crypt_path_t *
-onion_next_hop_in_cpath(crypt_path_t *cpath)
-{
- crypt_path_t *hop = cpath;
- do {
- if (hop->state != CPATH_STATE_OPEN)
- return hop;
- hop = hop->next;
- } while (hop != cpath);
- return NULL;
-}
-
/** Choose a suitable next hop for the circuit <b>circ</b>.
* Append the hop info to circ->cpath.
*
@@ -2727,33 +2442,11 @@ onion_extend_cpath(origin_circuit_t *circ)
extend_info_describe(info),
cur_len+1, build_state_get_exit_nickname(state));
- onion_append_hop(&circ->cpath, info);
+ cpath_append_hop(&circ->cpath, info);
extend_info_free(info);
return 0;
}
-/** Create a new hop, annotate it with information about its
- * corresponding router <b>choice</b>, and append it to the
- * end of the cpath <b>head_ptr</b>. */
-STATIC int
-onion_append_hop(crypt_path_t **head_ptr, extend_info_t *choice)
-{
- crypt_path_t *hop = tor_malloc_zero(sizeof(crypt_path_t));
-
- /* link hop into the cpath, at the end. */
- onion_append_to_cpath(head_ptr, hop);
-
- hop->magic = CRYPT_PATH_MAGIC;
- hop->state = CPATH_STATE_CLOSED;
-
- hop->extend_info = extend_info_dup(choice);
-
- hop->package_window = circuit_initial_package_window();
- hop->deliver_window = CIRCWINDOW_START;
-
- return 0;
-}
-
/** Allocate a new extend_info object based on the various arguments. */
extend_info_t *
extend_info_new(const char *nickname,
@@ -2895,8 +2588,8 @@ extend_info_dup(extend_info_t *info)
* If there is no chosen exit, or if we don't know the node_t for
* the chosen exit, return NULL.
*/
-const node_t *
-build_state_get_exit_node(cpath_build_state_t *state)
+MOCK_IMPL(const node_t *,
+build_state_get_exit_node,(cpath_build_state_t *state))
{
if (!state || !state->chosen_exit)
return NULL;
@@ -2958,7 +2651,7 @@ extend_info_supports_ntor(const extend_info_t* ei)
{
tor_assert(ei);
/* Valid ntor keys have at least one non-zero byte */
- return !tor_mem_is_zero(
+ return !fast_mem_is_zero(
(const char*)ei->curve25519_onion_key.public_key,
CURVE25519_PUBKEY_LEN);
}
diff --git a/src/core/or/circuitbuild.h b/src/core/or/circuitbuild.h
index 969f5b5cc3..e62bb41de9 100644
--- a/src/core/or/circuitbuild.h
+++ b/src/core/or/circuitbuild.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -29,29 +29,19 @@ struct circuit_guard_state_t *origin_circuit_get_guard_state(
int circuit_handle_first_hop(origin_circuit_t *circ);
void circuit_n_chan_done(channel_t *chan, int status,
int close_origin_circuits);
-int inform_testing_reachability(void);
int circuit_timeout_want_to_count_circ(const origin_circuit_t *circ);
int circuit_send_next_onion_skin(origin_circuit_t *circ);
void circuit_note_clock_jumped(int64_t seconds_elapsed, bool was_idle);
-int circuit_extend(cell_t *cell, circuit_t *circ);
-int circuit_init_cpath_crypto(crypt_path_t *cpath,
- const char *key_data, size_t key_data_len,
- int reverse, int is_hs_v3);
struct created_cell_t;
int circuit_finish_handshake(origin_circuit_t *circ,
const struct created_cell_t *created_cell);
int circuit_truncated(origin_circuit_t *circ, int reason);
-int onionskin_answer(or_circuit_t *circ,
- const struct created_cell_t *created_cell,
- const char *keys, size_t keys_len,
- const uint8_t *rend_circ_nonce);
MOCK_DECL(int, circuit_all_predicted_ports_handled, (time_t now,
int *need_uptime,
int *need_capacity));
int circuit_append_new_exit(origin_circuit_t *circ, extend_info_t *info);
int circuit_extend_to_new_exit(origin_circuit_t *circ, extend_info_t *info);
-void onion_append_to_cpath(crypt_path_t **head_ptr, crypt_path_t *new_hop);
extend_info_t *extend_info_new(const char *nickname,
const char *rsa_id_digest,
const struct ed25519_public_key_t *ed_id,
@@ -70,7 +60,8 @@ int circuit_can_use_tap(const origin_circuit_t *circ);
int circuit_has_usable_onion_key(const origin_circuit_t *circ);
int extend_info_has_preferred_onion_key(const extend_info_t* ei);
const uint8_t *build_state_get_exit_rsa_id(cpath_build_state_t *state);
-const node_t *build_state_get_exit_node(cpath_build_state_t *state);
+MOCK_DECL(const node_t *,
+ build_state_get_exit_node,(cpath_build_state_t *state));
const char *build_state_get_exit_nickname(cpath_build_state_t *state);
struct circuit_guard_state_t;
@@ -80,11 +71,26 @@ const node_t *choose_good_entry_server(uint8_t purpose,
struct circuit_guard_state_t **guard_state_out);
void circuit_upgrade_circuits_from_guard_wait(void);
+struct ed25519_public_key_t;
+
+MOCK_DECL(channel_t *,
+channel_connect_for_circuit,(const tor_addr_t *addr,
+ uint16_t port,
+ const char *id_digest,
+ const struct ed25519_public_key_t *ed_id));
+
+struct create_cell_t;
+MOCK_DECL(int,
+circuit_deliver_create_cell,(circuit_t *circ,
+ const struct create_cell_t *create_cell,
+ int relayed));
+
#ifdef CIRCUITBUILD_PRIVATE
STATIC circid_t get_unique_circ_id_by_chan(channel_t *chan);
STATIC int new_route_len(uint8_t purpose, extend_info_t *exit_ei,
- smartlist_t *nodes);
-MOCK_DECL(STATIC int, count_acceptable_nodes, (smartlist_t *nodes));
+ const smartlist_t *nodes);
+MOCK_DECL(STATIC int, count_acceptable_nodes, (const smartlist_t *nodes,
+ int direct));
STATIC int onion_extend_cpath(origin_circuit_t *circ);
@@ -92,11 +98,6 @@ STATIC int
onion_pick_cpath_exit(origin_circuit_t *circ, extend_info_t *exit_ei,
int is_hs_v3_rp_circuit);
-#if defined(TOR_UNIT_TESTS)
-unsigned int cpath_get_n_hops(crypt_path_t **head_ptr);
-
-#endif /* defined(TOR_UNIT_TESTS) */
-
#endif /* defined(CIRCUITBUILD_PRIVATE) */
#endif /* !defined(TOR_CIRCUITBUILD_H) */
diff --git a/src/core/or/circuitlist.c b/src/core/or/circuitlist.c
index ccf3041bb4..384835667d 100644
--- a/src/core/or/circuitlist.c
+++ b/src/core/or/circuitlist.c
@@ -1,7 +1,7 @@
/* Copyright 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -51,6 +51,7 @@
* logic, which was originally circuit-focused.
**/
#define CIRCUITLIST_PRIVATE
+#define OCIRC_EVENT_PRIVATE
#include "lib/cc/torint.h" /* TOR_PRIuSZ */
#include "core/or/or.h"
@@ -61,11 +62,13 @@
#include "core/or/circuitlist.h"
#include "core/or/circuituse.h"
#include "core/or/circuitstats.h"
+#include "core/or/circuitpadding.h"
+#include "core/or/crypt_path.h"
#include "core/mainloop/connection.h"
#include "app/config/config.h"
#include "core/or/connection_edge.h"
#include "core/or/connection_or.h"
-#include "feature/control/control.h"
+#include "feature/control/control_events.h"
#include "lib/crypt_ops/crypto_rand.h"
#include "lib/crypt_ops/crypto_util.h"
#include "lib/crypt_ops/crypto_dh.h"
@@ -94,7 +97,9 @@
#include "lib/compress/compress_lzma.h"
#include "lib/compress/compress_zlib.h"
#include "lib/compress/compress_zstd.h"
-#include "lib/container/buffers.h"
+#include "lib/buf/buffers.h"
+
+#include "core/or/ocirc_event.h"
#include "ht.h"
@@ -127,7 +132,6 @@ static smartlist_t *circuits_pending_other_guards = NULL;
* circuit_mark_for_close and which are waiting for circuit_about_to_free. */
static smartlist_t *circuits_pending_close = NULL;
-static void circuit_free_cpath_node(crypt_path_t *victim);
static void cpath_ref_decref(crypt_path_reference_t *cpath_ref);
static void circuit_about_to_free_atexit(circuit_t *circ);
static void circuit_about_to_free(circuit_t *circ);
@@ -142,6 +146,9 @@ static int any_opened_circs_cached_val = 0;
/********* END VARIABLES ************/
+/* Implement circuit handle helpers. */
+HANDLE_IMPL(circuit, circuit_t,)
+
or_circuit_t *
TO_OR_CIRCUIT(circuit_t *x)
{
@@ -208,10 +215,10 @@ chan_circid_entry_hash_(chan_circid_circuit_map_t *a)
static HT_HEAD(chan_circid_map, chan_circid_circuit_map_t)
chan_circid_map = HT_INITIALIZER();
HT_PROTOTYPE(chan_circid_map, chan_circid_circuit_map_t, node,
- chan_circid_entry_hash_, chan_circid_entries_eq_)
+ chan_circid_entry_hash_, chan_circid_entries_eq_);
HT_GENERATE2(chan_circid_map, chan_circid_circuit_map_t, node,
chan_circid_entry_hash_, chan_circid_entries_eq_, 0.6,
- tor_reallocarray_, tor_free_)
+ tor_reallocarray_, tor_free_);
/** The most recently returned entry from circuit_get_by_circid_chan;
* used to improve performance when many cells arrive in a row from the
@@ -481,6 +488,54 @@ circuit_set_n_circid_chan(circuit_t *circ, circid_t id,
}
}
+/**
+ * Helper function to publish a message about events on an origin circuit
+ *
+ * Publishes a message to subscribers of origin circuit events, and
+ * sends the control event.
+ **/
+int
+circuit_event_status(origin_circuit_t *circ, circuit_status_event_t tp,
+ int reason_code)
+{
+ ocirc_cevent_msg_t *msg = tor_malloc(sizeof(*msg));
+
+ tor_assert(circ);
+
+ msg->gid = circ->global_identifier;
+ msg->evtype = tp;
+ msg->reason = reason_code;
+ msg->onehop = circ->build_state->onehop_tunnel;
+
+ ocirc_cevent_publish(msg);
+ return control_event_circuit_status(circ, tp, reason_code);
+}
+
+/**
+ * Helper function to publish a state change message
+ *
+ * circuit_set_state() calls this to notify subscribers about a change
+ * of the state of an origin circuit. @a circ must be an origin
+ * circuit.
+ **/
+static void
+circuit_state_publish(const circuit_t *circ)
+{
+ ocirc_state_msg_t *msg = tor_malloc(sizeof(*msg));
+ const origin_circuit_t *ocirc;
+
+ tor_assert(CIRCUIT_IS_ORIGIN(circ));
+ ocirc = CONST_TO_ORIGIN_CIRCUIT(circ);
+ /* Only inbound OR circuits can be in this state, not origin circuits. */
+ tor_assert(circ->state != CIRCUIT_STATE_ONIONSKIN_PENDING);
+
+ msg->gid = ocirc->global_identifier;
+ msg->state = circ->state;
+ msg->onehop = ocirc->build_state->onehop_tunnel;
+
+ ocirc_state_publish(msg);
+}
+
/** Change the state of <b>circ</b> to <b>state</b>, adding it to or removing
* it from lists as appropriate. */
void
@@ -510,6 +565,8 @@ circuit_set_state(circuit_t *circ, uint8_t state)
if (state == CIRCUIT_STATE_GUARD_WAIT || state == CIRCUIT_STATE_OPEN)
tor_assert(!circ->n_chan_create_cell);
circ->state = state;
+ if (CIRCUIT_IS_ORIGIN(circ))
+ circuit_state_publish(circ);
}
/** Append to <b>out</b> all circuits in state CHAN_WAIT waiting for
@@ -767,6 +824,8 @@ circuit_purpose_to_controller_string(uint8_t purpose)
return "PATH_BIAS_TESTING";
case CIRCUIT_PURPOSE_HS_VANGUARDS:
return "HS_VANGUARDS";
+ case CIRCUIT_PURPOSE_C_CIRCUIT_PADDING:
+ return "CIRCUIT_PADDING";
default:
tor_snprintf(buf, sizeof(buf), "UNKNOWN_%d", (int)purpose);
@@ -796,6 +855,7 @@ circuit_purpose_to_controller_hs_state_string(uint8_t purpose)
case CIRCUIT_PURPOSE_CONTROLLER:
case CIRCUIT_PURPOSE_PATH_BIAS_TESTING:
case CIRCUIT_PURPOSE_HS_VANGUARDS:
+ case CIRCUIT_PURPOSE_C_CIRCUIT_PADDING:
return NULL;
case CIRCUIT_PURPOSE_INTRO_POINT:
@@ -896,6 +956,9 @@ circuit_purpose_to_string(uint8_t purpose)
case CIRCUIT_PURPOSE_HS_VANGUARDS:
return "Hidden service: Pre-built vanguard circuit";
+ case CIRCUIT_PURPOSE_C_CIRCUIT_PADDING:
+ return "Circuit kept open for padding";
+
default:
tor_snprintf(buf, sizeof(buf), "UNKNOWN_%d", (int)purpose);
return buf;
@@ -931,6 +994,7 @@ init_circuit_base(circuit_t *circ)
circ->package_window = circuit_initial_package_window();
circ->deliver_window = CIRCWINDOW_START;
+ circuit_reset_sendme_randomness(circ);
cell_queue_init(&circ->n_chan_cells);
smartlist_add(circuit_get_global_list(), circ);
@@ -1072,7 +1136,7 @@ circuit_free_(circuit_t *circ)
* circuit is closed. This is to avoid any code path that free registered
* circuits without closing them before. This needs to be done before the
* hs identifier is freed. */
- hs_circ_cleanup(circ);
+ hs_circ_cleanup_on_free(circ);
if (CIRCUIT_IS_ORIGIN(circ)) {
origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
@@ -1092,7 +1156,7 @@ circuit_free_(circuit_t *circ)
if (ocirc->build_state) {
extend_info_free(ocirc->build_state->chosen_exit);
- circuit_free_cpath_node(ocirc->build_state->pending_final_cpath);
+ cpath_free(ocirc->build_state->pending_final_cpath);
cpath_ref_decref(ocirc->build_state->service_pending_final_cpath_ref);
}
tor_free(ocirc->build_state);
@@ -1171,11 +1235,23 @@ circuit_free_(circuit_t *circ)
* "active" checks will be violated. */
cell_queue_clear(&circ->n_chan_cells);
+ /* Cleanup possible SENDME state. */
+ if (circ->sendme_last_digests) {
+ SMARTLIST_FOREACH(circ->sendme_last_digests, uint8_t *, d, tor_free(d));
+ smartlist_free(circ->sendme_last_digests);
+ }
+
log_info(LD_CIRC, "Circuit %u (id: %" PRIu32 ") has been freed.",
n_circ_id,
CIRCUIT_IS_ORIGIN(circ) ?
TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
+ /* Free any circuit padding structures */
+ circpad_circuit_free_all_machineinfos(circ);
+
+ /* Clear all dangling handle references. */
+ circuit_handles_clear(circ);
+
if (should_free) {
memwipe(mem, 0xAA, memlen); /* poison memory */
tor_free(mem);
@@ -1207,10 +1283,10 @@ circuit_clear_cpath(origin_circuit_t *circ)
while (cpath->next && cpath->next != head) {
victim = cpath;
cpath = victim->next;
- circuit_free_cpath_node(victim);
+ cpath_free(victim);
}
- circuit_free_cpath_node(cpath);
+ cpath_free(cpath);
circ->cpath = NULL;
}
@@ -1267,29 +1343,13 @@ circuit_free_all(void)
HT_CLEAR(chan_circid_map, &chan_circid_map);
}
-/** Deallocate space associated with the cpath node <b>victim</b>. */
-static void
-circuit_free_cpath_node(crypt_path_t *victim)
-{
- if (!victim)
- return;
-
- relay_crypto_clear(&victim->crypto);
- onion_handshake_state_release(&victim->handshake_state);
- crypto_dh_free(victim->rend_dh_handshake_state);
- extend_info_free(victim->extend_info);
-
- memwipe(victim, 0xBB, sizeof(crypt_path_t)); /* poison memory */
- tor_free(victim);
-}
-
/** Release a crypt_path_reference_t*, which may be NULL. */
static void
cpath_ref_decref(crypt_path_reference_t *cpath_ref)
{
if (cpath_ref != NULL) {
if (--(cpath_ref->refcount) == 0) {
- circuit_free_cpath_node(cpath_ref->cpath);
+ cpath_free(cpath_ref->cpath);
tor_free(cpath_ref);
}
}
@@ -2140,6 +2200,11 @@ circuit_mark_for_close_, (circuit_t *circ, int reason, int line,
tor_assert(line);
tor_assert(file);
+ /* Check whether the circuitpadding subsystem wants to block this close */
+ if (circpad_marked_circuit_for_padding(circ, reason)) {
+ return;
+ }
+
if (circ->marked_for_close) {
log_warn(LD_BUG,
"Duplicate call to circuit_mark_for_close at %s:%d"
@@ -2195,7 +2260,7 @@ circuit_mark_for_close_, (circuit_t *circ, int reason, int line,
}
/* Notify the HS subsystem that this circuit is closing. */
- hs_circ_cleanup(circ);
+ hs_circ_cleanup_on_close(circ);
if (circuits_pending_close == NULL)
circuits_pending_close = smartlist_new();
@@ -2270,50 +2335,13 @@ circuit_about_to_free(circuit_t *circ)
smartlist_remove(circuits_pending_other_guards, circ);
}
if (CIRCUIT_IS_ORIGIN(circ)) {
- control_event_circuit_status(TO_ORIGIN_CIRCUIT(circ),
+ circuit_event_status(TO_ORIGIN_CIRCUIT(circ),
(circ->state == CIRCUIT_STATE_OPEN ||
circ->state == CIRCUIT_STATE_GUARD_WAIT) ?
CIRC_EVENT_CLOSED:CIRC_EVENT_FAILED,
orig_reason);
}
- if (circ->purpose == CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT) {
- origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
- int timed_out = (reason == END_CIRC_REASON_TIMEOUT);
- tor_assert(circ->state == CIRCUIT_STATE_OPEN);
- tor_assert(ocirc->build_state->chosen_exit);
- if (orig_reason != END_CIRC_REASON_IP_NOW_REDUNDANT &&
- ocirc->rend_data) {
- /* treat this like getting a nack from it */
- log_info(LD_REND, "Failed intro circ %s to %s (awaiting ack). %s",
- safe_str_client(rend_data_get_address(ocirc->rend_data)),
- safe_str_client(build_state_get_exit_nickname(ocirc->build_state)),
- timed_out ? "Recording timeout." : "Removing from descriptor.");
- rend_client_report_intro_point_failure(ocirc->build_state->chosen_exit,
- ocirc->rend_data,
- timed_out ?
- INTRO_POINT_FAILURE_TIMEOUT :
- INTRO_POINT_FAILURE_GENERIC);
- }
- } else if (circ->purpose == CIRCUIT_PURPOSE_C_INTRODUCING &&
- reason != END_CIRC_REASON_TIMEOUT) {
- origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
- if (ocirc->build_state->chosen_exit && ocirc->rend_data) {
- if (orig_reason != END_CIRC_REASON_IP_NOW_REDUNDANT &&
- ocirc->rend_data) {
- log_info(LD_REND, "Failed intro circ %s to %s "
- "(building circuit to intro point). "
- "Marking intro point as possibly unreachable.",
- safe_str_client(rend_data_get_address(ocirc->rend_data)),
- safe_str_client(build_state_get_exit_nickname(
- ocirc->build_state)));
- rend_client_report_intro_point_failure(ocirc->build_state->chosen_exit,
- ocirc->rend_data,
- INTRO_POINT_FAILURE_UNREACHABLE);
- }
- }
- }
-
if (circ->n_chan) {
circuit_clear_cell_queue(circ, circ->n_chan);
/* Only send destroy if the channel isn't closing anyway */
@@ -2374,13 +2402,9 @@ marked_circuit_free_cells(circuit_t *circ)
return;
}
cell_queue_clear(&circ->n_chan_cells);
- if (circ->n_mux)
- circuitmux_clear_num_cells(circ->n_mux, circ);
if (! CIRCUIT_IS_ORIGIN(circ)) {
or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
cell_queue_clear(&orcirc->p_chan_cells);
- if (orcirc->p_mux)
- circuitmux_clear_num_cells(orcirc->p_mux, circ);
}
}
@@ -2724,59 +2748,6 @@ circuits_handle_oom(size_t current_allocation)
n_dirconns_killed);
}
-/** Verify that cpath layer <b>cp</b> has all of its invariants
- * correct. Trigger an assert if anything is invalid.
- */
-void
-assert_cpath_layer_ok(const crypt_path_t *cp)
-{
-// tor_assert(cp->addr); /* these are zero for rendezvous extra-hops */
-// tor_assert(cp->port);
- tor_assert(cp);
- tor_assert(cp->magic == CRYPT_PATH_MAGIC);
- switch (cp->state)
- {
- case CPATH_STATE_OPEN:
- relay_crypto_assert_ok(&cp->crypto);
- FALLTHROUGH;
- case CPATH_STATE_CLOSED:
- /*XXXX Assert that there's no handshake_state either. */
- tor_assert(!cp->rend_dh_handshake_state);
- break;
- case CPATH_STATE_AWAITING_KEYS:
- /* tor_assert(cp->dh_handshake_state); */
- break;
- default:
- log_fn(LOG_ERR, LD_BUG, "Unexpected state %d", cp->state);
- tor_assert(0);
- }
- tor_assert(cp->package_window >= 0);
- tor_assert(cp->deliver_window >= 0);
-}
-
-/** Verify that cpath <b>cp</b> has all of its invariants
- * correct. Trigger an assert if anything is invalid.
- */
-static void
-assert_cpath_ok(const crypt_path_t *cp)
-{
- const crypt_path_t *start = cp;
-
- do {
- assert_cpath_layer_ok(cp);
- /* layers must be in sequence of: "open* awaiting? closed*" */
- if (cp != start) {
- if (cp->state == CPATH_STATE_AWAITING_KEYS) {
- tor_assert(cp->prev->state == CPATH_STATE_OPEN);
- } else if (cp->state == CPATH_STATE_OPEN) {
- tor_assert(cp->prev->state == CPATH_STATE_OPEN);
- }
- }
- cp = cp->next;
- tor_assert(cp);
- } while (cp != start);
-}
-
/** Verify that circuit <b>c</b> has all of its invariants
* correct. Trigger an assert if anything is invalid.
*/
@@ -2838,7 +2809,7 @@ assert_circuit_ok,(const circuit_t *c))
!smartlist_contains(circuits_pending_chans, c));
}
if (origin_circ && origin_circ->cpath) {
- assert_cpath_ok(origin_circ->cpath);
+ cpath_assert_ok(origin_circ->cpath);
}
if (c->purpose == CIRCUIT_PURPOSE_REND_ESTABLISHED) {
tor_assert(or_circ);
diff --git a/src/core/or/circuitlist.h b/src/core/or/circuitlist.h
index b87c6a3667..fd7e22e4c0 100644
--- a/src/core/or/circuitlist.h
+++ b/src/core/or/circuitlist.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -12,8 +12,10 @@
#ifndef TOR_CIRCUITLIST_H
#define TOR_CIRCUITLIST_H
+#include "lib/container/handles.h"
#include "lib/testsupport/testsupport.h"
#include "feature/hs/hs_ident.h"
+#include "core/or/ocirc_event.h"
/** Circuit state: I'm the origin, still haven't done all my handshakes. */
#define CIRCUIT_STATE_BUILDING 0
@@ -91,31 +93,33 @@
#define CIRCUIT_PURPOSE_C_HS_MAX_ 13
/** This circuit is used for build time measurement only */
#define CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT 14
-#define CIRCUIT_PURPOSE_C_MAX_ 14
+/** This circuit is being held open by circuit padding */
+#define CIRCUIT_PURPOSE_C_CIRCUIT_PADDING 15
+#define CIRCUIT_PURPOSE_C_MAX_ 15
-#define CIRCUIT_PURPOSE_S_HS_MIN_ 15
+#define CIRCUIT_PURPOSE_S_HS_MIN_ 16
/** Hidden-service-side circuit purpose: at the service, waiting for
* introductions. */
-#define CIRCUIT_PURPOSE_S_ESTABLISH_INTRO 15
+#define CIRCUIT_PURPOSE_S_ESTABLISH_INTRO 16
/** Hidden-service-side circuit purpose: at the service, successfully
* established intro. */
-#define CIRCUIT_PURPOSE_S_INTRO 16
+#define CIRCUIT_PURPOSE_S_INTRO 17
/** Hidden-service-side circuit purpose: at the service, connecting to rend
* point. */
-#define CIRCUIT_PURPOSE_S_CONNECT_REND 17
+#define CIRCUIT_PURPOSE_S_CONNECT_REND 18
/** Hidden-service-side circuit purpose: at the service, rendezvous
* established. */
-#define CIRCUIT_PURPOSE_S_REND_JOINED 18
+#define CIRCUIT_PURPOSE_S_REND_JOINED 19
/** This circuit is used for uploading hsdirs */
-#define CIRCUIT_PURPOSE_S_HSDIR_POST 19
-#define CIRCUIT_PURPOSE_S_HS_MAX_ 19
+#define CIRCUIT_PURPOSE_S_HSDIR_POST 20
+#define CIRCUIT_PURPOSE_S_HS_MAX_ 20
/** A testing circuit; not meant to be used for actual traffic. */
-#define CIRCUIT_PURPOSE_TESTING 20
+#define CIRCUIT_PURPOSE_TESTING 21
/** A controller made this circuit and Tor should not use it. */
-#define CIRCUIT_PURPOSE_CONTROLLER 21
+#define CIRCUIT_PURPOSE_CONTROLLER 22
/** This circuit is used for path bias probing only */
-#define CIRCUIT_PURPOSE_PATH_BIAS_TESTING 22
+#define CIRCUIT_PURPOSE_PATH_BIAS_TESTING 23
/** This circuit is used for vanguards/restricted paths.
*
@@ -123,9 +127,9 @@
* on-demand. When an HS operation needs to take place (e.g. connect to an
* intro point), these circuits are then cannibalized and repurposed to the
* actual needed HS purpose. */
-#define CIRCUIT_PURPOSE_HS_VANGUARDS 23
+#define CIRCUIT_PURPOSE_HS_VANGUARDS 24
-#define CIRCUIT_PURPOSE_MAX_ 23
+#define CIRCUIT_PURPOSE_MAX_ 24
/** A catch-all for unrecognized purposes. Currently we don't expect
* to make or see any circuits with this purpose. */
#define CIRCUIT_PURPOSE_UNKNOWN 255
@@ -184,6 +188,8 @@ void channel_mark_circid_unusable(channel_t *chan, circid_t id);
void channel_mark_circid_usable(channel_t *chan, circid_t id);
time_t circuit_id_when_marked_unusable_on_channel(circid_t circ_id,
channel_t *chan);
+int circuit_event_status(origin_circuit_t *circ, circuit_status_event_t tp,
+ int reason_code);
void circuit_set_state(circuit_t *circ, uint8_t state);
void circuit_close_all_marked(void);
int32_t circuit_initial_package_window(void);
@@ -213,7 +219,7 @@ void circuit_mark_all_dirty_circs_as_unusable(void);
void circuit_synchronize_written_or_bandwidth(const circuit_t *c,
circuit_channel_direction_t dir);
MOCK_DECL(void, circuit_mark_for_close_, (circuit_t *circ, int reason,
- int line, const char *file));
+ int line, const char *cfile));
int circuit_get_cpath_len(origin_circuit_t *circ);
int circuit_get_cpath_opened_len(const origin_circuit_t *);
void circuit_clear_cpath(origin_circuit_t *circ);
@@ -225,7 +231,6 @@ int circuit_count_pending_on_channel(channel_t *chan);
#define circuit_mark_for_close(c, reason) \
circuit_mark_for_close_((c), (reason), __LINE__, SHORT_FILE__)
-void assert_cpath_layer_ok(const crypt_path_t *cp);
MOCK_DECL(void, assert_circuit_ok,(const circuit_t *c));
void circuit_free_all(void);
void circuits_handle_oom(size_t current_allocation);
@@ -238,6 +243,11 @@ MOCK_DECL(void, channel_note_destroy_not_pending,
smartlist_t *circuit_find_circuits_to_upgrade_from_guard_wait(void);
+/* Declare the handle helpers */
+HANDLE_DECL(circuit, circuit_t, )
+#define circuit_handle_free(h) \
+ FREE_AND_NULL(circuit_handle_t, circuit_handle_free_, (h))
+
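The handle declaration above provides a weak reference to a circuit that stays valid to query even after the circuit is freed (circuit_free_() clears outstanding handles via circuit_handles_clear()). A rough usage sketch follows; it assumes Tor's headers, and the accessor names circuit_handle_new() and circuit_handle_get() are the ones HANDLE_DECL/HANDLE_IMPL are expected to generate, so treat them as assumptions.

/* Sketch: hold a weak reference to a circuit across an interval in which
 * the circuit might be freed. */
static void
example_weak_circuit_ref(circuit_t *circ)
{
  circuit_handle_t *weakref = circuit_handle_new(circ);

  /* ... time passes; if circ is freed meanwhile, circuit_free_() clears
   * the handle via circuit_handles_clear() ... */

  circuit_t *maybe_circ = circuit_handle_get(weakref);
  if (maybe_circ) {
    /* Still alive: safe to dereference. */
  }
  circuit_handle_free(weakref);
}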
#ifdef CIRCUITLIST_PRIVATE
STATIC void circuit_free_(circuit_t *circ);
#define circuit_free(circ) FREE_AND_NULL(circuit_t, circuit_free_, (circ))
diff --git a/src/core/or/circuitmux.c b/src/core/or/circuitmux.c
index e7309553c4..be54ae6ec6 100644
--- a/src/core/or/circuitmux.c
+++ b/src/core/or/circuitmux.c
@@ -1,4 +1,4 @@
-/* * Copyright (c) 2012-2019, The Tor Project, Inc. */
+/* * Copyright (c) 2012-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -69,25 +69,21 @@
* made to attach all existing circuits to the new policy.
**/
+#define CIRCUITMUX_PRIVATE
+
#include "core/or/or.h"
#include "core/or/channel.h"
#include "core/or/circuitlist.h"
#include "core/or/circuitmux.h"
#include "core/or/relay.h"
-#include "core/or/cell_queue_st.h"
-#include "core/or/destroy_cell_queue_st.h"
#include "core/or/or_circuit_st.h"
-/*
- * Private typedefs for circuitmux.c
- */
+#include "lib/crypt_ops/crypto_util.h"
/*
- * Map of muxinfos for circuitmux_t to use; struct is defined below (name
- * of struct must match HT_HEAD line).
+ * Private typedefs for circuitmux.c
*/
-typedef struct chanid_circid_muxinfo_map chanid_circid_muxinfo_map_t;
/*
* Hash table entry (yeah, calling it chanid_circid_muxinfo_s seems to
@@ -100,57 +96,14 @@ typedef struct chanid_circid_muxinfo_t chanid_circid_muxinfo_t;
* a count of queued cells.
*/
-typedef struct circuit_muxinfo_s circuit_muxinfo_t;
-
-/*
- * Structures for circuitmux.c
- */
-
-struct circuitmux_s {
- /* Keep count of attached, active circuits */
- unsigned int n_circuits, n_active_circuits;
-
- /* Total number of queued cells on all circuits */
- unsigned int n_cells;
-
- /*
- * Map from (channel ID, circuit ID) pairs to circuit_muxinfo_t
- */
- chanid_circid_muxinfo_map_t *chanid_circid_map;
-
- /** List of queued destroy cells */
- destroy_cell_queue_t destroy_cell_queue;
- /** Boolean: True iff the last cell to circuitmux_get_first_active_circuit
- * returned the destroy queue. Used to force alternation between
- * destroy/non-destroy cells.
- *
- * XXXX There is no reason to think that alternating is a particularly good
- * approach -- it's just designed to prevent destroys from starving other
- * cells completely.
- */
- unsigned int last_cell_was_destroy : 1;
- /** Destroy counter: increment this when a destroy gets queued, decrement
- * when we unqueue it, so we can test to make sure they don't starve.
- */
- int64_t destroy_ctr;
-
- /*
- * Circuitmux policy; if this is non-NULL, it can override the built-
- * in round-robin active circuits behavior. This is how EWMA works in
- * the new circuitmux_t world.
- */
- const circuitmux_policy_t *policy;
-
- /* Policy-specific data */
- circuitmux_policy_data_t *policy_data;
-};
+typedef struct circuit_muxinfo_t circuit_muxinfo_t;
/*
* This struct holds whatever we want to store per attached circuit on a
* circuitmux_t; right now, just the count of queued cells and the direction.
*/
-struct circuit_muxinfo_s {
+struct circuit_muxinfo_t {
/* Count of cells on this circuit at last update */
unsigned int cell_count;
/* Direction of flow */
@@ -222,15 +175,12 @@ chanid_circid_entry_hash(chanid_circid_muxinfo_t *a)
return (unsigned) siphash24g(data, sizeof(data));
}
-/* Declare the struct chanid_circid_muxinfo_map type */
-HT_HEAD(chanid_circid_muxinfo_map, chanid_circid_muxinfo_t);
-
/* Emit a bunch of hash table stuff */
HT_PROTOTYPE(chanid_circid_muxinfo_map, chanid_circid_muxinfo_t, node,
- chanid_circid_entry_hash, chanid_circid_entries_eq)
+ chanid_circid_entry_hash, chanid_circid_entries_eq);
HT_GENERATE2(chanid_circid_muxinfo_map, chanid_circid_muxinfo_t, node,
chanid_circid_entry_hash, chanid_circid_entries_eq, 0.6,
- tor_reallocarray_, tor_free_)
+ tor_reallocarray_, tor_free_);
/*
* Circuitmux alloc/free functions
@@ -295,9 +245,6 @@ circuitmux_detach_all_circuits(circuitmux_t *cmux, smartlist_t *detached_out)
circuitmux_make_circuit_inactive(cmux, circ);
}
- /* Clear n_mux */
- circ->n_mux = NULL;
-
if (detached_out)
smartlist_add(detached_out, circ);
} else if (circ->magic == OR_CIRCUIT_MAGIC) {
@@ -310,12 +257,6 @@ circuitmux_detach_all_circuits(circuitmux_t *cmux, smartlist_t *detached_out)
circuitmux_make_circuit_inactive(cmux, circ);
}
- /*
- * It has a sensible p_chan and direction == CELL_DIRECTION_IN,
- * so clear p_mux.
- */
- TO_OR_CIRCUIT(circ)->p_mux = NULL;
-
if (detached_out)
smartlist_add(detached_out, circ);
} else {
@@ -837,18 +778,14 @@ circuitmux_attach_circuit,(circuitmux_t *cmux, circuit_t *circ,
*/
log_info(LD_CIRC,
"Circuit %u on channel %"PRIu64 " was already attached to "
- "cmux %p (trying to attach to %p)",
+ "(trying to attach to %p)",
(unsigned)circ_id, (channel_id),
- ((direction == CELL_DIRECTION_OUT) ?
- circ->n_mux : TO_OR_CIRCUIT(circ)->p_mux),
cmux);
/*
* The mux pointer on this circuit and the direction in result should
* match; otherwise assert.
*/
- if (direction == CELL_DIRECTION_OUT) tor_assert(circ->n_mux == cmux);
- else tor_assert(TO_OR_CIRCUIT(circ)->p_mux == cmux);
tor_assert(hashent->muxinfo.direction == direction);
/*
@@ -873,13 +810,6 @@ circuitmux_attach_circuit,(circuitmux_t *cmux, circuit_t *circ,
"Attaching circuit %u on channel %"PRIu64 " to cmux %p",
(unsigned)circ_id, (channel_id), cmux);
- /*
- * Assert that the circuit doesn't already have a mux for this
- * direction.
- */
- if (direction == CELL_DIRECTION_OUT) tor_assert(circ->n_mux == NULL);
- else tor_assert(TO_OR_CIRCUIT(circ)->p_mux == NULL);
-
/* Insert it in the map */
hashent = tor_malloc_zero(sizeof(*hashent));
hashent->chan_id = channel_id;
@@ -903,10 +833,6 @@ circuitmux_attach_circuit,(circuitmux_t *cmux, circuit_t *circ,
HT_INSERT(chanid_circid_muxinfo_map, cmux->chanid_circid_map,
hashent);
- /* Set the circuit's mux for this direction */
- if (direction == CELL_DIRECTION_OUT) circ->n_mux = cmux;
- else TO_OR_CIRCUIT(circ)->p_mux = cmux;
-
/* Update counters */
++(cmux->n_circuits);
if (cell_count > 0) {
@@ -994,14 +920,14 @@ circuitmux_detach_circuit,(circuitmux_t *cmux, circuit_t *circ))
/* Consistency check: the direction must match the direction searched */
tor_assert(last_searched_direction == hashent->muxinfo.direction);
- /* Clear the circuit's mux for this direction */
- if (last_searched_direction == CELL_DIRECTION_OUT) circ->n_mux = NULL;
- else TO_OR_CIRCUIT(circ)->p_mux = NULL;
/* Now remove it from the map */
HT_REMOVE(chanid_circid_muxinfo_map, cmux->chanid_circid_map, hashent);
- /* Free the hash entry */
+ /* Wipe and free the hash entry */
+ // This isn't sensitive, but we want to make sure we notice if we ever
+ // access it accidentally after the free.
+ memwipe(hashent, 0xef, sizeof(*hashent));
tor_free(hashent);
}
}
diff --git a/src/core/or/circuitmux.h b/src/core/or/circuitmux.h
index 67cd9bcdd8..191ca12e30 100644
--- a/src/core/or/circuitmux.h
+++ b/src/core/or/circuitmux.h
@@ -1,4 +1,4 @@
-/* * Copyright (c) 2012-2019, The Tor Project, Inc. */
+/* * Copyright (c) 2012-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -12,11 +12,11 @@
#include "core/or/or.h"
#include "lib/testsupport/testsupport.h"
-typedef struct circuitmux_policy_s circuitmux_policy_t;
-typedef struct circuitmux_policy_data_s circuitmux_policy_data_t;
-typedef struct circuitmux_policy_circ_data_s circuitmux_policy_circ_data_t;
+typedef struct circuitmux_policy_t circuitmux_policy_t;
+typedef struct circuitmux_policy_data_t circuitmux_policy_data_t;
+typedef struct circuitmux_policy_circ_data_t circuitmux_policy_circ_data_t;
-struct circuitmux_policy_s {
+struct circuitmux_policy_t {
/* Allocate cmux-wide policy-specific data */
circuitmux_policy_data_t * (*alloc_cmux_data)(circuitmux_t *cmux);
/* Free cmux-wide policy-specific data */
@@ -67,7 +67,7 @@ struct circuitmux_policy_s {
* wide data; it just has the magic number in the base struct.
*/
-struct circuitmux_policy_data_s {
+struct circuitmux_policy_data_t {
uint32_t magic;
};
@@ -76,7 +76,7 @@ struct circuitmux_policy_data_s {
* specific data; it just has the magic number in the base struct.
*/
-struct circuitmux_policy_circ_data_s {
+struct circuitmux_policy_circ_data_t {
uint32_t magic;
};
@@ -158,5 +158,61 @@ void circuitmux_mark_destroyed_circids_usable(circuitmux_t *cmux,
MOCK_DECL(int, circuitmux_compare_muxes,
(circuitmux_t *cmux_1, circuitmux_t *cmux_2));
+#ifdef CIRCUITMUX_PRIVATE
+
+#include "core/or/destroy_cell_queue_st.h"
+
+/*
+ * Map of muxinfos for circuitmux_t to use; struct is defined below (name
+ * of struct must match HT_HEAD line).
+ */
+typedef HT_HEAD(chanid_circid_muxinfo_map, chanid_circid_muxinfo_t)
+ chanid_circid_muxinfo_map_t;
+
+/*
+ * Structures for circuitmux.c
+ */
+
+struct circuitmux_t {
+ /* Keep count of attached, active circuits */
+ unsigned int n_circuits, n_active_circuits;
+
+ /* Total number of queued cells on all circuits */
+ unsigned int n_cells;
+
+ /*
+ * Map from (channel ID, circuit ID) pairs to circuit_muxinfo_t
+ */
+ chanid_circid_muxinfo_map_t *chanid_circid_map;
+
+ /** List of queued destroy cells */
+ destroy_cell_queue_t destroy_cell_queue;
+ /** Boolean: True iff the last cell to circuitmux_get_first_active_circuit
+ * returned the destroy queue. Used to force alternation between
+ * destroy/non-destroy cells.
+ *
+ * XXXX There is no reason to think that alternating is a particularly good
+ * approach -- it's just designed to prevent destroys from starving other
+ * cells completely.
+ */
+ unsigned int last_cell_was_destroy : 1;
+ /** Destroy counter: increment this when a destroy gets queued, decrement
+ * when we unqueue it, so we can test to make sure they don't starve.
+ */
+ int64_t destroy_ctr;
+
+ /*
+ * Circuitmux policy; if this is non-NULL, it can override the built-
+ * in round-robin active circuits behavior. This is how EWMA works in
+ * the new circuitmux_t world.
+ */
+ const circuitmux_policy_t *policy;
+
+ /* Policy-specific data */
+ circuitmux_policy_data_t *policy_data;
+};
+
+#endif /* defined(CIRCUITMUX_PRIVATE) */
+
#endif /* !defined(TOR_CIRCUITMUX_H) */
diff --git a/src/core/or/circuitmux_ewma.c b/src/core/or/circuitmux_ewma.c
index 3f83c3fd5a..0dcd22e8a7 100644
--- a/src/core/or/circuitmux_ewma.c
+++ b/src/core/or/circuitmux_ewma.c
@@ -1,4 +1,4 @@
-/* * Copyright (c) 2012-2019, The Tor Project, Inc. */
+/* * Copyright (c) 2012-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -38,6 +38,7 @@
#include "core/or/circuitmux.h"
#include "core/or/circuitmux_ewma.h"
#include "lib/crypt_ops/crypto_rand.h"
+#include "lib/crypt_ops/crypto_util.h"
#include "feature/nodelist/networkstatus.h"
#include "app/config/or_options_st.h"
@@ -58,115 +59,6 @@
/** The natural logarithm of 0.5. */
#define LOG_ONEHALF -0.69314718055994529
-/*** EWMA structures ***/
-
-typedef struct cell_ewma_s cell_ewma_t;
-typedef struct ewma_policy_data_s ewma_policy_data_t;
-typedef struct ewma_policy_circ_data_s ewma_policy_circ_data_t;
-
-/**
- * The cell_ewma_t structure keeps track of how many cells a circuit has
- * transferred recently. It keeps an EWMA (exponentially weighted moving
- * average) of the number of cells flushed from the circuit queue onto a
- * connection in channel_flush_from_first_active_circuit().
- */
-
-struct cell_ewma_s {
- /** The last 'tick' at which we recalibrated cell_count.
- *
- * A cell sent at exactly the start of this tick has weight 1.0. Cells sent
- * since the start of this tick have weight greater than 1.0; ones sent
- * earlier have less weight. */
- unsigned int last_adjusted_tick;
- /** The EWMA of the cell count. */
- double cell_count;
- /** True iff this is the cell count for a circuit's previous
- * channel. */
- unsigned int is_for_p_chan : 1;
- /** The position of the circuit within the OR connection's priority
- * queue. */
- int heap_index;
-};
-
-struct ewma_policy_data_s {
- circuitmux_policy_data_t base_;
-
- /**
- * Priority queue of cell_ewma_t for circuits with queued cells waiting
- * for room to free up on the channel that owns this circuitmux. Kept
- * in heap order according to EWMA. This was formerly in channel_t, and
- * in or_connection_t before that.
- */
- smartlist_t *active_circuit_pqueue;
-
- /**
- * The tick on which the cell_ewma_ts in active_circuit_pqueue last had
- * their ewma values rescaled. This was formerly in channel_t, and in
- * or_connection_t before that.
- */
- unsigned int active_circuit_pqueue_last_recalibrated;
-};
-
-struct ewma_policy_circ_data_s {
- circuitmux_policy_circ_data_t base_;
-
- /**
- * The EWMA count for the number of cells flushed from this circuit
- * onto this circuitmux. Used to determine which circuit to flush
- * from next. This was formerly in circuit_t and or_circuit_t.
- */
- cell_ewma_t cell_ewma;
-
- /**
- * Pointer back to the circuit_t this is for; since we're separating
- * out circuit selection policy like this, we can't attach cell_ewma_t
- * to the circuit_t any more, so we can't use SUBTYPE_P directly to a
- * circuit_t like before; instead get it here.
- */
- circuit_t *circ;
-};
-
-#define EWMA_POL_DATA_MAGIC 0x2fd8b16aU
-#define EWMA_POL_CIRC_DATA_MAGIC 0x761e7747U
-
-/*** Downcasts for the above types ***/
-
-static ewma_policy_data_t *
-TO_EWMA_POL_DATA(circuitmux_policy_data_t *);
-
-static ewma_policy_circ_data_t *
-TO_EWMA_POL_CIRC_DATA(circuitmux_policy_circ_data_t *);
-
-/**
- * Downcast a circuitmux_policy_data_t to an ewma_policy_data_t and assert
- * if the cast is impossible.
- */
-
-static inline ewma_policy_data_t *
-TO_EWMA_POL_DATA(circuitmux_policy_data_t *pol)
-{
- if (!pol) return NULL;
- else {
- tor_assert(pol->magic == EWMA_POL_DATA_MAGIC);
- return DOWNCAST(ewma_policy_data_t, pol);
- }
-}
-
-/**
- * Downcast a circuitmux_policy_circ_data_t to an ewma_policy_circ_data_t
- * and assert if the cast is impossible.
- */
-
-static inline ewma_policy_circ_data_t *
-TO_EWMA_POL_CIRC_DATA(circuitmux_policy_circ_data_t *pol)
-{
- if (!pol) return NULL;
- else {
- tor_assert(pol->magic == EWMA_POL_CIRC_DATA_MAGIC);
- return DOWNCAST(ewma_policy_circ_data_t, pol);
- }
-}
-
/*** Static declarations for circuitmux_ewma.c ***/
static void add_cell_ewma(ewma_policy_data_t *pol, cell_ewma_t *ewma);
@@ -295,6 +187,7 @@ ewma_free_cmux_data(circuitmux_t *cmux,
pol = TO_EWMA_POL_DATA(pol_data);
smartlist_free(pol->active_circuit_pqueue);
+ memwipe(pol, 0xda, sizeof(ewma_policy_data_t));
tor_free(pol);
}
@@ -361,7 +254,7 @@ ewma_free_circ_data(circuitmux_t *cmux,
if (!pol_circ_data) return;
cdata = TO_EWMA_POL_CIRC_DATA(pol_circ_data);
-
+ memwipe(cdata, 0xdc, sizeof(ewma_policy_circ_data_t));
tor_free(cdata);
}
@@ -530,7 +423,7 @@ ewma_cmp_cmux(circuitmux_t *cmux_1, circuitmux_policy_data_t *pol_data_1,
/* Pick whichever one has the better best circuit */
return compare_cell_ewma_counts(ce1, ce2);
} else {
- if (ce1 != NULL ) {
+ if (ce1 != NULL) {
/* We only have a circuit on cmux_1, so prefer it */
return -1;
} else if (ce2 != NULL) {
@@ -716,7 +609,7 @@ cmux_ewma_set_options(const or_options_t *options,
/* convert halflife into halflife-per-tick. */
halflife /= EWMA_TICK_LEN;
/* compute per-tick scale factor. */
- ewma_scale_factor = exp( LOG_ONEHALF / halflife );
+ ewma_scale_factor = exp(LOG_ONEHALF / halflife);
log_info(LD_OR,
"Enabled cell_ewma algorithm because of value in %s; "
"scale factor is %f per %d seconds",
diff --git a/src/core/or/circuitmux_ewma.h b/src/core/or/circuitmux_ewma.h
index b45ce1f916..e41cf9e0f0 100644
--- a/src/core/or/circuitmux_ewma.h
+++ b/src/core/or/circuitmux_ewma.h
@@ -1,4 +1,4 @@
-/* * Copyright (c) 2012-2019, The Tor Project, Inc. */
+/* * Copyright (c) 2012-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -22,9 +22,117 @@ void cmux_ewma_set_options(const or_options_t *options,
void circuitmux_ewma_free_all(void);
#ifdef CIRCUITMUX_EWMA_PRIVATE
+
+/*** EWMA structures ***/
+
+typedef struct cell_ewma_t cell_ewma_t;
+typedef struct ewma_policy_data_t ewma_policy_data_t;
+typedef struct ewma_policy_circ_data_t ewma_policy_circ_data_t;
+
+/**
+ * The cell_ewma_t structure keeps track of how many cells a circuit has
+ * transferred recently. It keeps an EWMA (exponentially weighted moving
+ * average) of the number of cells flushed from the circuit queue onto a
+ * connection in channel_flush_from_first_active_circuit().
+ */
+
+struct cell_ewma_t {
+ /** The last 'tick' at which we recalibrated cell_count.
+ *
+ * A cell sent at exactly the start of this tick has weight 1.0. Cells sent
+ * since the start of this tick have weight greater than 1.0; ones sent
+ * earlier have less weight. */
+ unsigned int last_adjusted_tick;
+ /** The EWMA of the cell count. */
+ double cell_count;
+ /** True iff this is the cell count for a circuit's previous
+ * channel. */
+ unsigned int is_for_p_chan : 1;
+ /** The position of the circuit within the OR connection's priority
+ * queue. */
+ int heap_index;
+};
+
+struct ewma_policy_data_t {
+ circuitmux_policy_data_t base_;
+
+ /**
+ * Priority queue of cell_ewma_t for circuits with queued cells waiting
+ * for room to free up on the channel that owns this circuitmux. Kept
+ * in heap order according to EWMA. This was formerly in channel_t, and
+ * in or_connection_t before that.
+ */
+ smartlist_t *active_circuit_pqueue;
+
+ /**
+ * The tick on which the cell_ewma_ts in active_circuit_pqueue last had
+ * their ewma values rescaled. This was formerly in channel_t, and in
+ * or_connection_t before that.
+ */
+ unsigned int active_circuit_pqueue_last_recalibrated;
+};
+
+struct ewma_policy_circ_data_t {
+ circuitmux_policy_circ_data_t base_;
+
+ /**
+ * The EWMA count for the number of cells flushed from this circuit
+ * onto this circuitmux. Used to determine which circuit to flush
+ * from next. This was formerly in circuit_t and or_circuit_t.
+ */
+ cell_ewma_t cell_ewma;
+
+ /**
+ * Pointer back to the circuit_t this is for; since we're separating
+ * out circuit selection policy like this, we can't attach cell_ewma_t
+ * to the circuit_t any more, so we can't use SUBTYPE_P directly to a
+ * circuit_t like before; instead get it here.
+ */
+ circuit_t *circ;
+};
+
+#define EWMA_POL_DATA_MAGIC 0x2fd8b16aU
+#define EWMA_POL_CIRC_DATA_MAGIC 0x761e7747U
+
+/*** Downcasts for the above types ***/
+
+/**
+ * Downcast a circuitmux_policy_data_t to an ewma_policy_data_t and assert
+ * if the cast is impossible.
+ */
+
+static inline ewma_policy_data_t *
+TO_EWMA_POL_DATA(circuitmux_policy_data_t *pol)
+{
+ if (!pol) return NULL;
+ else {
+ tor_assertf(pol->magic == EWMA_POL_DATA_MAGIC,
+ "Mismatch: %"PRIu32" != %"PRIu32,
+ pol->magic, EWMA_POL_DATA_MAGIC);
+ return DOWNCAST(ewma_policy_data_t, pol);
+ }
+}
+
+/**
+ * Downcast a circuitmux_policy_circ_data_t to an ewma_policy_circ_data_t
+ * and assert if the cast is impossible.
+ */
+
+static inline ewma_policy_circ_data_t *
+TO_EWMA_POL_CIRC_DATA(circuitmux_policy_circ_data_t *pol)
+{
+ if (!pol) return NULL;
+ else {
+ tor_assertf(pol->magic == EWMA_POL_CIRC_DATA_MAGIC,
+ "Mismatch: %"PRIu32" != %"PRIu32,
+ pol->magic, EWMA_POL_CIRC_DATA_MAGIC);
+ return DOWNCAST(ewma_policy_circ_data_t, pol);
+ }
+}
+
STATIC unsigned cell_ewma_get_current_tick_and_fraction(double *remainder_out);
STATIC void cell_ewma_initialize_ticks(void);
-#endif
-#endif /* !defined(TOR_CIRCUITMUX_EWMA_H) */
+#endif /* defined(CIRCUITMUX_EWMA_PRIVATE) */
+#endif /* !defined(TOR_CIRCUITMUX_EWMA_H) */
diff --git a/src/core/or/circuitpadding.c b/src/core/or/circuitpadding.c
new file mode 100644
index 0000000000..43f4a31624
--- /dev/null
+++ b/src/core/or/circuitpadding.c
@@ -0,0 +1,3101 @@
+/* Copyright (c) 2017 The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circuitpadding.c
+ * \brief Circuit-level padding implementation
+ *
+ * \details
+ *
+ * This file implements Tor proposal 254 "Padding Negotiation", which is heavily
+ * inspired by the paper "Toward an Efficient Website Fingerprinting Defense"
+ * by M. Juarez, M. Imani, M. Perry, C. Diaz, M. Wright.
+ *
+ * In particular the code in this file describes mechanisms for clients to
+ * negotiate various types of circuit-level padding from relays.
+ *
+ * Each padding type is described by a state machine (circpad_machine_spec_t),
+ * which is also referred to as a "padding machine" in this file. Currently,
+ * these state machines are hardcoded in the source code (e.g. see
+ * circpad_machines_init()), but in the future we will be able to
+ * serialize them in the torrc or the consensus.
+ *
+ * As specified by prop#254, clients can negotiate padding with relays by using
+ * PADDING_NEGOTIATE cells. After successful padding negotiation, padding
+ * machines are assigned to the circuit in their mutable form as a
+ * circpad_machine_runtime_t.
+ *
+ * Each state of a padding state machine can be either:
+ * - A histogram that specifies inter-arrival padding delays.
+ * - Or a parametrized probability distribution that specifies inter-arrival
+ * delays (see circpad_distribution_type_t).
+ *
+ * Padding machines start from the START state and finish with the END
+ * state. They can transition between states using the events in
+ * circpad_event_t.
+ *
+ * When a padding machine reaches the END state, it gets wiped from the circuit
+ * so that other padding machines can take over if needed (see
+ * circpad_machine_spec_transitioned_to_end()).
+ *
+ ****************************
+ * General notes:
+ *
+ * All used machines should be heap allocated and placed into
+ * origin_padding_machines/relay_padding_machines so that they get correctly
+ * cleaned up by the circpad_free_all() function.
+ **/
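For orientation, here is a rough sketch of what wiring up a client-side machine alongside the initializers in this file might look like. The helper and field names (circpad_machine_states_init(), circpad_register_padding_machine(), is_origin_side, target_hopnum, the states[].next_state[] table) are recalled from the padding headers and should be treated as assumptions, not a definitive recipe; a real machine would also configure a histogram or distribution for its states.

/* Rough sketch only: a client machine that leaves START once it sees real
 * (non-padding) traffic arrive.  Names are assumptions as noted above. */
static void
example_client_machine_init(void)
{
  circpad_machine_spec_t *m = tor_malloc_zero(sizeof(circpad_machine_spec_t));

  m->name = "example_client";
  m->is_origin_side = 1;   /* negotiated from the client side */
  m->target_hopnum = 2;    /* negotiate padding with the second (middle) hop */

  /* Allocate the START and BURST states. */
  circpad_machine_states_init(m, 2);

  /* Transition out of START on the first inbound non-padding cell. */
  m->states[CIRCPAD_STATE_START].
      next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;

  /* Register it so circpad_free_all() can clean it up, per the note above. */
  m->machine_num = smartlist_len(origin_padding_machines);
  circpad_register_padding_machine(m, origin_padding_machines);
}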
+
+#define CIRCUITPADDING_PRIVATE
+
+#include <math.h>
+#include "lib/math/fp.h"
+#include "lib/math/prob_distr.h"
+#include "core/or/or.h"
+#include "core/or/circuitpadding.h"
+#include "core/or/circuitpadding_machines.h"
+#include "core/or/circuitlist.h"
+#include "core/or/circuituse.h"
+#include "core/mainloop/netstatus.h"
+#include "core/or/relay.h"
+#include "feature/stats/rephist.h"
+#include "feature/nodelist/networkstatus.h"
+
+#include "core/or/channel.h"
+
+#include "lib/time/compat_time.h"
+#include "lib/defs/time.h"
+#include "lib/crypt_ops/crypto_rand.h"
+
+#include "core/or/crypt_path_st.h"
+#include "core/or/circuit_st.h"
+#include "core/or/origin_circuit_st.h"
+#include "core/or/or_circuit_st.h"
+#include "feature/nodelist/routerstatus_st.h"
+#include "feature/nodelist/node_st.h"
+#include "core/or/cell_st.h"
+#include "core/or/extend_info_st.h"
+#include "core/crypto/relay_crypto.h"
+#include "feature/nodelist/nodelist.h"
+
+#include "app/config/config.h"
+
+static inline circpad_circuit_state_t circpad_circuit_state(
+ origin_circuit_t *circ);
+static void circpad_setup_machine_on_circ(circuit_t *on_circ,
+ const circpad_machine_spec_t *machine);
+static double circpad_distribution_sample(circpad_distribution_t dist);
+
+static inline void circpad_machine_update_state_length_for_nonpadding(
+ circpad_machine_runtime_t *mi);
+
+/** Cached consensus params */
+static uint8_t circpad_padding_disabled;
+static uint8_t circpad_padding_reduced;
+static uint8_t circpad_global_max_padding_percent;
+static uint16_t circpad_global_allowed_cells;
+static uint16_t circpad_max_circ_queued_cells;
+
+/** Global cell counts, for rate limiting */
+static uint64_t circpad_global_padding_sent;
+static uint64_t circpad_global_nonpadding_sent;
+
+/** This is the list of circpad_machine_spec_t's parsed from consensus and
+ * torrc that have origin_side == 1 (ie: are for client side).
+ *
+ * The machines in this smartlist are considered immutable and they are used
+ * as-is by circuits, so they must not change or be deallocated for as long
+ * as Tor is running and circuits are alive. */
+STATIC smartlist_t *origin_padding_machines = NULL;
+
+/** This is the list of circpad_machine_spec_t's parsed from consensus and
+ * torrc that have origin_side == 0 (ie: are for relay side).
+ *
+ * The machines in this smartlist are considered immutable and they are used
+ * as-is by circuits, so they must not change or be deallocated for as long
+ * as Tor is running and circuits are alive. */
+STATIC smartlist_t *relay_padding_machines = NULL;
+
+#ifndef COCCI
+/** Loop over the current padding state machines using <b>loop_var</b> as the
+ * loop variable. */
+#define FOR_EACH_CIRCUIT_MACHINE_BEGIN(loop_var) \
+ STMT_BEGIN \
+ for (int loop_var = 0; loop_var < CIRCPAD_MAX_MACHINES; loop_var++) {
+#define FOR_EACH_CIRCUIT_MACHINE_END } STMT_END ;
+
+/** Loop over the current active padding state machines using <b>loop_var</b>
+ * as the loop variable. If a machine is not active, skip it. */
+#define FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(loop_var, circ) \
+ FOR_EACH_CIRCUIT_MACHINE_BEGIN(loop_var) \
+ if (!(circ)->padding_info[loop_var]) \
+ continue;
+#define FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END } STMT_END ;
+#endif /* !defined(COCCI) */
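The iteration macros above expand to a bounded for-loop over CIRCPAD_MAX_MACHINES, with the ACTIVE variant skipping slots that have no runtime info. A minimal usage fragment (assuming a circuit_t *circ in scope, as in the functions below):

FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, circ) {
  log_info(LD_CIRC, "Padding machine %d is active on this circuit", i);
} FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;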
+
+/**
+ * Free the machineinfo at an index
+ */
+static void
+circpad_circuit_machineinfo_free_idx(circuit_t *circ, int idx)
+{
+ if (circ->padding_info[idx]) {
+ log_fn(LOG_INFO,LD_CIRC, "Freeing padding info idx %d on circuit %u (%d)",
+ idx, CIRCUIT_IS_ORIGIN(circ) ?
+ TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0,
+ circ->purpose);
+
+ tor_free(circ->padding_info[idx]->histogram);
+ timer_free(circ->padding_info[idx]->padding_timer);
+ tor_free(circ->padding_info[idx]);
+ }
+}
+
+/**
+ * Return true if circpad has decided to hold the circuit open for additional
+ * padding. This function is used to take and retain ownership of certain
+ * types of circuits that have padding machines on them, that have been passed
+ * to circuit_mark_for_close().
+ *
+ * circuit_mark_for_close() calls this function to ask circpad if any padding
+ * machines want to keep the circuit open longer to pad.
+ *
+ * Any non-measurement circuit that was closed for a normal, non-error reason
+ * code may be held open for up to CIRCPAD_DELAY_INFINITE microseconds between
+ * network-driven cell events.
+ *
+ * After CIRCPAD_DELAY_INFINITE microseconds of silence on a circuit, this
+ * function will no longer hold it open (it will return 0 regardless of
+ * what the machines ask for, and thus circuit_expire_old_circuits_clientside()
+ * will close the circuit after roughly 1.25hr of idle time, maximum,
+ * regardless of the padding machine state).
+ */
+int
+circpad_marked_circuit_for_padding(circuit_t *circ, int reason)
+{
+ /* If the circuit purpose is measurement or path bias, don't
+ * hold it open */
+ if (circ->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING ||
+ circ->purpose == CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT) {
+ return 0;
+ }
+
+ /* If the circuit is closed for any reason other than these three valid,
+ * client-side close reasons, do not try to keep it open. It is probably
+ * damaged or unusable. Note this is OK with vanguards because
+ * controller-closed circuits have REASON=REQUESTED, so vanguards-closed
+ * circuits will not be held open (we want them to close ASAP). */
+ if (!(reason == END_CIRC_REASON_NONE ||
+ reason == END_CIRC_REASON_FINISHED ||
+ reason == END_CIRC_REASON_IP_NOW_REDUNDANT)) {
+ return 0;
+ }
+
+ FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, circ) {
+ circpad_machine_runtime_t *mi = circ->padding_info[i];
+ if (!mi) {
+ continue; // No padding runtime info; check next machine
+ }
+
+ const circpad_state_t *state = circpad_machine_current_state(mi);
+
+ /* If we're in END state (NULL here), then check next machine */
+ if (!state) {
+ continue; // check next machine
+ }
+
+ /* If the machine does not want to control the circuit close itself, then
+ * check the next machine */
+ if (!circ->padding_machine[i]->manage_circ_lifetime) {
+ continue; // check next machine
+ }
+
+ /* If the machine has reached the END state, we can close. Check next
+ * machine. */
+ if (mi->current_state == CIRCPAD_STATE_END) {
+ continue; // check next machine
+ }
+
+ log_info(LD_CIRC, "Circuit %d is not marked for close because of a "
+ "pending padding machine in index %d.",
+ CIRCUIT_IS_ORIGIN(circ) ?
+ TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0, i);
+
+ /* If the machine has had no network events at all within the
+ * last circpad_delay_t timespan, it's in some deadlock state.
+ * Tell circuit_mark_for_close() that we don't own it anymore.
+ * This will allow circuit_expire_old_circuits_clientside() to
+ * close it.
+ */
+ if (circ->padding_info[i]->last_cell_time_sec +
+ (time_t)CIRCPAD_DELAY_MAX_SECS < approx_time()) {
+ log_notice(LD_BUG, "Circuit %d was not marked for close because of a "
+ "pending padding machine in index %d for over an hour. "
+ "Circuit is a %s",
+ CIRCUIT_IS_ORIGIN(circ) ?
+ TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0,
+ i, circuit_purpose_to_string(circ->purpose));
+
+ return 0; // abort timer reached; mark the circuit for close now
+ }
+
+ /* If we weren't marked dirty yet, let's pretend we're dirty now.
+ * ("Dirty" means that a circuit has been used for application traffic
+   * by Tor. Dirty circuits have different expiry times, and are not
+   * considered in counts of built circuits, etc.) By claiming that we're
+ * dirty, the rest of Tor will make decisions as if we were actually
+ * used by application data.
+ *
+ * This is most important for circuit_expire_old_circuits_clientside(),
+ * where we want that function to expire us after the padding machine
+ * has shut down, but using the MaxCircuitDirtiness timer instead of
+ * the idle circuit timer (again, we want this because we're not
+ * supposed to look idle to Guard nodes that can see our lifespan). */
+ if (!circ->timestamp_dirty)
+ circ->timestamp_dirty = approx_time();
+
+ /* Take ownership of the circuit */
+ circuit_change_purpose(circ, CIRCUIT_PURPOSE_C_CIRCUIT_PADDING);
+
+ return 1;
+ } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
+
+ return 0; // No machine wanted to keep the circuit open; mark for close
+}
+
+/**
+ * Free all the machineinfos in <b>circ</b> that match <b>machine_num</b>.
+ *
+ * Returns true if any machineinfos with that number were freed.
+ * False otherwise. */
+static int
+free_circ_machineinfos_with_machine_num(circuit_t *circ, int machine_num)
+{
+ int found = 0;
+ FOR_EACH_CIRCUIT_MACHINE_BEGIN(i) {
+ if (circ->padding_machine[i] &&
+ circ->padding_machine[i]->machine_num == machine_num) {
+ circpad_circuit_machineinfo_free_idx(circ, i);
+ circ->padding_machine[i] = NULL;
+ found = 1;
+ }
+ } FOR_EACH_CIRCUIT_MACHINE_END;
+
+ return found;
+}
+
+/**
+ * Free all the mutable machineinfos associated with circuit <b>circ</b>.
+ */
+void
+circpad_circuit_free_all_machineinfos(circuit_t *circ)
+{
+ FOR_EACH_CIRCUIT_MACHINE_BEGIN(i) {
+ circpad_circuit_machineinfo_free_idx(circ, i);
+ } FOR_EACH_CIRCUIT_MACHINE_END;
+}
+
+/**
+ * Allocate a new mutable machineinfo structure.
+ */
+STATIC circpad_machine_runtime_t *
+circpad_circuit_machineinfo_new(circuit_t *on_circ, int machine_index)
+{
+ circpad_machine_runtime_t *mi =
+ tor_malloc_zero(sizeof(circpad_machine_runtime_t));
+ mi->machine_index = machine_index;
+ mi->on_circ = on_circ;
+ mi->last_cell_time_sec = approx_time();
+
+ return mi;
+}
+
+/**
+ * Return the circpad_state_t for the current state based on the
+ * mutable info.
+ *
+ * This function returns NULL when the machine is in the end state or in an
+ * invalid state.
+ */
+STATIC const circpad_state_t *
+circpad_machine_current_state(const circpad_machine_runtime_t *mi)
+{
+ const circpad_machine_spec_t *machine = CIRCPAD_GET_MACHINE(mi);
+
+ if (mi->current_state == CIRCPAD_STATE_END) {
+ return NULL;
+ } else if (BUG(mi->current_state >= machine->num_states)) {
+ log_fn(LOG_WARN,LD_CIRC,
+ "Invalid circuit padding state %d",
+ mi->current_state);
+
+ return NULL;
+ }
+
+ return &machine->states[mi->current_state];
+}
+
+/**
+ * Get the lower bound of a histogram bin.
+ *
+ * You can obtain the upper bound using histogram_get_bin_upper_bound().
+ *
+ * This function can also be called with 'bin' set to a value equal to or
+ * greater than histogram_len, in which case the infinity bin is chosen and
+ * CIRCPAD_DELAY_INFINITE is returned.
+ */
+STATIC circpad_delay_t
+circpad_histogram_bin_to_usec(const circpad_machine_runtime_t *mi,
+ circpad_hist_index_t bin)
+{
+ const circpad_state_t *state = circpad_machine_current_state(mi);
+ circpad_delay_t rtt_add_usec = 0;
+
+ /* Our state should have been checked to be non-null by the caller
+ * (circpad_machine_remove_token()) */
+ if (BUG(state == NULL)) {
+ return CIRCPAD_DELAY_INFINITE;
+ }
+
+ /* The infinity bin has an upper bound of infinity, so make sure we return
+ * that if they ask for it. */
+ if (bin > CIRCPAD_INFINITY_BIN(state)) {
+ return CIRCPAD_DELAY_INFINITE;
+ }
+
+ /* If we are using an RTT estimate, consider it as well. */
+ if (state->use_rtt_estimate) {
+ rtt_add_usec = mi->rtt_estimate_usec;
+ }
+
+ return state->histogram_edges[bin] + rtt_add_usec;
+}
+
+/**
+ * Like circpad_histogram_bin_to_usec() but return the upper bound of bin.
+ * (The upper bound is included in the bin.)
+ */
+STATIC circpad_delay_t
+histogram_get_bin_upper_bound(const circpad_machine_runtime_t *mi,
+ circpad_hist_index_t bin)
+{
+ return circpad_histogram_bin_to_usec(mi, bin+1) - 1;
+}
+
+/** Return the midpoint of the histogram bin <b>bin_index</b>. */
+static circpad_delay_t
+circpad_get_histogram_bin_midpoint(const circpad_machine_runtime_t *mi,
+ int bin_index)
+{
+ circpad_delay_t left_bound = circpad_histogram_bin_to_usec(mi, bin_index);
+ circpad_delay_t right_bound = histogram_get_bin_upper_bound(mi, bin_index);
+
+ return left_bound + (right_bound - left_bound)/2;
+}
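+
+/* Illustrative example (hypothetical edge values, not from any real
+ * machine): suppose a state has histogram_edges = { 0, 1000, 5000, ... }
+ * and use_rtt_estimate is off. Then for bin 1:
+ *   lower bound = circpad_histogram_bin_to_usec(mi, 1)       = 1000 usec
+ *   upper bound = histogram_get_bin_upper_bound(mi, 1)
+ *               = circpad_histogram_bin_to_usec(mi, 2) - 1   = 4999 usec
+ *   midpoint    = 1000 + (4999 - 1000)/2                     = 2999 usec
+ * With use_rtt_estimate set, rtt_estimate_usec would be added to each edge
+ * before these computations. */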
+
+/**
+ * Return the bin that contains the usec argument.
+ * "Contains" is defined as us in [lower, upper).
+ *
+ * This function will never return the infinity bin (histogram_len-1), in order
+ * to simplify the rest of the code, so if a usec is provided that falls above
+ * the highest non-infinity bin, that bin index will be returned.
+ */
+STATIC circpad_hist_index_t
+circpad_histogram_usec_to_bin(const circpad_machine_runtime_t *mi,
+ circpad_delay_t usec)
+{
+ const circpad_state_t *state = circpad_machine_current_state(mi);
+ circpad_delay_t rtt_add_usec = 0;
+ circpad_hist_index_t bin;
+
+ /* Our state should have been checked to be non-null by the caller
+ * (circpad_machine_remove_token()) */
+ if (BUG(state == NULL)) {
+ return 0;
+ }
+
+ /* If we are using an RTT estimate, consider it as well. */
+ if (state->use_rtt_estimate) {
+ rtt_add_usec = mi->rtt_estimate_usec;
+ }
+
+ /* Walk through the bins and check the upper bound of each bin, if 'usec' is
+ * less-or-equal to that, return that bin. If rtt_estimate is enabled then
+ * add that to the upper bound of each bin.
+ *
+ * We don't want to return the infinity bin here, so don't go there. */
+ for (bin = 0 ; bin < CIRCPAD_INFINITY_BIN(state) ; bin++) {
+ if (usec <= histogram_get_bin_upper_bound(mi, bin) + rtt_add_usec) {
+ return bin;
+ }
+ }
+
+ /* We don't want to return the infinity bin here, so if we still didn't find
+ * the right bin, return the highest non-infinity bin */
+ return CIRCPAD_INFINITY_BIN(state)-1;
+}
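+
+/* Illustrative example (hypothetical values): with histogram_edges =
+ * { 0, 1000, 5000 } and histogram_len = 3 (so bin 2 is the infinity bin),
+ * and no RTT estimate in use:
+ *   usec = 500   -> bin 0 (its upper bound is 999)
+ *   usec = 1000  -> bin 1 (falls in [1000, 5000))
+ *   usec = 90000 -> bin 1 as well, since this function never returns the
+ *                   infinity bin and clamps to the highest non-infinity
+ *                   bin instead. */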
+
+/**
+ * Return true if the machine supports token removal.
+ *
+ * Token removal is equivalent to having a mutable histogram in the
+ * circpad_machine_runtime_t mutable info. So while we're at it,
+ * let's assert that everything is consistent between the mutable
+ * runtime and the readonly machine spec.
+ */
+static inline int
+circpad_is_token_removal_supported(circpad_machine_runtime_t *mi)
+{
+ /* No runtime histogram == no token removal */
+ if (mi->histogram == NULL) {
+ /* Machines that don't want token removal are trying to avoid
+ * potentially expensive mallocs, extra memory accesses, and/or
+ * potentially expensive monotime calls. Let's minimize checks
+ * and keep this path fast. */
+ tor_assert_nonfatal(mi->histogram_len == 0);
+ return 0;
+ } else {
+ /* Machines that do want token removal are less sensitive to performance.
+ * Let's spend some time to check that our state is consistent and sane */
+ const circpad_state_t *state = circpad_machine_current_state(mi);
+ if (BUG(!state)) {
+ return 1;
+ }
+ tor_assert_nonfatal(state->token_removal != CIRCPAD_TOKEN_REMOVAL_NONE);
+ tor_assert_nonfatal(state->histogram_len == mi->histogram_len);
+ tor_assert_nonfatal(mi->histogram_len != 0);
+ return 1;
+ }
+
+ tor_assert_nonfatal_unreached();
+ return 0;
+}
+
+/**
+ * This function frees any token bins allocated from a previous state, and
+ * (if the new state uses token removal) refills the mutable runtime
+ * histogram from the new state's immutable histogram.
+ *
+ * Called after a state transition, or if the bins are empty.
+ */
+STATIC void
+circpad_machine_setup_tokens(circpad_machine_runtime_t *mi)
+{
+ const circpad_state_t *state = circpad_machine_current_state(mi);
+
+ /* If this state doesn't exist, or doesn't have token removal,
+ * free any previous state's runtime histogram, and bail.
+ *
+ * If we don't have a token removal strategy, we also don't need a runtime
+ * histogram and we rely on the immutable one in machine_spec_t. */
+ if (!state || state->token_removal == CIRCPAD_TOKEN_REMOVAL_NONE) {
+ if (mi->histogram) {
+ tor_free(mi->histogram);
+ mi->histogram = NULL;
+ mi->histogram_len = 0;
+ }
+ return;
+ }
+
+ /* Try to avoid re-mallocing if we don't really need to */
+ if (!mi->histogram || (mi->histogram
+ && mi->histogram_len != state->histogram_len)) {
+ tor_free(mi->histogram); // null ok
+ mi->histogram = tor_malloc_zero(sizeof(circpad_hist_token_t)
+ *state->histogram_len);
+ }
+ mi->histogram_len = state->histogram_len;
+
+ memcpy(mi->histogram, state->histogram,
+ sizeof(circpad_hist_token_t)*state->histogram_len);
+}
+
+/**
+ * Choose a length for this state (in cells), if specified.
+ */
+static void
+circpad_choose_state_length(circpad_machine_runtime_t *mi)
+{
+ const circpad_state_t *state = circpad_machine_current_state(mi);
+ double length;
+
+ if (!state || state->length_dist.type == CIRCPAD_DIST_NONE) {
+ mi->state_length = CIRCPAD_STATE_LENGTH_INFINITE;
+ return;
+ }
+
+ length = circpad_distribution_sample(state->length_dist);
+ length = MAX(0, length);
+ length += state->start_length;
+
+ if (state->max_length) {
+ length = MIN(length, state->max_length);
+ }
+
+ mi->state_length = clamp_double_to_int64(length);
+
+ log_info(LD_CIRC, "State length sampled to %"PRIu64" for circuit %u",
+ mi->state_length, CIRCUIT_IS_ORIGIN(mi->on_circ) ?
+ TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0);
+}
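+
+/* Illustrative example (hypothetical machine parameters): if the length
+ * distribution sample comes back as 3.7, start_length is 5 and max_length
+ * is 7, then length = MAX(0, 3.7) + 5 = 8.7, which MIN() caps at 7, so
+ * state_length becomes 7 cells for this visit to the state. */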
+
+/**
+ * Sample a value from our iat_dist, and clamp it safely
+ * to circpad_delay_t.
+ *
+ * Before returning, add <b>delay_shift</b> (can be zero) to the sampled value.
+ */
+static circpad_delay_t
+circpad_distribution_sample_iat_delay(const circpad_state_t *state,
+ circpad_delay_t delay_shift)
+{
+ double val = circpad_distribution_sample(state->iat_dist);
+ /* These comparisons are safe, because the output is in the range
+ * [0, 2**32), and double has a precision of 53 bits. */
+ /* We want a positive sample value */
+ val = MAX(0, val);
+ /* Respect the maximum sample setting */
+ val = MIN(val, state->dist_max_sample_usec);
+
+ /* Now apply the shift:
+ * This addition is exact: val is at most 2**32-1, delay_shift is at most
+ * 2**32-1, and doubles have a precision of 53 bits. */
+ val += delay_shift;
+
+ /* Clamp the distribution at infinite delay val */
+ return (circpad_delay_t)MIN(tor_llround(val), CIRCPAD_DELAY_INFINITE);
+}
+
+/**
+ * Sample an expected time-until-next-packet delay from the histogram or
+ * probability distribution.
+ *
+ * A bin of the histogram is chosen with probability proportional to the number
+ * of tokens in each bin, and then a time value is chosen uniformly from that
+ * bin's [start,end) time range.
+ */
+STATIC circpad_delay_t
+circpad_machine_sample_delay(circpad_machine_runtime_t *mi)
+{
+ const circpad_state_t *state = circpad_machine_current_state(mi);
+ const circpad_hist_token_t *histogram = NULL;
+ circpad_hist_index_t curr_bin = 0;
+ circpad_delay_t bin_start, bin_end;
+  /* These three must all be wider than circpad_hist_token_t, because
+   * we sum several circpad_hist_token_t values across the histogram */
+ uint64_t curr_weight = 0;
+ uint64_t histogram_total_tokens = 0;
+ uint64_t bin_choice;
+
+ tor_assert(state);
+
+ if (state->iat_dist.type != CIRCPAD_DIST_NONE) {
+ /* Sample from a fixed IAT distribution and return */
+ circpad_delay_t iat_delay_shift = state->use_rtt_estimate ?
+ mi->rtt_estimate_usec + state->dist_added_shift_usec :
+ state->dist_added_shift_usec;
+ return circpad_distribution_sample_iat_delay(state, iat_delay_shift);
+ } else if (circpad_is_token_removal_supported(mi)) {
+ histogram = mi->histogram;
+ for (circpad_hist_index_t b = 0; b < state->histogram_len; b++)
+ histogram_total_tokens += histogram[b];
+ } else {
+ /* We have a histogram, but it's immutable */
+ histogram = state->histogram;
+ histogram_total_tokens = state->histogram_total_tokens;
+ }
+
+ /* If we are out of tokens, don't schedule padding. */
+ if (!histogram_total_tokens) {
+ return CIRCPAD_DELAY_INFINITE;
+ }
+
+ bin_choice = crypto_fast_rng_get_uint64(get_thread_fast_rng(),
+ histogram_total_tokens);
+
+ /* Skip all the initial zero bins */
+ while (!histogram[curr_bin]) {
+ curr_bin++;
+ }
+ curr_weight = histogram[curr_bin];
+
+ // TODO: This is not constant-time. Pretty sure we don't
+ // really need it to be, though.
+ while (curr_weight < bin_choice) {
+ curr_bin++;
+ /* It should be impossible to run past the end of the histogram */
+ if (BUG(curr_bin >= state->histogram_len)) {
+ return CIRCPAD_DELAY_INFINITE;
+ }
+ curr_weight += histogram[curr_bin];
+ }
+
+ /* Do some basic checking of the current bin we are in */
+ if (BUG(curr_bin >= state->histogram_len) ||
+ BUG(histogram[curr_bin] == 0)) {
+ return CIRCPAD_DELAY_INFINITE;
+ }
+
+ // Store this index to remove the token upon callback.
+ if (state->token_removal != CIRCPAD_TOKEN_REMOVAL_NONE) {
+ mi->chosen_bin = curr_bin;
+ }
+
+ if (curr_bin >= CIRCPAD_INFINITY_BIN(state)) {
+ if (state->token_removal != CIRCPAD_TOKEN_REMOVAL_NONE &&
+ mi->histogram[curr_bin] > 0) {
+ mi->histogram[curr_bin]--;
+ }
+
+ // Infinity: Don't send a padding packet. Wait for a real packet
+ // and then see if our bins are empty or what else we should do.
+ return CIRCPAD_DELAY_INFINITE;
+ }
+
+ tor_assert(curr_bin < CIRCPAD_INFINITY_BIN(state));
+
+ bin_start = circpad_histogram_bin_to_usec(mi, curr_bin);
+  /* We don't need to subtract 1 from the upper bound because the random range
+ * function below samples from [bin_start, bin_end) */
+ bin_end = circpad_histogram_bin_to_usec(mi, curr_bin+1);
+
+ /* Bin edges are monotonically increasing so this is a bug. Handle it. */
+ if (BUG(bin_start >= bin_end)) {
+ return bin_start;
+ }
+
+ return (circpad_delay_t)crypto_fast_rng_uint64_range(get_thread_fast_rng(),
+ bin_start, bin_end);
+}
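+
+/* Illustrative walk-through of the weighted bin choice above (hypothetical
+ * token counts): with a mutable histogram of { 0, 2, 5, 0 } the total is 7,
+ * so bin_choice is drawn from [0, 7). The walk skips the empty bin 0, then:
+ *   bin_choice 0..2 stops at bin 1 (cumulative weight 2),
+ *   bin_choice 3..6 stops at bin 2 (cumulative weight 7).
+ * The returned delay is then drawn uniformly from the chosen bin's
+ * [bin_start, bin_end) range. */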
+
+/**
+ * Sample a value from the specified probability distribution.
+ *
+ * Uses functions from src/lib/math/prob_distr.c.
+ */
+static double
+circpad_distribution_sample(circpad_distribution_t dist)
+{
+ log_fn(LOG_DEBUG,LD_CIRC, "Sampling delay with distribution %d",
+ dist.type);
+
+ switch (dist.type) {
+ case CIRCPAD_DIST_NONE:
+ {
+ /* We should not get in here like this */
+ tor_assert_nonfatal_unreached();
+ return 0;
+ }
+ case CIRCPAD_DIST_UNIFORM:
+ {
+ // param2 is upper bound, param1 is lower
+ const struct uniform_t my_uniform = {
+ .base = UNIFORM(my_uniform),
+ .a = dist.param1,
+ .b = dist.param2,
+ };
+ return dist_sample(&my_uniform.base);
+ }
+ case CIRCPAD_DIST_LOGISTIC:
+ {
+ /* param1 is Mu, param2 is sigma. */
+ const struct logistic_t my_logistic = {
+ .base = LOGISTIC(my_logistic),
+ .mu = dist.param1,
+ .sigma = dist.param2,
+ };
+ return dist_sample(&my_logistic.base);
+ }
+ case CIRCPAD_DIST_LOG_LOGISTIC:
+ {
+ /* param1 is Alpha, param2 is 1.0/Beta */
+ const struct log_logistic_t my_log_logistic = {
+ .base = LOG_LOGISTIC(my_log_logistic),
+ .alpha = dist.param1,
+ .beta = dist.param2,
+ };
+ return dist_sample(&my_log_logistic.base);
+ }
+ case CIRCPAD_DIST_GEOMETRIC:
+ {
+ /* param1 is 'p' (success probability) */
+ const struct geometric_t my_geometric = {
+ .base = GEOMETRIC(my_geometric),
+ .p = dist.param1,
+ };
+ return dist_sample(&my_geometric.base);
+ }
+ case CIRCPAD_DIST_WEIBULL:
+ {
+ /* param1 is k, param2 is Lambda */
+ const struct weibull_t my_weibull = {
+ .base = WEIBULL(my_weibull),
+ .k = dist.param1,
+ .lambda = dist.param2,
+ };
+ return dist_sample(&my_weibull.base);
+ }
+ case CIRCPAD_DIST_PARETO:
+ {
+ /* param1 is sigma, param2 is xi, no more params for mu so we use 0 */
+ const struct genpareto_t my_genpareto = {
+ .base = GENPARETO(my_genpareto),
+ .mu = 0,
+ .sigma = dist.param1,
+ .xi = dist.param2,
+ };
+ return dist_sample(&my_genpareto.base);
+ }
+ }
+
+ tor_assert_nonfatal_unreached();
+ return 0;
+}
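+
+/* For illustration only: a hypothetical distribution specification that the
+ * sampler above would treat as "uniform between 1000 and 10000 usec". The
+ * field names (type/param1/param2) are the ones used above; the variable
+ * name and the constants are made up for the example. */
+#if 0
+static const circpad_distribution_t circpad_example_uniform_dist = {
+  .type = CIRCPAD_DIST_UNIFORM,
+  .param1 = 1000,  /* lower bound, in usec */
+  .param2 = 10000, /* upper bound, in usec */
+};
+#endif /* 0 */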
+
+/**
+ * Find the index of the first bin whose upper bound is greater than or
+ * equal to the target, and that has tokens remaining.
+ *
+ * Used for histograms with token removal.
+ */
+static circpad_hist_index_t
+circpad_machine_first_higher_index(const circpad_machine_runtime_t *mi,
+ circpad_delay_t target_bin_usec)
+{
+ circpad_hist_index_t bin = circpad_histogram_usec_to_bin(mi,
+ target_bin_usec);
+
+ /* Don't remove from the infinity bin */
+ for (; bin < CIRCPAD_INFINITY_BIN(mi); bin++) {
+ if (mi->histogram[bin] &&
+ histogram_get_bin_upper_bound(mi, bin) >= target_bin_usec) {
+ return bin;
+ }
+ }
+
+ return mi->histogram_len;
+}
+
+/**
+ * Find the index of the first bin whose lower bound is less than or equal to
+ * <b>target_bin_usec</b>, and that still has tokens remaining.
+ *
+ * Used for histograms with token removal.
+ */
+static circpad_hist_index_t
+circpad_machine_first_lower_index(const circpad_machine_runtime_t *mi,
+ circpad_delay_t target_bin_usec)
+{
+ circpad_hist_index_t bin = circpad_histogram_usec_to_bin(mi,
+ target_bin_usec);
+
+ for (; bin >= 0; bin--) {
+ if (mi->histogram[bin] &&
+ circpad_histogram_bin_to_usec(mi, bin) <= target_bin_usec) {
+ return bin;
+ }
+ }
+
+ return -1;
+}
+
+/**
+ * Remove a token from the first non-empty bin whose upper bound is
+ * greater than or equal to the target.
+ *
+ * Used for histograms with token removal.
+ */
+STATIC void
+circpad_machine_remove_higher_token(circpad_machine_runtime_t *mi,
+ circpad_delay_t target_bin_usec)
+{
+ /* We need to remove the token from the first bin
+   * whose upper bound is at or above the target, and that
+ * has tokens remaining. */
+ circpad_hist_index_t bin = circpad_machine_first_higher_index(mi,
+ target_bin_usec);
+
+ if (bin >= 0 && bin < CIRCPAD_INFINITY_BIN(mi)) {
+ if (!BUG(mi->histogram[bin] == 0)) {
+ mi->histogram[bin]--;
+ }
+ }
+}
+
+/**
+ * Remove a token from the first non-empty bin whose lower bound is
+ * less than or equal to the target.
+ *
+ * Used for histograms with token removal.
+ */
+STATIC void
+circpad_machine_remove_lower_token(circpad_machine_runtime_t *mi,
+ circpad_delay_t target_bin_usec)
+{
+ circpad_hist_index_t bin = circpad_machine_first_lower_index(mi,
+ target_bin_usec);
+
+ if (bin >= 0 && bin < CIRCPAD_INFINITY_BIN(mi)) {
+ if (!BUG(mi->histogram[bin] == 0)) {
+ mi->histogram[bin]--;
+ }
+ }
+}
+
+/* Helper macro: Ensure that the bin has tokens available, and BUG out of the
+ * function if it's not the case. */
+#define ENSURE_BIN_CAPACITY(bin_index) \
+ if (BUG(mi->histogram[bin_index] == 0)) { \
+ return; \
+ }
+
+/**
+ * Remove a token from the closest non-empty bin to the target.
+ *
+ * If use_usec is true, measure "closest" in terms of the next closest bin
+ * midpoint.
+ *
+ * If it is false, use bin index distance only.
+ *
+ * Used for histograms with token removal.
+ */
+STATIC void
+circpad_machine_remove_closest_token(circpad_machine_runtime_t *mi,
+ circpad_delay_t target_bin_usec,
+ bool use_usec)
+{
+ circpad_hist_index_t lower, higher, current;
+ circpad_hist_index_t bin_to_remove = -1;
+
+ lower = circpad_machine_first_lower_index(mi, target_bin_usec);
+ higher = circpad_machine_first_higher_index(mi, target_bin_usec);
+ current = circpad_histogram_usec_to_bin(mi, target_bin_usec);
+
+ /* Sanity check the results */
+ if (BUG(lower > current) || BUG(higher < current)) {
+ return;
+ }
+
+ /* Take care of edge cases first */
+ if (higher == mi->histogram_len && lower == -1) {
+ /* All bins are empty */
+ return;
+ } else if (higher == mi->histogram_len) {
+ /* All higher bins are empty */
+ ENSURE_BIN_CAPACITY(lower);
+ mi->histogram[lower]--;
+ return;
+ } else if (lower == -1) {
+ /* All lower bins are empty */
+ ENSURE_BIN_CAPACITY(higher);
+ mi->histogram[higher]--;
+ return;
+ }
+
+ /* Now handle the intermediate cases */
+ if (use_usec) {
+ /* Find the closest bin midpoint to the target */
+ circpad_delay_t lower_usec = circpad_get_histogram_bin_midpoint(mi, lower);
+ circpad_delay_t higher_usec =
+ circpad_get_histogram_bin_midpoint(mi, higher);
+
+ if (target_bin_usec < lower_usec) {
+ // Lower bin is closer
+ ENSURE_BIN_CAPACITY(lower);
+ bin_to_remove = lower;
+ } else if (target_bin_usec > higher_usec) {
+ // Higher bin is closer
+ ENSURE_BIN_CAPACITY(higher);
+ bin_to_remove = higher;
+ } else if (target_bin_usec-lower_usec > higher_usec-target_bin_usec) {
+ // Higher bin is closer
+ ENSURE_BIN_CAPACITY(higher);
+ bin_to_remove = higher;
+ } else {
+ // Lower bin is closer
+ ENSURE_BIN_CAPACITY(lower);
+ bin_to_remove = lower;
+ }
+ mi->histogram[bin_to_remove]--;
+ log_debug(LD_CIRC, "Removing token from bin %d", bin_to_remove);
+ return;
+ } else {
+ if (current - lower > higher - current) {
+ // Higher bin is closer
+ ENSURE_BIN_CAPACITY(higher);
+ mi->histogram[higher]--;
+ return;
+ } else {
+ // Lower bin is closer
+ ENSURE_BIN_CAPACITY(lower);
+ mi->histogram[lower]--;
+ return;
+ }
+ }
+}
+
+#undef ENSURE_BIN_CAPACITY
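+
+/* Illustrative example (hypothetical bins): suppose the target falls in
+ * bin 2, bin 2 is empty, the nearest non-empty lower bin is 1 and the
+ * nearest non-empty higher bin is 3. With use_usec set, the midpoints of
+ * bins 1 and 3 are compared against target_bin_usec and the closer one
+ * loses a token (an exact tie goes to the lower bin). Without use_usec,
+ * only the index distances |2-1| and |3-2| are compared, so the tie again
+ * goes to the lower bin, and bin 1 loses a token. */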
+
+/**
+ * Remove a token from the exact bin corresponding to the target.
+ *
+ * If it is empty, do nothing.
+ *
+ * Used for histograms with token removal.
+ */
+static void
+circpad_machine_remove_exact(circpad_machine_runtime_t *mi,
+ circpad_delay_t target_bin_usec)
+{
+ circpad_hist_index_t bin = circpad_histogram_usec_to_bin(mi,
+ target_bin_usec);
+
+ if (mi->histogram[bin] > 0)
+ mi->histogram[bin]--;
+}
+
+/**
+ * Check our state's cell limit count and tokens.
+ *
+ * Returns CIRCPAD_STATE_CHANGED if either limit is hit and it causes a state
+ * transition; otherwise returns CIRCPAD_STATE_UNCHANGED.
+ */
+static circpad_decision_t
+check_machine_token_supply(circpad_machine_runtime_t *mi)
+{
+ uint32_t histogram_total_tokens = 0;
+
+  /* Check if the bins are empty. This requires summing up the current mutable
+ * machineinfo histogram token total and checking if it is zero.
+ * Machineinfo does not keep a running token count. We're assuming the
+ * extra space is not worth this short loop iteration.
+ *
+ * We also do not count infinity bin in histogram totals.
+ */
+ if (circpad_is_token_removal_supported(mi)) {
+ for (circpad_hist_index_t b = 0; b < CIRCPAD_INFINITY_BIN(mi); b++)
+ histogram_total_tokens += mi->histogram[b];
+
+ /* If we change state, we're done */
+ if (histogram_total_tokens == 0) {
+ if (circpad_internal_event_bins_empty(mi) == CIRCPAD_STATE_CHANGED)
+ return CIRCPAD_STATE_CHANGED;
+ }
+ }
+
+ if (mi->state_length == 0) {
+ return circpad_internal_event_state_length_up(mi);
+ }
+
+ return CIRCPAD_STATE_UNCHANGED;
+}
+
+/**
+ * Count that a padding packet was sent.
+ *
+ * This updates our state length count, our machine rate limit counts,
+ * and if token removal is used, decrements the histogram.
+ */
+static inline void
+circpad_machine_count_padding_sent(circpad_machine_runtime_t *mi)
+{
+ /* If we have a valid state length bound, consider it */
+ if (mi->state_length != CIRCPAD_STATE_LENGTH_INFINITE &&
+ !BUG(mi->state_length <= 0)) {
+ mi->state_length--;
+ }
+
+ /*
+   * Update padding counts for rate limiting: We scale at UINT16_MAX
+   * because we only use this for a percentile limit of 2 sig figs, and
+   * space is scarce in the machineinfo struct.
+ */
+ mi->padding_sent++;
+ if (mi->padding_sent == UINT16_MAX) {
+ mi->padding_sent /= 2;
+ mi->nonpadding_sent /= 2;
+ }
+
+ circpad_global_padding_sent++;
+
+ /* If we have a mutable histogram, reduce the token count from
+ * the chosen padding bin (this assumes we always send padding
+ * when we intended to). */
+ if (circpad_is_token_removal_supported(mi)) {
+ /* Check array bounds and token count before removing */
+ if (!BUG(mi->chosen_bin >= mi->histogram_len) &&
+ !BUG(mi->histogram[mi->chosen_bin] == 0)) {
+ mi->histogram[mi->chosen_bin]--;
+ }
+ }
+}
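+
+/* Illustrative note on the halving above (hypothetical counts): if
+ * padding_sent reaches 65535 while nonpadding_sent is 40000, both become
+ * 32767 and 20000. The padding fraction used by the limit check barely
+ * moves (65535/105535 ~= 62.1% before, 32767/52767 ~= 62.1% after), which
+ * is all the 2-significant-figure percentile limit needs. */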
+
+/**
+ * Count a nonpadding packet as being sent.
+ *
+ * This function updates our overhead accounting variables, and decrements
+ * the state limit packet counter if the latter was flagged as applying to
+ * non-padding as well.
+ */
+static inline void
+circpad_machine_count_nonpadding_sent(circpad_machine_runtime_t *mi)
+{
+ /* Update non-padding counts for rate limiting: We scale at UINT16_MAX
+ * because we only use this for a percentile limit of 2 sig figs, and
+   * space is scarce in the machineinfo struct. */
+ mi->nonpadding_sent++;
+ if (mi->nonpadding_sent == UINT16_MAX) {
+ mi->padding_sent /= 2;
+ mi->nonpadding_sent /= 2;
+ }
+
+ /* Update any state packet length limits that apply */
+ circpad_machine_update_state_length_for_nonpadding(mi);
+
+ /* Remove a token from the histogram, if applicable */
+ circpad_machine_remove_token(mi);
+}
+
+/**
+ * Decrement the state length counter for a non-padding packet.
+ *
+ * Only updates the state length if we're using that feature, we
+ * have a state, and the machine wants to count non-padding packets
+ * towards the state length.
+ */
+static inline void
+circpad_machine_update_state_length_for_nonpadding(
+ circpad_machine_runtime_t *mi)
+{
+ const circpad_state_t *state = NULL;
+
+ if (mi->state_length == CIRCPAD_STATE_LENGTH_INFINITE)
+ return;
+
+ state = circpad_machine_current_state(mi);
+
+ /* If we are not in a padding state (like start or end), we're done */
+ if (!state)
+ return;
+
+ /* If we're enforcing a state length on non-padding packets,
+ * decrement it */
+ if (state->length_includes_nonpadding &&
+ mi->state_length > 0) {
+ mi->state_length--;
+ }
+}
+
+/**
+ * When a non-padding packet is sent, remove a token from the bin
+ * corresponding to the delay since the padding was scheduled. If that bin
+ * is empty, choose a token based on the specified removal strategy
+ * in the state machine.
+ */
+STATIC void
+circpad_machine_remove_token(circpad_machine_runtime_t *mi)
+{
+ const circpad_state_t *state = NULL;
+ circpad_time_t current_time;
+ circpad_delay_t target_bin_usec;
+
+  /* Don't remove any tokens if there was no padding scheduled */
+ if (!mi->padding_scheduled_at_usec) {
+ return;
+ }
+
+ state = circpad_machine_current_state(mi);
+
+ /* If we are not in a padding state (like start or end), we're done */
+ if (!state)
+ return;
+ /* Don't remove any tokens if we're not doing token removal */
+ if (state->token_removal == CIRCPAD_TOKEN_REMOVAL_NONE)
+ return;
+
+ current_time = monotime_absolute_usec();
+
+ /* If we have scheduled padding some time in the future, we want to see what
+ bin we are in at the current time */
+ target_bin_usec = (circpad_delay_t)
+ MIN((current_time - mi->padding_scheduled_at_usec),
+ CIRCPAD_DELAY_INFINITE-1);
+
+ /* We are treating this non-padding cell as a padding cell, so we cancel
+     the padding timer, if present. */
+ mi->padding_scheduled_at_usec = 0;
+ if (mi->is_padding_timer_scheduled) {
+ mi->is_padding_timer_scheduled = 0;
+ timer_disable(mi->padding_timer);
+ }
+
+ /* Perform the specified token removal strategy */
+ switch (state->token_removal) {
+ case CIRCPAD_TOKEN_REMOVAL_CLOSEST_USEC:
+ circpad_machine_remove_closest_token(mi, target_bin_usec, 1);
+ break;
+ case CIRCPAD_TOKEN_REMOVAL_CLOSEST:
+ circpad_machine_remove_closest_token(mi, target_bin_usec, 0);
+ break;
+ case CIRCPAD_TOKEN_REMOVAL_LOWER:
+ circpad_machine_remove_lower_token(mi, target_bin_usec);
+ break;
+ case CIRCPAD_TOKEN_REMOVAL_HIGHER:
+ circpad_machine_remove_higher_token(mi, target_bin_usec);
+ break;
+ case CIRCPAD_TOKEN_REMOVAL_EXACT:
+ circpad_machine_remove_exact(mi, target_bin_usec);
+ break;
+ case CIRCPAD_TOKEN_REMOVAL_NONE:
+ default:
+ tor_assert_nonfatal_unreached();
+ log_warn(LD_BUG, "Circpad: Unknown token removal strategy %d",
+ state->token_removal);
+ break;
+ }
+}
+
+/**
+ * Send a relay command with a relay cell payload on a circuit to
+ * the particular hopnum.
+ *
+ * Hopnum starts at 1 (1=guard, 2=middle, 3=exit, etc).
+ *
+ * Payload may be null.
+ *
+ * Returns negative on error, 0 on success.
+ */
+MOCK_IMPL(STATIC signed_error_t,
+circpad_send_command_to_hop,(origin_circuit_t *circ, uint8_t hopnum,
+ uint8_t relay_command, const uint8_t *payload,
+ ssize_t payload_len))
+{
+ crypt_path_t *target_hop = circuit_get_cpath_hop(circ, hopnum);
+ signed_error_t ret;
+
+ /* Check that the cpath has the target hop */
+ if (!target_hop) {
+ log_fn(LOG_WARN, LD_BUG, "Padding circuit %u has %d hops, not %d",
+ circ->global_identifier, circuit_get_cpath_len(circ), hopnum);
+ return -1;
+ }
+
+ /* Check that the target hop is opened */
+ if (target_hop->state != CPATH_STATE_OPEN) {
+ log_fn(LOG_WARN,LD_CIRC,
+ "Padding circuit %u has %d hops, not %d",
+ circ->global_identifier,
+ circuit_get_cpath_opened_len(circ), hopnum);
+ return -1;
+ }
+
+  /* Send the command to the target hop */
+ ret = relay_send_command_from_edge(0, TO_CIRCUIT(circ), relay_command,
+ (const char*)payload, payload_len,
+ target_hop);
+ return ret;
+}
+
+/**
+ * Callback helper to send a padding cell.
+ *
+ * This helper is called after our histogram-sampled delay period passes
+ * without another packet being sent first. If a packet is sent before this
+ * callback fires, the callback is canceled. So when we're called here, send
+ * padding right away.
+ *
+ * If sending this padding cell forced us to transition states, return
+ * CIRCPAD_STATE_CHANGED. Otherwise return CIRCPAD_STATE_UNCHANGED.
+ */
+circpad_decision_t
+circpad_send_padding_cell_for_callback(circpad_machine_runtime_t *mi)
+{
+ circuit_t *circ = mi->on_circ;
+ int machine_idx = mi->machine_index;
+ mi->padding_scheduled_at_usec = 0;
+ circpad_statenum_t state = mi->current_state;
+
+ /* Make sure circuit didn't close on us */
+ if (mi->on_circ->marked_for_close) {
+ log_fn(LOG_INFO,LD_CIRC,
+ "Padding callback on circuit marked for close (%u). Ignoring.",
+ CIRCUIT_IS_ORIGIN(mi->on_circ) ?
+ TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0);
+ return CIRCPAD_STATE_CHANGED;
+ }
+
+ circpad_machine_count_padding_sent(mi);
+
+ if (CIRCUIT_IS_ORIGIN(mi->on_circ)) {
+ circpad_send_command_to_hop(TO_ORIGIN_CIRCUIT(mi->on_circ),
+ CIRCPAD_GET_MACHINE(mi)->target_hopnum,
+ RELAY_COMMAND_DROP, NULL, 0);
+ log_info(LD_CIRC, "Callback: Sending padding to origin circuit %u"
+ " (%d) [length: %"PRIu64"]",
+ TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier,
+ mi->on_circ->purpose, mi->state_length);
+ } else {
+ // If we're a non-origin circ, we can just send from here as if we're the
+ // edge.
+ if (TO_OR_CIRCUIT(circ)->p_chan_cells.n <= circpad_max_circ_queued_cells) {
+ log_info(LD_CIRC, "Callback: Sending padding to circuit (%d)"
+ " [length: %"PRIu64"]", mi->on_circ->purpose, mi->state_length);
+ relay_send_command_from_edge(0, mi->on_circ, RELAY_COMMAND_DROP, NULL,
+ 0, NULL);
+ rep_hist_padding_count_write(PADDING_TYPE_DROP);
+ } else {
+ static ratelim_t cell_lim = RATELIM_INIT(600);
+ log_fn_ratelim(&cell_lim,LOG_NOTICE,LD_CIRC,
+ "Too many cells (%d) in circ queue to send padding.",
+ TO_OR_CIRCUIT(circ)->p_chan_cells.n);
+ }
+ }
+
+ /* This is a padding cell sent from the client or from the middle node,
+ * (because it's invoked from circuitpadding.c) */
+ circpad_cell_event_padding_sent(circ);
+
+ /* The circpad_cell_event_padding_sent() could cause us to transition.
+ * Check that we still have a padding machineinfo, and then check our token
+ * supply. */
+ if (circ->padding_info[machine_idx] != NULL) {
+ if (state != circ->padding_info[machine_idx]->current_state)
+ return CIRCPAD_STATE_CHANGED;
+ else
+ return check_machine_token_supply(circ->padding_info[machine_idx]);
+ } else {
+ return CIRCPAD_STATE_CHANGED;
+ }
+}
+
+/**
+ * Tor-timer compatible callback that tells us to send a padding cell.
+ *
+ * Timers are associated with circpad_machine_runtime_t's. When the machineinfo
+ * is freed on a circuit, the timers are cancelled. Since the lifetime
+ * of machineinfo is always longer than the timers, handles are not
+ * needed.
+ */
+static void
+circpad_send_padding_callback(tor_timer_t *timer, void *args,
+ const struct monotime_t *time)
+{
+ circpad_machine_runtime_t *mi = ((circpad_machine_runtime_t*)args);
+ (void)timer; (void)time;
+
+ if (mi && mi->on_circ) {
+ assert_circuit_ok(mi->on_circ);
+ circpad_send_padding_cell_for_callback(mi);
+ } else {
+ // This shouldn't happen (represents a timer leak)
+ log_fn(LOG_WARN,LD_CIRC,
+ "Circuit closed while waiting for padding timer.");
+ tor_fragile_assert();
+ }
+
+ // TODO-MP-AP: Unify this counter with channelpadding for rephist stats
+ //total_timers_pending--;
+}
+
+/**
+ * Cache our consensus parameters upon consensus update.
+ */
+void
+circpad_new_consensus_params(const networkstatus_t *ns)
+{
+ circpad_padding_disabled =
+ networkstatus_get_param(ns, "circpad_padding_disabled",
+ 0, 0, 1);
+
+ circpad_padding_reduced =
+ networkstatus_get_param(ns, "circpad_padding_reduced",
+ 0, 0, 1);
+
+ circpad_global_allowed_cells =
+ networkstatus_get_param(ns, "circpad_global_allowed_cells",
+ 0, 0, UINT16_MAX-1);
+
+ circpad_global_max_padding_percent =
+ networkstatus_get_param(ns, "circpad_global_max_padding_pct",
+ 0, 0, 100);
+
+ circpad_max_circ_queued_cells =
+ networkstatus_get_param(ns, "circpad_max_circ_queued_cells",
+ CIRCWINDOW_START_MAX, 0, 50*CIRCWINDOW_START_MAX);
+}
+
+/**
+ * Return true if padding is allowed by torrc and consensus.
+ */
+static bool
+circpad_is_padding_allowed(void)
+{
+ /* If padding has been disabled in the consensus, don't send any more
+ * padding. Technically the machine should be shut down when the next
+ * machine condition check happens, but machine checks only happen on
+ * certain circuit events, and if padding is disabled due to some
+ * network overload or DoS condition, we really want to stop ASAP. */
+ if (circpad_padding_disabled || !get_options()->CircuitPadding) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * Check this machine against its padding limits, as well as global
+ * consensus limits.
+ *
+ * We have two limits: a percent and a cell count. The cell count
+ * limit must be reached before the percent is enforced (this is to
+ * optionally allow very light padding of things like circuit setup
+ * while there is no other traffic on the circuit).
+ *
+ * TODO: Don't apply limits to machines from torrc.
+ *
+ * Returns 1 if limits are set and we've hit them. Otherwise returns 0.
+ */
+STATIC bool
+circpad_machine_reached_padding_limit(circpad_machine_runtime_t *mi)
+{
+ const circpad_machine_spec_t *machine = CIRCPAD_GET_MACHINE(mi);
+
+ /* If machine_padding_pct is non-zero, and we've sent more
+ * than the allowed count of padding cells, then check our
+ * percent limits for this machine. */
+ if (machine->max_padding_percent &&
+ mi->padding_sent >= machine->allowed_padding_count) {
+ uint32_t total_cells = mi->padding_sent + mi->nonpadding_sent;
+
+ /* Check the percent */
+ if ((100*(uint32_t)mi->padding_sent) / total_cells >
+ machine->max_padding_percent) {
+ return 1; // limit is reached. Stop.
+ }
+ }
+
+  /* If circpad_global_max_padding_percent is non-zero, and we've
+ * sent more than the global padding cell limit, then check our
+ * global tor process percentage limit on padding. */
+ if (circpad_global_max_padding_percent &&
+ circpad_global_padding_sent >= circpad_global_allowed_cells) {
+ uint64_t total_cells = circpad_global_padding_sent +
+ circpad_global_nonpadding_sent;
+
+ /* Check the percent */
+ if ((100*circpad_global_padding_sent) / total_cells >
+ circpad_global_max_padding_percent) {
+ return 1; // global limit reached. Stop.
+ }
+ }
+
+ return 0; // All good!
+}
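+
+/* Illustrative example (hypothetical machine parameters): with
+ * allowed_padding_count = 100 and max_padding_percent = 50, the percent
+ * check is skipped until 100 padding cells have been sent. After that, if
+ * padding_sent = 120 and nonpadding_sent = 80, then
+ *   (100 * 120) / 200 = 60 > 50,
+ * so the limit is hit and no more padding is scheduled. The global,
+ * consensus-driven limit works the same way on the process-wide counters. */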
+
+/**
+ * Schedule the next padding time according to the machineinfo on a
+ * circuit.
+ *
+ * The histograms represent inter-packet delays. Whenever you get a packet
+ * event you should be scheduling your next timer (after cancelling any old
+ * ones and updating tokens accordingly).
+ *
+ * Returns 1 if we decide to transition states (due to infinity bin),
+ * 0 otherwise.
+ */
+MOCK_IMPL(circpad_decision_t,
+circpad_machine_schedule_padding,(circpad_machine_runtime_t *mi))
+{
+ circpad_delay_t in_usec = 0;
+ struct timeval timeout;
+ tor_assert(mi);
+
+ /* Don't schedule padding if it is disabled */
+ if (!circpad_is_padding_allowed()) {
+ static ratelim_t padding_lim = RATELIM_INIT(600);
+ log_fn_ratelim(&padding_lim,LOG_INFO,LD_CIRC,
+ "Padding has been disabled, but machine still on circuit %"PRIu64
+ ", %d",
+ mi->on_circ->n_chan ? mi->on_circ->n_chan->global_identifier : 0,
+ mi->on_circ->n_circ_id);
+
+ return CIRCPAD_STATE_UNCHANGED;
+ }
+
+ /* Don't schedule padding if we are currently in dormant mode. */
+ if (!is_participating_on_network()) {
+ log_info(LD_CIRC, "Not scheduling padding because we are dormant.");
+ return CIRCPAD_STATE_UNCHANGED;
+ }
+
+ // Don't pad in end (but also don't cancel any previously
+ // scheduled padding either).
+ if (mi->current_state == CIRCPAD_STATE_END) {
+ log_fn(LOG_INFO, LD_CIRC, "Padding end state on circuit %u",
+ CIRCUIT_IS_ORIGIN(mi->on_circ) ?
+ TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0);
+ return CIRCPAD_STATE_UNCHANGED;
+ }
+
+ /* Check our padding limits */
+ if (circpad_machine_reached_padding_limit(mi)) {
+ if (CIRCUIT_IS_ORIGIN(mi->on_circ)) {
+ log_fn(LOG_INFO, LD_CIRC,
+ "Padding machine has reached padding limit on circuit %u",
+ TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier);
+ } else {
+ static ratelim_t padding_lim = RATELIM_INIT(600);
+ log_fn_ratelim(&padding_lim,LOG_INFO,LD_CIRC,
+ "Padding machine has reached padding limit on circuit %"PRIu64
+ ", %d",
+ mi->on_circ->n_chan ? mi->on_circ->n_chan->global_identifier : 0,
+ mi->on_circ->n_circ_id);
+ }
+ return CIRCPAD_STATE_UNCHANGED;
+ }
+
+ if (mi->is_padding_timer_scheduled) {
+ /* Cancel current timer (if any) */
+ timer_disable(mi->padding_timer);
+ mi->is_padding_timer_scheduled = 0;
+ }
+
+ /* in_usec = in microseconds */
+ in_usec = circpad_machine_sample_delay(mi);
+ /* If we're using token removal, we need to know when the padding
+ * was scheduled at, so we can remove the appropriate token if
+ * a non-padding cell is sent before the padding timer expires.
+ *
+ * However, since monotime is unpredictably expensive, let's avoid
+ * using it for machines that don't need token removal. */
+ if (circpad_is_token_removal_supported(mi)) {
+ mi->padding_scheduled_at_usec = monotime_absolute_usec();
+ } else {
+ mi->padding_scheduled_at_usec = 1;
+ }
+ log_fn(LOG_INFO,LD_CIRC,"\tPadding in %u usec on circuit %u", in_usec,
+ CIRCUIT_IS_ORIGIN(mi->on_circ) ?
+ TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0);
+
+ // Don't schedule if we have infinite delay.
+ if (in_usec == CIRCPAD_DELAY_INFINITE) {
+ return circpad_internal_event_infinity(mi);
+ }
+
+ if (mi->state_length == 0) {
+ /* If we're at length 0, that means we hit 0 after sending
+ * a cell earlier, and emitted an event for it, but
+ * for whatever reason we did not decide to change states then.
+ * So maybe the machine is waiting for bins empty, or for an
+ * infinity event later? That would be a strange machine,
+ * but there's no reason to make it impossible. */
+ return CIRCPAD_STATE_UNCHANGED;
+ }
+
+ if (in_usec <= 0) {
+ return circpad_send_padding_cell_for_callback(mi);
+ }
+
+ timeout.tv_sec = in_usec/TOR_USEC_PER_SEC;
+ timeout.tv_usec = (in_usec%TOR_USEC_PER_SEC);
+
+ log_fn(LOG_INFO, LD_CIRC, "\tPadding circuit %u in %u sec, %u usec",
+ CIRCUIT_IS_ORIGIN(mi->on_circ) ?
+ TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0,
+ (unsigned)timeout.tv_sec, (unsigned)timeout.tv_usec);
+
+ if (mi->padding_timer) {
+ timer_set_cb(mi->padding_timer, circpad_send_padding_callback, mi);
+ } else {
+ mi->padding_timer =
+ timer_new(circpad_send_padding_callback, mi);
+ }
+ timer_schedule(mi->padding_timer, &timeout);
+ mi->is_padding_timer_scheduled = 1;
+
+ // TODO-MP-AP: Unify with channelpadding counter
+ //rep_hist_padding_count_timers(++total_timers_pending);
+
+ return CIRCPAD_STATE_UNCHANGED;
+}
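+
+/* Illustrative example of the timeout split above: a sampled delay of
+ * in_usec = 2500000 becomes timeout.tv_sec = 2 and timeout.tv_usec =
+ * 500000, since TOR_USEC_PER_SEC is one million. */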
+
+/**
+ * If the machine transitioned to the END state, we need
+ * to check to see if it wants us to shut it down immediately.
+ * If it does, then we need to send the appropriate negotiation commands
+ * depending on which side it is.
+ *
+ * After this function is called, mi may point to freed memory. Do
+ * not access it.
+ */
+static void
+circpad_machine_spec_transitioned_to_end(circpad_machine_runtime_t *mi)
+{
+ const circpad_machine_spec_t *machine = CIRCPAD_GET_MACHINE(mi);
+ circuit_t *on_circ = mi->on_circ;
+
+ log_fn(LOG_INFO,LD_CIRC, "Padding machine in end state on circuit %u (%d)",
+ CIRCUIT_IS_ORIGIN(on_circ) ?
+ TO_ORIGIN_CIRCUIT(on_circ)->global_identifier : 0,
+ on_circ->purpose);
+
+ /*
+ * We allow machines to shut down and delete themselves as opposed
+ * to just going back to START or waiting forever in END so that
+ * we can handle the case where this machine started while it was
+ * the only machine that matched conditions, but *since* then more
+ * "higher ranking" machines now match the conditions, and would
+ * be given a chance to take precedence over this one in
+ * circpad_add_matching_machines().
+ *
+ * Returning to START or waiting forever in END would not give those
+ * other machines a chance to be launched, whereas shutting down
+ * here does.
+ */
+ if (machine->should_negotiate_end) {
+ if (machine->is_origin_side) {
+ /* We free the machine info here so that we can be replaced
+ * by a different machine. But we must leave the padding_machine
+ * in place to wait for the negotiated response */
+ circpad_circuit_machineinfo_free_idx(on_circ,
+ machine->machine_index);
+ circpad_negotiate_padding(TO_ORIGIN_CIRCUIT(on_circ),
+ machine->machine_num,
+ machine->target_hopnum,
+ CIRCPAD_COMMAND_STOP);
+ } else {
+ circpad_circuit_machineinfo_free_idx(on_circ,
+ machine->machine_index);
+ circpad_padding_negotiated(on_circ,
+ machine->machine_num,
+ CIRCPAD_COMMAND_STOP,
+ CIRCPAD_RESPONSE_OK);
+ on_circ->padding_machine[machine->machine_index] = NULL;
+ }
+ }
+}
+
+/**
+ * Generic state transition function for padding state machines.
+ *
+ * Given an event and our mutable machine info, decide if/how to
+ * transition to a different state, and perform actions accordingly.
+ *
+ * Returns 1 if we transition states, 0 otherwise.
+ */
+MOCK_IMPL(circpad_decision_t,
+circpad_machine_spec_transition,(circpad_machine_runtime_t *mi,
+ circpad_event_t event))
+{
+ const circpad_state_t *state =
+ circpad_machine_current_state(mi);
+
+ /* If state is null we are in the end state. */
+ if (!state) {
+    /* If we are in the end state, we don't pad no matter what. */
+ return CIRCPAD_STATE_UNCHANGED;
+ }
+
+ /* Check if this event is ignored or causes a cancel */
+ if (state->next_state[event] == CIRCPAD_STATE_IGNORE) {
+ return CIRCPAD_STATE_UNCHANGED;
+ } else if (state->next_state[event] == CIRCPAD_STATE_CANCEL) {
+ /* Check cancel events and cancel any pending padding */
+ mi->padding_scheduled_at_usec = 0;
+ if (mi->is_padding_timer_scheduled) {
+ mi->is_padding_timer_scheduled = 0;
+ /* Cancel current timer (if any) */
+ timer_disable(mi->padding_timer);
+ }
+ return CIRCPAD_STATE_UNCHANGED;
+ } else {
+ circpad_statenum_t s = state->next_state[event];
+ /* See if we need to transition to any other states based on this event.
+ * Whenever a transition happens, even to our own state, we schedule
+ * padding.
+ *
+ * So if a state only wants to schedule padding for an event, it specifies
+ * a transition to itself. All non-specified events are ignored.
+ */
+ log_fn(LOG_INFO, LD_CIRC,
+ "Circuit %u circpad machine %d transitioning from %u to %u",
+ CIRCUIT_IS_ORIGIN(mi->on_circ) ?
+ TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0,
+ mi->machine_index, mi->current_state, s);
+
+ /* If this is not the same state, switch and init tokens,
+ * otherwise just reschedule padding. */
+ if (mi->current_state != s) {
+ mi->current_state = s;
+ circpad_machine_setup_tokens(mi);
+ circpad_choose_state_length(mi);
+
+ /* If we transition to the end state, check to see
+ * if this machine wants to be shut down at end */
+ if (s == CIRCPAD_STATE_END) {
+ circpad_machine_spec_transitioned_to_end(mi);
+ /* We transitioned but we don't pad in end. Also, mi
+ * may be freed. Returning STATE_CHANGED prevents us
+ * from accessing it in any callers of this function. */
+ return CIRCPAD_STATE_CHANGED;
+ }
+
+ /* We transitioned to a new state, schedule padding */
+ circpad_machine_schedule_padding(mi);
+ return CIRCPAD_STATE_CHANGED;
+ }
+
+ /* We transitioned back to the same state. Schedule padding,
+ * and inform if that causes a state transition. */
+ return circpad_machine_schedule_padding(mi);
+ }
+
+ return CIRCPAD_STATE_UNCHANGED;
+}
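+
+/* Illustrative example (hypothetical state table): a state whose
+ * next_state[CIRCPAD_EVENT_PADDING_SENT] points back at itself simply
+ * reschedules padding every time it sends a padding cell, while a
+ * next_state[CIRCPAD_EVENT_NONPADDING_RECV] of CIRCPAD_STATE_END shuts the
+ * machine down (or negotiates shutdown) as soon as a real cell arrives.
+ * Events mapped to CIRCPAD_STATE_IGNORE are ignored outright. */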
+
+/**
+ * Estimate the circuit RTT from the current middle hop out to the
+ * end of the circuit.
+ *
+ * We estimate RTT by calculating the time between "receive" and
+ * "send" at a middle hop. This is because we "receive" a cell
+ * from the origin, and then relay it towards the exit before a
+ * response comes back. It is that response time from the exit side
+ * that we want to measure, so that we can make use of it for synthetic
+ * response delays.
+ */
+static void
+circpad_estimate_circ_rtt_on_received(circuit_t *circ,
+ circpad_machine_runtime_t *mi)
+{
+ /* Origin circuits don't estimate RTT. They could do it easily enough,
+ * but they have no reason to use it in any delay calculations. */
+ if (CIRCUIT_IS_ORIGIN(circ) || mi->stop_rtt_update)
+ return;
+
+ /* If we already have a last received packet time, that means we
+ * did not get a response before this packet. The RTT estimate
+ * only makes sense if we do not have multiple packets on the
+ * wire, so stop estimating if this is the second packet
+ * back to back. However, for the first set of back-to-back
+ * packets, we can wait until the very first response comes back
+ * to us, to measure that RTT (for the response to optimistic
+ * data, for example). Hence stop_rtt_update is only checked
+ * in this received side function, and not in send side below.
+ */
+ if (mi->last_received_time_usec) {
+ /* We also allow multiple back-to-back packets if the circuit is not
+ * opened, to handle var cells.
+     * XXX: Will this work with our var cell plans? Maybe not,
+ * since we're opened at the middle hop as soon as we process
+ * one var extend2 :/ */
+ if (circ->state == CIRCUIT_STATE_OPEN) {
+ log_fn(LOG_INFO, LD_CIRC,
+ "Stopping padding RTT estimation on circuit (%"PRIu64
+ ", %d) after two back to back packets. Current RTT: %d",
+ circ->n_chan ? circ->n_chan->global_identifier : 0,
+ circ->n_circ_id, mi->rtt_estimate_usec);
+ mi->stop_rtt_update = 1;
+
+ if (!mi->rtt_estimate_usec) {
+ static ratelim_t rtt_lim = RATELIM_INIT(600);
+ log_fn_ratelim(&rtt_lim,LOG_NOTICE,LD_BUG,
+ "Circuit got two cells back to back before estimating RTT.");
+ }
+ }
+ } else {
+ const circpad_state_t *state = circpad_machine_current_state(mi);
+ if (BUG(!state)) {
+ return;
+ }
+
+ /* Since monotime is unpredictably expensive, only update this field
+ * if rtt estimates are needed. Otherwise, stop the rtt update. */
+ if (state->use_rtt_estimate) {
+ mi->last_received_time_usec = monotime_absolute_usec();
+ } else {
+ /* Let's fast-path future decisions not to update rtt if the
+ * feature is not in use. */
+ mi->stop_rtt_update = 1;
+ }
+ }
+}
+
+/**
+ * Handles the "send" side of RTT calculation at middle nodes.
+ *
+ * This function calculates the RTT from the middle to the end
+ * of the circuit by subtracting the last received cell timestamp
+ * from the current time. It allows back-to-back cells until
+ * the circuit is opened, to allow for var cell handshakes.
+ * XXX: Check our var cell plans to make sure this will work.
+ */
+static void
+circpad_estimate_circ_rtt_on_send(circuit_t *circ,
+ circpad_machine_runtime_t *mi)
+{
+ /* Origin circuits don't estimate RTT. They could do it easily enough,
+ * but they have no reason to use it in any delay calculations. */
+ if (CIRCUIT_IS_ORIGIN(circ))
+ return;
+
+ /* If last_received_time_usec is non-zero, we are waiting for a response
+ * from the exit side. Calculate the time delta and use it as RTT. */
+ if (mi->last_received_time_usec) {
+ circpad_time_t rtt_time = monotime_absolute_usec() -
+ mi->last_received_time_usec;
+
+ /* Reset the last RTT packet time, so we can tell if two cells
+ * arrive back to back */
+ mi->last_received_time_usec = 0;
+
+ /* Use INT32_MAX to ensure the addition doesn't overflow */
+ if (rtt_time >= INT32_MAX) {
+ log_fn(LOG_WARN,LD_CIRC,
+ "Circuit padding RTT estimate overflowed: %"PRIu64
+ " vs %"PRIu64, monotime_absolute_usec(),
+ mi->last_received_time_usec);
+ return;
+ }
+
+ /* If the old RTT estimate is lower than this one, use this one, because
+ * the circuit is getting longer. If this estimate is somehow
+ * faster than the previous, then maybe that was network jitter, or a
+ * bad monotonic clock source (so our ratchet returned a zero delta).
+ * In that case, average them. */
+ if (mi->rtt_estimate_usec < (circpad_delay_t)rtt_time) {
+ mi->rtt_estimate_usec = (circpad_delay_t)rtt_time;
+ } else {
+ mi->rtt_estimate_usec += (circpad_delay_t)rtt_time;
+ mi->rtt_estimate_usec /= 2;
+ }
+ } else if (circ->state == CIRCUIT_STATE_OPEN) {
+ /* If last_received_time_usec is zero, then we have gotten two cells back
+ * to back. Stop estimating RTT in this case. Note that we only
+ * stop RTT update if the circuit is opened, to allow for RTT estimates
+ * of var cells during circ setup. */
+ if (!mi->rtt_estimate_usec && !mi->stop_rtt_update) {
+ static ratelim_t rtt_lim = RATELIM_INIT(600);
+ log_fn_ratelim(&rtt_lim,LOG_NOTICE,LD_BUG,
+ "Circuit sent two cells back to back before estimating RTT.");
+ }
+ mi->stop_rtt_update = 1;
+ }
+}
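+
+/* Illustrative example (hypothetical timings): if the stored estimate is
+ * 30000 usec and a new send measures rtt_time = 50000 usec, the larger
+ * value wins and rtt_estimate_usec becomes 50000. If the new measurement
+ * were instead 20000 usec, the two are averaged:
+ * (30000 + 20000) / 2 = 25000 usec. */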
+
+/**
+ * A "non-padding" cell has been sent from this endpoint. React
+ * according to any padding state machines on the circuit.
+ *
+ * For origin circuits, this means we sent a cell into the network.
+ * For middle relay circuits, this means we sent a cell towards the
+ * origin.
+ */
+void
+circpad_cell_event_nonpadding_sent(circuit_t *on_circ)
+{
+ /* Update global cell count */
+ circpad_global_nonpadding_sent++;
+
+ /* If there are no machines then this loop should not iterate */
+ FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, on_circ) {
+ /* First, update any timestamps */
+ on_circ->padding_info[i]->last_cell_time_sec = approx_time();
+ circpad_estimate_circ_rtt_on_send(on_circ, on_circ->padding_info[i]);
+
+ /* Then, do accounting */
+ circpad_machine_count_nonpadding_sent(on_circ->padding_info[i]);
+
+ /* Check to see if we've run out of tokens for this state already,
+ * and if not, check for other state transitions */
+ if (check_machine_token_supply(on_circ->padding_info[i])
+ == CIRCPAD_STATE_UNCHANGED) {
+ /* If removing a token did not cause a transition, check if
+ * non-padding sent event should */
+ circpad_machine_spec_transition(on_circ->padding_info[i],
+ CIRCPAD_EVENT_NONPADDING_SENT);
+ }
+ } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
+}
+
+/** Check if this cell or circuit are related to circuit padding and handle
+ * them if so. Return 0 if the cell was handled in this subsystem and does
+ * not need any other consideration, otherwise return 1.
+ */
+int
+circpad_check_received_cell(cell_t *cell, circuit_t *circ,
+ crypt_path_t *layer_hint,
+ const relay_header_t *rh)
+{
+ /* First handle the padding commands, since we want to ignore any other
+ * commands if this circuit is padding-specific. */
+ switch (rh->command) {
+ case RELAY_COMMAND_DROP:
+ /* Already examined in circpad_deliver_recognized_relay_cell_events */
+ return 0;
+ case RELAY_COMMAND_PADDING_NEGOTIATE:
+ circpad_handle_padding_negotiate(circ, cell);
+ return 0;
+ case RELAY_COMMAND_PADDING_NEGOTIATED:
+ if (circpad_handle_padding_negotiated(circ, cell, layer_hint) == 0)
+ circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), rh->length);
+ return 0;
+ }
+
+ /* If this is a padding circuit we don't need to parse any other commands
+ * than the padding ones. Just drop them to the floor.
+ *
+ * Note: we deliberately do not call circuit_read_valid_data() here. The
+ * vanguards addon (specifically the 'bandguards' component's dropped cell
+ * detection) will thus close this circuit, as it would for any other
+ * unexpected cell. However, default tor will *not* close the circuit.
+ *
+ * This is intentional. We are not yet certain that is it optimal to keep
+ * padding circuits open in cases like these, rather than closing them.
+ * We suspect that continuing to pad is optimal against a passive classifier,
+ * but as soon as the adversary is active (even as a client adversary) this
+ * might change.
+ *
+ * So as a way forward, we log the cell command and circuit number, to
+ * help us enumerate the most common instances of this in testing with
+ * vanguards, to see which are common enough to verify and handle
+ * properly.
+ * - Mike
+ */
+ if (circ->purpose == CIRCUIT_PURPOSE_C_CIRCUIT_PADDING) {
+ log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
+ "Ignored cell (%d) that arrived in padding circuit "
+ " %u.", rh->command, CIRCUIT_IS_ORIGIN(circ) ?
+ TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * A "non-padding" cell has been received by this endpoint. React
+ * according to any padding state machines on the circuit.
+ *
+ * For origin circuits, this means we read a cell from the network.
+ * For middle relay circuits, this means we received a cell from the
+ * origin.
+ */
+void
+circpad_cell_event_nonpadding_received(circuit_t *on_circ)
+{
+ FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, on_circ) {
+ /* First, update any timestamps */
+ on_circ->padding_info[i]->last_cell_time_sec = approx_time();
+ circpad_estimate_circ_rtt_on_received(on_circ, on_circ->padding_info[i]);
+
+ circpad_machine_spec_transition(on_circ->padding_info[i],
+ CIRCPAD_EVENT_NONPADDING_RECV);
+ } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
+}
+
+/**
+ * A padding cell has been sent from this endpoint. React
+ * according to any padding state machines on the circuit.
+ *
+ * For origin circuits, this means we sent a cell into the network.
+ * For middle relay circuits, this means we sent a cell towards the
+ * origin.
+ */
+void
+circpad_cell_event_padding_sent(circuit_t *on_circ)
+{
+ FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, on_circ) {
+ /* Check to see if we've run out of tokens for this state already,
+ * and if not, check for other state transitions */
+ if (check_machine_token_supply(on_circ->padding_info[i])
+ == CIRCPAD_STATE_UNCHANGED) {
+      /* If removing a token did not cause a transition, check if
+       * the padding sent event should */
+
+ on_circ->padding_info[i]->last_cell_time_sec = approx_time();
+ circpad_machine_spec_transition(on_circ->padding_info[i],
+ CIRCPAD_EVENT_PADDING_SENT);
+ }
+ } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
+}
+
+/**
+ * A padding cell has been received by this endpoint. React
+ * according to any padding state machines on the circuit.
+ *
+ * For origin circuits, this means we read a cell from the network.
+ * For middle relay circuits, this means we received a cell from the
+ * origin.
+ */
+void
+circpad_cell_event_padding_received(circuit_t *on_circ)
+{
+  /* Similar to padding sent, but without the token supply check */
+ FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, on_circ) {
+ on_circ->padding_info[i]->last_cell_time_sec = approx_time();
+ circpad_machine_spec_transition(on_circ->padding_info[i],
+ CIRCPAD_EVENT_PADDING_RECV);
+ } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
+}
+
+/**
+ * An "infinite" delay has ben chosen from one of our histograms.
+ *
+ * "Infinite" delays mean don't send padding -- but they can also
+ * mean transition to another state depending on the state machine
+ * definitions. Check the rules and react accordingly.
+ *
+ * Return 1 if we decide to transition, 0 otherwise.
+ */
+circpad_decision_t
+circpad_internal_event_infinity(circpad_machine_runtime_t *mi)
+{
+ return circpad_machine_spec_transition(mi, CIRCPAD_EVENT_INFINITY);
+}
+
+/**
+ * All of the bins of our current state's histogram are empty.
+ *
+ * Check to see if this means transition to another state, and if
+ * not, refill the tokens.
+ *
+ * Return 1 if we decide to transition, 0 otherwise.
+ */
+circpad_decision_t
+circpad_internal_event_bins_empty(circpad_machine_runtime_t *mi)
+{
+ if (circpad_machine_spec_transition(mi, CIRCPAD_EVENT_BINS_EMPTY)
+ == CIRCPAD_STATE_CHANGED) {
+ return CIRCPAD_STATE_CHANGED;
+ } else {
+ /* If we dont transition, then we refill the tokens */
+ circpad_machine_setup_tokens(mi);
+ return CIRCPAD_STATE_UNCHANGED;
+ }
+}
+
+/**
+ * This state has used up its cell count. Emit the event and
+ * see if we transition.
+ *
+ * Return CIRCPAD_STATE_CHANGED if we decide to transition, and
+ * CIRCPAD_STATE_UNCHANGED otherwise.
+ */
+circpad_decision_t
+circpad_internal_event_state_length_up(circpad_machine_runtime_t *mi)
+{
+ return circpad_machine_spec_transition(mi, CIRCPAD_EVENT_LENGTH_COUNT);
+}
+
+/**
+ * Returns true if the given circuit matches the conditions required for
+ * the given machine to apply to it.
+ */
+static inline bool
+circpad_machine_conditions_met(origin_circuit_t *circ,
+ const circpad_machine_spec_t *machine)
+{
+ /* If padding is disabled, no machines should match/apply. This has
+ * the effect of shutting down all machines, and not adding any more. */
+ if (circpad_padding_disabled || !get_options()->CircuitPadding)
+ return 0;
+
+ /* If the consensus or our torrc has selected reduced connection padding,
+ * then only allow this machine if it is flagged as acceptable under
+ * reduced padding conditions */
+ if (circpad_padding_reduced || get_options()->ReducedCircuitPadding) {
+ if (!machine->conditions.reduced_padding_ok)
+ return 0;
+ }
+
+ if (!(circpad_circ_purpose_to_mask(TO_CIRCUIT(circ)->purpose)
+ & machine->conditions.purpose_mask))
+ return 0;
+
+ if (machine->conditions.requires_vanguards) {
+ const or_options_t *options = get_options();
+
+ /* Pinned middles are effectively vanguards */
+ if (!(options->HSLayer2Nodes || options->HSLayer3Nodes))
+ return 0;
+ }
+
+ /* We check for any bits set in the circuit state mask so that machines
+ * can say any of the following through their state bitmask:
+ * "I want to apply to circuits with either streams or no streams"; OR
+ * "I only want to apply to circuits with streams"; OR
+ * "I only want to apply to circuits without streams". */
+ if (!(circpad_circuit_state(circ) & machine->conditions.state_mask))
+ return 0;
+
+ if (circuit_get_cpath_opened_len(circ) < machine->conditions.min_hops)
+ return 0;
+
+ return 1;
+}
+
+/**
+ * Returns a minimized representation of the circuit state.
+ *
+ * The padding code only cares if the circuit is building,
+ * opened, used for streams, and/or still has relay early cells.
+ * This returns a bitmask of all state properties that apply to
+ * this circuit.
+ */
+static inline
+circpad_circuit_state_t
+circpad_circuit_state(origin_circuit_t *circ)
+{
+ circpad_circuit_state_t retmask = 0;
+
+ if (circ->p_streams)
+ retmask |= CIRCPAD_CIRC_STREAMS;
+ else
+ retmask |= CIRCPAD_CIRC_NO_STREAMS;
+
+ /* We use has_opened to prevent cannibalized circs from flapping. */
+ if (circ->has_opened)
+ retmask |= CIRCPAD_CIRC_OPENED;
+ else
+ retmask |= CIRCPAD_CIRC_BUILDING;
+
+ if (circ->remaining_relay_early_cells > 0)
+ retmask |= CIRCPAD_CIRC_HAS_RELAY_EARLY;
+ else
+ retmask |= CIRCPAD_CIRC_HAS_NO_RELAY_EARLY;
+
+ return retmask;
+}
+
+/**
+ * Convert a normal circuit purpose into a bitmask that we can
+ * use for determining matching circuits.
+ */
+circpad_purpose_mask_t
+circpad_circ_purpose_to_mask(uint8_t circ_purpose)
+{
+ /* Treat OR circ purposes as ignored. They should not be passed here. */
+ if (BUG(circ_purpose <= CIRCUIT_PURPOSE_OR_MAX_)) {
+ return 0;
+ }
+
+ /* Treat new client circuit purposes as "OMG ITS EVERYTHING".
+ * This also should not happen */
+ if (BUG(circ_purpose - CIRCUIT_PURPOSE_OR_MAX_ - 1 >= 32)) {
+ return CIRCPAD_PURPOSE_ALL;
+ }
+
+ /* Convert the purpose to a bit position */
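+ /* (Worked example of the mapping: the first client purpose,
+ * CIRCUIT_PURPOSE_OR_MAX_ + 1, maps to bit 0; the next purpose maps to
+ * bit 1; and so on.) */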
+ return 1 << (circ_purpose - CIRCUIT_PURPOSE_OR_MAX_ - 1);
+}
+
+/**
+ * Shut down any machines whose conditions no longer match
+ * the current circuit.
+ */
+static void
+circpad_shutdown_old_machines(origin_circuit_t *on_circ)
+{
+ circuit_t *circ = TO_CIRCUIT(on_circ);
+
+ FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, circ) {
+ if (!circpad_machine_conditions_met(on_circ,
+ circ->padding_machine[i])) {
+ // Clear machineinfo (frees timers)
+ circpad_circuit_machineinfo_free_idx(circ, i);
+ // Send padding negotiate stop
+ circpad_negotiate_padding(on_circ,
+ circ->padding_machine[i]->machine_num,
+ circ->padding_machine[i]->target_hopnum,
+ CIRCPAD_COMMAND_STOP);
+ }
+ } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
+}
+
+/**
+ * Negotiate new machines that would apply to this circuit, given the machines
+ * inside <b>machines_sl</b>.
+ *
+ * This function checks to see if we have any free machine indexes,
+ * and for each free machine index, it initializes the most recently
+ * added origin-side padding machine that matches the target machine
+ * index and circuit conditions, and negotiates it with the appropriate
+ * middle relay.
+ */
+STATIC void
+circpad_add_matching_machines(origin_circuit_t *on_circ,
+ smartlist_t *machines_sl)
+{
+ circuit_t *circ = TO_CIRCUIT(on_circ);
+
+#ifdef TOR_UNIT_TESTS
+ /* Tests don't have to init our padding machines */
+ if (!machines_sl)
+ return;
+#endif
+
+ /* If padding negotiation failed before, do not try again */
+ if (on_circ->padding_negotiation_failed)
+ return;
+
+ FOR_EACH_CIRCUIT_MACHINE_BEGIN(i) {
+ /* If there is a padding machine info, this index is occupied.
+ * No need to check conditions for this index. */
+ if (circ->padding_info[i])
+ continue;
+
+ /* We have a free machine index. Check the origin padding
+ * machines in reverse order, so that more recently added
+ * machines take priority over older ones. */
+ SMARTLIST_FOREACH_REVERSE_BEGIN(machines_sl,
+ circpad_machine_spec_t *,
+ machine) {
+ /* Machine definitions have a specific target machine index.
+ * This is so event ordering is deterministic with respect
+ * to which machine gets events first when there are two
+ * machines installed on a circuit. Make sure we only
+ * add this machine if its target machine index is free. */
+ if (machine->machine_index == i &&
+ circpad_machine_conditions_met(on_circ, machine)) {
+
+ // We can only replace this machine if the target hopnum
+ // is the same, otherwise we'll get invalid data
+ if (circ->padding_machine[i]) {
+ if (circ->padding_machine[i]->target_hopnum !=
+ machine->target_hopnum)
+ continue;
+ /* Replace it. (Don't free - is global). */
+ circ->padding_machine[i] = NULL;
+ }
+
+ /* Set up the machine immediately so that the slot is occupied.
+ * We will tear it down on error return, or if there is an error
+ * response from the relay. */
+ circpad_setup_machine_on_circ(circ, machine);
+ if (circpad_negotiate_padding(on_circ, machine->machine_num,
+ machine->target_hopnum,
+ CIRCPAD_COMMAND_START) < 0) {
+ log_info(LD_CIRC,
+ "Padding not negotiated. Cleaning machine from circuit %u",
+ CIRCUIT_IS_ORIGIN(circ) ?
+ TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
+ circpad_circuit_machineinfo_free_idx(circ, i);
+ circ->padding_machine[i] = NULL;
+ on_circ->padding_negotiation_failed = 1;
+ } else {
+ /* Success. Don't try any more machines on this index */
+ break;
+ }
+ }
+ } SMARTLIST_FOREACH_END(machine);
+ } FOR_EACH_CIRCUIT_MACHINE_END;
+}
+
+/**
+ * Event that tells us we added a hop to an origin circuit.
+ *
+ * This event is used to decide if we should create a padding machine
+ * on a circuit.
+ */
+void
+circpad_machine_event_circ_added_hop(origin_circuit_t *on_circ)
+{
+ /* Since our padding conditions do not specify a max_hops,
+ * all we can do is add machines here */
+ circpad_add_matching_machines(on_circ, origin_padding_machines);
+}
+
+/**
+ * Event that tells us that an origin circuit is now built.
+ *
+ * Shut down any machines that only applied to un-built circuits.
+ * Activate any new ones.
+ */
+void
+circpad_machine_event_circ_built(origin_circuit_t *circ)
+{
+ circpad_shutdown_old_machines(circ);
+ circpad_add_matching_machines(circ, origin_padding_machines);
+}
+
+/**
+ * Circpad purpose changed event.
+ *
+ * Shut down any machines that don't apply to our circ purpose.
+ * Activate any new ones that do.
+ */
+void
+circpad_machine_event_circ_purpose_changed(origin_circuit_t *circ)
+{
+ circpad_shutdown_old_machines(circ);
+ circpad_add_matching_machines(circ, origin_padding_machines);
+}
+
+/**
+ * Event that tells us that an origin circuit is out of RELAY_EARLY
+ * cells.
+ *
+ * Shut down any machines that only applied to RELAY_EARLY circuits.
+ * Activate any new ones.
+ */
+void
+circpad_machine_event_circ_has_no_relay_early(origin_circuit_t *circ)
+{
+ circpad_shutdown_old_machines(circ);
+ circpad_add_matching_machines(circ, origin_padding_machines);
+}
+
+/**
+ * Streams attached event.
+ *
+ * Called from link_apconn_to_circ() and handle_hs_exit_conn()
+ *
+ * Shut down any machines that only applied to circuits without
+ * streams. Activate any new ones.
+ */
+void
+circpad_machine_event_circ_has_streams(origin_circuit_t *circ)
+{
+ circpad_shutdown_old_machines(circ);
+ circpad_add_matching_machines(circ, origin_padding_machines);
+}
+
+/**
+ * Streams detached event.
+ *
+ * Called from circuit_detach_stream()
+ *
+ * Shut down any machines that only applied to circuits with
+ * streams. Activate any new ones.
+ */
+void
+circpad_machine_event_circ_has_no_streams(origin_circuit_t *circ)
+{
+ circpad_shutdown_old_machines(circ);
+ circpad_add_matching_machines(circ, origin_padding_machines);
+}
+
+/**
+ * Verify that padding is coming from the expected hop.
+ *
+ * Returns true if from_hop matches the target hop from
+ * one of our padding machines.
+ *
+ * Returns false if we're not an origin circuit, or if from_hop
+ * does not match one of the padding machines.
+ */
+bool
+circpad_padding_is_from_expected_hop(circuit_t *circ,
+ crypt_path_t *from_hop)
+{
+ crypt_path_t *target_hop = NULL;
+ if (!CIRCUIT_IS_ORIGIN(circ))
+ return 0;
+
+ FOR_EACH_CIRCUIT_MACHINE_BEGIN(i) {
+ /* We have to check padding_machine and not padding_info/active
+ * machines here because padding may arrive after we shut down a
+ * machine. The info is gone, but the padding_machine waits
+ * for the padding_negotiated response to come back. */
+ if (!circ->padding_machine[i])
+ continue;
+
+ target_hop = circuit_get_cpath_hop(TO_ORIGIN_CIRCUIT(circ),
+ circ->padding_machine[i]->target_hopnum);
+
+ if (target_hop == from_hop)
+ return 1;
+ } FOR_EACH_CIRCUIT_MACHINE_END;
+
+ return 0;
+}
+
+/**
+ * Deliver circpad events for an "unrecognized cell".
+ *
+ * Unrecognized cells are sent to relays and are forwarded
+ * onto the next hop of their circuits. Unrecognized cells
+ * are by definition not padding. We need to tell relay-side
+ * state machines that a non-padding cell was sent or received,
+ * depending on the direction, so they can update their histograms
+ * and decide to pad or not.
+ */
+void
+circpad_deliver_unrecognized_cell_events(circuit_t *circ,
+ cell_direction_t dir)
+{
+ // We should never see unrecognized cells at origin.
+ // Our caller emits a warn when this happens.
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ return;
+ }
+
+ if (dir == CELL_DIRECTION_OUT) {
+ /* When direction is out (away from origin), then we received non-padding
+ cell coming from the origin to us. */
+ circpad_cell_event_nonpadding_received(circ);
+ } else if (dir == CELL_DIRECTION_IN) {
+ /* It's in and not origin, so the cell is going away from us.
+ * So we are relaying a non-padding cell towards the origin. */
+ circpad_cell_event_nonpadding_sent(circ);
+ }
+}
+
+/**
+ * Deliver circpad events for "recognized" relay cells.
+ *
+ * Recognized cells are destined for this hop, either client or middle.
+ * Check if this is a padding cell or not, and send the appropriate
+ * received event.
+ */
+void
+circpad_deliver_recognized_relay_cell_events(circuit_t *circ,
+ uint8_t relay_command,
+ crypt_path_t *layer_hint)
+{
+ if (relay_command == RELAY_COMMAND_DROP) {
+ rep_hist_padding_count_read(PADDING_TYPE_DROP);
+
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ if (circpad_padding_is_from_expected_hop(circ, layer_hint)) {
+ circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), 0);
+ } else {
+ /* This is unexpected padding. Ignore it for now. */
+ return;
+ }
+ }
+
+ /* The cell is recognized, which means that we are its destination and
+ that it is a padding cell. We might still be either the client or the
+ middle node, because of leaky-pipe circuit topology. */
+ circpad_cell_event_padding_received(circ);
+ log_fn(LOG_INFO, LD_CIRC, "Got padding cell on %s circuit %u.",
+ CIRCUIT_IS_ORIGIN(circ) ? "origin" : "non-origin",
+ CIRCUIT_IS_ORIGIN(circ) ?
+ TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
+ } else {
+ /* We received a non-padding cell on the edge */
+ circpad_cell_event_nonpadding_received(circ);
+ }
+}
+
+/**
+ * Deliver circpad events for relay cells sent from us.
+ *
+ * If this is a padding cell, update our padding stats
+ * and deliver the event. Otherwise just deliver the event.
+ */
+void
+circpad_deliver_sent_relay_cell_events(circuit_t *circ,
+ uint8_t relay_command)
+{
+ /* RELAY_COMMAND_DROP is the multi-hop (aka circuit-level) padding cell in
+ * tor. (CELL_PADDING is a channel-level padding cell, which is not relayed
+ * or processed here).
+ *
+ * We do generate events for PADDING_NEGOTIATE and PADDING_NEGOTIATED cells.
+ */
+ if (relay_command == RELAY_COMMAND_DROP) {
+ /* Optimization: The event for RELAY_COMMAND_DROP is sent directly
+ * from circpad_send_padding_cell_for_callback(). This is to avoid
+ * putting a cell_t and a relay_header_t on the stack repeatedly
+ * if we decide to send a long train of padding cells back-to-back
+ * with 0 delay. So we do nothing here. */
+ return;
+ } else {
+ /* This is a non-padding cell sent from the client or from
+ * this node. */
+ circpad_cell_event_nonpadding_sent(circ);
+ }
+}
+
+/**
+ * Initialize the states array for a circpad machine.
+ */
+void
+circpad_machine_states_init(circpad_machine_spec_t *machine,
+ circpad_statenum_t num_states)
+{
+ if (BUG(num_states > CIRCPAD_MAX_MACHINE_STATES)) {
+ num_states = CIRCPAD_MAX_MACHINE_STATES;
+ }
+
+ machine->num_states = num_states;
+ machine->states = tor_malloc_zero(sizeof(circpad_state_t)*num_states);
+
+ /* Initialize the default next state for all events to
+ * "ignore" -- if events aren't specified, they are ignored. */
+ for (circpad_statenum_t s = 0; s < num_states; s++) {
+ for (int e = 0; e < CIRCPAD_NUM_EVENTS; e++) {
+ machine->states[s].next_state[e] = CIRCPAD_STATE_IGNORE;
+ }
+ }
+}
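+
+/* Note: circpad_machine_states_init() only allocates the states array,
+ * records num_states, and sets every transition to CIRCPAD_STATE_IGNORE;
+ * callers then fill in each state's histogram or distribution fields and
+ * its next_state[] entries. The TOR_UNIT_TESTS machines further below are
+ * worked examples of this. */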
+
+static void
+circpad_setup_machine_on_circ(circuit_t *on_circ,
+ const circpad_machine_spec_t *machine)
+{
+ if (CIRCUIT_IS_ORIGIN(on_circ) && !machine->is_origin_side) {
+ log_fn(LOG_WARN, LD_BUG,
+ "Can't set up non-origin machine on origin circuit!");
+ return;
+ }
+
+ if (!CIRCUIT_IS_ORIGIN(on_circ) && machine->is_origin_side) {
+ log_fn(LOG_WARN, LD_BUG,
+ "Can't set up origin machine on non-origin circuit!");
+ return;
+ }
+
+ IF_BUG_ONCE(on_circ->padding_machine[machine->machine_index] != NULL) {
+ return;
+ }
+ IF_BUG_ONCE(on_circ->padding_info[machine->machine_index] != NULL) {
+ return;
+ }
+
+ /* Log message */
+ if (CIRCUIT_IS_ORIGIN(on_circ)) {
+ log_info(LD_CIRC, "Registering machine %s to origin circ %u (%d)",
+ machine->name,
+ TO_ORIGIN_CIRCUIT(on_circ)->global_identifier, on_circ->purpose);
+ } else {
+ log_info(LD_CIRC, "Registering machine %s to non-origin circ (%d)",
+ machine->name, on_circ->purpose);
+ }
+
+ on_circ->padding_info[machine->machine_index] =
+ circpad_circuit_machineinfo_new(on_circ, machine->machine_index);
+ on_circ->padding_machine[machine->machine_index] = machine;
+}
+
+/** Validate a single state of a padding machine */
+static bool
+padding_machine_state_is_valid(const circpad_state_t *state)
+{
+ int b;
+ uint32_t tokens_count = 0;
+ circpad_delay_t prev_bin_edge = 0;
+
+ /* We only validate histograms */
+ if (!state->histogram_len) {
+ return true;
+ }
+
+ /* We need at least two bins in a histogram */
+ if (state->histogram_len < 2) {
+ log_warn(LD_CIRC, "You can't have a histogram with less than 2 bins");
+ return false;
+ }
+
+ /* For each machine state, if it's a histogram, make sure all the
+ * histogram edges are well defined (i.e. are strictly monotonic). */
+ for (b = 0 ; b < state->histogram_len ; b++) {
+ /* Check that histogram edges are strictly increasing. Ignore the first
+ * edge since it can be zero. */
+ if (prev_bin_edge >= state->histogram_edges[b] && b > 0) {
+ log_warn(LD_CIRC, "Histogram edges are not increasing [%u/%u]",
+ prev_bin_edge, state->histogram_edges[b]);
+ return false;
+ }
+
+ prev_bin_edge = state->histogram_edges[b];
+
+ /* Also count the number of tokens as we go through the histogram states */
+ tokens_count += state->histogram[b];
+ }
+ /* Verify that the total number of tokens is correct */
+ if (tokens_count != state->histogram_total_tokens) {
+ log_warn(LD_CIRC, "Histogram token count is wrong [%u/%u]",
+ tokens_count, state->histogram_total_tokens);
+ return false;
+ }
+
+ return true;
+}
+
+/** Basic validation of padding machine */
+static bool
+padding_machine_is_valid(const circpad_machine_spec_t *machine)
+{
+ int i;
+
+ /* Validate the histograms of the padding machine */
+ for (i = 0 ; i < machine->num_states ; i++) {
+ if (!padding_machine_state_is_valid(&machine->states[i])) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Validate and register <b>machine</b> into <b>machine_list</b>. If
+ * <b>machine_list</b> is NULL, then just validate. */
+void
+circpad_register_padding_machine(circpad_machine_spec_t *machine,
+ smartlist_t *machine_list)
+{
+ if (!padding_machine_is_valid(machine)) {
+ log_warn(LD_CIRC, "Machine #%u is invalid. Ignoring.",
+ machine->machine_num);
+ return;
+ }
+
+ if (machine_list) {
+ smartlist_add(machine_list, machine);
+ }
+}
+
+#ifdef TOR_UNIT_TESTS
+/* These padding machines are only used for tests pending #28634. */
+static void
+circpad_circ_client_machine_init(void)
+{
+ circpad_machine_spec_t *circ_client_machine
+ = tor_malloc_zero(sizeof(circpad_machine_spec_t));
+
+ circ_client_machine->conditions.min_hops = 2;
+ circ_client_machine->conditions.state_mask =
+ CIRCPAD_CIRC_BUILDING|CIRCPAD_CIRC_OPENED|CIRCPAD_CIRC_HAS_RELAY_EARLY;
+ circ_client_machine->conditions.purpose_mask = CIRCPAD_PURPOSE_ALL;
+ circ_client_machine->conditions.reduced_padding_ok = 1;
+
+ circ_client_machine->target_hopnum = 2;
+ circ_client_machine->is_origin_side = 1;
+
+ /* Start, gap, burst */
+ circpad_machine_states_init(circ_client_machine, 3);
+
+ circ_client_machine->states[CIRCPAD_STATE_START].
+ next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;
+
+ circ_client_machine->states[CIRCPAD_STATE_BURST].
+ next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;
+ circ_client_machine->states[CIRCPAD_STATE_BURST].
+ next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_BURST;
+
+ /* If we are in burst state, and we send a non-padding cell, then we cancel
+ the timer for the next padding cell:
+ We don't want to send fake extends when actual extends are going on */
+ circ_client_machine->states[CIRCPAD_STATE_BURST].
+ next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_CANCEL;
+
+ circ_client_machine->states[CIRCPAD_STATE_BURST].
+ next_state[CIRCPAD_EVENT_BINS_EMPTY] = CIRCPAD_STATE_END;
+
+ circ_client_machine->states[CIRCPAD_STATE_BURST].token_removal =
+ CIRCPAD_TOKEN_REMOVAL_CLOSEST;
+
+ circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_len = 2;
+ circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_edges[0]= 500;
+ circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_edges[1]= 1000000;
+
+ /* We have 5 tokens in the histogram, which means that all circuits will look
+ * like they have 7 hops (since we start this machine after the second hop,
+ * and tokens are decremented for any valid hops, and fake extends are
+ * used after that -- 2+5==7). */
+ circ_client_machine->states[CIRCPAD_STATE_BURST].histogram[0] = 5;
+
+ circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_total_tokens = 5;
+
+ circ_client_machine->machine_num = smartlist_len(origin_padding_machines);
+ circpad_register_padding_machine(circ_client_machine,
+ origin_padding_machines);
+}
+
+static void
+circpad_circ_responder_machine_init(void)
+{
+ circpad_machine_spec_t *circ_responder_machine
+ = tor_malloc_zero(sizeof(circpad_machine_spec_t));
+
+ /* Shut down the machine after we've sent enough packets */
+ circ_responder_machine->should_negotiate_end = 1;
+
+ /* The relay-side doesn't care what hopnum it is, but for consistency,
+ * let's match the client */
+ circ_responder_machine->target_hopnum = 2;
+ circ_responder_machine->is_origin_side = 0;
+
+ /* Start, gap, burst */
+ circpad_machine_states_init(circ_responder_machine, 3);
+
+ /* These are the settings of the state machine. In the future we will
+ serialize them into the consensus or the torrc. */
+
+ /* We transition to the burst state on padding receive and on non-padding
+ * receive */
+ circ_responder_machine->states[CIRCPAD_STATE_START].
+ next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_BURST;
+ circ_responder_machine->states[CIRCPAD_STATE_START].
+ next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;
+
+ /* Inside the burst state we _stay_ in the burst state when a non-padding
+ * is sent */
+ circ_responder_machine->states[CIRCPAD_STATE_BURST].
+ next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_BURST;
+
+ /* Inside the burst state we transition to the gap state when we receive a
+ * padding cell */
+ circ_responder_machine->states[CIRCPAD_STATE_BURST].
+ next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_GAP;
+
+ /* These describe the padding characteristics when in burst state */
+
+ /* use_rtt_estimate tries to estimate how long padding cells take to go from
+ C->M, and uses that as the base of the histogram */
+ circ_responder_machine->states[CIRCPAD_STATE_BURST].use_rtt_estimate = 1;
+ /* The histogram has 2 bins: an empty one, and the infinity bin */
+ circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram_len = 2;
+ circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram_edges[0]= 500;
+ circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram_edges[1] =
+ 1000000;
+ /* During burst state we wait forever for padding to arrive.
+
+ We are waiting for a padding cell from the client to come in, so that we
+ can respond and imitate what an extend looks like */
+ circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram[0] = 0;
+ // Only infinity bin:
+ circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram[1] = 1;
+ circ_responder_machine->states[CIRCPAD_STATE_BURST].
+ histogram_total_tokens = 1;
+
+ /* From the gap state, we _stay_ in the gap state, when we receive padding
+ * or non-padding */
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].
+ next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_GAP;
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].
+ next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_GAP;
+
+ /* And from the gap state, we go to the end, when the bins are empty or a
+ * non-padding cell is sent */
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].
+ next_state[CIRCPAD_EVENT_BINS_EMPTY] = CIRCPAD_STATE_END;
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].
+ next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_END;
+
+ // FIXME: Tune this histogram
+
+ /* The gap state is the delay we wait after we receive a padding cell
+ before we send a padding response */
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].use_rtt_estimate = 1;
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_len = 6;
+ /* Specify histogram bins */
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[0]= 500;
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[1]= 1000;
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[2]= 2000;
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[3]= 4000;
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[4]= 8000;
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[5]= 16000;
+ /* Specify histogram tokens */
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[0] = 0;
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[1] = 1;
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[2] = 2;
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[3] = 2;
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[4] = 1;
+ /* Total number of tokens */
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_total_tokens = 6;
+
+ circ_responder_machine->states[CIRCPAD_STATE_GAP].token_removal =
+ CIRCPAD_TOKEN_REMOVAL_CLOSEST_USEC;
+
+ circ_responder_machine->machine_num = smartlist_len(relay_padding_machines);
+ circpad_register_padding_machine(circ_responder_machine,
+ relay_padding_machines);
+}
+#endif /* defined(TOR_UNIT_TESTS) */
+
+/**
+ * Initialize all of our padding machines.
+ *
+ * This is called at startup. It sets up the global machines; loading
+ * machines from torrc and the consensus is still a TODO (see below).
+ */
+void
+circpad_machines_init(void)
+{
+ tor_assert_nonfatal(origin_padding_machines == NULL);
+ tor_assert_nonfatal(relay_padding_machines == NULL);
+
+ origin_padding_machines = smartlist_new();
+ relay_padding_machines = smartlist_new();
+
+ /* Register machines for hiding client-side intro circuits */
+ circpad_machine_client_hide_intro_circuits(origin_padding_machines);
+ circpad_machine_relay_hide_intro_circuits(relay_padding_machines);
+
+ /* Register machines for hiding client-side rendezvous circuits */
+ circpad_machine_client_hide_rend_circuits(origin_padding_machines);
+ circpad_machine_relay_hide_rend_circuits(relay_padding_machines);
+
+ // TODO: Parse machines from consensus and torrc
+#ifdef TOR_UNIT_TESTS
+ circpad_circ_client_machine_init();
+ circpad_circ_responder_machine_init();
+#endif
+}
+
+/**
+ * Free our padding machines
+ */
+void
+circpad_machines_free(void)
+{
+ if (origin_padding_machines) {
+ SMARTLIST_FOREACH(origin_padding_machines,
+ circpad_machine_spec_t *,
+ m, tor_free(m->states); tor_free(m));
+ smartlist_free(origin_padding_machines);
+ }
+
+ if (relay_padding_machines) {
+ SMARTLIST_FOREACH(relay_padding_machines,
+ circpad_machine_spec_t *,
+ m, tor_free(m->states); tor_free(m));
+ smartlist_free(relay_padding_machines);
+ }
+}
+
+/**
+ * Check the Protover info to see if a node supports padding.
+ */
+static bool
+circpad_node_supports_padding(const node_t *node)
+{
+ if (node->rs) {
+ log_fn(LOG_INFO, LD_CIRC, "Checking padding: %s",
+ node->rs->pv.supports_hs_setup_padding ?
+ "supported" : "unsupported");
+ return node->rs->pv.supports_hs_setup_padding;
+ }
+
+ log_fn(LOG_INFO, LD_CIRC, "Empty routerstatus in padding check");
+ return 0;
+}
+
+/**
+ * Get a node_t for the nth hop in our circuit, starting from 1.
+ *
+ * Returns the node_t from the consensus for that hop, if that hop is open.
+ * Otherwise returns NULL.
+ */
+MOCK_IMPL(STATIC const node_t *,
+circuit_get_nth_node,(origin_circuit_t *circ, int hop))
+{
+ crypt_path_t *iter = circuit_get_cpath_hop(circ, hop);
+
+ if (!iter || iter->state != CPATH_STATE_OPEN)
+ return NULL;
+
+ return node_get_by_id(iter->extend_info->identity_digest);
+}
+
+/**
+ * Return true if a particular circuit supports padding
+ * at the desired hop.
+ */
+static bool
+circpad_circuit_supports_padding(origin_circuit_t *circ,
+ int target_hopnum)
+{
+ const node_t *hop;
+
+ if (!(hop = circuit_get_nth_node(circ, target_hopnum))) {
+ return 0;
+ }
+
+ return circpad_node_supports_padding(hop);
+}
+
+/**
+ * Try to negotiate padding.
+ *
+ * Returns -1 on error, 0 on success.
+ */
+signed_error_t
+circpad_negotiate_padding(origin_circuit_t *circ,
+ circpad_machine_num_t machine,
+ uint8_t target_hopnum,
+ uint8_t command)
+{
+ circpad_negotiate_t type;
+ cell_t cell;
+ ssize_t len;
+
+ /* Check that the target hop lists support for padding in
+ * its ProtoVer fields */
+ if (!circpad_circuit_supports_padding(circ, target_hopnum)) {
+ return -1;
+ }
+
+ memset(&cell, 0, sizeof(cell_t));
+ memset(&type, 0, sizeof(circpad_negotiate_t));
+ // This gets reset to RELAY_EARLY appropriately by
+ // relay_send_command_from_edge_. At least, it looks that way.
+ // QQQ-MP-AP: Verify that.
+ cell.command = CELL_RELAY;
+
+ circpad_negotiate_set_command(&type, command);
+ circpad_negotiate_set_version(&type, 0);
+ circpad_negotiate_set_machine_type(&type, machine);
+
+ if ((len = circpad_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
+ &type)) < 0)
+ return -1;
+
+ log_fn(LOG_INFO,LD_CIRC,
+ "Negotiating padding on circuit %u (%d), command %d",
+ circ->global_identifier, TO_CIRCUIT(circ)->purpose, command);
+
+ return circpad_send_command_to_hop(circ, target_hopnum,
+ RELAY_COMMAND_PADDING_NEGOTIATE,
+ cell.payload, len);
+}
+
+/**
+ * Send a PADDING_NEGOTIATED response cell from this (middle) relay back
+ * towards the origin, reporting whether padding negotiation succeeded.
+ *
+ * Returns 1 if the response cell was sent successfully, 0 otherwise.
+ */
+bool
+circpad_padding_negotiated(circuit_t *circ,
+ circpad_machine_num_t machine,
+ uint8_t command,
+ uint8_t response)
+{
+ circpad_negotiated_t type;
+ cell_t cell;
+ ssize_t len;
+
+ memset(&cell, 0, sizeof(cell_t));
+ memset(&type, 0, sizeof(circpad_negotiated_t));
+ // This gets reset to RELAY_EARLY appropriately by
+ // relay_send_command_from_edge_. At least, it looks that way.
+ // QQQ-MP-AP: Verify that.
+ cell.command = CELL_RELAY;
+
+ circpad_negotiated_set_command(&type, command);
+ circpad_negotiated_set_response(&type, response);
+ circpad_negotiated_set_version(&type, 0);
+ circpad_negotiated_set_machine_type(&type, machine);
+
+ if ((len = circpad_negotiated_encode(cell.payload, CELL_PAYLOAD_SIZE,
+ &type)) < 0)
+ return 0;
+
+ /* Use relay_send because we're from the middle to the origin. We don't
+ * need to specify a target hop or layer_hint. */
+ return relay_send_command_from_edge(0, circ,
+ RELAY_COMMAND_PADDING_NEGOTIATED,
+ (void*)cell.payload,
+ (size_t)len, NULL) == 0;
+}
+
+/**
+ * Parse and react to a padding_negotiate cell.
+ *
+ * This is called at the middle node upon receipt of the client's choice of
+ * state machine, so that it can use the requested state machine index, if
+ * it is available.
+ *
+ * Returns -1 on error, 0 on success.
+ */
+signed_error_t
+circpad_handle_padding_negotiate(circuit_t *circ, cell_t *cell)
+{
+ int retval = 0;
+ circpad_negotiate_t *negotiate;
+
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
+ "Padding negotiate cell unsupported at origin (circuit %u)",
+ TO_ORIGIN_CIRCUIT(circ)->global_identifier);
+ return -1;
+ }
+
+ if (circpad_negotiate_parse(&negotiate, cell->payload+RELAY_HEADER_SIZE,
+ CELL_PAYLOAD_SIZE-RELAY_HEADER_SIZE) < 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
+ "Received malformed PADDING_NEGOTIATE cell; dropping.");
+ return -1;
+ }
+
+ if (negotiate->command == CIRCPAD_COMMAND_STOP) {
+ /* Free the machine corresponding to this machine type */
+ if (free_circ_machineinfos_with_machine_num(circ,
+ negotiate->machine_type)) {
+ log_info(LD_CIRC, "Received STOP command for machine %u",
+ negotiate->machine_type);
+ goto done;
+ }
+ log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
+ "Received circuit padding stop command for unknown machine.");
+ goto err;
+ } else if (negotiate->command == CIRCPAD_COMMAND_START) {
+ SMARTLIST_FOREACH_BEGIN(relay_padding_machines,
+ const circpad_machine_spec_t *, m) {
+ if (m->machine_num == negotiate->machine_type) {
+ circpad_setup_machine_on_circ(circ, m);
+ circpad_cell_event_nonpadding_received(circ);
+ goto done;
+ }
+ } SMARTLIST_FOREACH_END(m);
+ }
+
+ err:
+ retval = -1;
+
+ done:
+ circpad_padding_negotiated(circ, negotiate->machine_type,
+ negotiate->command,
+ (retval == 0) ? CIRCPAD_RESPONSE_OK : CIRCPAD_RESPONSE_ERR);
+ circpad_negotiate_free(negotiate);
+
+ return retval;
+}
+
+/**
+ * Parse and react to a padding_negotiated cell.
+ *
+ * This is called at the origin upon receipt of the middle's response
+ * to our choice of state machine.
+ *
+ * Returns -1 on error, 0 on success.
+ */
+signed_error_t
+circpad_handle_padding_negotiated(circuit_t *circ, cell_t *cell,
+ crypt_path_t *layer_hint)
+{
+ circpad_negotiated_t *negotiated;
+
+ if (!CIRCUIT_IS_ORIGIN(circ)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
+ "Padding negotiated cell unsupported at non-origin.");
+ return -1;
+ }
+
+ /* Verify this came from the expected hop */
+ if (!circpad_padding_is_from_expected_hop(circ, layer_hint)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
+ "Padding negotiated cell from wrong hop on circuit %u",
+ TO_ORIGIN_CIRCUIT(circ)->global_identifier);
+ return -1;
+ }
+
+ if (circpad_negotiated_parse(&negotiated, cell->payload+RELAY_HEADER_SIZE,
+ CELL_PAYLOAD_SIZE-RELAY_HEADER_SIZE) < 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
+ "Received malformed PADDING_NEGOTIATED cell on circuit %u; "
+ "dropping.", TO_ORIGIN_CIRCUIT(circ)->global_identifier);
+ return -1;
+ }
+
+ if (negotiated->command == CIRCPAD_COMMAND_STOP) {
+ log_info(LD_CIRC,
+ "Received STOP command on PADDING_NEGOTIATED for circuit %u",
+ TO_ORIGIN_CIRCUIT(circ)->global_identifier);
+ /* There may not be a padding_info here if we shut down the
+ * machine in circpad_shutdown_old_machines(). Or, if
+ * circpad_add_matching_machines() added a new machine,
+ * there may be a padding_machine for a different machine num
+ * than this response. */
+ free_circ_machineinfos_with_machine_num(circ, negotiated->machine_type);
+ } else if (negotiated->command == CIRCPAD_COMMAND_START &&
+ negotiated->response == CIRCPAD_RESPONSE_ERR) {
+ // This can happen due to consensus drift. Free the machines
+ // and be sad.
+ free_circ_machineinfos_with_machine_num(circ, negotiated->machine_type);
+ TO_ORIGIN_CIRCUIT(circ)->padding_negotiation_failed = 1;
+ log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
+ "Middle node did not accept our padding request on circuit %u (%d)",
+ TO_ORIGIN_CIRCUIT(circ)->global_identifier,
+ circ->purpose);
+ }
+
+ circpad_negotiated_free(negotiated);
+ return 0;
+}
+
+/** Free memory allocated by this machine spec. */
+STATIC void
+machine_spec_free_(circpad_machine_spec_t *m)
+{
+ if (!m) return;
+
+ tor_free(m->states);
+ tor_free(m);
+}
+
+/** Free all memory allocated by the circuitpadding subsystem. */
+void
+circpad_free_all(void)
+{
+ if (origin_padding_machines) {
+ SMARTLIST_FOREACH_BEGIN(origin_padding_machines,
+ circpad_machine_spec_t *, m) {
+ machine_spec_free(m);
+ } SMARTLIST_FOREACH_END(m);
+ smartlist_free(origin_padding_machines);
+ }
+ if (relay_padding_machines) {
+ SMARTLIST_FOREACH_BEGIN(relay_padding_machines,
+ circpad_machine_spec_t *, m) {
+ machine_spec_free(m);
+ } SMARTLIST_FOREACH_END(m);
+ smartlist_free(relay_padding_machines);
+ }
+}
+
+/* Serialization */
+// TODO: Should we use keyword=value here? Are there helpers for that?
+#if 0
+static void
+circpad_state_serialize(const circpad_state_t *state,
+ smartlist_t *chunks)
+{
+ smartlist_add_asprintf(chunks, " %u", state->histogram[0]);
+ for (int i = 1; i < state->histogram_len; i++) {
+ smartlist_add_asprintf(chunks, ",%u",
+ state->histogram[i]);
+ }
+
+ smartlist_add_asprintf(chunks, " 0x%x",
+ state->transition_cancel_events);
+
+ for (int i = 0; i < CIRCPAD_NUM_STATES; i++) {
+ smartlist_add_asprintf(chunks, ",0x%x",
+ state->transition_events[i]);
+ }
+
+ smartlist_add_asprintf(chunks, " %u %u",
+ state->use_rtt_estimate,
+ state->token_removal);
+}
+
+char *
+circpad_machine_spec_to_string(const circpad_machine_spec_t *machine)
+{
+ smartlist_t *chunks = smartlist_new();
+ char *out;
+ (void)machine;
+
+ circpad_state_serialize(&machine->start, chunks);
+ circpad_state_serialize(&machine->gap, chunks);
+ circpad_state_serialize(&machine->burst, chunks);
+
+ out = smartlist_join_strings(chunks, "", 0, NULL);
+
+ SMARTLIST_FOREACH(chunks, char *, cp, tor_free(cp));
+ smartlist_free(chunks);
+ return out;
+}
+
+// XXX: Writeme
+const circpad_machine_spec_t *
+circpad_string_to_machine(const char *str)
+{
+ (void)str;
+ return NULL;
+}
+
+#endif /* 0 */
diff --git a/src/core/or/circuitpadding.h b/src/core/or/circuitpadding.h
new file mode 100644
index 0000000000..74b69a1c7a
--- /dev/null
+++ b/src/core/or/circuitpadding.h
@@ -0,0 +1,813 @@
+/*
+ * Copyright (c) 2017-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circuitpadding.h
+ * \brief Header file for circuitpadding.c.
+ **/
+
+#ifndef TOR_CIRCUITPADDING_H
+#define TOR_CIRCUITPADDING_H
+
+#include "trunnel/circpad_negotiation.h"
+#include "lib/evloop/timers.h"
+
+struct circuit_t;
+struct origin_circuit_t;
+struct cell_t;
+
+/**
+ * Signed error return with the specific property that negative
+ * values mean error codes of various semantics, 0 means success,
+ * and positive values are unused.
+ *
+ * XXX: Tor uses this concept a lot but just calls it int. Should we move
+ * this somewhere centralized? Where?
+ */
+typedef int signed_error_t;
+
+/**
+ * These constants specify the types of events that can cause
+ * transitions between state machine states.
+ *
+ * Note that SENT and RECV are relative to this endpoint. For
+ * relays, SENT means packets destined towards the client and
+ * RECV means packets destined towards the relay. On the client,
+ * SENT means packets destined towards the relay, whereas RECV
+ * means packets destined towards the client.
+ */
+typedef enum {
+ /* A non-padding cell was received. */
+ CIRCPAD_EVENT_NONPADDING_RECV = 0,
+ /* A non-padding cell was sent. */
+ CIRCPAD_EVENT_NONPADDING_SENT = 1,
+ /* A padding cell (RELAY_COMMAND_DROP) was sent. */
+ CIRCPAD_EVENT_PADDING_SENT = 2,
+ /* A padding cell was received. */
+ CIRCPAD_EVENT_PADDING_RECV = 3,
+ /* We tried to schedule padding but we ended up picking the infinity bin
+ * which means that padding was delayed infinitely */
+ CIRCPAD_EVENT_INFINITY = 4,
+ /* All histogram bins are empty (we are out of tokens) */
+ CIRCPAD_EVENT_BINS_EMPTY = 5,
+ /* This state has used up its cell count */
+ CIRCPAD_EVENT_LENGTH_COUNT = 6
+} circpad_event_t;
+#define CIRCPAD_NUM_EVENTS ((int)CIRCPAD_EVENT_LENGTH_COUNT+1)
+
+/** Boolean type that says if we decided to transition states or not */
+typedef enum {
+ CIRCPAD_STATE_UNCHANGED = 0,
+ CIRCPAD_STATE_CHANGED = 1
+} circpad_decision_t;
+
+/** The type for the things in histogram bins (aka tokens) */
+typedef uint32_t circpad_hist_token_t;
+
+/** The type for histogram indexes (needs to be negative for errors) */
+typedef int8_t circpad_hist_index_t;
+
+/** The type for absolute time, from monotime_absolute_usec() */
+typedef uint64_t circpad_time_t;
+
+/** The type for timer delays, in microseconds */
+typedef uint32_t circpad_delay_t;
+#define CIRCPAD_DELAY_UNITS_PER_SECOND (1000*1000)
+
+/**
+ * An infinite padding cell delay means don't schedule any padding --
+ * simply wait until a different event triggers a transition.
+ *
+ * This means that the maximum delay we can schedule is UINT32_MAX-1
+ * microseconds, or about 4300 seconds (roughly 1.2 hours).
+ * XXX: Is this enough if we want to simulate light, intermittent
+ * activity on an onion service?
+ */
+#define CIRCPAD_DELAY_INFINITE (UINT32_MAX)
+
+/**
+ * This is the maximum delay that the circuit padding system can have, in
+ * seconds.
+ */
+#define CIRCPAD_DELAY_MAX_SECS \
+ ((CIRCPAD_DELAY_INFINITE/CIRCPAD_DELAY_UNITS_PER_SECOND)+1)
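+
+/* Worked out: UINT32_MAX usec divided by (1000*1000) usec per second is
+ * about 4294 seconds, so CIRCPAD_DELAY_MAX_SECS evaluates to 4295
+ * (roughly 71.6 minutes). */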
+
+/**
+ * Macro to clarify when we're checking the infinity bin.
+ *
+ * Works with either circpad_state_t or circpad_machine_runtime_t
+ */
+#define CIRCPAD_INFINITY_BIN(mi) ((mi)->histogram_len-1)
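+
+/* For example, with the 6-bin histogram illustrated in circpad_state_t
+ * below, CIRCPAD_INFINITY_BIN() evaluates to 5, the index of the rightmost
+ * ("infinity") bin. */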
+
+/**
+ * These constants form a bitfield that specifies when a state machine
+ * should be applied to a circuit.
+ *
+ * If any of these elements is set, then the circuit will be tested against
+ * that specific condition. If an element is unset, then we don't test it.
+ * (E.g., if neither NO_STREAMS nor STREAMS is set, then we will not care
+ * whether a circuit has streams attached when we apply a state machine.)
+ *
+ * The helper function circpad_circuit_state() converts circuit state
+ * flags into this more compact representation.
+ */
+typedef enum {
+ /* Only apply machine if the circuit is still building */
+ CIRCPAD_CIRC_BUILDING = 1<<0,
+ /* Only apply machine if the circuit is open */
+ CIRCPAD_CIRC_OPENED = 1<<1,
+ /* Only apply machine if the circuit has no attached streams */
+ CIRCPAD_CIRC_NO_STREAMS = 1<<2,
+ /* Only apply machine if the circuit has attached streams */
+ CIRCPAD_CIRC_STREAMS = 1<<3,
+ /* Only apply machine if the circuit still allows RELAY_EARLY cells */
+ CIRCPAD_CIRC_HAS_RELAY_EARLY = 1<<4,
+ /* Only apply machine if the circuit has depleted its RELAY_EARLY cells
+ * allowance. */
+ CIRCPAD_CIRC_HAS_NO_RELAY_EARLY = 1<<5
+} circpad_circuit_state_t;
+
+/** Bitmask that says "apply this machine to all states" */
+#define CIRCPAD_STATE_ALL \
+ (CIRCPAD_CIRC_BUILDING|CIRCPAD_CIRC_OPENED| \
+ CIRCPAD_CIRC_STREAMS|CIRCPAD_CIRC_NO_STREAMS| \
+ CIRCPAD_CIRC_HAS_RELAY_EARLY|CIRCPAD_CIRC_HAS_NO_RELAY_EARLY)
+
+/**
+ * A compact circuit purpose bitfield mask that allows us to compactly
+ * specify which circuit purposes a machine should apply to.
+ *
+ * The helper function circpad_circ_purpose_to_mask() converts circuit
+ * purposes into bit positions in this bitmask.
+ */
+typedef uint32_t circpad_purpose_mask_t;
+
+/** Bitmask that says "apply this machine to all purposes". */
+#define CIRCPAD_PURPOSE_ALL (0xFFFFFFFF)
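+
+/* Illustrative (non-normative) example: a machine meant only for general
+ * client circuits and joined rendezvous circuits could set its
+ * purpose_mask to
+ *
+ *   circpad_circ_purpose_to_mask(CIRCUIT_PURPOSE_C_GENERAL) |
+ *   circpad_circ_purpose_to_mask(CIRCUIT_PURPOSE_C_REND_JOINED)
+ *
+ * whereas CIRCPAD_PURPOSE_ALL matches every client purpose. */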
+
+/**
+ * This type specifies all of the conditions that must be met before
+ * a client decides to initiate padding on a circuit.
+ *
+ * A circuit must satisfy every sub-field in this type in order
+ * to be considered to match the conditions.
+ */
+typedef struct circpad_machine_conditions_t {
+ /** Only apply the machine *if* the circuit has at least this many hops */
+ unsigned min_hops : 3;
+
+ /** Only apply the machine *if* vanguards are enabled */
+ unsigned requires_vanguards : 1;
+
+ /**
+ * This machine is ok to use if reduced padding is set in consensus
+ * or torrc. This machine will still be applied even if reduced padding
+ * is not set; this flag only acts to exclude machines that don't have
+ * it set when reduced padding is requested. Therefore, reduced padding
+ * machines should appear at the lowest priority in the padding machine
+ * lists (aka first in the list), so that non-reduced padding machines
+ * for the same purpose are given a chance to apply when reduced padding
+ * is not requested. */
+ unsigned reduced_padding_ok : 1;
+
+ /** Only apply the machine *if* the circuit's state matches any of
+ * the bits set in this bitmask. */
+ circpad_circuit_state_t state_mask;
+
+ /** Only apply a machine *if* the circuit's purpose matches one
+ * of the bits set in this bitmask */
+ circpad_purpose_mask_t purpose_mask;
+
+} circpad_machine_conditions_t;
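+
+/* For a concrete example of how these conditions get filled in, see
+ * circpad_circ_client_machine_init() (under TOR_UNIT_TESTS in
+ * circuitpadding.c), which sets min_hops, state_mask, purpose_mask and
+ * reduced_padding_ok. */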
+
+/**
+ * Token removal strategy options.
+ *
+ * The WTF-PAD histograms are meant to specify a target distribution to shape
+ * traffic towards. This is accomplished by removing tokens from the histogram
+ * when either padding or non-padding cells are sent.
+ *
+ * When we see a non-padding cell at a particular time since the last cell, you
+ * remove a token from the corresponding delay bin. These flags specify
+ * which bin to choose if that bin is already empty.
+ */
+typedef enum {
+ /** Don't remove any tokens */
+ CIRCPAD_TOKEN_REMOVAL_NONE = 0,
+ /**
+ * Remove from the first non-zero higher bin index when current is zero.
+ * This is the recommended strategy from the Adaptive Padding paper. */
+ CIRCPAD_TOKEN_REMOVAL_HIGHER = 1,
+ /** Remove from the first non-zero lower bin index when current is empty. */
+ CIRCPAD_TOKEN_REMOVAL_LOWER = 2,
+ /** Remove from the closest non-zero bin index when current is empty. */
+ CIRCPAD_TOKEN_REMOVAL_CLOSEST = 3,
+ /** Remove from the closest bin by time value (since bins are
+ * exponentially spaced). */
+ CIRCPAD_TOKEN_REMOVAL_CLOSEST_USEC = 4,
+ /** Only remove from the exact bin corresponding to this delay. If
+ * the bin is 0, simply do nothing. Don't pick another bin. */
+ CIRCPAD_TOKEN_REMOVAL_EXACT = 5
+} circpad_removal_t;
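+
+/* Illustrative example: suppose bin #0 covers [0,100) usec, bin #1 covers
+ * [100,200) usec, bin #2 covers [200,1000) usec, and bin #3 is the
+ * infinity bin. A non-padding cell seen 150 usec after the previous cell
+ * removes a token from bin #1. If bin #1 is already empty,
+ * CIRCPAD_TOKEN_REMOVAL_HIGHER takes a token from bin #2 instead,
+ * CIRCPAD_TOKEN_REMOVAL_LOWER from bin #0, and CIRCPAD_TOKEN_REMOVAL_NONE
+ * leaves the histogram untouched. */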
+
+/**
+ * Distribution types supported by circpad_distribution_sample().
+ *
+ * These can be used instead of histograms for the inter-packet
+ * timing distribution, or to specify a distribution on the number
+ * of cells that can be sent while in a specific state of the state
+ * machine.
+ *
+ * Each distribution takes up to two parameters which are described below. */
+typedef enum {
+ /* No probability distribution is used */
+ CIRCPAD_DIST_NONE = 0,
+ /* Uniform distribution: param1 is lower bound and param2 is upper bound */
+ CIRCPAD_DIST_UNIFORM = 1,
+ /* Logistic distribution: param1 is Mu, param2 is sigma. */
+ CIRCPAD_DIST_LOGISTIC = 2,
+ /* Log-logistic distribution: param1 is Alpha, param2 is 1.0/Beta */
+ CIRCPAD_DIST_LOG_LOGISTIC = 3,
+ /* Geometric distribution: param1 is 'p' (success probability) */
+ CIRCPAD_DIST_GEOMETRIC = 4,
+ /* Weibull distribution: param1 is k, param2 is Lambda */
+ CIRCPAD_DIST_WEIBULL = 5,
+ /* Generalized Pareto distribution: param1 is sigma, param2 is xi */
+ CIRCPAD_DIST_PARETO = 6
+} circpad_distribution_type_t;
+
+/**
+ * Distribution information.
+ *
+ * This type specifies a specific distribution above, as well as
+ * up to two parameters for that distribution. The specific
+ * per-distribution meaning of these parameters is specified
+ * in circpad_distribution_sample().
+ */
+typedef struct circpad_distribution_t {
+ circpad_distribution_type_t type;
+ double param1;
+ double param2;
+} circpad_distribution_t;
+
+/** State number type. Represents current state of state machine. */
+typedef uint16_t circpad_statenum_t;
+#define CIRCPAD_STATENUM_MAX (UINT16_MAX)
+
+/** A histogram can be used to sample padding delays given a machine state.
+ * This constant defines the maximum histogram width (i.e. the max number of
+ * bins).
+ *
+ * The current limit is arbitrary and could be raised if there is a need;
+ * however, too many bins would be hard to serialize in the future.
+ *
+ * Memory concerns are not so great here since the corresponding histogram and
+ * histogram_edges arrays are global and not per-circuit.
+ *
+ * If we ever upgrade this to a value that can't be represented by 8-bits we
+ * also need to upgrade circpad_hist_index_t.
+ */
+#define CIRCPAD_MAX_HISTOGRAM_LEN (100)
+
+/**
+ * A state of a padding state machine. The information here is immutable and
+ * represents the initial form of the state; it does not get updated as things
+ * happen. The mutable information that gets updated at runtime is carried in
+ * a circpad_machine_runtime_t.
+ *
+ * This struct describes the histograms and/or probability distributions, as
+ * well as parameters of a single state in the adaptive padding machine.
+ * Instances of this struct exist in global circpad machine definitions that
+ * come from torrc or the consensus.
+ */
+typedef struct circpad_state_t {
+ /**
+ * If a histogram is used for this state, this specifies the number of bins
+ * of this histogram. Histograms must have at least 2 bins.
+ *
+ * In particular, the following histogram:
+ *
+ * Tokens
+ * +
+ * 10 | +----+
+ * 9 | | | +---------+
+ * 8 | | | | |
+ * 7 | | | +-----+ |
+ * 6 +----+ Bin+-----+ | +---------------+
+ * 5 | | #1 | | | | |
+ * | Bin| | Bin | Bin | Bin #4 | Bin #5 |
+ * | #0 | | #2 | #3 | | (infinity bin)|
+ * | | | | | | |
+ * | | | | | | |
+ * 0 +----+----+-----+-----+---------+---------------+
+ * 0 100 200 350 500 1000 inf microseconds
+ *
+ * would be specified the following way:
+ * histogram_len = 6;
+ * histogram[] = { 6, 10, 6, 7, 9, 6 }
+ * histogram_edges[] = { 0, 100, 200, 350, 500, 1000 }
+ *
+ * The final bin is called the "infinity bin" and if it's chosen we don't
+ * schedule any padding. The infinity bin is strange because its lower edge
+ * is the max value of possible non-infinite delay allowed by this histogram,
+ * and its upper edge is CIRCPAD_DELAY_INFINITE. You can tell if the infinity
+ * bin is chosen by inspecting its bin index or inspecting its upper edge.
+ *
+ * If a delay probability distribution is used for this state, this is set
+ * to 0. */
+ circpad_hist_index_t histogram_len;
+ /** The histogram itself: an array of token counts, one per bin. The
+ * bins' time widths (see histogram_edges below) are exponentially
+ * spaced, in microseconds.
+ *
+ * This array must have histogram_len elements. */
+ circpad_hist_token_t histogram[CIRCPAD_MAX_HISTOGRAM_LEN];
+ /* The histogram bin edges in usec.
+ *
+ * Each element of this array specifies the left edge of the corresponding
+ * bin. The edges must be strictly monotonically increasing. The rightmost
+ * edge is always infinity and is not specified in this array.
+ *
+ * This array must have histogram_len elements. */
+ circpad_delay_t histogram_edges[CIRCPAD_MAX_HISTOGRAM_LEN+1];
+ /** Total number of tokens in this histogram. This is a constant and is *not*
+ * decremented every time we spend a token. It's used for initializing and
+ * refilling the histogram. */
+ uint32_t histogram_total_tokens;
+
+ /**
+ * Represents a delay probability distribution (aka IAT distribution). It's a
+ * parametrized way of encoding inter-packet delay information in
+ * microseconds. It can be used instead of histograms.
+ *
+ * If it is used, token_removal below must be set to
+ * CIRCPAD_TOKEN_REMOVAL_NONE.
+ *
+ * The RTT estimate and dist_added_shift_usec are still applied to the
+ * results of sampling from this distribution (dist_max_sample_usec is
+ * used as a max).
+ */
+ circpad_distribution_t iat_dist;
+ /* If a delay probability distribution is used, this is used as the max
+ * value we can sample from the distribution. However, RTT measurements and
+ * dist_added_shift gets applied on top of this value to derive the final
+ * padding delay. */
+ circpad_delay_t dist_max_sample_usec;
+ /* If a delay probability distribution is used and this is set, we will add
+ * this value on top of the value sampled from the IAT distribution to
+ * derive the final padding delay (We also add the RTT measurement if it's
+ * enabled.). */
+ circpad_delay_t dist_added_shift_usec;
+
+ /**
+ * The length dist is a parameterized way of encoding how long this
+ * state machine runs in terms of sent padding cells or all
+ * sent cells. Values are sampled from this distribution, clamped
+ * to max_length, and then start_length is added to that value.
+ *
+ * It may be specified instead of or in addition to
+ * the infinity bins and bins empty conditions. */
+ circpad_distribution_t length_dist;
+ /** A minimum length value, added to the output of length_dist */
+ uint16_t start_length;
+ /** A cap on the length value that can be sampled from the length_dist */
+ uint64_t max_length;
+
+ /** Should we decrement length when we see a nonpadding packet?
+ * XXX: Are there any machines that actually want to set this to 0? There may
+ * not be. OTOH, it's only a bit.. */
+ unsigned length_includes_nonpadding : 1;
+
+ /**
+ * This is an array that specifies the next state to transition to upon
+ * receipt of an event matching the indicated array index.
+ *
+ * This aborts our scheduled packet and switches to the state
+ * corresponding to the index of the array. Tokens are filled upon
+ * this transition.
+ *
+ * States are allowed to transition to themselves, which means re-schedule
+ * a new padding timer. They are also allowed to temporarily "transition"
+ * to the "IGNORE" and "CANCEL" pseudo-states. See defines below
+ * for details on state behavior and meaning.
+ */
+ circpad_statenum_t next_state[CIRCPAD_NUM_EVENTS];
+
+ /**
+ * If true, estimate the RTT from this relay to the exit/website and add that
+ * estimate for use as the histogram bin 0 start delay.
+ *
+ * Right now this is only supported for relay-side state machines.
+ */
+ unsigned use_rtt_estimate : 1;
+
+ /** This specifies the token removal strategy to use upon padding and
+ * non-padding activity. */
+ circpad_removal_t token_removal;
+} circpad_state_t;
+
+/**
+ * The start state for this machine.
+ *
+ * In the original WTF-PAD, this is only used for transition to/from
+ * the burst state. All other fields are not used. But to simplify the
+ * code we've made it a first-class state. This has no performance
+ * consequences, but may make naive serialization of the state machine
+ * large, if we're not careful about how we represent empty fields.
+ */
+#define CIRCPAD_STATE_START 0
+
+/**
+ * The burst state for this machine.
+ *
+ * In the original Adaptive Padding algorithm and in WTF-PAD
+ * (https://www.freehaven.net/anonbib/cache/ShWa-Timing06.pdf and
+ * https://www.cs.kau.se/pulls/hot/thebasketcase-wtfpad/), the burst
+ * state serves to detect bursts in traffic. This is done by using longer
+ * delays in its histogram, which represent the expected delays between
+ * bursts of packets in the target stream. If this delay expires without a
+ * real packet being sent, the burst state sends a padding packet and then
+ * immediately transitions to the gap state, which is used to generate
+ * a synthetic padding packet train. In this implementation, this transition
+ * needs to be explicitly specified in the burst state's transition events.
+ *
+ * Because of this flexibility, other padding mechanisms can transition
+ * between these two states arbitrarily, to encode other dynamics of
+ * target traffic.
+ */
+#define CIRCPAD_STATE_BURST 1
+
+/**
+ * The gap state for this machine.
+ *
+ * In the original Adaptive Padding algorithm and in WTF-PAD, the gap
+ * state serves to simulate an artificial packet train composed of padding
+ * packets. It does this by specifying much lower inter-packet delays than
+ * the burst state, and transitioning back to itself after padding is sent
+ * if these timers expire before real traffic is sent. If real traffic is
+ * sent, it transitions back to the burst state.
+ *
+ * Again, in this implementation, these transitions must be specified
+ * explicitly, and other transitions are also permitted.
+ */
+#define CIRCPAD_STATE_GAP 2
+
+/**
+ * End is a pseudo-state that causes the machine to go completely
+ * idle, and optionally get torn down (depending on the
+ * value of circpad_machine_spec_t.should_negotiate_end)
+ *
+ * End MUST NOT occupy a slot in the machine state array.
+ */
+#define CIRCPAD_STATE_END CIRCPAD_STATENUM_MAX
+
+/**
+ * "Ignore" is a pseudo-state that means "do not react to this
+ * event".
+ *
+ * "Ignore" MUST NOT occupy a slot in the machine state array.
+ */
+#define CIRCPAD_STATE_IGNORE (CIRCPAD_STATENUM_MAX-1)
+
+/**
+ * "Cancel" is a pseudo-state that means "cancel pending timers,
+ * but remain in your current state".
+ *
+ * Cancel MUST NOT occupy a slot in the machine state array.
+ */
+#define CIRCPAD_STATE_CANCEL (CIRCPAD_STATENUM_MAX-2)
+
+/**
+ * Since we have 3 pseudo-states, the max state array length is
+ * up to one less than cancel's statenum.
+ */
+#define CIRCPAD_MAX_MACHINE_STATES (CIRCPAD_STATE_CANCEL-1)
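+
+/* Illustrative sketch (not one of the built-in machines): a state meant to
+ * keep padding until real traffic resumes could be wired up as
+ *
+ *   state->next_state[CIRCPAD_EVENT_PADDING_SENT] = CIRCPAD_STATE_GAP;
+ *   state->next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_END;
+ *
+ * with every other event left at CIRCPAD_STATE_IGNORE, the default
+ * installed by circpad_machine_states_init(). */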
+
+/**
+ * Mutable padding machine info.
+ *
+ * This structure contains mutable information about a padding
+ * machine. The mutable information must be kept separate because
+ * it exists per-circuit, whereas the machines themselves are global.
+ * This separation is done to conserve space in the circuit structure.
+ *
+ * This is the per-circuit state that changes as the circuit progresses
+ * through the global state machine. Some parts of it are optional (i.e.
+ * may be NULL).
+ *
+ * XXX: Play with layout to minimize space on x64 Linux (most common relay).
+ */
+typedef struct circpad_machine_runtime_t {
+ /** The timer used to schedule padding callbacks.
+ *
+ * This timer sticks around until the machineinfo's circuit
+ * is closed, at which point the timer is cancelled. For this reason it's
+ * safe to assume that the machineinfo exists if this timer gets
+ * triggered. */
+ tor_timer_t *padding_timer;
+
+ /** The circuit for this machine */
+ struct circuit_t *on_circ;
+
+ /** A mutable copy of the histogram for the current state.
+ * NULL if remove_tokens is false for that state */
+ circpad_hist_token_t *histogram;
+ /** Length of the above histogram.
+ * XXX: This field *could* be removed at the expense of added
+ * complexity+overhead for reaching back into the immutable machine
+ * state every time we need to inspect the histogram. It's only a byte,
+ * though, so it seemed worth it.
+ */
+ circpad_hist_index_t histogram_len;
+ /** Remove token from this index upon sending padding */
+ circpad_hist_index_t chosen_bin;
+
+ /** Stop padding/transition if this many cells sent */
+ uint64_t state_length;
+#define CIRCPAD_STATE_LENGTH_INFINITE UINT64_MAX
+
+ /** A scaled count of padding packets sent, used to limit padding overhead.
+ * When this reaches UINT16_MAX, we cut it and nonpadding_sent in half. */
+ uint16_t padding_sent;
+ /** A scaled count of non-padding packets sent, used to limit padding
+ * overhead. When this reaches UINT16_MAX, we cut it and padding_sent in
+ * half. */
+ uint16_t nonpadding_sent;
+
+ /**
+ * Timestamp of the most recent cell event (sent, received, padding,
+ * non-padding), in seconds from approx_time().
+ *
+   * Used as an emergency brake to stop holding padding circuits open.
+ */
+ time_t last_cell_time_sec;
+
+ /**
+ * EWMA estimate of the RTT of the circuit from this hop
+ * to the exit end, in microseconds. */
+ circpad_delay_t rtt_estimate_usec;
+
+ /**
+ * The last time we got an event relevant to estimating
+ * the RTT. Monotonic time in microseconds since system
+ * start.
+ */
+ circpad_time_t last_received_time_usec;
+
+ /**
+   * The time at which we scheduled a padding packet,
+ * or selected an infinite delay.
+ *
+ * Monotonic time in microseconds since system start.
+ * This is 0 if we haven't chosen a padding delay.
+ */
+ circpad_time_t padding_scheduled_at_usec;
+
+ /** What state is this machine in? */
+ circpad_statenum_t current_state;
+
+ /**
+ * True if we have scheduled a timer for padding.
+ *
+ * This is 1 if a timer is pending. It is 0 if
+ * no timer is scheduled. (It can be 0 even when
+   * padding_scheduled_at_usec is non-zero).
+ */
+ unsigned is_padding_timer_scheduled : 1;
+
+ /**
+ * If this is true, we have seen full duplex behavior.
+ * Stop updating the RTT.
+ */
+ unsigned stop_rtt_update : 1;
+
+/** Max number of padding machines on each circuit. If changed,
+ * also ensure the machine_index bitwidth supports the new size. */
+#define CIRCPAD_MAX_MACHINES (2)
+  /** Which padding machine index this runtime state belongs to.
+   * (Make sure changes to the bitwidth can support the
+   * CIRCPAD_MAX_MACHINES define). */
+ unsigned machine_index : 1;
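+  /*
+   * Illustrative compile-time check (assumed, not present in this change):
+   * the 1-bit machine_index field can only distinguish two machines, so any
+   * bump to CIRCPAD_MAX_MACHINES must also widen it. With a C11 toolchain
+   * this could be expressed as:
+   *
+   *   _Static_assert(CIRCPAD_MAX_MACHINES <= 2,
+   *                  "machine_index bitfield too narrow");
+   */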
+
+} circpad_machine_runtime_t;
+
+/** Helper macro to get an actual state machine from a machineinfo */
+#define CIRCPAD_GET_MACHINE(machineinfo) \
+ ((machineinfo)->on_circ->padding_machine[(machineinfo)->machine_index])
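+/*
+ * Illustrative usage sketch (hypothetical variable names): given the
+ * runtime state "mi" of a machine on a circuit, the immutable spec it is
+ * executing can be fetched as:
+ *
+ *   const circpad_machine_spec_t *spec = CIRCPAD_GET_MACHINE(mi);
+ *   circpad_statenum_t n_states = spec->num_states;
+ */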
+
+/**
+ * This specifies a particular padding machine to use after negotiation.
+ *
+ * The constants for machine_num_t are in trunnel.
+ * We want to be able to define extra numbers in the consensus/torrc, though.
+ */
+typedef uint8_t circpad_machine_num_t;
+
+/** Global state machine structure from the consensus */
+typedef struct circpad_machine_spec_t {
+ /* Just a user-friendly machine name for logs */
+ const char *name;
+
+ /** Global machine number */
+ circpad_machine_num_t machine_num;
+
+  /** Which machine index slot this machine should occupy in
+   * the array on the circuit_t */
+ unsigned machine_index : 1;
+
+ /** Send a padding negotiate to shut down machine at end state? */
+ unsigned should_negotiate_end : 1;
+
+ // These next three fields are origin machine-only...
+ /** Origin side or relay side */
+ unsigned is_origin_side : 1;
+
+ /** Which hop in the circuit should we send padding to/from?
+ * 1-indexed (ie: hop #1 is guard, #2 middle, #3 exit). */
+ unsigned target_hopnum : 3;
+
+ /** If this flag is enabled, don't close circuits that use this machine even
+ * if another part of Tor wants to close this circuit.
+ *
+ * If this flag is set, the circuitpadding subsystem will close circuits the
+ * moment the machine transitions to the END state, and only if the circuit
+ * has already been asked to be closed by another part of Tor.
+ *
+ * Circuits that should have been closed but were kept open by a padding
+ * machine are re-purposed to CIRCUIT_PURPOSE_C_CIRCUIT_PADDING, hence
+ * machines should take that purpose into account if they are filtering
+ * circuits by purpose. */
+ unsigned manage_circ_lifetime : 1;
+
+  /** This machine is only applied to a circuit if the following
+   * conditions are met. */
+ circpad_machine_conditions_t conditions;
+
+ /** How many padding cells can be sent before we apply overhead limits?
+ * XXX: Note that we can only allow up to 64k of padding cells on an
+ * otherwise quiet circuit. Is this enough? It's 33MB. */
+ uint16_t allowed_padding_count;
+
+ /** Padding percent cap: Stop padding if we exceed this percent overhead.
+ * 0 means no limit. Overhead is defined as percent of total traffic, so
+ * that we can use 0..100 here. This is the same definition as used in
+ * Prop#265. */
+ uint8_t max_padding_percent;
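+  /*
+   * Illustrative sketch of the overhead cap described above (the real check
+   * is circpad_machine_reached_padding_limit() in circuitpadding.c;
+   * "padding" and "total" are hypothetical cell counters):
+   *
+   *   if (max_padding_percent &&
+   *       100 * padding > max_padding_percent * total)
+   *     stop_scheduling_padding();
+   */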
+
+ /** State array: indexed by circpad_statenum_t */
+ circpad_state_t *states;
+
+ /**
+ * Number of states this machine has (ie: length of the states array).
+ * XXX: This field is not needed other than for safety. */
+ circpad_statenum_t num_states;
+} circpad_machine_spec_t;
+
+void circpad_new_consensus_params(const networkstatus_t *ns);
+
+int circpad_marked_circuit_for_padding(circuit_t *circ, int reason);
+
+/**
+ * The following are event call-in points that are of interest to
+ * the state machines. They are called during cell processing. */
+void circpad_deliver_unrecognized_cell_events(struct circuit_t *circ,
+ cell_direction_t dir);
+void circpad_deliver_sent_relay_cell_events(struct circuit_t *circ,
+ uint8_t relay_command);
+void circpad_deliver_recognized_relay_cell_events(struct circuit_t *circ,
+ uint8_t relay_command,
+ crypt_path_t *layer_hint);
+
+/** Cell events are delivered by the above delivery functions */
+void circpad_cell_event_nonpadding_sent(struct circuit_t *on_circ);
+void circpad_cell_event_nonpadding_received(struct circuit_t *on_circ);
+void circpad_cell_event_padding_sent(struct circuit_t *on_circ);
+void circpad_cell_event_padding_received(struct circuit_t *on_circ);
+
+/** Internal events are events the machines send to themselves */
+circpad_decision_t
+circpad_internal_event_infinity(circpad_machine_runtime_t *mi);
+circpad_decision_t
+circpad_internal_event_bins_empty(circpad_machine_runtime_t *);
+circpad_decision_t circpad_internal_event_state_length_up(
+ circpad_machine_runtime_t *);
+
+/** Machine creation events are events that cause us to set up or
+ * tear down padding state machines. */
+void circpad_machine_event_circ_added_hop(struct origin_circuit_t *on_circ);
+void circpad_machine_event_circ_built(struct origin_circuit_t *circ);
+void circpad_machine_event_circ_purpose_changed(struct origin_circuit_t *circ);
+void circpad_machine_event_circ_has_streams(struct origin_circuit_t *circ);
+void circpad_machine_event_circ_has_no_streams(struct origin_circuit_t *circ);
+void
+circpad_machine_event_circ_has_no_relay_early(struct origin_circuit_t *circ);
+
+void circpad_machines_init(void);
+void circpad_machines_free(void);
+void circpad_register_padding_machine(circpad_machine_spec_t *machine,
+ smartlist_t *machine_list);
+
+void circpad_machine_states_init(circpad_machine_spec_t *machine,
+ circpad_statenum_t num_states);
+
+void circpad_circuit_free_all_machineinfos(struct circuit_t *circ);
+
+bool circpad_padding_is_from_expected_hop(struct circuit_t *circ,
+ crypt_path_t *from_hop);
+
+/** Serialization functions for writing to/from torrc and consensus */
+char *circpad_machine_spec_to_string(const circpad_machine_spec_t *machine);
+const circpad_machine_spec_t *circpad_string_to_machine(const char *str);
+
+/* Padding negotiation between client and middle */
+signed_error_t circpad_handle_padding_negotiate(struct circuit_t *circ,
+ struct cell_t *cell);
+signed_error_t circpad_handle_padding_negotiated(struct circuit_t *circ,
+ struct cell_t *cell,
+ crypt_path_t *layer_hint);
+signed_error_t circpad_negotiate_padding(struct origin_circuit_t *circ,
+ circpad_machine_num_t machine,
+ uint8_t target_hopnum,
+ uint8_t command);
+bool circpad_padding_negotiated(struct circuit_t *circ,
+ circpad_machine_num_t machine,
+ uint8_t command,
+ uint8_t response);
+
+circpad_purpose_mask_t circpad_circ_purpose_to_mask(uint8_t circ_purpose);
+
+int circpad_check_received_cell(cell_t *cell, circuit_t *circ,
+ crypt_path_t *layer_hint,
+ const relay_header_t *rh);
+
+MOCK_DECL(circpad_decision_t,
+circpad_machine_schedule_padding,(circpad_machine_runtime_t *));
+
+MOCK_DECL(circpad_decision_t,
+circpad_machine_spec_transition, (circpad_machine_runtime_t *mi,
+ circpad_event_t event));
+
+circpad_decision_t circpad_send_padding_cell_for_callback(
+ circpad_machine_runtime_t *mi);
+
+void circpad_free_all(void);
+
+#ifdef CIRCUITPADDING_PRIVATE
+STATIC void machine_spec_free_(circpad_machine_spec_t *m);
+#define machine_spec_free(m) \
+ FREE_AND_NULL(circpad_machine_spec_t,machine_spec_free_, (m))
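+/*
+ * Illustrative usage (hypothetical pointer name): per the FREE_AND_NULL
+ * convention, machine_spec_free() frees the spec and NULLs the variable:
+ *
+ *   circpad_machine_spec_t *m = tor_malloc_zero(sizeof(*m));
+ *   machine_spec_free(m);   // m is now NULL
+ */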
+
+STATIC circpad_delay_t
+circpad_machine_sample_delay(circpad_machine_runtime_t *mi);
+
+STATIC bool
+circpad_machine_reached_padding_limit(circpad_machine_runtime_t *mi);
+
+STATIC circpad_delay_t
+circpad_histogram_bin_to_usec(const circpad_machine_runtime_t *mi,
+ circpad_hist_index_t bin);
+
+STATIC const circpad_state_t *
+circpad_machine_current_state(const circpad_machine_runtime_t *mi);
+
+STATIC void circpad_machine_remove_token(circpad_machine_runtime_t *mi);
+
+STATIC circpad_hist_index_t circpad_histogram_usec_to_bin(
+ const circpad_machine_runtime_t *mi,
+ circpad_delay_t us);
+
+STATIC circpad_machine_runtime_t *circpad_circuit_machineinfo_new(
+ struct circuit_t *on_circ,
+ int machine_index);
+STATIC void circpad_machine_remove_higher_token(circpad_machine_runtime_t *mi,
+ circpad_delay_t target_bin_us);
+STATIC void circpad_machine_remove_lower_token(circpad_machine_runtime_t *mi,
+ circpad_delay_t target_bin_us);
+STATIC void circpad_machine_remove_closest_token(circpad_machine_runtime_t *mi,
+ circpad_delay_t target_bin_us,
+ bool use_usec);
+STATIC void circpad_machine_setup_tokens(circpad_machine_runtime_t *mi);
+
+MOCK_DECL(STATIC signed_error_t,
+circpad_send_command_to_hop,(struct origin_circuit_t *circ, uint8_t hopnum,
+ uint8_t relay_command, const uint8_t *payload,
+ ssize_t payload_len));
+
+MOCK_DECL(STATIC const node_t *,
+circuit_get_nth_node,(origin_circuit_t *circ, int hop));
+
+STATIC circpad_delay_t
+histogram_get_bin_upper_bound(const circpad_machine_runtime_t *mi,
+ circpad_hist_index_t bin);
+
+STATIC void
+circpad_add_matching_machines(origin_circuit_t *on_circ,
+ smartlist_t *machines_sl);
+
+#ifdef TOR_UNIT_TESTS
+extern smartlist_t *origin_padding_machines;
+extern smartlist_t *relay_padding_machines;
+
+#endif
+
+#endif /* defined(CIRCUITPADDING_PRIVATE) */
+
+#endif /* !defined(TOR_CIRCUITPADDING_H) */
diff --git a/src/core/or/circuitpadding_machines.c b/src/core/or/circuitpadding_machines.c
new file mode 100644
index 0000000000..98767f9e8f
--- /dev/null
+++ b/src/core/or/circuitpadding_machines.c
@@ -0,0 +1,454 @@
+/* Copyright (c) 2019 The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circuitpadding_machines.c
+ * \brief Circuit padding state machines
+ *
+ * Introduce circuit padding machines that will be used by Tor circuits, as
+ * specified by proposal 302 "Hiding onion service clients using padding".
+ *
+ * Right now this file introduces two machines that aim to hide the client-side
+ * of onion service circuits against naive classifiers like the ones from the
+ * "Circuit Fingerprinting Attacks: Passive Deanonymization of Tor Hidden
+ * Services" paper from USENIX. By naive classifiers we mean classifiers that
+ * use basic features like "circuit construction circuits" and "incoming and
+ * outgoing cell counts" and "duration of activity".
+ *
+ * In particular, these machines aim to be lightweight and protect against
+ * these basic classifiers. They don't aim to protect against more advanced
+ * attacks that use deep learning or even correlate various circuit
+ * construction events together. Machines that fool such advanced classifiers
+ * are also possible, but they can't be so lightweight and might require more
+ * WTF-PAD features. So for now we opt for the following two machines:
+ *
+ * Client-side introduction circuit hiding machine:
+ *
+ * This machine hides client-side introduction circuits by making their
+ * circuit construction sequence look like normal general circuits that
+ * download directory information. Furthermore, the circuits are kept open
+ * until all the padding has been sent, since intro circuits are usually
+ * very short-lived and this acts as a distinguisher. For more info see
+ * circpad_machine_client_hide_intro_circuits() and the spec.
+ *
+ * Client-side rendezvous circuit hiding machine:
+ *
+ * This machine hides client-side rendezvous circuits by making their
+ * circuit construction sequence look like normal general circuits. For more
+ * details see circpad_machine_client_hide_rend_circuits() and the spec.
+ *
+ * TODO: These are simple machines that carefully manipulate the cells of the
+ * initial circuit setup procedure to make them look like general
+ * circuits. In the future, more states can be baked into their state machine
+ * to do more advanced obfuscation.
+ **/
+
+#define CIRCUITPADDING_MACHINES_PRIVATE
+
+#include "core/or/or.h"
+#include "feature/nodelist/networkstatus.h"
+
+#include "lib/crypt_ops/crypto_rand.h"
+
+#include "core/or/circuitlist.h"
+
+#include "core/or/circuitpadding_machines.h"
+#include "core/or/circuitpadding.h"
+
+/** Create a client-side padding machine that aims to hide IP circuits. In
+ * particular, it keeps intro circuits alive until enough padding traffic has
+ * been pushed through them.
+ */
+void
+circpad_machine_client_hide_intro_circuits(smartlist_t *machines_sl)
+{
+ circpad_machine_spec_t *client_machine
+ = tor_malloc_zero(sizeof(circpad_machine_spec_t));
+
+ client_machine->name = "client_ip_circ";
+
+ client_machine->conditions.state_mask = CIRCPAD_CIRC_OPENED;
+ client_machine->target_hopnum = 2;
+
+ /* This is a client machine */
+ client_machine->is_origin_side = 1;
+
+ /* We only want to pad introduction circuits, and we want to start padding
+ * only after the INTRODUCE1 cell has been sent, so set the purposes
+ * appropriately.
+ *
+ * In particular we want introduction circuits to blend as much as possible
+ * with general circuits. Most general circuits have the following initial
+ * relay cell sequence (outgoing cells marked in [brackets]):
+ *
+ * [EXTEND2] -> EXTENDED2 -> [EXTEND2] -> EXTENDED2 -> [BEGIN] -> CONNECTED
+ * -> [DATA] -> [DATA] -> DATA -> DATA...(inbound data cells continue)
+ *
+ * Whereas normal introduction circuits usually look like:
+ *
+ * [EXTEND2] -> EXTENDED2 -> [EXTEND2] -> EXTENDED2 -> [EXTEND2] -> EXTENDED2
+ * -> [INTRO1] -> INTRODUCE_ACK
+ *
+ * This means that up to the sixth cell (first line of each sequence above),
+ * both general and intro circuits have identical cell sequences. After that
+ * we want to mimic the second line sequence of
+ * -> [DATA] -> [DATA] -> DATA -> DATA...(inbound data cells continue)
+ *
+   * We achieve this by starting padding after INTRODUCE1 has been sent. With
+   * padding negotiation cells, the common case of the second line looks like:
+ * -> [INTRO1] -> [PADDING_NEGOTIATE] -> PADDING_NEGOTIATED -> INTRO_ACK
+ *
+ * Then, the middle node will send between INTRO_MACHINE_MINIMUM_PADDING and
+ * INTRO_MACHINE_MAXIMUM_PADDING cells, to match the "...(inbound data cells
+ * continue)" portion of the trace (aka the rest of an HTTPS response body).
+ */
+ client_machine->conditions.purpose_mask =
+ circpad_circ_purpose_to_mask(CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT)|
+ circpad_circ_purpose_to_mask(CIRCUIT_PURPOSE_C_INTRODUCE_ACKED)|
+ circpad_circ_purpose_to_mask(CIRCUIT_PURPOSE_C_CIRCUIT_PADDING);
+
+ /* Keep the circuit alive even after the introduction has been finished,
+ * otherwise the short-term lifetime of the circuit will blow our cover */
+ client_machine->manage_circ_lifetime = 1;
+
+ /* Set padding machine limits to help guard against excessive padding */
+ client_machine->allowed_padding_count = INTRO_MACHINE_MAXIMUM_PADDING;
+ client_machine->max_padding_percent = 1;
+
+ /* Two states: START, OBFUSCATE_CIRC_SETUP (and END) */
+ circpad_machine_states_init(client_machine, 2);
+
+ /* For the origin-side machine, we transition to OBFUSCATE_CIRC_SETUP after
+ * sending PADDING_NEGOTIATE, and we stay there (without sending any padding)
+ * until we receive a STOP from the other side. */
+ client_machine->states[CIRCPAD_STATE_START].
+ next_state[CIRCPAD_EVENT_NONPADDING_SENT] =
+ CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP;
+
+ /* origin-side machine has no event reactions while in
+ * CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP, so no more state transitions here. */
+
+ /* The client side should never send padding, so it does not need
+ * to specify token removal, or a histogram definition or state lengths.
+ * That is all controlled by the middle node. */
+
+ /* Register the machine */
+ client_machine->machine_num = smartlist_len(machines_sl);
+ circpad_register_padding_machine(client_machine, machines_sl);
+
+ log_info(LD_CIRC,
+ "Registered client intro point hiding padding machine (%u)",
+ client_machine->machine_num);
+}
+
+/** Create a relay-side padding machine that aims to hide IP circuits. See
+ * comments on the function above for more details on the workings of the
+ * machine. */
+void
+circpad_machine_relay_hide_intro_circuits(smartlist_t *machines_sl)
+{
+ circpad_machine_spec_t *relay_machine
+ = tor_malloc_zero(sizeof(circpad_machine_spec_t));
+
+ relay_machine->name = "relay_ip_circ";
+
+ relay_machine->conditions.state_mask = CIRCPAD_CIRC_OPENED;
+
+ /* This is a relay-side machine */
+ relay_machine->is_origin_side = 0;
+
+ /* We want to negotiate END from this side after all our padding is done, so
+ * that the origin-side machine goes into END state, and eventually closes
+ * the circuit. */
+ relay_machine->should_negotiate_end = 1;
+
+ /* Set padding machine limits to help guard against excessive padding */
+ relay_machine->allowed_padding_count = INTRO_MACHINE_MAXIMUM_PADDING;
+ relay_machine->max_padding_percent = 1;
+
+ /* Two states: START, OBFUSCATE_CIRC_SETUP (and END) */
+ circpad_machine_states_init(relay_machine, 2);
+
+ /* For the relay-side machine, we want to transition
+ * START -> OBFUSCATE_CIRC_SETUP upon first non-padding
+ * cell sent (PADDING_NEGOTIATED in this case). */
+ relay_machine->states[CIRCPAD_STATE_START].
+ next_state[CIRCPAD_EVENT_NONPADDING_SENT] =
+ CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP;
+
+ /* For the relay-side, we want to transition from OBFUSCATE_CIRC_SETUP to END
+ * state when the length finishes. */
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ next_state[CIRCPAD_EVENT_LENGTH_COUNT] = CIRCPAD_STATE_END;
+
+ /* Now let's define the OBF -> OBF transitions that maintain our padding
+ * flow:
+ *
+ * For the relay-side machine, we want to keep on sending padding bytes even
+ * when nothing else happens on this circuit. */
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ next_state[CIRCPAD_EVENT_PADDING_SENT] =
+ CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP;
+ /* For the relay-side machine, we need this transition so that we re-enter
+ the state, after PADDING_NEGOTIATED is sent. Otherwise, the remove token
+ function will disable the timer, and nothing will restart it since there
+ is no other motion on an intro circuit. */
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ next_state[CIRCPAD_EVENT_NONPADDING_SENT] =
+ CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP;
+
+ /* Token removal strategy for OBFUSCATE_CIRC_SETUP state: Don't
+ * remove any tokens.
+ *
+ * We rely on the state length sampling and not token removal, to avoid
+ * the mallocs required to copy the histograms for token removal,
+ * and to avoid monotime calls needed to determine histogram
+ * bins for token removal. */
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ token_removal = CIRCPAD_TOKEN_REMOVAL_NONE;
+
+ /* Figure out the length of the OBFUSCATE_CIRC_SETUP state so that it's
+ * randomized. The relay side will send between INTRO_MACHINE_MINIMUM_PADDING
+ * and INTRO_MACHINE_MAXIMUM_PADDING padding cells towards the client. */
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ length_dist.type = CIRCPAD_DIST_UNIFORM;
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ length_dist.param1 = INTRO_MACHINE_MINIMUM_PADDING;
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ length_dist.param2 = INTRO_MACHINE_MAXIMUM_PADDING;
+
+ /* Configure histogram */
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ histogram_len = 2;
+
+  /* For the relay-side machine we want to batch padding instantly to pretend
+   * it's an incoming directory download. So set the histogram edges tight:
+   * (1ms, 10ms, infinity). */
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ histogram_edges[0] = 1000;
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ histogram_edges[1] = 10000;
+
+  /* We put all our tokens in bin 0, which means we want 100% probability
+   * of choosing an inter-packet delay between 1000 and 10000 microseconds
+   * (1 to 10ms). Since we only have 1 bin, it doesn't matter how many tokens
+   * there are; 1000 out of 1000 is 100%. */
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ histogram[0] = 1000;
+
+  /* Just one bin, so set up the total tokens accordingly. */
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ histogram_total_tokens =
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].histogram[0];
+
+ /* Register the machine */
+ relay_machine->machine_num = smartlist_len(machines_sl);
+ circpad_register_padding_machine(relay_machine, machines_sl);
+
+ log_info(LD_CIRC,
+ "Registered relay intro circuit hiding padding machine (%u)",
+ relay_machine->machine_num);
+}
+
+/************************** Rendezvous-circuit machine ***********************/
+
+/** Create a client-side padding machine that aims to hide rendezvous
+ * circuits.*/
+void
+circpad_machine_client_hide_rend_circuits(smartlist_t *machines_sl)
+{
+ circpad_machine_spec_t *client_machine
+ = tor_malloc_zero(sizeof(circpad_machine_spec_t));
+
+ client_machine->name = "client_rp_circ";
+
+ /* Only pad after the circuit has been built and pad to the middle */
+ client_machine->conditions.state_mask = CIRCPAD_CIRC_OPENED;
+ client_machine->target_hopnum = 2;
+
+ /* This is a client machine */
+ client_machine->is_origin_side = 1;
+
+ /* We only want to pad rendezvous circuits, and we want to start padding only
+ * after the rendezvous circuit has been established.
+ *
+ * Following a similar argument as for intro circuits, we are aiming for
+ * padded rendezvous circuits to blend in with the initial cell sequence of
+ * general circuits which usually look like this:
+ *
+ * [EXTEND2] -> EXTENDED2 -> [EXTEND2] -> EXTENDED2 -> [BEGIN] -> CONNECTED
+ * -> [DATA] -> [DATA] -> DATA -> DATA...(incoming cells continue)
+ *
+ * Whereas normal rendezvous circuits usually look like:
+ *
+ * [EXTEND2] -> EXTENDED2 -> [EXTEND2] -> EXTENDED2 -> [EST_REND] -> REND_EST
+ * -> REND2 -> [BEGIN]
+ *
+ * This means that up to the sixth cell (in the first line), both general and
+ * rend circuits have identical cell sequences.
+ *
+ * After that we want to mimic a [DATA] -> [DATA] -> DATA -> DATA sequence.
+ *
+ * With padding negotiation right after the REND_ESTABLISHED, the sequence
+ * becomes:
+ *
+ * [EXTEND2] -> EXTENDED2 -> [EXTEND2] -> EXTENDED2 -> [EST_REND] -> REND_EST
+ * -> [PADDING_NEGOTIATE] -> [DROP] -> PADDING_NEGOTIATED -> DROP...
+ *
+ * After which normal application DATA cells continue on the circuit.
+ *
+   * Hence, this way we make rendezvous circuits look like general circuits up
+   * until the end of the circuit setup. */
+ client_machine->conditions.purpose_mask =
+ circpad_circ_purpose_to_mask(CIRCUIT_PURPOSE_C_REND_JOINED)|
+ circpad_circ_purpose_to_mask(CIRCUIT_PURPOSE_C_REND_READY)|
+ circpad_circ_purpose_to_mask(CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED);
+
+ /* Set padding machine limits to help guard against excessive padding */
+ client_machine->allowed_padding_count = 1;
+ client_machine->max_padding_percent = 1;
+
+ /* Two states: START, OBFUSCATE_CIRC_SETUP (and END) */
+ circpad_machine_states_init(client_machine, 2);
+
+ /* START -> OBFUSCATE_CIRC_SETUP transition upon sending the first
+ * non-padding cell (which is PADDING_NEGOTIATE) */
+ client_machine->states[CIRCPAD_STATE_START].
+ next_state[CIRCPAD_EVENT_NONPADDING_SENT] =
+ CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP;
+
+ /* OBFUSCATE_CIRC_SETUP -> END transition when we send our first
+ * padding packet and/or hit the state length (the state length is 1). */
+ client_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_END;
+ client_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ next_state[CIRCPAD_EVENT_LENGTH_COUNT] = CIRCPAD_STATE_END;
+
+ /* Don't use a token removal strategy since we don't want to use monotime
+ * functions and we want to avoid mallocing histogram copies. We want
+ * this machine to be light. */
+ client_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ token_removal = CIRCPAD_TOKEN_REMOVAL_NONE;
+
+ /* Instead, to control the volume of padding (we just want to send a single
+ * padding cell) we will use a static state length. We just want one token,
+ * since we want to make the following pattern:
+ * [PADDING_NEGOTIATE] -> [DROP] -> PADDING_NEGOTIATED -> DROP */
+ client_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ length_dist.type = CIRCPAD_DIST_UNIFORM;
+ client_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ length_dist.param1 = 1;
+ client_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ length_dist.param2 = 2; // rand(1,2) is always 1
+
+ /* Histogram is: (0 msecs, 1 msec, infinity). We want this to be fast so
+ * that we send our outgoing [DROP] before the PADDING_NEGOTIATED comes
+ * back from the relay side. */
+ client_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ histogram_len = 2;
+ client_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ histogram_edges[0] = 0;
+ client_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ histogram_edges[1] = 1000;
+
+ /* We want a 100% probability of choosing an inter-packet delay of
+ * between 0 and 1ms. Since we don't use token removal,
+ * the number of tokens does not matter. (And also, state_length
+ * governs how many packets we send). */
+ client_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ histogram[0] = 1;
+ client_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ histogram_total_tokens = 1;
+
+ /* Register the machine */
+ client_machine->machine_num = smartlist_len(machines_sl);
+ circpad_register_padding_machine(client_machine, machines_sl);
+
+ log_info(LD_CIRC,
+ "Registered client rendezvous circuit hiding padding machine (%u)",
+ client_machine->machine_num);
+}
+
+/** Create a relay-side padding machine that aims to hide rendezvous
+ * circuits.
+ *
+ * This is meant to follow the client-side machine.
+ */
+void
+circpad_machine_relay_hide_rend_circuits(smartlist_t *machines_sl)
+{
+ circpad_machine_spec_t *relay_machine
+ = tor_malloc_zero(sizeof(circpad_machine_spec_t));
+
+ relay_machine->name = "relay_rp_circ";
+
+ /* Only pad after the circuit has been built and pad to the middle */
+ relay_machine->conditions.min_hops = 2;
+ relay_machine->conditions.state_mask = CIRCPAD_CIRC_OPENED;
+
+ /* This is a relay-side machine */
+ relay_machine->is_origin_side = 0;
+
+ /* Set padding machine limits to help guard against excessive padding */
+ relay_machine->allowed_padding_count = 1;
+ relay_machine->max_padding_percent = 1;
+
+ /* Two states: START, OBFUSCATE_CIRC_SETUP (and END) */
+ circpad_machine_states_init(relay_machine, 2);
+
+ /* START -> OBFUSCATE_CIRC_SETUP transition upon sending the first
+ * non-padding cell (which is PADDING_NEGOTIATED) */
+ relay_machine->states[CIRCPAD_STATE_START].
+ next_state[CIRCPAD_EVENT_NONPADDING_SENT] =
+ CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP;
+
+ /* OBFUSCATE_CIRC_SETUP -> END transition when we send our first
+ * padding packet and/or hit the state length (the state length is 1). */
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ next_state[CIRCPAD_EVENT_PADDING_SENT] = CIRCPAD_STATE_END;
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ next_state[CIRCPAD_EVENT_LENGTH_COUNT] = CIRCPAD_STATE_END;
+
+ /* Don't use a token removal strategy since we don't want to use monotime
+ * functions and we want to avoid mallocing histogram copies. We want
+ * this machine to be light. */
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ token_removal = CIRCPAD_TOKEN_REMOVAL_NONE;
+
+ /* Instead, to control the volume of padding (we just want to send a single
+ * padding cell) we will use a static state length. We just want one token,
+ * since we want to make the following pattern:
+ * [PADDING_NEGOTIATE] -> [DROP] -> PADDING_NEGOTIATED -> DROP */
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ length_dist.type = CIRCPAD_DIST_UNIFORM;
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ length_dist.param1 = 1;
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ length_dist.param2 = 2; // rand(1,2) is always 1
+
+ /* Histogram is: (0 msecs, 1 msec, infinity). We want this to be fast so
+ * that the outgoing DROP cell is sent immediately after the
+ * PADDING_NEGOTIATED. */
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ histogram_len = 2;
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ histogram_edges[0] = 0;
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ histogram_edges[1] = 1000;
+
+ /* We want a 100% probability of choosing an inter-packet delay of
+ * between 0 and 1ms. Since we don't use token removal,
+ * the number of tokens does not matter. (And also, state_length
+ * governs how many packets we send). */
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ histogram[0] = 1;
+ relay_machine->states[CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP].
+ histogram_total_tokens = 1;
+
+ /* Register the machine */
+ relay_machine->machine_num = smartlist_len(machines_sl);
+ circpad_register_padding_machine(relay_machine, machines_sl);
+
+ log_info(LD_CIRC,
+ "Registered relay rendezvous circuit hiding padding machine (%u)",
+ relay_machine->machine_num);
+}
diff --git a/src/core/or/circuitpadding_machines.h b/src/core/or/circuitpadding_machines.h
new file mode 100644
index 0000000000..3c9798d42d
--- /dev/null
+++ b/src/core/or/circuitpadding_machines.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2018 The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file circuitpadding_machines.h
+ * \brief Header file for circuitpadding_machines.c.
+ **/
+
+#ifndef TOR_CIRCUITPADDING_MACHINES_H
+#define TOR_CIRCUITPADDING_MACHINES_H
+
+void circpad_machine_relay_hide_intro_circuits(smartlist_t *machines_sl);
+void circpad_machine_client_hide_intro_circuits(smartlist_t *machines_sl);
+void circpad_machine_relay_hide_rend_circuits(smartlist_t *machines_sl);
+void circpad_machine_client_hide_rend_circuits(smartlist_t *machines_sl);
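+/*
+ * Illustrative usage sketch (the registration wiring is assumed, not shown
+ * in this header): callers are expected to hand each constructor the
+ * matching global machine list, e.g.
+ *
+ *   circpad_machine_client_hide_intro_circuits(origin_padding_machines);
+ *   circpad_machine_relay_hide_intro_circuits(relay_padding_machines);
+ *   circpad_machine_client_hide_rend_circuits(origin_padding_machines);
+ *   circpad_machine_relay_hide_rend_circuits(relay_padding_machines);
+ */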
+
+#ifdef CIRCUITPADDING_MACHINES_PRIVATE
+
+/** State of the padding machines in which padding is actually sent */
+#define CIRCPAD_STATE_OBFUSCATE_CIRC_SETUP CIRCPAD_STATE_BURST
+
+/** Constants defining the amount of padding that a machine will send to hide
+ * HS circuits. The actual value is sampled uniformly at random between the
+ * min/max values.
+ */
+
+/** Minimum number of relay-side padding cells to be sent by this machine */
+#define INTRO_MACHINE_MINIMUM_PADDING 7
+/** Maximum number of relay-side padding cells to be sent by this machine.
+ * The actual value will be sampled between the min and max. */
+#define INTRO_MACHINE_MAXIMUM_PADDING 10
+
+#endif /* defined(CIRCUITPADDING_MACHINES_PRIVATE) */
+
+#endif /* !defined(TOR_CIRCUITPADDING_MACHINES_H) */
diff --git a/src/core/or/circuitstats.c b/src/core/or/circuitstats.c
index 2cde21fa1f..5875627b93 100644
--- a/src/core/or/circuitstats.c
+++ b/src/core/or/circuitstats.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -29,8 +29,8 @@
#include "core/or/circuitbuild.h"
#include "core/or/circuitstats.h"
#include "app/config/config.h"
-#include "app/config/confparse.h"
-#include "feature/control/control.h"
+#include "lib/confmgt/confmgt.h"
+#include "feature/control/control_events.h"
#include "lib/crypt_ops/crypto_rand.h"
#include "core/mainloop/mainloop.h"
#include "feature/nodelist/networkstatus.h"
@@ -44,6 +44,7 @@
#include "lib/time/tvdiff.h"
#include "lib/encoding/confline.h"
#include "feature/dirauth/authmode.h"
+#include "feature/relay/relay_periodic.h"
#include "core/or/crypt_path_st.h"
#include "core/or/origin_circuit_st.h"
@@ -639,9 +640,9 @@ circuit_build_times_rewind_history(circuit_build_times_t *cbt, int n)
void
circuit_build_times_mark_circ_as_measurement_only(origin_circuit_t *circ)
{
- control_event_circuit_status(circ,
- CIRC_EVENT_FAILED,
- END_CIRC_REASON_TIMEOUT);
+ circuit_event_status(circ,
+ CIRC_EVENT_FAILED,
+ END_CIRC_REASON_TIMEOUT);
circuit_change_purpose(TO_CIRCUIT(circ),
CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT);
/* Record this event to check for too many timeouts
@@ -971,7 +972,7 @@ circuit_build_times_update_state(const circuit_build_times_t *cbt,
/**
* Shuffle the build times array.
*
- * Adapted from http://en.wikipedia.org/wiki/Fisher-Yates_shuffle
+ * Adapted from https://en.wikipedia.org/wiki/Fisher-Yates_shuffle
*/
static void
circuit_build_times_shuffle_and_store_array(circuit_build_times_t *cbt,
@@ -1182,7 +1183,7 @@ circuit_build_times_parse_state(circuit_build_times_t *cbt,
/**
* Estimates the Xm and Alpha parameters using
- * http://en.wikipedia.org/wiki/Pareto_distribution#Parameter_estimation
+ * https://en.wikipedia.org/wiki/Pareto_distribution#Parameter_estimation
*
* The notable difference is that we use mode instead of min to estimate Xm.
* This is because our distribution is frechet-like. We claim this is
@@ -1197,7 +1198,7 @@ circuit_build_times_update_alpha(circuit_build_times_t *cbt)
int n=0,i=0,abandoned_count=0;
build_time_t max_time=0;
- /* http://en.wikipedia.org/wiki/Pareto_distribution#Parameter_estimation */
+ /* https://en.wikipedia.org/wiki/Pareto_distribution#Parameter_estimation */
/* We sort of cheat here and make our samples slightly more pareto-like
* and less frechet-like. */
cbt->Xm = circuit_build_times_get_xm(cbt);
@@ -1269,9 +1270,9 @@ circuit_build_times_update_alpha(circuit_build_times_t *cbt)
* We use it to calculate the timeout and also to generate synthetic
* values of time for circuits that timeout before completion.
*
- * See http://en.wikipedia.org/wiki/Quantile_function,
- * http://en.wikipedia.org/wiki/Inverse_transform_sampling and
- * http://en.wikipedia.org/wiki/Pareto_distribution#Generating_a_
+ * See https://en.wikipedia.org/wiki/Quantile_function,
+ * https://en.wikipedia.org/wiki/Inverse_transform_sampling and
+ * https://en.wikipedia.org/wiki/Pareto_distribution#Generating_a_
* random_sample_from_Pareto_distribution
* That's right. I'll cite wikipedia all day long.
*
@@ -1420,6 +1421,7 @@ void
circuit_build_times_network_is_live(circuit_build_times_t *cbt)
{
time_t now = approx_time();
+ // XXXX this should use pubsub
if (cbt->liveness.nonlive_timeouts > 0) {
time_t time_since_live = now - cbt->liveness.network_last_live;
log_notice(LD_CIRC,
diff --git a/src/core/or/circuitstats.h b/src/core/or/circuitstats.h
index 845d7b6722..52c9100f53 100644
--- a/src/core/or/circuitstats.h
+++ b/src/core/or/circuitstats.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -175,7 +175,7 @@ typedef struct {
} network_liveness_t;
/** Structure for circuit build times history */
-struct circuit_build_times_s {
+struct circuit_build_times_t {
/** The circular array of recorded build times in milliseconds */
build_time_t circuit_build_times[CBT_NCIRCUITS_TO_OBSERVE];
/** Current index in the circuit_build_times circular array */
diff --git a/src/core/or/circuituse.c b/src/core/or/circuituse.c
index efd69fb4a3..a88ccf9dd1 100644
--- a/src/core/or/circuituse.c
+++ b/src/core/or/circuituse.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -35,13 +35,15 @@
#include "core/or/circuitlist.h"
#include "core/or/circuitstats.h"
#include "core/or/circuituse.h"
+#include "core/or/circuitpadding.h"
#include "core/or/connection_edge.h"
#include "core/or/policies.h"
#include "feature/client/addressmap.h"
#include "feature/client/bridges.h"
#include "feature/client/circpathbias.h"
#include "feature/client/entrynodes.h"
-#include "feature/control/control.h"
+#include "feature/client/proxymode.h"
+#include "feature/control/control_events.h"
#include "feature/dircommon/directory.h"
#include "feature/hs/hs_circuit.h"
#include "feature/hs/hs_client.h"
@@ -69,7 +71,7 @@
#include "core/or/origin_circuit_st.h"
#include "core/or/socks_request_st.h"
-static void circuit_expire_old_circuits_clientside(void);
+STATIC void circuit_expire_old_circuits_clientside(void);
static void circuit_increment_failure_count(void);
/** Check whether the hidden service destination of the stream at
@@ -177,7 +179,6 @@ circuit_is_acceptable(const origin_circuit_t *origin_circ,
purpose == CIRCUIT_PURPOSE_S_HSDIR_POST ||
purpose == CIRCUIT_PURPOSE_C_HSDIR_GET) {
tor_addr_t addr;
- const int family = tor_addr_parse(&addr, conn->socks_request->address);
if (!exitnode && !build_state->onehop_tunnel) {
log_debug(LD_CIRC,"Not considering circuit with unknown router.");
return 0; /* this circuit is screwed and doesn't know it yet,
@@ -198,6 +199,8 @@ circuit_is_acceptable(const origin_circuit_t *origin_circ,
return 0; /* this is a circuit to somewhere else */
if (tor_digest_is_zero(digest)) {
/* we don't know the digest; have to compare addr:port */
+ const int family = tor_addr_parse(&addr,
+ conn->socks_request->address);
if (family < 0 ||
!tor_addr_eq(&build_state->chosen_exit->addr, &addr) ||
build_state->chosen_exit->port != conn->socks_request->port)
@@ -210,12 +213,14 @@ circuit_is_acceptable(const origin_circuit_t *origin_circ,
return 0;
}
}
- if (origin_circ->prepend_policy && family != -1) {
- int r = compare_tor_addr_to_addr_policy(&addr,
- conn->socks_request->port,
- origin_circ->prepend_policy);
- if (r == ADDR_POLICY_REJECTED)
- return 0;
+ if (origin_circ->prepend_policy) {
+ if (tor_addr_parse(&addr, conn->socks_request->address) != -1) {
+ int r = compare_tor_addr_to_addr_policy(&addr,
+ conn->socks_request->port,
+ origin_circ->prepend_policy);
+ if (r == ADDR_POLICY_REJECTED)
+ return 0;
+ }
}
if (exitnode && !connection_ap_can_use_exit(conn, exitnode)) {
/* can't exit from this router */
@@ -543,9 +548,10 @@ circuit_expire_building(void)
MAX(get_circuit_build_close_time_ms()*2 + 1000,
options->SocksTimeout * 1000));
+ bool fixed_time = circuit_build_times_disabled(get_options());
+
SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *,victim) {
struct timeval cutoff;
- bool fixed_time = circuit_build_times_disabled(get_options());
if (!CIRCUIT_IS_ORIGIN(victim) || /* didn't originate here */
victim->marked_for_close) /* don't mess with marked circs */
@@ -727,7 +733,7 @@ circuit_expire_building(void)
circuit_build_times_enough_to_compute(get_circuit_build_times())) {
log_info(LD_CIRC,
- "Deciding to count the timeout for circuit %"PRIu32"\n",
+ "Deciding to count the timeout for circuit %"PRIu32,
TO_ORIGIN_CIRCUIT(victim)->global_identifier);
/* Circuits are allowed to last longer for measurement.
@@ -770,16 +776,11 @@ circuit_expire_building(void)
if (!(TO_ORIGIN_CIRCUIT(victim)->hs_circ_has_timed_out)) {
switch (victim->purpose) {
case CIRCUIT_PURPOSE_C_REND_READY:
- /* We only want to spare a rend circ if it has been specified in
- * an INTRODUCE1 cell sent to a hidden service. A circ's
- * pending_final_cpath field is non-NULL iff it is a rend circ
- * and we have tried to send an INTRODUCE1 cell specifying it.
- * Thus, if the pending_final_cpath field *is* NULL, then we
- * want to not spare it. */
- if (TO_ORIGIN_CIRCUIT(victim)->build_state &&
- TO_ORIGIN_CIRCUIT(victim)->build_state->pending_final_cpath ==
- NULL)
+ /* We only want to spare a rend circ iff it has been specified in an
+ * INTRODUCE1 cell sent to a hidden service. */
+ if (!hs_circ_is_rend_sent_in_intro1(CONST_TO_ORIGIN_CIRCUIT(victim))) {
break;
+ }
FALLTHROUGH;
case CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT:
case CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED:
@@ -1425,6 +1426,11 @@ circuit_detach_stream(circuit_t *circ, edge_connection_t *conn)
if (circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED) {
hs_dec_rdv_stream_counter(origin_circ);
}
+
+ /* If there are no more streams on this circ, tell circpad */
+ if (!origin_circ->p_streams)
+ circpad_machine_event_circ_has_no_streams(origin_circ);
+
return;
}
} else {
@@ -1465,7 +1471,7 @@ circuit_detach_stream(circuit_t *circ, edge_connection_t *conn)
/** Find each circuit that has been unused for too long, or dirty
* for too long and has no streams on it: mark it for close.
*/
-static void
+STATIC void
circuit_expire_old_circuits_clientside(void)
{
struct timeval cutoff, now;
@@ -1505,6 +1511,7 @@ circuit_expire_old_circuits_clientside(void)
circ->purpose == CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT ||
circ->purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO ||
circ->purpose == CIRCUIT_PURPOSE_TESTING ||
+ circ->purpose == CIRCUIT_PURPOSE_C_CIRCUIT_PADDING ||
(circ->purpose >= CIRCUIT_PURPOSE_C_INTRODUCING &&
circ->purpose <= CIRCUIT_PURPOSE_C_REND_READY_INTRO_ACKED) ||
circ->purpose == CIRCUIT_PURPOSE_S_CONNECT_REND) {
@@ -1674,7 +1681,7 @@ circuit_testing_failed(origin_circuit_t *circ, int at_last_hop)
void
circuit_has_opened(origin_circuit_t *circ)
{
- control_event_circuit_status(circ, CIRC_EVENT_BUILT, 0);
+ circuit_event_status(circ, CIRC_EVENT_BUILT, 0);
/* Remember that this circuit has finished building. Now if we start
* it building again later (e.g. by extending it), we will know not
@@ -1954,23 +1961,61 @@ have_enough_path_info(int need_exit)
int
circuit_purpose_is_hidden_service(uint8_t purpose)
{
- if (purpose == CIRCUIT_PURPOSE_HS_VANGUARDS) {
- return 1;
- }
-
- /* Client-side purpose */
- if (purpose >= CIRCUIT_PURPOSE_C_HS_MIN_ &&
- purpose <= CIRCUIT_PURPOSE_C_HS_MAX_) {
- return 1;
- }
-
- /* Service-side purpose */
- if (purpose >= CIRCUIT_PURPOSE_S_HS_MIN_ &&
- purpose <= CIRCUIT_PURPOSE_S_HS_MAX_) {
- return 1;
- }
-
- return 0;
+ /* HS Vanguard purpose. */
+ if (circuit_purpose_is_hs_vanguards(purpose)) {
+ return 1;
+ }
+
+ /* Client-side purpose */
+ if (circuit_purpose_is_hs_client(purpose)) {
+ return 1;
+ }
+
+ /* Service-side purpose */
+ if (circuit_purpose_is_hs_service(purpose)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/** Return true iff the given circuit is an HS client circuit. */
+bool
+circuit_purpose_is_hs_client(const uint8_t purpose)
+{
+ return (purpose >= CIRCUIT_PURPOSE_C_HS_MIN_ &&
+ purpose <= CIRCUIT_PURPOSE_C_HS_MAX_);
+}
+
+/** Return true iff the given circuit is an HS service circuit. */
+bool
+circuit_purpose_is_hs_service(const uint8_t purpose)
+{
+ return (purpose >= CIRCUIT_PURPOSE_S_HS_MIN_ &&
+ purpose <= CIRCUIT_PURPOSE_S_HS_MAX_);
+}
+
+/** Return true iff the given circuit is an HS Vanguards circuit. */
+bool
+circuit_purpose_is_hs_vanguards(const uint8_t purpose)
+{
+ return (purpose == CIRCUIT_PURPOSE_HS_VANGUARDS);
+}
+
+/** Return true iff the given circuit is an HS v2 circuit. */
+bool
+circuit_is_hs_v2(const circuit_t *circ)
+{
+ return (CIRCUIT_IS_ORIGIN(circ) &&
+ (CONST_TO_ORIGIN_CIRCUIT(circ)->rend_data != NULL));
+}
+
+/** Return true iff the given circuit is an HS v3 circuit. */
+bool
+circuit_is_hs_v3(const circuit_t *circ)
+{
+ return (CIRCUIT_IS_ORIGIN(circ) &&
+ (CONST_TO_ORIGIN_CIRCUIT(circ)->hs_ident != NULL));
}
/**
@@ -2523,8 +2568,7 @@ circuit_get_open_circ_or_launch(entry_connection_t *conn,
circ->rend_data = rend_data_dup(edge_conn->rend_data);
} else if (edge_conn->hs_ident) {
circ->hs_ident =
- hs_ident_circuit_new(&edge_conn->hs_ident->identity_pk,
- HS_IDENT_CIRCUIT_INTRO);
+ hs_ident_circuit_new(&edge_conn->hs_ident->identity_pk);
}
if (circ->base_.purpose == CIRCUIT_PURPOSE_C_ESTABLISH_REND &&
circ->base_.state == CIRCUIT_STATE_OPEN)
@@ -2596,6 +2640,12 @@ link_apconn_to_circ(entry_connection_t *apconn, origin_circuit_t *circ,
/* add it into the linked list of streams on this circuit */
log_debug(LD_APP|LD_CIRC, "attaching new conn to circ. n_circ_id %u.",
(unsigned)circ->base_.n_circ_id);
+
+ /* If this is the first stream on this circuit, tell circpad
+ * that streams are attached */
+ if (!circ->p_streams)
+ circpad_machine_event_circ_has_streams(circ);
+
/* reset it, so we can measure circ timeouts */
ENTRY_TO_CONN(apconn)->timestamp_last_read_allowed = time(NULL);
ENTRY_TO_EDGE_CONN(apconn)->next_stream = circ->p_streams;
@@ -3070,7 +3120,7 @@ circuit_change_purpose(circuit_t *circ, uint8_t new_purpose)
/* Take specific actions if we are repurposing a hidden service circuit. */
if (circuit_purpose_is_hidden_service(circ->purpose) &&
!circuit_purpose_is_hidden_service(new_purpose)) {
- hs_circ_cleanup(circ);
+ hs_circ_cleanup_on_repurpose(circ);
}
}
@@ -3080,6 +3130,8 @@ circuit_change_purpose(circuit_t *circ, uint8_t new_purpose)
if (CIRCUIT_IS_ORIGIN(circ)) {
control_event_circuit_purpose_changed(TO_ORIGIN_CIRCUIT(circ),
old_purpose);
+
+ circpad_machine_event_circ_purpose_changed(TO_ORIGIN_CIRCUIT(circ));
}
}
@@ -3113,7 +3165,9 @@ circuit_sent_valid_data(origin_circuit_t *circ, uint16_t relay_body_len)
{
if (!circ) return;
- tor_assert_nonfatal(relay_body_len <= RELAY_PAYLOAD_SIZE);
+ tor_assertf_nonfatal(relay_body_len <= RELAY_PAYLOAD_SIZE,
+ "Wrong relay_body_len: %d (should be at most %d)",
+ relay_body_len, RELAY_PAYLOAD_SIZE);
circ->n_delivered_written_circ_bw =
tor_add_u32_nowrap(circ->n_delivered_written_circ_bw, relay_body_len);
diff --git a/src/core/or/circuituse.h b/src/core/or/circuituse.h
index 25588dbb11..95d36d6474 100644
--- a/src/core/or/circuituse.h
+++ b/src/core/or/circuituse.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -64,6 +64,15 @@ int hostname_in_track_host_exits(const or_options_t *options,
void mark_circuit_unusable_for_new_conns(origin_circuit_t *circ);
int circuit_purpose_is_hidden_service(uint8_t);
+
+/* Series of helper functions for hidden services. */
+bool circuit_purpose_is_hs_client(const uint8_t purpose);
+bool circuit_purpose_is_hs_service(const uint8_t purpose);
+bool circuit_purpose_is_hs_vanguards(const uint8_t purpose);
+
+bool circuit_is_hs_v2(const circuit_t *circ);
+bool circuit_is_hs_v3(const circuit_t *circ);
+
int circuit_should_use_vanguards(uint8_t);
void circuit_sent_valid_data(origin_circuit_t *circ, uint16_t relay_body_len);
void circuit_read_valid_data(origin_circuit_t *circ, uint16_t relay_body_len);
diff --git a/src/core/or/command.c b/src/core/or/command.c
index 5fb6640c22..8a1d2066cc 100644
--- a/src/core/or/command.c
+++ b/src/core/or/command.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -49,11 +49,12 @@
#include "core/or/dos.h"
#include "core/or/onion.h"
#include "core/or/relay.h"
-#include "feature/control/control.h"
+#include "feature/control/control_events.h"
#include "feature/hibernate/hibernate.h"
#include "feature/nodelist/describe.h"
#include "feature/nodelist/nodelist.h"
#include "feature/nodelist/routerlist.h"
+#include "feature/relay/circuitbuild_relay.h"
#include "feature/relay/routermode.h"
#include "feature/stats/rephist.h"
#include "lib/crypt_ops/crypto_util.h"
@@ -182,7 +183,7 @@ command_process_cell(channel_t *chan, cell_t *cell)
command_time_process_cell(cl, cn, & tp ## time , \
command_process_ ## tp ## _cell); \
} STMT_END
-#else /* !(defined(KEEP_TIMING_STATS)) */
+#else /* !defined(KEEP_TIMING_STATS) */
#define PROCESS_CELL(tp, cl, cn) command_process_ ## tp ## _cell(cl, cn)
#endif /* defined(KEEP_TIMING_STATS) */
@@ -217,23 +218,6 @@ command_process_cell(channel_t *chan, cell_t *cell)
}
}
-/** Process an incoming var_cell from a channel; in the current protocol all
- * the var_cells are handshake-related and handled below the channel layer,
- * so this just logs a warning and drops the cell.
- */
-
-void
-command_process_var_cell(channel_t *chan, var_cell_t *var_cell)
-{
- tor_assert(chan);
- tor_assert(var_cell);
-
- log_info(LD_PROTOCOL,
- "Received unexpected var_cell above the channel layer of type %d"
- "; dropping it.",
- var_cell->command);
-}
-
/** Process a 'create' <b>cell</b> that just arrived from <b>chan</b>. Make a
* new circuit with the p_circ_id specified in cell. Put the circuit in state
* onionskin_pending, and pass the onionskin to the cpuworker. Circ will get
@@ -685,8 +669,7 @@ command_setup_channel(channel_t *chan)
tor_assert(chan);
channel_set_cell_handlers(chan,
- command_process_cell,
- command_process_var_cell);
+ command_process_cell);
}
/** Given a listener, install the right handler to process incoming
diff --git a/src/core/or/command.h b/src/core/or/command.h
index 8c90e1de6f..14ebb4a339 100644
--- a/src/core/or/command.h
+++ b/src/core/or/command.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -15,7 +15,6 @@
#include "core/or/channel.h"
void command_process_cell(channel_t *chan, cell_t *cell);
-void command_process_var_cell(channel_t *chan, var_cell_t *cell);
void command_setup_channel(channel_t *chan);
void command_setup_listener(channel_listener_t *chan_l);
diff --git a/src/core/or/connection_edge.c b/src/core/or/connection_edge.c
index 84b80313ce..1394a41c73 100644
--- a/src/core/or/connection_edge.c
+++ b/src/core/or/connection_edge.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -62,21 +62,24 @@
#include "app/config/config.h"
#include "core/mainloop/connection.h"
#include "core/mainloop/mainloop.h"
+#include "core/mainloop/netstatus.h"
#include "core/or/channel.h"
#include "core/or/circuitbuild.h"
#include "core/or/circuitlist.h"
#include "core/or/circuituse.h"
+#include "core/or/circuitpadding.h"
#include "core/or/connection_edge.h"
#include "core/or/connection_or.h"
#include "core/or/policies.h"
#include "core/or/reasons.h"
#include "core/or/relay.h"
+#include "core/or/sendme.h"
#include "core/proto/proto_http.h"
#include "core/proto/proto_socks.h"
#include "feature/client/addressmap.h"
#include "feature/client/circpathbias.h"
#include "feature/client/dnsserv.h"
-#include "feature/control/control.h"
+#include "feature/control/control_events.h"
#include "feature/dircache/dirserv.h"
#include "feature/dircommon/directory.h"
#include "feature/hibernate/hibernate.h"
@@ -97,7 +100,7 @@
#include "feature/rend/rendservice.h"
#include "feature/stats/predict_ports.h"
#include "feature/stats/rephist.h"
-#include "lib/container/buffers.h"
+#include "lib/buf/buffers.h"
#include "lib/crypt_ops/crypto_util.h"
#include "core/or/cell_st.h"
@@ -300,6 +303,11 @@ connection_edge_process_inbuf(edge_connection_t *conn, int package_partial)
}
return 0;
case AP_CONN_STATE_OPEN:
+ if (! conn->base_.linked) {
+ note_user_activity(approx_time());
+ }
+
+ FALLTHROUGH;
case EXIT_CONN_STATE_OPEN:
if (connection_edge_package_raw_inbuf(conn, package_partial, NULL) < 0) {
/* (We already sent an end cell if possible) */
@@ -424,6 +432,21 @@ warn_if_hs_unreachable(const edge_connection_t *conn, uint8_t reason)
}
}
+/** Given a TTL (in seconds) from a DNS response or from a relay, determine
+ * what TTL clients and relays should actually use for caching it. */
+uint32_t
+clip_dns_ttl(uint32_t ttl)
+{
+ /* This logic is a defense against "DefectTor" DNS-based traffic
+ * confirmation attacks, as in https://nymity.ch/tor-dns/tor-dns.pdf .
+ * We only give two values: a "low" value and a "high" value.
+ */
+ if (ttl < MIN_DNS_TTL)
+ return MIN_DNS_TTL;
+ else
+ return MAX_DNS_TTL;
+}
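+/*
+ * Behavioral sketch of clip_dns_ttl() (illustrative; the concrete values of
+ * MIN_DNS_TTL and MAX_DNS_TTL are whatever the header defines). Only two
+ * outputs are ever possible:
+ *
+ *   clip_dns_ttl(0)           == MIN_DNS_TTL
+ *   clip_dns_ttl(MIN_DNS_TTL) == MAX_DNS_TTL
+ *   clip_dns_ttl(UINT32_MAX)  == MAX_DNS_TTL
+ */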
+
/** Send a relay end cell from stream <b>conn</b> down conn's circuit, and
* remember that we've done so. If this is not a client connection, set the
* relay end cell's reason for closing as <b>reason</b>.
@@ -472,7 +495,7 @@ connection_edge_end(edge_connection_t *conn, uint8_t reason)
memcpy(payload+1, tor_addr_to_in6_addr8(&conn->base_.addr), 16);
addrlen = 16;
}
- set_uint32(payload+1+addrlen, htonl(dns_clip_ttl(conn->address_ttl)));
+ set_uint32(payload+1+addrlen, htonl(clip_dns_ttl(conn->address_ttl)));
payload_len += 4+addrlen;
}
@@ -754,8 +777,13 @@ connection_edge_flushed_some(edge_connection_t *conn)
{
switch (conn->base_.state) {
case AP_CONN_STATE_OPEN:
+ if (! conn->base_.linked) {
+ note_user_activity(approx_time());
+ }
+
+ FALLTHROUGH;
case EXIT_CONN_STATE_OPEN:
- connection_edge_consider_sending_sendme(conn);
+ sendme_connection_edge_consider_sending(conn);
break;
}
return 0;
@@ -779,7 +807,7 @@ connection_edge_finished_flushing(edge_connection_t *conn)
switch (conn->base_.state) {
case AP_CONN_STATE_OPEN:
case EXIT_CONN_STATE_OPEN:
- connection_edge_consider_sending_sendme(conn);
+ sendme_connection_edge_consider_sending(conn);
return 0;
case AP_CONN_STATE_SOCKS_WAIT:
case AP_CONN_STATE_NATD_WAIT:
@@ -832,7 +860,7 @@ connected_cell_format_payload(uint8_t *payload_out,
return -1;
}
- set_uint32(payload_out + connected_payload_len, htonl(dns_clip_ttl(ttl)));
+ set_uint32(payload_out + connected_payload_len, htonl(clip_dns_ttl(ttl)));
connected_payload_len += 4;
tor_assert(connected_payload_len <= MAX_CONNECTED_CELL_PAYLOAD_LEN);
@@ -1211,7 +1239,7 @@ connection_ap_rescan_and_attach_pending(void)
entry_conn->marked_pending_circ_line = 0; \
entry_conn->marked_pending_circ_file = 0; \
} while (0)
-#else /* !(defined(DEBUGGING_17659)) */
+#else /* !defined(DEBUGGING_17659) */
#define UNMARK() do { } while (0)
#endif /* defined(DEBUGGING_17659) */
@@ -1540,6 +1568,107 @@ consider_plaintext_ports(entry_connection_t *conn, uint16_t port)
return 0;
}
+/** Parse the given hostname in <b>address</b>. Return true if parsing was
+ * successful, with *type_out set to the type of the hostname. Otherwise
+ * return false, meaning the hostname was not recognized; in that case
+ * *type_out is generally set to BAD_HOSTNAME (a malformed v2 onion address
+ * keeps ONION_V2_HOSTNAME).
+ *
+ * The possible recognized forms are (where true is returned):
+ *
+ * If address is of the form "y.onion" with a well-formed handle y:
+ * Put a NUL after y, lower-case it, and return ONION_V2_HOSTNAME or
+ * ONION_V3_HOSTNAME depending on the HS version.
+ *
+ * If address is of the form "x.y.onion" with a well-formed handle x:
+ * Drop "x.", put a NUL after y, lower-case it, and return
+ * ONION_V2_HOSTNAME or ONION_V3_HOSTNAME depending on the HS version.
+ *
+ * If address is of the form "y.onion" with a badly-formed handle y:
+ * Return BAD_HOSTNAME and log a message.
+ *
+ * If address is of the form "y.exit":
+ * Put a NUL after y and return EXIT_HOSTNAME.
+ *
+ * Otherwise:
+ * Return NORMAL_HOSTNAME and change nothing.
+ */
+STATIC bool
+parse_extended_hostname(char *address, hostname_type_t *type_out)
+{
+ char *s;
+ char *q;
+ char query[HS_SERVICE_ADDR_LEN_BASE32+1];
+
+ s = strrchr(address,'.');
+ if (!s) {
+ *type_out = NORMAL_HOSTNAME; /* no dot, thus normal */
+ goto success;
+ }
+ if (!strcmp(s+1,"exit")) {
+ *s = 0; /* NUL-terminate it */
+ *type_out = EXIT_HOSTNAME; /* .exit */
+ goto success;
+ }
+ if (strcmp(s+1,"onion")) {
+ *type_out = NORMAL_HOSTNAME; /* neither .exit nor .onion, thus normal */
+ goto success;
+ }
+
+ /* so it is .onion */
+ *s = 0; /* NUL-terminate it */
+ /* locate a 'sub-domain' component, in order to remove it */
+ q = strrchr(address, '.');
+ if (q == address) {
+ *type_out = BAD_HOSTNAME;
+ goto failed; /* reject sub-domain, as DNS does */
+ }
+ q = (NULL == q) ? address : q + 1;
+ if (strlcpy(query, q, HS_SERVICE_ADDR_LEN_BASE32+1) >=
+ HS_SERVICE_ADDR_LEN_BASE32+1) {
+ *type_out = BAD_HOSTNAME;
+ goto failed;
+ }
+ if (q != address) {
+ memmove(address, q, strlen(q) + 1 /* also get \0 */);
+ }
+ /* v2 onion address check. */
+ if (strlen(query) == REND_SERVICE_ID_LEN_BASE32) {
+ *type_out = ONION_V2_HOSTNAME;
+ if (rend_valid_v2_service_id(query)) {
+ goto success;
+ }
+ goto failed;
+ }
+
+ /* v3 onion address check. */
+ if (strlen(query) == HS_SERVICE_ADDR_LEN_BASE32) {
+ *type_out = ONION_V3_HOSTNAME;
+ if (hs_address_is_valid(query)) {
+ goto success;
+ }
+ goto failed;
+ }
+
+ /* Reaching this point, nothing was recognized. */
+ *type_out = BAD_HOSTNAME;
+ goto failed;
+
+ success:
+ return true;
+ failed:
+  /* otherwise, restore the address and return false */
+ *s = '.';
+ const bool is_onion = (*type_out == ONION_V2_HOSTNAME) ||
+ (*type_out == ONION_V3_HOSTNAME);
+ log_warn(LD_APP, "Invalid %shostname %s; rejecting",
+ is_onion ? "onion " : "",
+ safe_str_client(address));
+ if (*type_out == ONION_V3_HOSTNAME) {
+ *type_out = BAD_HOSTNAME;
+ }
+ return false;
+}
+
/** How many times do we try connecting with an exit configured via
* TrackHostExits before concluding that it won't work any more and trying a
* different one? */
@@ -2007,16 +2136,15 @@ connection_ap_handshake_rewrite_and_attach(entry_connection_t *conn,
const int automap = rr.automap;
const addressmap_entry_source_t exit_source = rr.exit_source;
- /* Now, we parse the address to see if it's an .onion or .exit or
- * other special address.
- */
- const hostname_type_t addresstype = parse_extended_hostname(socks->address);
-
/* Now see whether the hostname is bogus. This could happen because of an
* onion hostname whose format we don't recognize. */
- if (addresstype == BAD_HOSTNAME) {
+ hostname_type_t addresstype;
+ if (!parse_extended_hostname(socks->address, &addresstype)) {
control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s",
escaped(socks->address));
+ if (addresstype == BAD_HOSTNAME) {
+ conn->socks_request->socks_extended_error_code = SOCKS5_HS_BAD_ADDRESS;
+ }
connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
return -1;
}
@@ -2803,6 +2931,31 @@ connection_ap_process_natd(entry_connection_t *conn)
return connection_ap_rewrite_and_attach_if_allowed(conn, NULL, NULL);
}
+static const char HTTP_CONNECT_IS_NOT_AN_HTTP_PROXY_MSG[] =
+ "HTTP/1.0 405 Method Not Allowed\r\n"
+ "Content-Type: text/html; charset=iso-8859-1\r\n\r\n"
+ "<html>\n"
+ "<head>\n"
+ "<title>This is an HTTP CONNECT tunnel, not a full HTTP Proxy</title>\n"
+ "</head>\n"
+ "<body>\n"
+ "<h1>This is an HTTP CONNECT tunnel, not an HTTP proxy.</h1>\n"
+ "<p>\n"
+ "It appears you have configured your web browser to use this Tor port as\n"
+ "an HTTP proxy.\n"
+ "</p><p>\n"
+ "This is not correct: This port is configured as a CONNECT tunnel, not\n"
+ "an HTTP proxy. Please configure your client accordingly. You can also\n"
+ "use HTTPS; then the client should automatically use HTTP CONNECT."
+ "</p>\n"
+ "<p>\n"
+ "See <a href=\"https://www.torproject.org/documentation.html\">"
+ "https://www.torproject.org/documentation.html</a> for more "
+ "information.\n"
+ "</p>\n"
+ "</body>\n"
+ "</html>\n";
+
/** Called on an HTTP CONNECT entry connection when some bytes have arrived,
* but we have not yet received a full HTTP CONNECT request. Try to parse an
* HTTP CONNECT request from the connection's inbuf. On success, set up the
@@ -2843,7 +2996,7 @@ connection_ap_process_http_connect(entry_connection_t *conn)
tor_assert(command);
tor_assert(addrport);
if (strcasecmp(command, "connect")) {
- errmsg = "HTTP/1.0 405 Method Not Allowed\r\n\r\n";
+ errmsg = HTTP_CONNECT_IS_NOT_AN_HTTP_PROXY_MSG;
goto err;
}
@@ -3309,8 +3462,9 @@ tell_controller_about_resolved_result(entry_connection_t *conn,
expires = time(NULL) + ttl;
if (answer_type == RESOLVED_TYPE_IPV4 && answer_len >= 4) {
char *cp = tor_dup_ip(ntohl(get_uint32(answer)));
- control_event_address_mapped(conn->socks_request->address,
- cp, expires, NULL, 0);
+ if (cp)
+ control_event_address_mapped(conn->socks_request->address,
+ cp, expires, NULL, 0);
tor_free(cp);
} else if (answer_type == RESOLVED_TYPE_HOSTNAME && answer_len < 256) {
char *cp = tor_strndup(answer, answer_len);
@@ -3383,7 +3537,7 @@ connection_ap_handshake_socks_resolved,(entry_connection_t *conn,
}
} else if (answer_type == RESOLVED_TYPE_IPV6 && answer_len == 16) {
tor_addr_t a;
- tor_addr_from_ipv6_bytes(&a, (char*)answer);
+ tor_addr_from_ipv6_bytes(&a, answer);
if (! tor_addr_is_null(&a)) {
client_dns_set_addressmap(conn,
conn->socks_request->address, &a,
@@ -3484,11 +3638,17 @@ connection_ap_handshake_socks_reply(entry_connection_t *conn, char *reply,
size_t replylen, int endreason)
{
char buf[256];
- socks5_reply_status_t status =
- stream_end_reason_to_socks5_response(endreason);
+ socks5_reply_status_t status;
tor_assert(conn->socks_request); /* make sure it's an AP stream */
+ if (conn->socks_request->socks_use_extended_errors &&
+ conn->socks_request->socks_extended_error_code != 0) {
+ status = conn->socks_request->socks_extended_error_code;
+ } else {
+ status = stream_end_reason_to_socks5_response(endreason);
+ }
+
if (!SOCKS_COMMAND_IS_RESOLVE(conn->socks_request->command)) {
control_event_stream_status(conn, status==SOCKS5_SUCCEEDED ?
STREAM_EVENT_SUCCEEDED : STREAM_EVENT_FAILED,
@@ -3706,6 +3866,10 @@ handle_hs_exit_conn(circuit_t *circ, edge_connection_t *conn)
/* Link the circuit and the connection crypt path. */
conn->cpath_layer = origin_circ->cpath->prev;
+ /* If this is the first stream on this circuit, tell circpad */
+ if (!origin_circ->p_streams)
+ circpad_machine_event_circ_has_streams(origin_circ);
+
/* Add it into the linked list of p_streams on this circuit */
conn->next_stream = origin_circ->p_streams;
origin_circ->p_streams = conn;
@@ -3796,6 +3960,7 @@ connection_exit_begin_conn(cell_t *cell, circuit_t *circ)
if (! bcell.is_begindir) {
/* Steal reference */
+ tor_assert(bcell.address);
address = bcell.address;
port = bcell.port;
@@ -4300,68 +4465,6 @@ connection_ap_can_use_exit(const entry_connection_t *conn,
return 1;
}
-/** If address is of the form "y.onion" with a well-formed handle y:
- * Put a NUL after y, lower-case it, and return ONION_V2_HOSTNAME or
- * ONION_V3_HOSTNAME depending on the HS version.
- *
- * If address is of the form "x.y.onion" with a well-formed handle x:
- * Drop "x.", put a NUL after y, lower-case it, and return
- * ONION_V2_HOSTNAME or ONION_V3_HOSTNAME depending on the HS version.
- *
- * If address is of the form "y.onion" with a badly-formed handle y:
- * Return BAD_HOSTNAME and log a message.
- *
- * If address is of the form "y.exit":
- * Put a NUL after y and return EXIT_HOSTNAME.
- *
- * Otherwise:
- * Return NORMAL_HOSTNAME and change nothing.
- */
-hostname_type_t
-parse_extended_hostname(char *address)
-{
- char *s;
- char *q;
- char query[HS_SERVICE_ADDR_LEN_BASE32+1];
-
- s = strrchr(address,'.');
- if (!s)
- return NORMAL_HOSTNAME; /* no dot, thus normal */
- if (!strcmp(s+1,"exit")) {
- *s = 0; /* NUL-terminate it */
- return EXIT_HOSTNAME; /* .exit */
- }
- if (strcmp(s+1,"onion"))
- return NORMAL_HOSTNAME; /* neither .exit nor .onion, thus normal */
-
- /* so it is .onion */
- *s = 0; /* NUL-terminate it */
- /* locate a 'sub-domain' component, in order to remove it */
- q = strrchr(address, '.');
- if (q == address) {
- goto failed; /* reject sub-domain, as DNS does */
- }
- q = (NULL == q) ? address : q + 1;
- if (strlcpy(query, q, HS_SERVICE_ADDR_LEN_BASE32+1) >=
- HS_SERVICE_ADDR_LEN_BASE32+1)
- goto failed;
- if (q != address) {
- memmove(address, q, strlen(q) + 1 /* also get \0 */);
- }
- if (rend_valid_v2_service_id(query)) {
- return ONION_V2_HOSTNAME; /* success */
- }
- if (hs_address_is_valid(query)) {
- return ONION_V3_HOSTNAME;
- }
- failed:
- /* otherwise, return to previous state and return 0 */
- *s = '.';
- log_warn(LD_APP, "Invalid onion hostname %s; rejecting",
- safe_str_client(address));
- return BAD_HOSTNAME;
-}
-
/** Return true iff the (possibly NULL) <b>alen</b>-byte chunk of memory at
* <b>a</b> is equal to the (possibly NULL) <b>blen</b>-byte chunk of memory
* at <b>b</b>. */
@@ -4565,6 +4668,25 @@ circuit_clear_isolation(origin_circuit_t *circ)
circ->socks_username_len = circ->socks_password_len = 0;
}
+/** Send an END cell on the given edge connection conn, using the given
+ * reason (which must be a stream reason), and mark the connection for
+ * close.
+ *
+ * Note: We don't unattach the AP connection (if applicable) because we
+ * don't want to flush the remaining data. This function aims at ending
+ * everything quickly regardless of the connection state.
+ *
+ * This function can't fail and does nothing if conn is NULL. */
+void
+connection_edge_end_close(edge_connection_t *conn, uint8_t reason)
+{
+ if (!conn) {
+ return;
+ }
+
+ connection_edge_end(conn, reason);
+ connection_mark_for_close(TO_CONN(conn));
+}
+
/** Free all storage held in module-scoped variables for connection_edge.c */
void
connection_edge_free_all(void)
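
As a side note on the parse_extended_hostname() rework above: the classification rules in its doc comment can be illustrated with a small standalone sketch. Everything below (classify_hostname(), the length constants, the enum) is invented for illustration and is not Tor code; in particular, real handle validation is done by rend_valid_v2_service_id() and hs_address_is_valid(), which this sketch only approximates by length.

#include <stdio.h>
#include <string.h>

/* Illustrative lengths only; the real checks are
 * rend_valid_v2_service_id() and hs_address_is_valid(). */
#define V2_HANDLE_LEN 16
#define V3_HANDLE_LEN 56

typedef enum { H_BAD, H_EXIT, H_NORMAL, H_ONION_V2, H_ONION_V3 } htype_t;

/* Classify a hostname by suffix, dropping one optional subdomain from
 * "x.y.onion", as described in the doc comment above.  The handle is
 * only checked by length here, not validated. */
static htype_t
classify_hostname(char *address)
{
  char *s = strrchr(address, '.');
  if (!s)
    return H_NORMAL;                 /* no dot: normal hostname */
  if (!strcmp(s + 1, "exit")) {
    *s = '\0';                       /* keep only "y" */
    return H_EXIT;
  }
  if (strcmp(s + 1, "onion"))
    return H_NORMAL;                 /* neither .exit nor .onion */

  *s = '\0';                         /* drop ".onion" */
  char *q = strrchr(address, '.');
  if (q == address)
    return H_BAD;                    /* ".y.onion" is rejected */
  if (q)                             /* "x.y.onion": drop "x." */
    memmove(address, q + 1, strlen(q + 1) + 1);

  size_t len = strlen(address);
  if (len == V2_HANDLE_LEN)
    return H_ONION_V2;
  if (len == V3_HANDLE_LEN)
    return H_ONION_V3;
  return H_BAD;
}

int
main(void)
{
  char a[] = "www.abcdefghij234567.onion";
  char b[] = "somerelay.exit";
  htype_t ta = classify_hostname(a);
  htype_t tb = classify_hostname(b);
  printf("type %d, address now \"%s\"\n", ta, a);  /* type 3, "abcdefghij234567" */
  printf("type %d, address now \"%s\"\n", tb, b);  /* type 1, "somerelay" */
  return 0;
}
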
diff --git a/src/core/or/connection_edge.h b/src/core/or/connection_edge.h
index 68d8b19a11..8c06af5664 100644
--- a/src/core/or/connection_edge.h
+++ b/src/core/or/connection_edge.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -71,6 +71,15 @@ entry_connection_t *EDGE_TO_ENTRY_CONN(edge_connection_t *);
#define connection_mark_unattached_ap(conn, endreason) \
connection_mark_unattached_ap_((conn), (endreason), __LINE__, SHORT_FILE__)
+/** Possible return values for parse_extended_hostname. */
+typedef enum hostname_type_t {
+ BAD_HOSTNAME,
+ EXIT_HOSTNAME,
+ NORMAL_HOSTNAME,
+ ONION_V2_HOSTNAME,
+ ONION_V3_HOSTNAME,
+} hostname_type_t;
+
MOCK_DECL(void,connection_mark_unattached_ap_,
(entry_connection_t *conn, int endreason,
int line, const char *file));
@@ -80,6 +89,7 @@ int connection_edge_process_inbuf(edge_connection_t *conn,
int connection_edge_destroy(circid_t circ_id, edge_connection_t *conn);
int connection_edge_end(edge_connection_t *conn, uint8_t reason);
int connection_edge_end_errno(edge_connection_t *conn);
+void connection_edge_end_close(edge_connection_t *conn, uint8_t reason);
int connection_edge_flushed_some(edge_connection_t *conn);
int connection_edge_finished_flushing(edge_connection_t *conn);
int connection_edge_finished_connecting(edge_connection_t *conn);
@@ -154,13 +164,6 @@ int connection_ap_handshake_rewrite_and_attach(entry_connection_t *conn,
origin_circuit_t *circ,
crypt_path_t *cpath);
-/** Possible return values for parse_extended_hostname. */
-typedef enum hostname_type_t {
- NORMAL_HOSTNAME, ONION_V2_HOSTNAME, ONION_V3_HOSTNAME,
- EXIT_HOSTNAME, BAD_HOSTNAME
-} hostname_type_t;
-hostname_type_t parse_extended_hostname(char *address);
-
#if defined(HAVE_NET_IF_H) && defined(HAVE_NET_PFVAR_H)
int get_pf_socket(void);
#endif
@@ -179,6 +182,21 @@ void connection_ap_warn_and_unmark_if_pending_circ(
entry_connection_t *entry_conn,
const char *where);
+/** Lowest value for DNS ttl that a server should give or a client should
+ * believe. */
+#define MIN_DNS_TTL (5*60)
+/** Highest value for DNS ttl that a server should give or a client should
+ * believe. */
+#define MAX_DNS_TTL (60*60)
+/** How long do we keep DNS cache entries before purging them (regardless of
+ * their TTL)? */
+#define MAX_DNS_ENTRY_AGE (3*60*60)
+/** How long do we cache/tell clients to cache DNS records when no TTL is
+ * known? */
+#define DEFAULT_DNS_TTL (30*60)
+
+uint32_t clip_dns_ttl(uint32_t ttl);
+
int connection_half_edge_is_valid_data(const smartlist_t *half_conns,
streamid_t stream_id);
int connection_half_edge_is_valid_sendme(const smartlist_t *half_conns,
@@ -218,6 +236,8 @@ void half_edge_free_(struct half_edge_t *he);
#ifdef CONNECTION_EDGE_PRIVATE
+STATIC bool parse_extended_hostname(char *address, hostname_type_t *type_out);
+
/** A parsed BEGIN or BEGIN_DIR cell */
typedef struct begin_cell_t {
/** The address the client has asked us to connect to, or NULL if this is
diff --git a/src/core/or/connection_or.c b/src/core/or/connection_or.c
index 67157d8d0b..b88d1b6afb 100644
--- a/src/core/or/connection_or.c
+++ b/src/core/or/connection_or.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -18,17 +18,19 @@
* tortls.c) which it uses as its TLS stream. It is responsible for
* sending and receiving cells over that TLS.
*
- * This module also implements the client side of the v3 Tor link handshake,
+ * This module also implements the client side of the v3 (and greater) Tor
+ * link handshake.
**/
#include "core/or/or.h"
#include "feature/client/bridges.h"
-#include "lib/container/buffers.h"
+#include "lib/buf/buffers.h"
/*
* Define this so we get channel internal functions, since we're implementing
* part of a subclass (channel_tls_t).
*/
-#define TOR_CHANNEL_INTERNAL_
+#define CHANNEL_OBJECT_PRIVATE
#define CONNECTION_OR_PRIVATE
+#define ORCONN_EVENT_PRIVATE
#include "core/or/channel.h"
#include "core/or/channeltls.h"
#include "core/or/circuitbuild.h"
@@ -38,14 +40,14 @@
#include "app/config/config.h"
#include "core/mainloop/connection.h"
#include "core/or/connection_or.h"
-#include "feature/control/control.h"
-#include "lib/crypt_ops/crypto_rand.h"
+#include "feature/relay/relay_handshake.h"
+#include "feature/control/control_events.h"
#include "lib/crypt_ops/crypto_util.h"
#include "feature/dirauth/reachability.h"
#include "feature/client/entrynodes.h"
#include "lib/geoip/geoip.h"
#include "core/mainloop/mainloop.h"
-#include "trunnel/link_handshake.h"
+#include "trunnel/netinfo.h"
#include "feature/nodelist/microdesc.h"
#include "feature/nodelist/networkstatus.h"
#include "feature/nodelist/nodelist.h"
@@ -76,7 +78,8 @@
#include "lib/crypt_ops/crypto_format.h"
#include "lib/tls/tortls.h"
-#include "lib/tls/x509.h"
+
+#include "core/or/orconn_event.h"
static int connection_tls_finish_handshake(or_connection_t *conn);
static int connection_or_launch_v3_or_handshake(or_connection_t *conn);
@@ -91,13 +94,6 @@ static unsigned int
connection_or_is_bad_for_new_circs(or_connection_t *or_conn);
static void connection_or_mark_bad_for_new_circs(or_connection_t *or_conn);
-/*
- * Call this when changing connection state, so notifications to the owning
- * channel can be handled.
- */
-
-static void connection_or_change_state(or_connection_t *conn, uint8_t state);
-
static void connection_or_check_canonicity(or_connection_t *conn,
int started_here);
@@ -112,10 +108,6 @@ TO_OR_CONN(connection_t *c)
return DOWNCAST(or_connection_t, c);
}
-/** Global map between Extended ORPort identifiers and OR
- * connections. */
-static digestmap_t *orconn_ext_or_id_map = NULL;
-
/** Clear clear conn->identity_digest and update other data
* structures as appropriate.*/
void
@@ -201,71 +193,6 @@ connection_or_set_identity_digest(or_connection_t *conn,
channel_set_identity_digest(chan, rsa_digest, ed_id);
}
-/** Remove the Extended ORPort identifier of <b>conn</b> from the
- * global identifier list. Also, clear the identifier from the
- * connection itself. */
-void
-connection_or_remove_from_ext_or_id_map(or_connection_t *conn)
-{
- or_connection_t *tmp;
- if (!orconn_ext_or_id_map)
- return;
- if (!conn->ext_or_conn_id)
- return;
-
- tmp = digestmap_remove(orconn_ext_or_id_map, conn->ext_or_conn_id);
- if (!tor_digest_is_zero(conn->ext_or_conn_id))
- tor_assert(tmp == conn);
-
- memset(conn->ext_or_conn_id, 0, EXT_OR_CONN_ID_LEN);
-}
-
-/** Return the connection whose ext_or_id is <b>id</b>. Return NULL if no such
- * connection is found. */
-or_connection_t *
-connection_or_get_by_ext_or_id(const char *id)
-{
- if (!orconn_ext_or_id_map)
- return NULL;
- return digestmap_get(orconn_ext_or_id_map, id);
-}
-
-/** Deallocate the global Extended ORPort identifier list */
-void
-connection_or_clear_ext_or_id_map(void)
-{
- digestmap_free(orconn_ext_or_id_map, NULL);
- orconn_ext_or_id_map = NULL;
-}
-
-/** Creates an Extended ORPort identifier for <b>conn</b> and deposits
- * it into the global list of identifiers. */
-void
-connection_or_set_ext_or_identifier(or_connection_t *conn)
-{
- char random_id[EXT_OR_CONN_ID_LEN];
- or_connection_t *tmp;
-
- if (!orconn_ext_or_id_map)
- orconn_ext_or_id_map = digestmap_new();
-
- /* Remove any previous identifiers: */
- if (conn->ext_or_conn_id && !tor_digest_is_zero(conn->ext_or_conn_id))
- connection_or_remove_from_ext_or_id_map(conn);
-
- do {
- crypto_rand(random_id, sizeof(random_id));
- } while (digestmap_get(orconn_ext_or_id_map, random_id));
-
- if (!conn->ext_or_conn_id)
- conn->ext_or_conn_id = tor_malloc_zero(EXT_OR_CONN_ID_LEN);
-
- memcpy(conn->ext_or_conn_id, random_id, EXT_OR_CONN_ID_LEN);
-
- tmp = digestmap_set(orconn_ext_or_id_map, random_id, conn);
- tor_assert(!tmp);
-}
-
/**************************************************************/
/** Map from a string describing what a non-open OR connection was doing when
@@ -400,23 +327,69 @@ connection_or_report_broken_states(int severity, int domain)
smartlist_free(items);
}
+/**
+ * Helper function to publish an OR connection status event
+ *
+ * Publishes a message to subscribers of ORCONN messages, and sends
+ * the control event.
+ **/
+void
+connection_or_event_status(or_connection_t *conn, or_conn_status_event_t tp,
+ int reason)
+{
+ orconn_status_msg_t *msg = tor_malloc(sizeof(*msg));
+
+ msg->gid = conn->base_.global_identifier;
+ msg->status = tp;
+ msg->reason = reason;
+ orconn_status_publish(msg);
+ control_event_or_conn_status(conn, tp, reason);
+}
+
+/**
+ * Helper function to publish a state change message
+ *
+ * connection_or_change_state() calls this to notify subscribers about
+ * a change of an OR connection state.
+ **/
+static void
+connection_or_state_publish(const or_connection_t *conn, uint8_t state)
+{
+ orconn_state_msg_t *msg = tor_malloc(sizeof(*msg));
+
+ msg->gid = conn->base_.global_identifier;
+ if (conn->is_pt) {
+ /* Do extra decoding because conn->proxy_type indicates the proxy
+ * protocol that tor uses to talk with the transport plugin,
+ * instead of PROXY_PLUGGABLE. */
+ tor_assert_nonfatal(conn->proxy_type != PROXY_NONE);
+ msg->proxy_type = PROXY_PLUGGABLE;
+ } else {
+ msg->proxy_type = conn->proxy_type;
+ }
+ msg->state = state;
+ if (conn->chan) {
+ msg->chan = TLS_CHAN_TO_BASE(conn->chan)->global_identifier;
+ } else {
+ msg->chan = 0;
+ }
+ orconn_state_publish(msg);
+}
+
/** Call this to change or_connection_t states, so the owning channel_tls_t can
* be notified.
*/
-static void
-connection_or_change_state(or_connection_t *conn, uint8_t state)
+MOCK_IMPL(STATIC void,
+connection_or_change_state,(or_connection_t *conn, uint8_t state))
{
- uint8_t old_state;
-
tor_assert(conn);
- old_state = conn->base_.state;
conn->base_.state = state;
+ connection_or_state_publish(conn, state);
if (conn->chan)
- channel_tls_handle_state_change_on_orconn(conn->chan, conn,
- old_state, state);
+ channel_tls_handle_state_change_on_orconn(conn->chan, conn, state);
}
/** Return the number of circuits using an or_connection_t; this used to
@@ -676,6 +649,19 @@ connection_or_finished_flushing(or_connection_t *conn)
switch (conn->base_.state) {
case OR_CONN_STATE_PROXY_HANDSHAKING:
+ /* PROXY_HAPROXY gets connected by receiving an ack. */
+ if (conn->proxy_type == PROXY_HAPROXY) {
+ tor_assert(TO_CONN(conn)->proxy_state == PROXY_HAPROXY_WAIT_FOR_FLUSH);
+ TO_CONN(conn)->proxy_state = PROXY_CONNECTED;
+
+ if (connection_tls_start_handshake(conn, 0) < 0) {
+ /* TLS handshaking error of some kind. */
+ connection_or_close_for_error(conn, 0);
+ return -1;
+ }
+ break;
+ }
+ break;
case OR_CONN_STATE_OPEN:
case OR_CONN_STATE_OR_HANDSHAKING_V2:
case OR_CONN_STATE_OR_HANDSHAKING_V3:
@@ -707,8 +693,6 @@ connection_or_finished_connecting(or_connection_t *or_conn)
log_debug(LD_HANDSHAKE,"OR connect() to router at %s:%u finished.",
conn->address,conn->port);
- control_event_bootstrap(BOOTSTRAP_STATUS_HANDSHAKE, 0);
- control_event_boot_first_orconn();
if (proxy_type != PROXY_NONE) {
/* start proxy handshake */
@@ -717,8 +701,9 @@ connection_or_finished_connecting(or_connection_t *or_conn)
return -1;
}
- connection_start_reading(conn);
connection_or_change_state(or_conn, OR_CONN_STATE_PROXY_HANDSHAKING);
+ connection_start_reading(conn);
+
return 0;
}
@@ -758,21 +743,27 @@ connection_or_about_to_close(or_connection_t *or_conn)
entry_guard_chan_failed(TLS_CHAN_TO_BASE(or_conn->chan));
if (conn->state >= OR_CONN_STATE_TLS_HANDSHAKING) {
int reason = tls_error_to_orconn_end_reason(or_conn->tls_error);
- control_event_or_conn_status(or_conn, OR_CONN_EVENT_FAILED,
- reason);
- if (!authdir_mode_tests_reachability(options))
- control_event_bootstrap_prob_or(
- orconn_end_reason_to_control_string(reason),
- reason, or_conn);
+ connection_or_event_status(or_conn, OR_CONN_EVENT_FAILED,
+ reason);
+ if (!authdir_mode_tests_reachability(options)) {
+ const char *warning = NULL;
+ if (reason == END_OR_CONN_REASON_TLS_ERROR && or_conn->tls) {
+ warning = tor_tls_get_last_error_msg(or_conn->tls);
+ }
+ if (warning == NULL) {
+ warning = orconn_end_reason_to_control_string(reason);
+ }
+ control_event_bootstrap_prob_or(warning, reason, or_conn);
+ }
}
}
} else if (conn->hold_open_until_flushed) {
/* We only set hold_open_until_flushed when we're intentionally
* closing a connection. */
- control_event_or_conn_status(or_conn, OR_CONN_EVENT_CLOSED,
+ connection_or_event_status(or_conn, OR_CONN_EVENT_CLOSED,
tls_error_to_orconn_end_reason(or_conn->tls_error));
} else if (!tor_digest_is_zero(or_conn->identity_digest)) {
- control_event_or_conn_status(or_conn, OR_CONN_EVENT_CLOSED,
+ connection_or_event_status(or_conn, OR_CONN_EVENT_CLOSED,
tls_error_to_orconn_end_reason(or_conn->tls_error));
}
}
@@ -918,12 +909,21 @@ connection_or_check_canonicity(or_connection_t *conn, int started_here)
}
if (r) {
- tor_addr_port_t node_ap;
- node_get_pref_orport(r, &node_ap);
- /* XXXX proposal 186 is making this more complex. For now, a conn
- is canonical when it uses the _preferred_ address. */
- if (tor_addr_eq(&conn->base_.addr, &node_ap.addr))
+ tor_addr_port_t node_ipv4_ap;
+ tor_addr_port_t node_ipv6_ap;
+ node_get_prim_orport(r, &node_ipv4_ap);
+ node_get_pref_ipv6_orport(r, &node_ipv6_ap);
+ if (tor_addr_eq(&conn->base_.addr, &node_ipv4_ap.addr) ||
+ tor_addr_eq(&conn->base_.addr, &node_ipv6_ap.addr)) {
connection_or_set_canonical(conn, 1);
+ }
+ /* Choose the correct canonical address and port. */
+ tor_addr_port_t *node_ap;
+ if (tor_addr_family(&conn->base_.addr) == AF_INET) {
+ node_ap = &node_ipv4_ap;
+ } else {
+ node_ap = &node_ipv6_ap;
+ }
if (!started_here) {
/* Override the addr/port, so our log messages will make sense.
* This is dangerous, since if we ever try looking up a conn by
@@ -935,13 +935,14 @@ connection_or_check_canonicity(or_connection_t *conn, int started_here)
* right IP address and port 56244, that wouldn't be as helpful. now we
* log the "right" port too, so we know if it's moria1 or moria2.
*/
- tor_addr_copy(&conn->base_.addr, &node_ap.addr);
- conn->base_.port = node_ap.port;
+ /* See #33898 for a ticket that resolves this technical debt. */
+ tor_addr_copy(&conn->base_.addr, &node_ap->addr);
+ conn->base_.port = node_ap->port;
}
tor_free(conn->nickname);
conn->nickname = tor_strdup(node_get_nickname(r));
tor_free(conn->base_.address);
- conn->base_.address = tor_addr_to_str_dup(&node_ap.addr);
+ conn->base_.address = tor_addr_to_str_dup(&node_ap->addr);
} else {
tor_free(conn->nickname);
conn->nickname = tor_malloc(HEX_DIGEST_LEN+2);
@@ -1229,11 +1230,11 @@ or_connect_failure_ht_hash(const or_connect_failure_entry_t *entry)
}
HT_PROTOTYPE(or_connect_failure_ht, or_connect_failure_entry_t, node,
- or_connect_failure_ht_hash, or_connect_failure_ht_eq)
+ or_connect_failure_ht_hash, or_connect_failure_ht_eq);
HT_GENERATE2(or_connect_failure_ht, or_connect_failure_entry_t, node,
or_connect_failure_ht_hash, or_connect_failure_ht_eq,
- 0.6, tor_reallocarray_, tor_free_)
+ 0.6, tor_reallocarray_, tor_free_);
/* Initialize a given connect failure entry with the given identity_digest,
* addr and port. All field are optional except ocf. */
@@ -1364,7 +1365,7 @@ void
connection_or_connect_failed(or_connection_t *conn,
int reason, const char *msg)
{
- control_event_or_conn_status(conn, OR_CONN_EVENT_FAILED, reason);
+ connection_or_event_status(conn, OR_CONN_EVENT_FAILED, reason);
if (!authdir_mode_tests_reachability(get_options()))
control_event_bootstrap_prob_or(msg, reason, conn);
note_or_connect_failed(conn);
@@ -1430,7 +1431,7 @@ connection_or_connect, (const tor_addr_t *_addr, uint16_t port,
int r;
tor_addr_t proxy_addr;
uint16_t proxy_port;
- int proxy_type;
+ int proxy_type, is_pt = 0;
tor_assert(_addr);
tor_assert(id_digest);
@@ -1471,21 +1472,27 @@ connection_or_connect, (const tor_addr_t *_addr, uint16_t port,
return NULL;
}
- connection_or_change_state(conn, OR_CONN_STATE_CONNECTING);
- control_event_or_conn_status(conn, OR_CONN_EVENT_LAUNCHED, 0);
-
conn->is_outgoing = 1;
/* If we are using a proxy server, find it and use it. */
- r = get_proxy_addrport(&proxy_addr, &proxy_port, &proxy_type, TO_CONN(conn));
+ r = get_proxy_addrport(&proxy_addr, &proxy_port, &proxy_type, &is_pt,
+ TO_CONN(conn));
if (r == 0) {
conn->proxy_type = proxy_type;
if (proxy_type != PROXY_NONE) {
tor_addr_copy(&addr, &proxy_addr);
port = proxy_port;
conn->base_.proxy_state = PROXY_INFANT;
+ conn->is_pt = is_pt;
}
+ connection_or_change_state(conn, OR_CONN_STATE_CONNECTING);
+ connection_or_event_status(conn, OR_CONN_EVENT_LAUNCHED, 0);
} else {
+ /* This duplication of state change calls is necessary in case we
+ * run into an error condition below */
+ connection_or_change_state(conn, OR_CONN_STATE_CONNECTING);
+ connection_or_event_status(conn, OR_CONN_EVENT_LAUNCHED, 0);
+
/* get_proxy_addrport() might fail if we have a Bridge line that
references a transport, but no ClientTransportPlugin lines
defining its transport proxy. If this is the case, let's try to
@@ -1691,7 +1698,8 @@ connection_tls_continue_handshake(or_connection_t *conn)
switch (result) {
CASE_TOR_TLS_ERROR_ANY:
- log_info(LD_OR,"tls error [%s]. breaking connection.",
+ conn->tls_error = result;
+ log_info(LD_OR,"tls error [%s]. breaking connection.",
tor_tls_err_to_string(result));
return -1;
case TOR_TLS_DONE:
@@ -1723,6 +1731,7 @@ connection_tls_continue_handshake(or_connection_t *conn)
log_debug(LD_OR,"wanted read");
return 0;
case TOR_TLS_CLOSE:
+ conn->tls_error = result;
log_info(LD_OR,"tls closed. breaking connection.");
return -1;
}
@@ -1981,8 +1990,8 @@ connection_or_client_learned_peer_id(or_connection_t *conn,
/* Tell the new guard API about the channel failure */
entry_guard_chan_failed(TLS_CHAN_TO_BASE(conn->chan));
- control_event_or_conn_status(conn, OR_CONN_EVENT_FAILED,
- END_OR_CONN_REASON_OR_IDENTITY);
+ connection_or_event_status(conn, OR_CONN_EVENT_FAILED,
+ END_OR_CONN_REASON_OR_IDENTITY);
if (!authdir_mode_tests_reachability(options))
control_event_bootstrap_prob_or(
"Unexpected identity in router certificate",
@@ -2222,7 +2231,7 @@ int
connection_or_set_state_open(or_connection_t *conn)
{
connection_or_change_state(conn, OR_CONN_STATE_OPEN);
- control_event_or_conn_status(conn, OR_CONN_EVENT_CONNECTED, 0);
+ connection_or_event_status(conn, OR_CONN_EVENT_CONNECTED, 0);
/* Link protocol 3 appeared in Tor 0.2.3.6-alpha, so any connection
* that uses an earlier link protocol should not be treated as a relay. */
@@ -2252,6 +2261,8 @@ connection_or_write_cell_to_buf(const cell_t *cell, or_connection_t *conn)
cell_pack(&networkcell, cell, conn->wide_circ_ids);
+ /* We need to count padding cells from this non-packed code path
+ * since they are sent via chan->write_cell() (which is not packed) */
rep_hist_padding_count_write(PADDING_TYPE_TOTAL);
if (cell->command == CELL_PADDING)
rep_hist_padding_count_write(PADDING_TYPE_CELL);
@@ -2262,7 +2273,7 @@ connection_or_write_cell_to_buf(const cell_t *cell, or_connection_t *conn)
if (conn->chan) {
channel_timestamp_active(TLS_CHAN_TO_BASE(conn->chan));
- if (TLS_CHAN_TO_BASE(conn->chan)->currently_padding) {
+ if (TLS_CHAN_TO_BASE(conn->chan)->padding_enabled) {
rep_hist_padding_count_write(PADDING_TYPE_ENABLED_TOTAL);
if (cell->command == CELL_PADDING)
rep_hist_padding_count_write(PADDING_TYPE_ENABLED_CELL);
@@ -2292,6 +2303,7 @@ connection_or_write_var_cell_to_buf,(const var_cell_t *cell,
if (conn->base_.state == OR_CONN_STATE_OR_HANDSHAKING_V3)
or_handshake_state_record_var_cell(conn, conn->handshake_state, cell, 0);
+ rep_hist_padding_count_write(PADDING_TYPE_TOTAL);
/* Touch the channel's active timestamp if there is one */
if (conn->chan)
channel_timestamp_active(TLS_CHAN_TO_BASE(conn->chan));
@@ -2428,6 +2440,31 @@ connection_or_send_versions(or_connection_t *conn, int v3_plus)
return 0;
}
+static netinfo_addr_t *
+netinfo_addr_from_tor_addr(const tor_addr_t *tor_addr)
+{
+ sa_family_t addr_family = tor_addr_family(tor_addr);
+
+ if (BUG(addr_family != AF_INET && addr_family != AF_INET6))
+ return NULL;
+
+ netinfo_addr_t *netinfo_addr = netinfo_addr_new();
+
+ if (addr_family == AF_INET) {
+ netinfo_addr_set_addr_type(netinfo_addr, NETINFO_ADDR_TYPE_IPV4);
+ netinfo_addr_set_len(netinfo_addr, 4);
+ netinfo_addr_set_addr_ipv4(netinfo_addr, tor_addr_to_ipv4h(tor_addr));
+ } else if (addr_family == AF_INET6) {
+ netinfo_addr_set_addr_type(netinfo_addr, NETINFO_ADDR_TYPE_IPV6);
+ netinfo_addr_set_len(netinfo_addr, 16);
+ uint8_t *ipv6_buf = netinfo_addr_getarray_addr_ipv6(netinfo_addr);
+ const uint8_t *in6_addr = tor_addr_to_in6_addr8(tor_addr);
+ memcpy(ipv6_buf, in6_addr, 16);
+ }
+
+ return netinfo_addr;
+}
+
/** Send a NETINFO cell on <b>conn</b>, telling the other server what we know
* about their address, our address, and the current time. */
MOCK_IMPL(int,
@@ -2436,8 +2473,7 @@ connection_or_send_netinfo,(or_connection_t *conn))
cell_t cell;
time_t now = time(NULL);
const routerinfo_t *me;
- int len;
- uint8_t *out;
+ int r = -1;
tor_assert(conn->handshake_state);
@@ -2450,20 +2486,21 @@ connection_or_send_netinfo,(or_connection_t *conn))
memset(&cell, 0, sizeof(cell_t));
cell.command = CELL_NETINFO;
+ netinfo_cell_t *netinfo_cell = netinfo_cell_new();
+
/* Timestamp, if we're a relay. */
if (public_server_mode(get_options()) || ! conn->is_outgoing)
- set_uint32(cell.payload, htonl((uint32_t)now));
+ netinfo_cell_set_timestamp(netinfo_cell, (uint32_t)now);
/* Their address. */
- out = cell.payload + 4;
+ const tor_addr_t *remote_tor_addr =
+ !tor_addr_is_null(&conn->real_addr) ? &conn->real_addr : &conn->base_.addr;
/* We use &conn->real_addr below, unless it hasn't yet been set. If it
* hasn't yet been set, we know that base_.addr hasn't been tampered with
* yet either. */
- len = append_address_to_payload(out, !tor_addr_is_null(&conn->real_addr)
- ? &conn->real_addr : &conn->base_.addr);
- if (len<0)
- return -1;
- out += len;
+ netinfo_addr_t *their_addr = netinfo_addr_from_tor_addr(remote_tor_addr);
+
+ netinfo_cell_set_other_addr(netinfo_cell, their_addr);
/* My address -- only include it if I'm a public relay, or if I'm a
* bridge and this is an incoming connection. If I'm a bridge and this
@@ -2471,556 +2508,40 @@ connection_or_send_netinfo,(or_connection_t *conn))
if ((public_server_mode(get_options()) || !conn->is_outgoing) &&
(me = router_get_my_routerinfo())) {
tor_addr_t my_addr;
- *out++ = 1 + !tor_addr_is_null(&me->ipv6_addr);
-
tor_addr_from_ipv4h(&my_addr, me->addr);
- len = append_address_to_payload(out, &my_addr);
- if (len < 0)
- return -1;
- out += len;
-
- if (!tor_addr_is_null(&me->ipv6_addr)) {
- len = append_address_to_payload(out, &me->ipv6_addr);
- if (len < 0)
- return -1;
- }
- } else {
- *out = 0;
- }
-
- conn->handshake_state->digest_sent_data = 0;
- conn->handshake_state->sent_netinfo = 1;
- connection_or_write_cell_to_buf(&cell, conn);
-
- return 0;
-}
-
-/** Helper used to add an encoded certs to a cert cell */
-static void
-add_certs_cell_cert_helper(certs_cell_t *certs_cell,
- uint8_t cert_type,
- const uint8_t *cert_encoded,
- size_t cert_len)
-{
- tor_assert(cert_len <= UINT16_MAX);
- certs_cell_cert_t *ccc = certs_cell_cert_new();
- ccc->cert_type = cert_type;
- ccc->cert_len = cert_len;
- certs_cell_cert_setlen_body(ccc, cert_len);
- memcpy(certs_cell_cert_getarray_body(ccc), cert_encoded, cert_len);
-
- certs_cell_add_certs(certs_cell, ccc);
-}
-
-/** Add an encoded X509 cert (stored as <b>cert_len</b> bytes at
- * <b>cert_encoded</b>) to the trunnel certs_cell_t object that we are
- * building in <b>certs_cell</b>. Set its type field to <b>cert_type</b>.
- * (If <b>cert</b> is NULL, take no action.) */
-static void
-add_x509_cert(certs_cell_t *certs_cell,
- uint8_t cert_type,
- const tor_x509_cert_t *cert)
-{
- if (NULL == cert)
- return;
- const uint8_t *cert_encoded = NULL;
- size_t cert_len;
- tor_x509_cert_get_der(cert, &cert_encoded, &cert_len);
+ uint8_t n_my_addrs = 1 + !tor_addr_is_null(&me->ipv6_addr);
+ netinfo_cell_set_n_my_addrs(netinfo_cell, n_my_addrs);
- add_certs_cell_cert_helper(certs_cell, cert_type, cert_encoded, cert_len);
-}
+ netinfo_cell_add_my_addrs(netinfo_cell,
+ netinfo_addr_from_tor_addr(&my_addr));
-/** Add an Ed25519 cert from <b>cert</b> to the trunnel certs_cell_t object
- * that we are building in <b>certs_cell</b>. Set its type field to
- * <b>cert_type</b>. (If <b>cert</b> is NULL, take no action.) */
-static void
-add_ed25519_cert(certs_cell_t *certs_cell,
- uint8_t cert_type,
- const tor_cert_t *cert)
-{
- if (NULL == cert)
- return;
-
- add_certs_cell_cert_helper(certs_cell, cert_type,
- cert->encoded, cert->encoded_len);
-}
-
-#ifdef TOR_UNIT_TESTS
-int certs_cell_ed25519_disabled_for_testing = 0;
-#else
-#define certs_cell_ed25519_disabled_for_testing 0
-#endif
-
-/** Send a CERTS cell on the connection <b>conn</b>. Return 0 on success, -1
- * on failure. */
-int
-connection_or_send_certs_cell(or_connection_t *conn)
-{
- const tor_x509_cert_t *global_link_cert = NULL, *id_cert = NULL;
- tor_x509_cert_t *own_link_cert = NULL;
- var_cell_t *cell;
-
- certs_cell_t *certs_cell = NULL;
-
- tor_assert(conn->base_.state == OR_CONN_STATE_OR_HANDSHAKING_V3);
-
- if (! conn->handshake_state)
- return -1;
-
- const int conn_in_server_mode = ! conn->handshake_state->started_here;
-
- /* Get the encoded values of the X509 certificates */
- if (tor_tls_get_my_certs(conn_in_server_mode,
- &global_link_cert, &id_cert) < 0)
- return -1;
-
- if (conn_in_server_mode) {
- own_link_cert = tor_tls_get_own_cert(conn->tls);
- }
- tor_assert(id_cert);
-
- certs_cell = certs_cell_new();
-
- /* Start adding certs. First the link cert or auth1024 cert. */
- if (conn_in_server_mode) {
- tor_assert_nonfatal(own_link_cert);
- add_x509_cert(certs_cell,
- OR_CERT_TYPE_TLS_LINK, own_link_cert);
- } else {
- tor_assert(global_link_cert);
- add_x509_cert(certs_cell,
- OR_CERT_TYPE_AUTH_1024, global_link_cert);
- }
-
- /* Next the RSA->RSA ID cert */
- add_x509_cert(certs_cell,
- OR_CERT_TYPE_ID_1024, id_cert);
-
- /* Next the Ed25519 certs */
- add_ed25519_cert(certs_cell,
- CERTTYPE_ED_ID_SIGN,
- get_master_signing_key_cert());
- if (conn_in_server_mode) {
- tor_assert_nonfatal(conn->handshake_state->own_link_cert ||
- certs_cell_ed25519_disabled_for_testing);
- add_ed25519_cert(certs_cell,
- CERTTYPE_ED_SIGN_LINK,
- conn->handshake_state->own_link_cert);
- } else {
- add_ed25519_cert(certs_cell,
- CERTTYPE_ED_SIGN_AUTH,
- get_current_auth_key_cert());
- }
-
- /* And finally the crosscert. */
- {
- const uint8_t *crosscert=NULL;
- size_t crosscert_len;
- get_master_rsa_crosscert(&crosscert, &crosscert_len);
- if (crosscert) {
- add_certs_cell_cert_helper(certs_cell,
- CERTTYPE_RSA1024_ID_EDID,
- crosscert, crosscert_len);
+ if (!tor_addr_is_null(&me->ipv6_addr)) {
+ netinfo_cell_add_my_addrs(netinfo_cell,
+ netinfo_addr_from_tor_addr(&me->ipv6_addr));
}
}
- /* We've added all the certs; make the cell. */
- certs_cell->n_certs = certs_cell_getlen_certs(certs_cell);
-
- ssize_t alloc_len = certs_cell_encoded_len(certs_cell);
- tor_assert(alloc_len >= 0 && alloc_len <= UINT16_MAX);
- cell = var_cell_new(alloc_len);
- cell->command = CELL_CERTS;
- ssize_t enc_len = certs_cell_encode(cell->payload, alloc_len, certs_cell);
- tor_assert(enc_len > 0 && enc_len <= alloc_len);
- cell->payload_len = enc_len;
-
- connection_or_write_var_cell_to_buf(cell, conn);
- var_cell_free(cell);
- certs_cell_free(certs_cell);
- tor_x509_cert_free(own_link_cert);
-
- return 0;
-}
-
-#ifdef TOR_UNIT_TESTS
-int testing__connection_or_pretend_TLSSECRET_is_supported = 0;
-#else
-#define testing__connection_or_pretend_TLSSECRET_is_supported 0
-#endif
-
-/** Return true iff <b>challenge_type</b> is an AUTHCHALLENGE type that
- * we can send and receive. */
-int
-authchallenge_type_is_supported(uint16_t challenge_type)
-{
- switch (challenge_type) {
- case AUTHTYPE_RSA_SHA256_TLSSECRET:
-#ifdef HAVE_WORKING_TOR_TLS_GET_TLSSECRETS
- return 1;
-#else
- return testing__connection_or_pretend_TLSSECRET_is_supported;
-#endif
- case AUTHTYPE_ED25519_SHA256_RFC5705:
- return 1;
- case AUTHTYPE_RSA_SHA256_RFC5705:
- default:
- return 0;
+ const char *errmsg = NULL;
+ if ((errmsg = netinfo_cell_check(netinfo_cell))) {
+ log_warn(LD_OR, "Failed to validate NETINFO cell with error: %s",
+ errmsg);
+ goto cleanup;
}
-}
-
-/** Return true iff <b>challenge_type_a</b> is one that we would rather
- * use than <b>challenge_type_b</b>. */
-int
-authchallenge_type_is_better(uint16_t challenge_type_a,
- uint16_t challenge_type_b)
-{
- /* Any supported type is better than an unsupported one;
- * all unsupported types are equally bad. */
- if (!authchallenge_type_is_supported(challenge_type_a))
- return 0;
- if (!authchallenge_type_is_supported(challenge_type_b))
- return 1;
- /* It happens that types are superior in numerically ascending order.
- * If that ever changes, this must change too. */
- return (challenge_type_a > challenge_type_b);
-}
-
-/** Send an AUTH_CHALLENGE cell on the connection <b>conn</b>. Return 0
- * on success, -1 on failure. */
-int
-connection_or_send_auth_challenge_cell(or_connection_t *conn)
-{
- var_cell_t *cell = NULL;
- int r = -1;
- tor_assert(conn->base_.state == OR_CONN_STATE_OR_HANDSHAKING_V3);
-
- if (! conn->handshake_state)
- return -1;
-
- auth_challenge_cell_t *ac = auth_challenge_cell_new();
- tor_assert(sizeof(ac->challenge) == 32);
- crypto_rand((char*)ac->challenge, sizeof(ac->challenge));
-
- if (authchallenge_type_is_supported(AUTHTYPE_RSA_SHA256_TLSSECRET))
- auth_challenge_cell_add_methods(ac, AUTHTYPE_RSA_SHA256_TLSSECRET);
- /* Disabled, because everything that supports this method also supports
- * the much-superior ED25519_SHA256_RFC5705 */
- /* auth_challenge_cell_add_methods(ac, AUTHTYPE_RSA_SHA256_RFC5705); */
- if (authchallenge_type_is_supported(AUTHTYPE_ED25519_SHA256_RFC5705))
- auth_challenge_cell_add_methods(ac, AUTHTYPE_ED25519_SHA256_RFC5705);
- auth_challenge_cell_set_n_methods(ac,
- auth_challenge_cell_getlen_methods(ac));
-
- cell = var_cell_new(auth_challenge_cell_encoded_len(ac));
- ssize_t len = auth_challenge_cell_encode(cell->payload, cell->payload_len,
- ac);
- if (len != cell->payload_len) {
- /* LCOV_EXCL_START */
- log_warn(LD_BUG, "Encoded auth challenge cell length not as expected");
- goto done;
- /* LCOV_EXCL_STOP */
+ if (netinfo_cell_encode(cell.payload, CELL_PAYLOAD_SIZE,
+ netinfo_cell) < 0) {
+ log_warn(LD_OR, "Failed generating NETINFO cell");
+ goto cleanup;
}
- cell->command = CELL_AUTH_CHALLENGE;
- connection_or_write_var_cell_to_buf(cell, conn);
- r = 0;
+ conn->handshake_state->digest_sent_data = 0;
+ conn->handshake_state->sent_netinfo = 1;
+ connection_or_write_cell_to_buf(&cell, conn);
- done:
- var_cell_free(cell);
- auth_challenge_cell_free(ac);
+ r = 0;
+ cleanup:
+ netinfo_cell_free(netinfo_cell);
return r;
}
-
-/** Compute the main body of an AUTHENTICATE cell that a client can use
- * to authenticate itself on a v3 handshake for <b>conn</b>. Return it
- * in a var_cell_t.
- *
- * If <b>server</b> is true, only calculate the first
- * V3_AUTH_FIXED_PART_LEN bytes -- the part of the authenticator that's
- * determined by the rest of the handshake, and which match the provided value
- * exactly.
- *
- * If <b>server</b> is false and <b>signing_key</b> is NULL, calculate the
- * first V3_AUTH_BODY_LEN bytes of the authenticator (that is, everything
- * that should be signed), but don't actually sign it.
- *
- * If <b>server</b> is false and <b>signing_key</b> is provided, calculate the
- * entire authenticator, signed with <b>signing_key</b>.
- *
- * Return the length of the cell body on success, and -1 on failure.
- */
-var_cell_t *
-connection_or_compute_authenticate_cell_body(or_connection_t *conn,
- const int authtype,
- crypto_pk_t *signing_key,
- const ed25519_keypair_t *ed_signing_key,
- int server)
-{
- auth1_t *auth = NULL;
- auth_ctx_t *ctx = auth_ctx_new();
- var_cell_t *result = NULL;
- int old_tlssecrets_algorithm = 0;
- const char *authtype_str = NULL;
-
- int is_ed = 0;
-
- /* assert state is reasonable XXXX */
- switch (authtype) {
- case AUTHTYPE_RSA_SHA256_TLSSECRET:
- authtype_str = "AUTH0001";
- old_tlssecrets_algorithm = 1;
- break;
- case AUTHTYPE_RSA_SHA256_RFC5705:
- authtype_str = "AUTH0002";
- break;
- case AUTHTYPE_ED25519_SHA256_RFC5705:
- authtype_str = "AUTH0003";
- is_ed = 1;
- break;
- default:
- tor_assert(0);
- break;
- }
-
- auth = auth1_new();
- ctx->is_ed = is_ed;
-
- /* Type: 8 bytes. */
- memcpy(auth1_getarray_type(auth), authtype_str, 8);
-
- {
- const tor_x509_cert_t *id_cert=NULL;
- const common_digests_t *my_digests, *their_digests;
- const uint8_t *my_id, *their_id, *client_id, *server_id;
- if (tor_tls_get_my_certs(server, NULL, &id_cert))
- goto err;
- my_digests = tor_x509_cert_get_id_digests(id_cert);
- their_digests =
- tor_x509_cert_get_id_digests(conn->handshake_state->certs->id_cert);
- tor_assert(my_digests);
- tor_assert(their_digests);
- my_id = (uint8_t*)my_digests->d[DIGEST_SHA256];
- their_id = (uint8_t*)their_digests->d[DIGEST_SHA256];
-
- client_id = server ? their_id : my_id;
- server_id = server ? my_id : their_id;
-
- /* Client ID digest: 32 octets. */
- memcpy(auth->cid, client_id, 32);
-
- /* Server ID digest: 32 octets. */
- memcpy(auth->sid, server_id, 32);
- }
-
- if (is_ed) {
- const ed25519_public_key_t *my_ed_id, *their_ed_id;
- if (!conn->handshake_state->certs->ed_id_sign) {
- log_warn(LD_OR, "Ed authenticate without Ed ID cert from peer.");
- goto err;
- }
- my_ed_id = get_master_identity_key();
- their_ed_id = &conn->handshake_state->certs->ed_id_sign->signing_key;
-
- const uint8_t *cid_ed = (server ? their_ed_id : my_ed_id)->pubkey;
- const uint8_t *sid_ed = (server ? my_ed_id : their_ed_id)->pubkey;
-
- memcpy(auth->u1_cid_ed, cid_ed, ED25519_PUBKEY_LEN);
- memcpy(auth->u1_sid_ed, sid_ed, ED25519_PUBKEY_LEN);
- }
-
- {
- crypto_digest_t *server_d, *client_d;
- if (server) {
- server_d = conn->handshake_state->digest_sent;
- client_d = conn->handshake_state->digest_received;
- } else {
- client_d = conn->handshake_state->digest_sent;
- server_d = conn->handshake_state->digest_received;
- }
-
- /* Server log digest : 32 octets */
- crypto_digest_get_digest(server_d, (char*)auth->slog, 32);
-
- /* Client log digest : 32 octets */
- crypto_digest_get_digest(client_d, (char*)auth->clog, 32);
- }
-
- {
- /* Digest of cert used on TLS link : 32 octets. */
- tor_x509_cert_t *cert = NULL;
- if (server) {
- cert = tor_tls_get_own_cert(conn->tls);
- } else {
- cert = tor_tls_get_peer_cert(conn->tls);
- }
- if (!cert) {
- log_warn(LD_OR, "Unable to find cert when making %s data.",
- authtype_str);
- goto err;
- }
-
- memcpy(auth->scert,
- tor_x509_cert_get_cert_digests(cert)->d[DIGEST_SHA256], 32);
-
- tor_x509_cert_free(cert);
- }
-
- /* HMAC of clientrandom and serverrandom using master key : 32 octets */
- if (old_tlssecrets_algorithm) {
- if (tor_tls_get_tlssecrets(conn->tls, auth->tlssecrets) < 0) {
- log_fn(LOG_PROTOCOL_WARN, LD_OR, "Somebody asked us for an older TLS "
- "authentication method (AUTHTYPE_RSA_SHA256_TLSSECRET) "
- "which we don't support.");
- }
- } else {
- char label[128];
- tor_snprintf(label, sizeof(label),
- "EXPORTER FOR TOR TLS CLIENT BINDING %s", authtype_str);
- int r = tor_tls_export_key_material(conn->tls, auth->tlssecrets,
- auth->cid, sizeof(auth->cid),
- label);
- if (r < 0) {
- if (r != -2)
- log_warn(LD_BUG, "TLS key export failed for unknown reason.");
- // If r == -2, this was openssl bug 7712.
- goto err;
- }
- }
-
- /* 8 octets were reserved for the current time, but we're trying to get out
- * of the habit of sending time around willynilly. Fortunately, nothing
- * checks it. That's followed by 16 bytes of nonce. */
- crypto_rand((char*)auth->rand, 24);
-
- ssize_t maxlen = auth1_encoded_len(auth, ctx);
- if (ed_signing_key && is_ed) {
- maxlen += ED25519_SIG_LEN;
- } else if (signing_key && !is_ed) {
- maxlen += crypto_pk_keysize(signing_key);
- }
-
- const int AUTH_CELL_HEADER_LEN = 4; /* 2 bytes of type, 2 bytes of length */
- result = var_cell_new(AUTH_CELL_HEADER_LEN + maxlen);
- uint8_t *const out = result->payload + AUTH_CELL_HEADER_LEN;
- const size_t outlen = maxlen;
- ssize_t len;
-
- result->command = CELL_AUTHENTICATE;
- set_uint16(result->payload, htons(authtype));
-
- if ((len = auth1_encode(out, outlen, auth, ctx)) < 0) {
- /* LCOV_EXCL_START */
- log_warn(LD_BUG, "Unable to encode signed part of AUTH1 data.");
- goto err;
- /* LCOV_EXCL_STOP */
- }
-
- if (server) {
- auth1_t *tmp = NULL;
- ssize_t len2 = auth1_parse(&tmp, out, len, ctx);
- if (!tmp) {
- /* LCOV_EXCL_START */
- log_warn(LD_BUG, "Unable to parse signed part of AUTH1 data that "
- "we just encoded");
- goto err;
- /* LCOV_EXCL_STOP */
- }
- result->payload_len = (tmp->end_of_signed - result->payload);
-
- auth1_free(tmp);
- if (len2 != len) {
- /* LCOV_EXCL_START */
- log_warn(LD_BUG, "Mismatched length when re-parsing AUTH1 data.");
- goto err;
- /* LCOV_EXCL_STOP */
- }
- goto done;
- }
-
- if (ed_signing_key && is_ed) {
- ed25519_signature_t sig;
- if (ed25519_sign(&sig, out, len, ed_signing_key) < 0) {
- /* LCOV_EXCL_START */
- log_warn(LD_BUG, "Unable to sign ed25519 authentication data");
- goto err;
- /* LCOV_EXCL_STOP */
- }
- auth1_setlen_sig(auth, ED25519_SIG_LEN);
- memcpy(auth1_getarray_sig(auth), sig.sig, ED25519_SIG_LEN);
-
- } else if (signing_key && !is_ed) {
- auth1_setlen_sig(auth, crypto_pk_keysize(signing_key));
-
- char d[32];
- crypto_digest256(d, (char*)out, len, DIGEST_SHA256);
- int siglen = crypto_pk_private_sign(signing_key,
- (char*)auth1_getarray_sig(auth),
- auth1_getlen_sig(auth),
- d, 32);
- if (siglen < 0) {
- log_warn(LD_OR, "Unable to sign AUTH1 data.");
- goto err;
- }
-
- auth1_setlen_sig(auth, siglen);
- }
-
- len = auth1_encode(out, outlen, auth, ctx);
- if (len < 0) {
- /* LCOV_EXCL_START */
- log_warn(LD_BUG, "Unable to encode signed AUTH1 data.");
- goto err;
- /* LCOV_EXCL_STOP */
- }
- tor_assert(len + AUTH_CELL_HEADER_LEN <= result->payload_len);
- result->payload_len = len + AUTH_CELL_HEADER_LEN;
- set_uint16(result->payload+2, htons(len));
-
- goto done;
-
- err:
- var_cell_free(result);
- result = NULL;
- done:
- auth1_free(auth);
- auth_ctx_free(ctx);
- return result;
-}
-
-/** Send an AUTHENTICATE cell on the connection <b>conn</b>. Return 0 on
- * success, -1 on failure */
-MOCK_IMPL(int,
-connection_or_send_authenticate_cell,(or_connection_t *conn, int authtype))
-{
- var_cell_t *cell;
- crypto_pk_t *pk = tor_tls_get_my_client_auth_key();
- /* XXXX make sure we're actually supposed to send this! */
-
- if (!pk) {
- log_warn(LD_BUG, "Can't compute authenticate cell: no client auth key");
- return -1;
- }
- if (! authchallenge_type_is_supported(authtype)) {
- log_warn(LD_BUG, "Tried to send authenticate cell with unknown "
- "authentication type %d", authtype);
- return -1;
- }
-
- cell = connection_or_compute_authenticate_cell_body(conn,
- authtype,
- pk,
- get_current_auth_keypair(),
- 0 /* not server */);
- if (! cell) {
- log_fn(LOG_PROTOCOL_WARN, LD_NET, "Unable to compute authenticate cell!");
- return -1;
- }
- connection_or_write_var_cell_to_buf(cell, conn);
- var_cell_free(cell);
-
- return 0;
-}
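
The NETINFO rework above swaps the hand-rolled append_address_to_payload() calls for trunnel-generated netinfo_addr_* setters. For orientation, the wire form those setters produce is a simple type/length/value triple; the sketch below is a hypothetical by-hand encoder (encode_netinfo_addr() is not a Tor function) that assumes the tor-spec type codes 0x04 for IPv4 and 0x06 for IPv6.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical encoder for the NETINFO address triple: one type octet
 * (0x04 = IPv4, 0x06 = IPv6 per tor-spec), one length octet, then the
 * raw address bytes.  The trunnel-generated netinfo_addr_* code used
 * in the diff produces this layout. */
static size_t
encode_netinfo_addr(uint8_t *out, size_t outlen,
                    const uint8_t *addr, size_t addrlen)
{
  uint8_t type;
  if (addrlen == 4)
    type = 0x04;
  else if (addrlen == 16)
    type = 0x06;
  else
    return 0;                 /* unsupported address family */
  if (outlen < 2 + addrlen)
    return 0;                 /* not enough room in the buffer */
  out[0] = type;
  out[1] = (uint8_t)addrlen;
  memcpy(out + 2, addr, addrlen);
  return 2 + addrlen;
}

int
main(void)
{
  const uint8_t v4[4] = { 192, 0, 2, 1 };
  uint8_t buf[32];
  size_t n = encode_netinfo_addr(buf, sizeof(buf), v4, sizeof(v4));
  for (size_t i = 0; i < n; i++)
    printf("%02x ", (unsigned)buf[i]);
  printf("\n");               /* 04 04 c0 00 02 01 */
  return 0;
}
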
diff --git a/src/core/or/connection_or.h b/src/core/or/connection_or.h
index 817bcdd317..e9ace56ab4 100644
--- a/src/core/or/connection_or.h
+++ b/src/core/or/connection_or.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -17,40 +17,11 @@ struct ed25519_keypair_t;
or_connection_t *TO_OR_CONN(connection_t *);
-#define OR_CONN_STATE_MIN_ 1
-/** State for a connection to an OR: waiting for connect() to finish. */
-#define OR_CONN_STATE_CONNECTING 1
-/** State for a connection to an OR: waiting for proxy handshake to complete */
-#define OR_CONN_STATE_PROXY_HANDSHAKING 2
-/** State for an OR connection client: SSL is handshaking, not done
- * yet. */
-#define OR_CONN_STATE_TLS_HANDSHAKING 3
-/** State for a connection to an OR: We're doing a second SSL handshake for
- * renegotiation purposes. (V2 handshake only.) */
-#define OR_CONN_STATE_TLS_CLIENT_RENEGOTIATING 4
-/** State for a connection at an OR: We're waiting for the client to
- * renegotiate (to indicate a v2 handshake) or send a versions cell (to
- * indicate a v3 handshake) */
-#define OR_CONN_STATE_TLS_SERVER_RENEGOTIATING 5
-/** State for an OR connection: We're done with our SSL handshake, we've done
- * renegotiation, but we haven't yet negotiated link protocol versions and
- * sent a netinfo cell. */
-#define OR_CONN_STATE_OR_HANDSHAKING_V2 6
-/** State for an OR connection: We're done with our SSL handshake, but we
- * haven't yet negotiated link protocol versions, done a V3 handshake, and
- * sent a netinfo cell. */
-#define OR_CONN_STATE_OR_HANDSHAKING_V3 7
-/** State for an OR connection: Ready to send/receive cells. */
-#define OR_CONN_STATE_OPEN 8
-#define OR_CONN_STATE_MAX_ 8
+#include "core/or/orconn_event.h"
void connection_or_clear_identity(or_connection_t *conn);
void connection_or_clear_identity_map(void);
void clear_broken_connection_map(int disable);
-or_connection_t *connection_or_get_for_extend(const char *digest,
- const tor_addr_t *target_addr,
- const char **msg_out,
- int *launch_out);
void connection_or_block_renegotiation(or_connection_t *conn);
int connection_or_reached_eof(or_connection_t *conn);
@@ -81,6 +52,9 @@ MOCK_DECL(void,connection_or_close_for_error,
void connection_or_report_broken_states(int severity, int domain);
+void connection_or_event_status(or_connection_t *conn,
+ or_conn_status_event_t tp, int reason);
+
MOCK_DECL(int,connection_tls_start_handshake,(or_connection_t *conn,
int receiving));
int connection_tls_continue_handshake(or_connection_t *conn);
@@ -119,19 +93,6 @@ MOCK_DECL(void,connection_or_write_var_cell_to_buf,(const var_cell_t *cell,
or_connection_t *conn));
int connection_or_send_versions(or_connection_t *conn, int v3_plus);
MOCK_DECL(int,connection_or_send_netinfo,(or_connection_t *conn));
-int connection_or_send_certs_cell(or_connection_t *conn);
-int connection_or_send_auth_challenge_cell(or_connection_t *conn);
-int authchallenge_type_is_supported(uint16_t challenge_type);
-int authchallenge_type_is_better(uint16_t challenge_type_a,
- uint16_t challenge_type_b);
-var_cell_t *connection_or_compute_authenticate_cell_body(
- or_connection_t *conn,
- const int authtype,
- crypto_pk_t *signing_key,
- const struct ed25519_keypair_t *ed_signing_key,
- int server);
-MOCK_DECL(int,connection_or_send_authenticate_cell,
- (or_connection_t *conn, int type));
int is_or_protocol_version_known(uint16_t version);
@@ -156,10 +117,16 @@ void connection_or_group_set_badness_(smartlist_t *group, int force);
#ifdef CONNECTION_OR_PRIVATE
STATIC int should_connect_to_relay(const or_connection_t *or_conn);
STATIC void note_or_connect_failed(const or_connection_t *or_conn);
-#endif
+
+/*
+ * Call this when changing connection state, so notifications to the owning
+ * channel can be handled.
+ */
+MOCK_DECL(STATIC void,connection_or_change_state,
+ (or_connection_t *conn, uint8_t state));
+#endif /* defined(CONNECTION_OR_PRIVATE) */
#ifdef TOR_UNIT_TESTS
-extern int certs_cell_ed25519_disabled_for_testing;
extern int testing__connection_or_pretend_TLSSECRET_is_supported;
#endif
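
The connection_or_event_status() declaration above is part of the move away from direct control_event_*() calls toward publish/subscribe messages (see orconn_event.h). The toy sketch below shows the shape of that pattern — allocate a small message, hand it to a subscriber, give up ownership — using made-up names (toy_status_msg, toy_status_publish) rather than Tor's actual orconn_event API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy sketch of the publish pattern behind connection_or_event_status():
 * build a small heap-allocated message, hand it to whoever subscribed,
 * and give up ownership.  Names are invented; Tor's real code goes
 * through orconn_status_publish() and the pubsub subsystem. */
struct toy_status_msg {
  uint64_t gid;   /* connection's global identifier */
  int status;     /* launched / connected / failed / closed */
  int reason;     /* end reason, if any */
};

typedef void (*toy_subscriber_fn)(const struct toy_status_msg *msg);

static toy_subscriber_fn the_subscriber = NULL;

static void
toy_status_publish(struct toy_status_msg *msg)
{
  if (the_subscriber)
    the_subscriber(msg);
  free(msg);  /* ownership passed to the publish call */
}

static void
print_subscriber(const struct toy_status_msg *msg)
{
  printf("orconn %llu: status=%d reason=%d\n",
         (unsigned long long)msg->gid, msg->status, msg->reason);
}

int
main(void)
{
  the_subscriber = print_subscriber;
  struct toy_status_msg *msg = calloc(1, sizeof(*msg));
  if (!msg)
    return 1;
  msg->gid = 7;
  msg->status = 3;   /* e.g. "connected" in this toy encoding */
  toy_status_publish(msg);
  return 0;
}
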
diff --git a/src/core/or/connection_st.h b/src/core/or/connection_st.h
index c197a81340..685c9f89f4 100644
--- a/src/core/or/connection_st.h
+++ b/src/core/or/connection_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file connection_st.h
+ * @brief Base connection structure.
+ **/
+
#ifndef CONNECTION_ST_H
#define CONNECTION_ST_H
@@ -149,4 +154,4 @@ struct connection_t {
* directory connection. */
#define DIR_CONN_IS_SERVER(conn) ((conn)->purpose == DIR_PURPOSE_SERVER)
-#endif
+#endif /* !defined(CONNECTION_ST_H) */
diff --git a/src/core/or/core_or.md b/src/core/or/core_or.md
new file mode 100644
index 0000000000..0b4d430a00
--- /dev/null
+++ b/src/core/or/core_or.md
@@ -0,0 +1,62 @@
+@dir core/or
+@brief core/or: **Onion routing happens here!**
+
+This is the central part of Tor that handles the core tasks of onion routing:
+building circuits, handling circuits, attaching circuits to streams, moving
+data around, and so forth.
+
+Some aspects of this module should probably be refactored into others.
+
+Notable files here include:
+
+`channel.c`
+: Generic channel implementation. Channels handle sending and receiving cells
+among tor nodes.
+
+`channeltls.c`
+: Channel implementation for TLS-based OR connections. Uses `connection_or.c`.
+
+`circuitbuild.c`
+: Code for constructing circuits and choosing their paths. (*Note*:
+this module could plausibly be split into handling the client side,
+the server side, and the path generation aspects of circuit building.)
+
+`circuitlist.c`
+: Code for maintaining and navigating the global list of circuits.
+
+`circuitmux.c`
+: Generic circuitmux implementation. A circuitmux handles deciding, for a
+particular channel, which circuit should write next.
+
+`circuitmux_ewma.c`
+: A circuitmux implementation based on the EWMA (exponentially
+weighted moving average) algorithm.
+
+`circuituse.c`
+: Code to actually send and receive data on circuits.
+
+`command.c`
+: Handles incoming cells on channels.
+
+`connection.c`
+: Generic and common connection tools, and implementation for the simpler
+connection types.
+
+`connection_edge.c`
+: Implementation for entry and exit connections.
+
+`connection_or.c`
+: Implementation for OR connections (the ones that send cells over TLS).
+
+`onion.c`
+: Generic code for generating and responding to CREATE and CREATED
+cells, and performing the appropriate onion handshakes. Also contains
+code to manage the server-side onion queue.
+
+`relay.c`
+: Handles particular types of relay cells, and provides code to receive,
+encrypt, route, and interpret relay cells.
+
+`scheduler.c`
+: Decides which channel/circuit pair is ready to receive the next cell.
+
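
core_or.md above describes circuitmux_ewma.c as an EWMA-based scheduler. As a rough, self-contained illustration of the idea only (the half-life constant and struct below are invented, not Tor's cell_ewma code): each circuit keeps an exponentially decaying cell count, and the mux prefers the circuit with the lowest count.

#include <math.h>
#include <stdio.h>

/* Toy model of the EWMA idea behind circuitmux_ewma.c: each circuit
 * keeps a cell count that decays exponentially over time, and the mux
 * prefers the circuit with the lowest (quietest) count.  The half-life
 * and field names are invented for illustration. */
struct toy_circ {
  double ewma_cells;   /* decayed count of recently relayed cells */
  double last_update;  /* time of the last decay, in seconds */
};

static const double HALFLIFE = 66.0;  /* decay half-life, seconds */

static void
toy_ewma_note_cell(struct toy_circ *c, double now)
{
  /* Decay the old count down to "now", then add one cell. */
  double elapsed = now - c->last_update;
  c->ewma_cells *= pow(0.5, elapsed / HALFLIFE);
  c->ewma_cells += 1.0;
  c->last_update = now;
}

int
main(void)
{
  struct toy_circ busy = { 0.0, 0.0 }, quiet = { 0.0, 0.0 };
  for (int i = 0; i < 100; i++)
    toy_ewma_note_cell(&busy, i * 0.1);   /* 100 cells in 10 seconds */
  toy_ewma_note_cell(&quiet, 10.0);       /* a single cell at t=10 */
  /* A mux comparing the two at t=10 would pick "quiet" first. */
  printf("busy=%.1f quiet=%.1f\n", busy.ewma_cells, quiet.ewma_cells);
  return 0;
}
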
diff --git a/src/core/or/cpath_build_state_st.h b/src/core/or/cpath_build_state_st.h
index dbe596d851..ee9a0d972c 100644
--- a/src/core/or/cpath_build_state_st.h
+++ b/src/core/or/cpath_build_state_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file cpath_build_state_st.h
+ * @brief Circuit-build-state structure.
+ **/
+
#ifndef CIRCUIT_BUILD_STATE_ST_ST_H
#define CIRCUIT_BUILD_STATE_ST_ST_H
@@ -34,5 +39,4 @@ struct cpath_build_state_t {
time_t expiry_time;
};
-#endif
-
+#endif /* !defined(CIRCUIT_BUILD_STATE_ST_ST_H) */
diff --git a/src/core/or/crypt_path.c b/src/core/or/crypt_path.c
new file mode 100644
index 0000000000..8f41540848
--- /dev/null
+++ b/src/core/or/crypt_path.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) 2019-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file crypt_path.c
+ *
+ * \brief Functions dealing with layered circuit encryption. This file aims to
+ * provide an API around the crypt_path_t structure which holds crypto
+ * information about a specific hop of a circuit.
+ *
+ * TODO: We should eventually move all functions that deal with and
+ * manipulate crypt_path_t to this file, so that we encapsulate more and more
+ * of crypt_path_t. Here are some more functions that can be moved here with
+ * some more effort:
+ *
+ * - circuit_list_path_impl()
+ * - Functions dealing with cpaths in HSv2 create_rend_cpath() and
+ * create_rend_cpath_legacy()
+ * - The cpath related parts of rend_service_receive_introduction() and
+ * rend_client_send_introduction().
+ **/
+
+#define CRYPT_PATH_PRIVATE
+
+#include "core/or/or.h"
+#include "core/or/crypt_path.h"
+
+#include "core/crypto/relay_crypto.h"
+#include "core/crypto/onion_crypto.h"
+#include "core/or/circuitbuild.h"
+#include "core/or/circuitlist.h"
+
+#include "lib/crypt_ops/crypto_dh.h"
+#include "lib/crypt_ops/crypto_util.h"
+
+#include "core/or/crypt_path_st.h"
+#include "core/or/cell_st.h"
+
+/** Add <b>new_hop</b> to the end of the doubly-linked list <b>head_ptr</b>.
+ * This function is used to extend the cpath by another hop.
+ */
+void
+cpath_extend_linked_list(crypt_path_t **head_ptr, crypt_path_t *new_hop)
+{
+ if (*head_ptr) {
+ new_hop->next = (*head_ptr);
+ new_hop->prev = (*head_ptr)->prev;
+ (*head_ptr)->prev->next = new_hop;
+ (*head_ptr)->prev = new_hop;
+ } else {
+ *head_ptr = new_hop;
+ new_hop->prev = new_hop->next = new_hop;
+ }
+}
+
+/** Create a new hop, annotate it with information about its
+ * corresponding router <b>choice</b>, and append it to the
+ * end of the cpath <b>head_ptr</b>. */
+int
+cpath_append_hop(crypt_path_t **head_ptr, extend_info_t *choice)
+{
+ crypt_path_t *hop = tor_malloc_zero(sizeof(crypt_path_t));
+
+ /* link hop into the cpath, at the end. */
+ cpath_extend_linked_list(head_ptr, hop);
+
+ hop->magic = CRYPT_PATH_MAGIC;
+ hop->state = CPATH_STATE_CLOSED;
+
+ hop->extend_info = extend_info_dup(choice);
+
+ hop->package_window = circuit_initial_package_window();
+ hop->deliver_window = CIRCWINDOW_START;
+
+ return 0;
+}
+
+/** Verify that cpath <b>cp</b> has all of its invariants
+ * correct. Trigger an assert if anything is invalid.
+ */
+void
+cpath_assert_ok(const crypt_path_t *cp)
+{
+ const crypt_path_t *start = cp;
+
+ do {
+ cpath_assert_layer_ok(cp);
+ /* layers must be in sequence of: "open* awaiting? closed*" */
+ if (cp != start) {
+ if (cp->state == CPATH_STATE_AWAITING_KEYS) {
+ tor_assert(cp->prev->state == CPATH_STATE_OPEN);
+ } else if (cp->state == CPATH_STATE_OPEN) {
+ tor_assert(cp->prev->state == CPATH_STATE_OPEN);
+ }
+ }
+ cp = cp->next;
+ tor_assert(cp);
+ } while (cp != start);
+}
+
+/** Verify that cpath layer <b>cp</b> has all of its invariants
+ * correct. Trigger an assert if anything is invalid.
+ */
+void
+cpath_assert_layer_ok(const crypt_path_t *cp)
+{
+// tor_assert(cp->addr); /* these are zero for rendezvous extra-hops */
+// tor_assert(cp->port);
+ tor_assert(cp);
+ tor_assert(cp->magic == CRYPT_PATH_MAGIC);
+ switch (cp->state)
+ {
+ case CPATH_STATE_OPEN:
+ relay_crypto_assert_ok(&cp->pvt_crypto);
+ FALLTHROUGH;
+ case CPATH_STATE_CLOSED:
+ /*XXXX Assert that there's no handshake_state either. */
+ tor_assert(!cp->rend_dh_handshake_state);
+ break;
+ case CPATH_STATE_AWAITING_KEYS:
+ /* tor_assert(cp->dh_handshake_state); */
+ break;
+ default:
+ log_fn(LOG_ERR, LD_BUG, "Unexpected state %d", cp->state);
+ tor_assert(0);
+ }
+ tor_assert(cp->package_window >= 0);
+ tor_assert(cp->deliver_window >= 0);
+}
+
+/** Initialize cpath-\>{f|b}_{crypto|digest} from the key material in key_data.
+ *
+ * If <b>is_hs_v3</b> is set, this cpath will be used for next gen hidden
+ * service circuits and <b>key_data</b> must be at least
+ * HS_NTOR_KEY_EXPANSION_KDF_OUT_LEN bytes in length.
+ *
+ * If <b>is_hs_v3</b> is not set, key_data must contain CPATH_KEY_MATERIAL_LEN
+ * bytes, which are used as follows:
+ * - 20 to initialize f_digest
+ * - 20 to initialize b_digest
+ * - 16 to key f_crypto
+ * - 16 to key b_crypto
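+ *   (These four parts add up to CPATH_KEY_MATERIAL_LEN, i.e. 72 bytes.)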
+ *
+ * (If 'reverse' is true, then f_XX and b_XX are swapped.)
+ *
+ * Return 0 if init was successful, else -1 if it failed.
+ */
+int
+cpath_init_circuit_crypto(crypt_path_t *cpath,
+ const char *key_data, size_t key_data_len,
+ int reverse, int is_hs_v3)
+{
+
+ tor_assert(cpath);
+ return relay_crypto_init(&cpath->pvt_crypto, key_data, key_data_len,
+ reverse, is_hs_v3);
+}
+
+/** Deallocate space associated with the cpath node <b>victim</b>. */
+void
+cpath_free(crypt_path_t *victim)
+{
+ if (!victim)
+ return;
+
+ relay_crypto_clear(&victim->pvt_crypto);
+ onion_handshake_state_release(&victim->handshake_state);
+ crypto_dh_free(victim->rend_dh_handshake_state);
+ extend_info_free(victim->extend_info);
+
+ memwipe(victim, 0xBB, sizeof(crypt_path_t)); /* poison memory */
+ tor_free(victim);
+}
+
+/********************** cpath crypto API *******************************/
+
+/** Encrypt or decrypt <b>payload</b> using the crypto of <b>cpath</b>. Actual
+ * operation decided by <b>is_decrypt</b>. */
+void
+cpath_crypt_cell(const crypt_path_t *cpath, uint8_t *payload, bool is_decrypt)
+{
+ if (is_decrypt) {
+ relay_crypt_one_payload(cpath->pvt_crypto.b_crypto, payload);
+ } else {
+ relay_crypt_one_payload(cpath->pvt_crypto.f_crypto, payload);
+ }
+}
+
+/** Getter for the incoming digest of <b>cpath</b>. */
+struct crypto_digest_t *
+cpath_get_incoming_digest(const crypt_path_t *cpath)
+{
+ return cpath->pvt_crypto.b_digest;
+}
+
+/** Set the right integrity digest on the outgoing <b>cell</b> based on the
+ * cell payload and update the forward digest of <b>cpath</b>. */
+void
+cpath_set_cell_forward_digest(crypt_path_t *cpath, cell_t *cell)
+{
+ relay_set_digest(cpath->pvt_crypto.f_digest, cell);
+}
+
+/************ cpath sendme API ***************************/
+
+/** Return the sendme_digest of this <b>cpath</b>. */
+uint8_t *
+cpath_get_sendme_digest(crypt_path_t *cpath)
+{
+ return relay_crypto_get_sendme_digest(&cpath->pvt_crypto);
+}
+
+/** Record the cell digest (the forward digest if <b>is_foward_digest</b> is
+ * true, the backward digest otherwise) as the SENDME cell digest. */
+void
+cpath_sendme_record_cell_digest(crypt_path_t *cpath, bool is_foward_digest)
+{
+ tor_assert(cpath);
+ relay_crypto_record_sendme_digest(&cpath->pvt_crypto, is_foward_digest);
+}
+
+/************ other cpath functions ***************************/
+
+/** Return the first non-open hop in cpath, or return NULL if all
+ * hops are open. */
+crypt_path_t *
+cpath_get_next_non_open_hop(crypt_path_t *cpath)
+{
+ crypt_path_t *hop = cpath;
+ do {
+ if (hop->state != CPATH_STATE_OPEN)
+ return hop;
+ hop = hop->next;
+ } while (hop != cpath);
+ return NULL;
+}
+
+#ifdef TOR_UNIT_TESTS
+
+/** Unittest helper function: Count number of hops in cpath linked list. */
+unsigned int
+cpath_get_n_hops(crypt_path_t **head_ptr)
+{
+ unsigned int n_hops = 0;
+ crypt_path_t *tmp;
+
+ if (!*head_ptr) {
+ return 0;
+ }
+
+ tmp = *head_ptr;
+ do {
+ n_hops++;
+ tmp = tmp->next;
+ } while (tmp != *head_ptr);
+
+ return n_hops;
+}
+
+#endif /* defined(TOR_UNIT_TESTS) */
+
diff --git a/src/core/or/crypt_path.h b/src/core/or/crypt_path.h
new file mode 100644
index 0000000000..7a95fec2b4
--- /dev/null
+++ b/src/core/or/crypt_path.h
@@ -0,0 +1,46 @@
+/**
+ * \file crypt_path.h
+ * \brief Header file for crypt_path.c.
+ **/
+
+#ifndef CRYPT_PATH_H
+#define CRYPT_PATH_H
+
+void cpath_assert_layer_ok(const crypt_path_t *cp);
+
+void cpath_assert_ok(const crypt_path_t *cp);
+
+int cpath_append_hop(crypt_path_t **head_ptr, extend_info_t *choice);
+
+int cpath_init_circuit_crypto(crypt_path_t *cpath,
+ const char *key_data, size_t key_data_len,
+ int reverse, int is_hs_v3);
+
+void
+cpath_free(crypt_path_t *victim);
+
+void cpath_extend_linked_list(crypt_path_t **head_ptr, crypt_path_t *new_hop);
+
+void
+cpath_crypt_cell(const crypt_path_t *cpath, uint8_t *payload, bool is_decrypt);
+
+struct crypto_digest_t *
+cpath_get_incoming_digest(const crypt_path_t *cpath);
+
+void cpath_sendme_record_cell_digest(crypt_path_t *cpath,
+ bool is_foward_digest);
+
+void
+cpath_set_cell_forward_digest(crypt_path_t *cpath, cell_t *cell);
+
+crypt_path_t *cpath_get_next_non_open_hop(crypt_path_t *cpath);
+
+void cpath_sendme_circuit_record_inbound_cell(crypt_path_t *cpath);
+
+uint8_t *cpath_get_sendme_digest(crypt_path_t *cpath);
+
+#if defined(TOR_UNIT_TESTS)
+unsigned int cpath_get_n_hops(crypt_path_t **head_ptr);
+#endif /* defined(TOR_UNIT_TESTS) */
+
+#endif /* !defined(CRYPT_PATH_H) */
diff --git a/src/core/or/crypt_path_reference_st.h b/src/core/or/crypt_path_reference_st.h
index 3d79f26c1c..71f9cb8c36 100644
--- a/src/core/or/crypt_path_reference_st.h
+++ b/src/core/or/crypt_path_reference_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file crypt_path_reference_st.h
+ * @brief reference-counting structure for crypt_path_t
+ **/
+
#ifndef CRYPT_PATH_REFERENCE_ST_H
#define CRYPT_PATH_REFERENCE_ST_H
@@ -19,5 +24,4 @@ struct crypt_path_reference_t {
crypt_path_t *cpath;
};
-#endif
-
+#endif /* !defined(CRYPT_PATH_REFERENCE_ST_H) */
diff --git a/src/core/or/crypt_path_st.h b/src/core/or/crypt_path_st.h
index 429480f8ab..2b69728a6d 100644
--- a/src/core/or/crypt_path_st.h
+++ b/src/core/or/crypt_path_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file crypt_path_st.h
+ * @brief Path structures for origin circuits.
+ **/
+
#ifndef CRYPT_PATH_ST_H
#define CRYPT_PATH_ST_H
@@ -24,15 +29,24 @@ struct onion_handshake_state_t {
} u;
};
+/** Macro to encapsulate private members of a struct.
+ *
+ * Renames 'x' to 'x_crypt_path_private_field'.
+ */
+#define CRYPT_PATH_PRIV_FIELD(x) x ## _crypt_path_private_field
+
+#ifdef CRYPT_PATH_PRIVATE
+
+/* Helper macro to access private members of a struct. */
+#define pvt_crypto CRYPT_PATH_PRIV_FIELD(crypto)
+
+#endif /* defined(CRYPT_PATH_PRIVATE) */
+
/** Holds accounting information for a single step in the layered encryption
* performed by a circuit. Used only at the client edge of a circuit. */
struct crypt_path_t {
uint32_t magic;
- /** Cryptographic state used for encrypting and authenticating relay
- * cells to and from this hop. */
- relay_crypto_t crypto;
-
/** Current state of the handshake as performed with the OR at this
* step. */
onion_handshake_state_t handshake_state;
@@ -65,6 +79,12 @@ struct crypt_path_t {
* at this step? */
int deliver_window; /**< How many cells are we willing to deliver originating
* at this step? */
+
+ /*********************** Private members ****************************/
+
+ /** Private member: Cryptographic state used for encrypting and
+ * authenticating relay cells to and from this hop. */
+ relay_crypto_t CRYPT_PATH_PRIV_FIELD(crypto);
};
-#endif
+#endif /* !defined(CRYPT_PATH_ST_H) */
diff --git a/src/core/or/dataflow.md b/src/core/or/dataflow.md
new file mode 100644
index 0000000000..1098701780
--- /dev/null
+++ b/src/core/or/dataflow.md
@@ -0,0 +1,236 @@
+@tableofcontents
+
+@page dataflow Data flow in the Tor process
+
+We read bytes from the network, we write bytes to the network. For the
+most part, the bytes we write correspond roughly to bytes we have read,
+with bits of cryptography added in.
+
+The rest is a matter of details.
+
+### Connections and buffers: reading, writing, and interpreting.
+
+At a low level, Tor's networking code is based on "connections". Each
+connection represents an object that can send or receive network-like
+events. For the most part, each connection has a single underlying TCP
+stream (I'll discuss counterexamples below).
+
+A connection that behaves like a TCP stream has an input buffer and an
+output buffer. Incoming data is
+written into the input buffer ("inbuf"); data to be written to the
+network is queued on an output buffer ("outbuf").
+
+Buffers are implemented in buffers.c. Each of these buffers is
+implemented as a linked queue of memory extents, in the style of classic
+BSD mbufs, or Linux skbufs.
+
+A connection's reading and writing can be enabled or disabled. Under
+the hood, this functionality is implemented using libevent events: one
+for reading, one for writing. These events are turned on/off in
+mainloop.c, in the functions connection_{start,stop}_{reading,writing}.
+
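+As a hedged sketch of how that toggling is typically used (the wrapper and
+the threshold below are made up for illustration; only
+`connection_get_inbuf_len()`, `connection_stop_reading()`, and
+`connection_start_reading()` are existing helpers):
+
+```c
+#include "core/or/or.h"
+#include "core/mainloop/connection.h"
+#include "core/mainloop/mainloop.h"
+
+/* Illustrative only: pause reading when the inbuf is "too full", and
+ * resume once it has drained. The real checks in connection.c are more
+ * involved and type-specific. */
+static void
+example_throttle_reading(connection_t *conn)
+{
+  const size_t EXAMPLE_MAX_INBUF = 32 * 1024; /* hypothetical threshold */
+
+  if (connection_get_inbuf_len(conn) >= EXAMPLE_MAX_INBUF)
+    connection_stop_reading(conn);
+  else
+    connection_start_reading(conn);
+}
+```
+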
+When a read or write event is turned on, the main libevent loop polls
+the kernel, asking which sockets are ready to read or write. (This
+polling happens in the event_base_loop() call in run_main_loop_once()
+in mainloop.c.) When libevent finds a socket that's ready to read or write,
+it invokes conn_{read,write}_callback(), also in mainloop.c.
+
+These callback functions delegate to connection_handle_read() and
+connection_handle_write() in connection.c, which read or write on the
+network as appropriate, possibly delegating to openssl.
+
+After data is read or written, or some other event occurs, these
+connection_handle_*() functions call logic functions whose job is
+to respond to the information. Some examples include:
+
+ * connection_flushed_some() -- called after a connection writes any
+ amount of data from its outbuf.
+ * connection_finished_flushing() -- called when a connection has
+ emptied its outbuf.
+ * connection_finished_connecting() -- called when an in-progress connection
+ finishes making a remote connection.
+ * connection_reached_eof() -- called after receiving a FIN from the
+ remote server.
+ * connection_process_inbuf() -- called when more data arrives on
+ the inbuf.
+
+These functions then call into specific implementations depending on
+the type of the connection. For example, if the connection is an
+edge_connection_t, connection_reached_eof() will call
+connection_edge_reached_eof().
+
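+As a hedged sketch (the wrapper below is hypothetical; the real dispatcher in
+connection.c covers every connection type), the EOF hook might route to a
+type-specific handler like this:
+
+```c
+#include "core/or/or.h"
+#include "core/mainloop/connection.h"
+#include "core/or/connection_edge.h"
+
+/* Illustrative only: route an EOF event to the handler for the
+ * connection's type. */
+static int
+example_reached_eof(connection_t *conn)
+{
+  switch (conn->type) {
+    case CONN_TYPE_AP:
+    case CONN_TYPE_EXIT:
+      /* Edge connections get the edge-specific EOF handler. */
+      return connection_edge_reached_eof(TO_EDGE_CONN(conn));
+    default:
+      /* In this sketch, every other type is simply marked for close. */
+      connection_mark_for_close(conn);
+      return 0;
+  }
+}
+```
+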
+> **Note:** "Also there are bufferevents!" We have vestigial
+> code for an alternative low-level networking
+> implementation, based on Libevent's evbuffer and bufferevent
+> code. These two object types take on (most of) the roles of
+> buffers and connections respectively. It isn't working in today's
+> Tor, due to code rot and possible lingering libevent bugs. More
+> work is needed; it would be good to get this working efficiently
+> again, to have IOCP support on Windows.
+
+
+#### Controlling connections ####
+
+A connection can have reading or writing enabled or disabled for a
+wide variety of reasons, including:
+
+ * Writing is disabled when there is no more data to write
+ * For some connection types, reading is disabled when the inbuf is
+ too full.
+ * Reading/writing is temporarily disabled on connections that have
+   recently read/written enough data to reach their bandwidth limits.
+ * Reading is disabled on connections when reading more data from them
+ would require that data to be buffered somewhere else that is
+ already full.
+
+Currently, these conditions are checked in a diffuse set of
+increasingly complex conditional expressions. In the future, it could
+be helpful to transition to a unified model for handling temporary
+read/write suspensions.
+
+#### Kinds of connections ####
+
+Today Tor has the following connection and pseudoconnection types.
+For the most part, each type of connection has an associated C module
+that implements its underlying logic.
+
+**Edge connections** receive data from and deliver data to points
+outside the onion routing network. See `connection_edge.c`. They fall into two types:
+
+**Entry connections** are a type of edge connection. They receive data
+from the user running a Tor client, and deliver data to that user.
+They are used to implement SOCKSPort, TransPort, NATDPort, and so on.
+Sometimes they are called "AP" connections for historical reasons (it
+used to stand for "Application Proxy").
+
+**Exit connections** are a type of edge connection. They exist at an
+exit node, and transmit traffic to and from the network.
+
+(Entry connections and exit connections are also used as placeholders
+when performing a remote DNS request; they are not decoupled from the
+notion of "stream" in the Tor protocol. This is implemented partially
+in `connection_edge.c`, and partially in `dnsserv.c` and `dns.c`.)
+
+**OR connections** send and receive Tor cells over TLS, using some
+version of the Tor link protocol. Their implementation is spread
+across `connection_or.c`, with a bit of logic in `command.c`,
+`relay.c`, and `channeltls.c`.
+
+**Extended OR connections** are a type of OR connection for use on
+bridges using pluggable transports, so that the PT can tell the bridge
+some information about the incoming connection before passing on its
+data. They are implemented in `ext_orport.c`.
+
+**Directory connections** are server-side or client-side connections
+that implement Tor's HTTP-based directory protocol. These are
+instantiated using a socket when Tor is making an unencrypted HTTP
+connection. When Tor is tunneling a directory request over a Tor
+circuit, directory connections are implemented using a linked
+connection pair (see below). Directory connections are implemented in
+`directory.c`; some of the server-side logic is implemented in
+`dirserver.c`.
+
+**Controller connections** are local connections to a controller
+process implementing the controller protocol from
+control-spec.txt. These are in `control.c`.
+
+**Listener connections** are not stream oriented! Rather, they wrap a
+listening socket in order to detect new incoming connections. They
+bypass most of stream logic. They don't have associated buffers.
+They are implemented in `connection.c`.
+
+![structure hierarchy for connection types](./diagrams/02/02-connection-types.png "structure hierarchy for connection types")
+
+>**Note**: "History Time!" You might occasionally find reference to a couple types of connections
+> which no longer exist in modern Tor. A *CPUWorker connection*
+>connected the main Tor process to a thread or process used for
+>computation. (Nowadays we use in-process communication.) Even more
+>anciently, a *DNSWorker connection* connected the main tor process to
+>a separate thread or process used for running `gethostbyname()` or
+>`getaddrinfo()`. (Nowadays we use Libevent's evdns facility to
+>perform DNS requests asynchronously.)
+
+#### Linked connections ####
+
+Sometimes two connections are joined together, such that data which the
+Tor process sends on one should immediately be received by the same
+Tor process on the other. (For example, when Tor makes a tunneled
+directory connection, this is implemented on the client side as a
+directory connection whose output goes, not to the network, but to a
+local entry connection. And when a directory receives a tunnelled
+directory connection, this is implemented as an exit connection whose
+output goes, not to the network, but to a local directory connection.)
+
+The earliest versions of Tor to support linked connections used
+socketpairs for the purpose. But using socketpairs forced us to copy
+data through kernelspace, and wasted limited file descriptors. So
+instead, a pair of connections can be linked in-process. Each linked
+connection has a pointer to the other, such that data written on one
+is immediately readable on the other, and vice versa.
+
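+A hedged sketch of the bookkeeping involved (the helper below is
+hypothetical, but the `linked` and `linked_conn` fields match
+`connection_st.h`; the real logic in connection.c also adjusts reading and
+writing on both sides):
+
+```c
+#include "core/or/or.h"
+#include "core/or/connection_st.h"
+
+/* Illustrative only: mark two connections as a linked pair, so that data
+ * written by one is delivered directly to the other without touching the
+ * network. */
+static void
+example_link_pair(connection_t *a, connection_t *b)
+{
+  a->linked = 1;
+  b->linked = 1;
+  a->linked_conn = b;
+  b->linked_conn = a;
+}
+```
+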
+### From connections to channels ###
+
+There's an abstraction layer above OR connections (the ones that
+handle cells) and below circuits called **Channels**. A channel's
+purpose is to transmit authenticated cells from one Tor instance
+(relay or client) to another.
+
+Currently, only one implementation exists: Channel_tls, which sends
+and receives cells over a TLS-based OR connection.
+
+Cells are sent on a channel using
+`channel_write_{,packed_,var_}cell()`. Incoming cells arrive on a
+channel from its backend using `channel_queue*_cell()`, and are
+immediately processed using `channel_process_cells()`.
+
+Some cell types are handled below the channel layer, such as those
+that affect handshaking only. And some others are passed up to the
+generic cross-channel code in `command.c`: cells like `DESTROY` and
+`CREATED` are all trivial to handle. But relay cells
+require special handling...
+
+### From channels through circuits ###
+
+When a relay cell arrives on an existing circuit, it is handled in
+`circuit_receive_relay_cell()` -- one of the innermost functions in
+Tor. This function encrypts or decrypts the relay cell as
+appropriate, and decides whether the cell is intended for the current
+hop of the circuit.
+
+If the cell *is* intended for the current hop, we pass it to
+`connection_edge_process_relay_cell()` in `relay.c`, which acts on it
+based on its relay command, and (possibly) queues its data on an
+`edge_connection_t`.
+
+If the cell *is not* intended for the current hop, we queue it for the
+next channel in sequence with `append_cell_to_circuit_queue()`. This
+places the cell on a per-circuit queue for cells headed out on that
+particular channel.
+
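+A hedged sketch of that decision, written as if it sat inside `relay.c` next
+to the functions it calls (the wrapper is hypothetical, handles only the
+outbound direction, and omits error handling and layer bookkeeping):
+
+```c
+/* Illustrative only: once an outbound relay cell has been decrypted one
+ * layer at a relay, either act on it here or queue it toward the next
+ * channel on the circuit. */
+static int
+example_route_outbound_relay_cell(circuit_t *circ, cell_t *cell,
+                                  int recognized)
+{
+  if (recognized) {
+    /* This hop is the cell's destination: act on its relay command. */
+    return connection_edge_process_relay_cell(cell, circ, NULL, NULL);
+  }
+  /* Not for us: queue it on the circuit, headed toward the next channel. */
+  append_cell_to_circuit_queue(circ, circ->n_chan, cell,
+                               CELL_DIRECTION_OUT, 0);
+  return 0;
+}
+```
+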
+### Sending cells on circuits: the complicated bit.
+
+Relay cells are queued onto circuits from one of two (main) sources:
+reading data from edge connections, and receiving a cell to be relayed
+on a circuit. Both of these sources place their cells on a cell queue:
+each circuit has one cell queue for each direction that it travels.
+
+A naive implementation would skip using cell queues, and instead write
+each outgoing relay cell directly onto its channel as soon as it was
+generated. (Tor did this in its earlier versions.)
+But such an approach tends to give poor performance, because it allows
+high-volume circuits to clog channels, and it forces the Tor server to
+send data queued on a circuit even after that circuit has been closed.
+
+So by using queues on each circuit, we can add cells to each channel
+on a just-in-time basis, choosing the cell at each moment based on
+a performance-aware algorithm.
+
+This logic is implemented in two main modules: `scheduler.c` and
+`circuitmux*.c`. The scheduler code is responsible for determining
+globally, across all channels that could write cells, which one should
+next receive queued cells. The circuitmux code determines, for all
+of the circuits with queued cells for a channel, which one should
+supply the next cell.
+
+(This logic applies to outgoing relay cells only; incoming relay cells
+are processed as they arrive.)
+
diff --git a/src/core/or/destroy_cell_queue_st.h b/src/core/or/destroy_cell_queue_st.h
index 56630670ba..aa28289be5 100644
--- a/src/core/or/destroy_cell_queue_st.h
+++ b/src/core/or/destroy_cell_queue_st.h
@@ -1,12 +1,19 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file destroy_cell_queue_st.h
+ * @brief Destroy-cell queue structures
+ **/
+
#ifndef DESTROY_CELL_QUEUE_ST_H
#define DESTROY_CELL_QUEUE_ST_H
+#include "core/or/cell_queue_st.h"
+
/** A single queued destroy cell. */
struct destroy_cell_t {
TOR_SIMPLEQ_ENTRY(destroy_cell_t) next;
@@ -19,9 +26,8 @@ struct destroy_cell_t {
/** A queue of destroy cells on a channel. */
struct destroy_cell_queue_t {
/** Linked list of packed_cell_t */
- TOR_SIMPLEQ_HEAD(dcell_simpleq, destroy_cell_t) head;
+ TOR_SIMPLEQ_HEAD(dcell_simpleq_t, destroy_cell_t) head;
int n; /**< The number of cells in the queue. */
};
-#endif
-
+#endif /* !defined(DESTROY_CELL_QUEUE_ST_H) */
diff --git a/src/core/or/dos.c b/src/core/or/dos.c
index d06eaa6d05..5f99280030 100644
--- a/src/core/or/dos.c
+++ b/src/core/or/dos.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019, The Tor Project, Inc. */
+/* Copyright (c) 2018-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/*
@@ -15,6 +15,7 @@
#include "core/or/channel.h"
#include "core/or/connection_or.h"
#include "core/or/relay.h"
+#include "feature/hs/hs_dos.h"
#include "feature/nodelist/networkstatus.h"
#include "feature/nodelist/nodelist.h"
#include "feature/relay/routermode.h"
@@ -629,6 +630,7 @@ dos_log_heartbeat(void)
char *cc_msg = NULL;
char *single_hop_client_msg = NULL;
char *circ_stats_msg = NULL;
+ char *hs_dos_intro2_msg = NULL;
/* Stats number coming from relay.c append_cell_to_circuit_queue(). */
tor_asprintf(&circ_stats_msg,
@@ -654,17 +656,24 @@ dos_log_heartbeat(void)
num_single_hop_client_refused);
}
+ /* HS DoS stats. */
+ tor_asprintf(&hs_dos_intro2_msg,
+ " %" PRIu64 " INTRODUCE2 rejected.",
+ hs_dos_get_intro2_rejected_count());
+
log_notice(LD_HEARTBEAT,
- "DoS mitigation since startup:%s%s%s%s",
+ "DoS mitigation since startup:%s%s%s%s%s",
circ_stats_msg,
(cc_msg != NULL) ? cc_msg : " [cc not enabled]",
(conn_msg != NULL) ? conn_msg : " [conn not enabled]",
- (single_hop_client_msg != NULL) ? single_hop_client_msg : "");
+ (single_hop_client_msg != NULL) ? single_hop_client_msg : "",
+ (hs_dos_intro2_msg != NULL) ? hs_dos_intro2_msg : "");
tor_free(conn_msg);
tor_free(cc_msg);
tor_free(single_hop_client_msg);
tor_free(circ_stats_msg);
+ tor_free(hs_dos_intro2_msg);
return;
}
diff --git a/src/core/or/dos.h b/src/core/or/dos.h
index 058b7afce6..b3eca058b8 100644
--- a/src/core/or/dos.h
+++ b/src/core/or/dos.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019, The Tor Project, Inc. */
+/* Copyright (c) 2018-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/*
@@ -135,7 +135,7 @@ MOCK_DECL(STATIC unsigned int, get_param_cc_enabled,
MOCK_DECL(STATIC unsigned int, get_param_conn_enabled,
(const networkstatus_t *ns));
-#endif /* TOR_DOS_PRIVATE */
+#endif /* defined(DOS_PRIVATE) */
-#endif /* TOR_DOS_H */
+#endif /* !defined(TOR_DOS_H) */
diff --git a/src/core/or/edge_connection_st.h b/src/core/or/edge_connection_st.h
index 1665b8589f..9b2f031b9d 100644
--- a/src/core/or/edge_connection_st.h
+++ b/src/core/or/edge_connection_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file edge_connection_st.h
+ * @brief Edge-connection structure.
+ **/
+
#ifndef EDGE_CONNECTION_ST_H
#define EDGE_CONNECTION_ST_H
@@ -73,5 +78,4 @@ struct edge_connection_t {
uint64_t dirreq_id;
};
-#endif
-
+#endif /* !defined(EDGE_CONNECTION_ST_H) */
diff --git a/src/core/or/entry_connection_st.h b/src/core/or/entry_connection_st.h
index 45621fadbf..495ffd85dd 100644
--- a/src/core/or/entry_connection_st.h
+++ b/src/core/or/entry_connection_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file entry_connection_st.h
+ * @brief Entry connection structure.
+ **/
+
#ifndef ENTRY_CONNECTION_ST_H
#define ENTRY_CONNECTION_ST_H
@@ -96,5 +101,4 @@ struct entry_connection_t {
/** Cast a entry_connection_t subtype pointer to a edge_connection_t **/
#define ENTRY_TO_EDGE_CONN(c) (&(((c))->edge_))
-#endif
-
+#endif /* !defined(ENTRY_CONNECTION_ST_H) */
diff --git a/src/core/or/entry_port_cfg_st.h b/src/core/or/entry_port_cfg_st.h
index 87dfb331e5..ef1095086d 100644
--- a/src/core/or/entry_port_cfg_st.h
+++ b/src/core/or/entry_port_cfg_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file entry_port_cfg_st.h
+ * @brief Configuration structure for client ports.
+ **/
+
#ifndef ENTRY_PORT_CFG_ST_H
#define ENTRY_PORT_CFG_ST_H
@@ -48,7 +53,9 @@ struct entry_port_cfg_t {
* do we prefer IPv6? */
unsigned int prefer_ipv6_virtaddr : 1;
-};
+ /** For socks listeners: can we send back the extended SOCKS5 error code? */
+ unsigned int extended_socks5_codes : 1;
-#endif
+};
+#endif /* !defined(ENTRY_PORT_CFG_ST_H) */
diff --git a/src/core/or/extend_info_st.h b/src/core/or/extend_info_st.h
index bc7a77b1b2..a66ce24cfa 100644
--- a/src/core/or/extend_info_st.h
+++ b/src/core/or/extend_info_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file extend_info_st.h
+ * @brief Extend-info structure.
+ **/
+
#ifndef EXTEND_INFO_ST_H
#define EXTEND_INFO_ST_H
@@ -27,4 +32,4 @@ struct extend_info_t {
curve25519_public_key_t curve25519_onion_key;
};
-#endif
+#endif /* !defined(EXTEND_INFO_ST_H) */
diff --git a/src/core/or/half_edge_st.h b/src/core/or/half_edge_st.h
index d4617be108..c6b6e518fc 100644
--- a/src/core/or/half_edge_st.h
+++ b/src/core/or/half_edge_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file half_edge_st.h
+ * @brief Half-open connection structure.
+ **/
+
#ifndef HALF_EDGE_ST_H
#define HALF_EDGE_ST_H
@@ -30,5 +35,4 @@ typedef struct half_edge_t {
int connected_pending : 1;
} half_edge_t;
-#endif
-
+#endif /* !defined(HALF_EDGE_ST_H) */
diff --git a/src/core/or/include.am b/src/core/or/include.am
new file mode 100644
index 0000000000..3626e76bed
--- /dev/null
+++ b/src/core/or/include.am
@@ -0,0 +1,96 @@
+
+# ADD_C_FILE: INSERT SOURCES HERE.
+LIBTOR_APP_A_SOURCES += \
+ src/core/or/address_set.c \
+ src/core/or/channel.c \
+ src/core/or/channelpadding.c \
+ src/core/or/channeltls.c \
+ src/core/or/circuitbuild.c \
+ src/core/or/circuitlist.c \
+ src/core/or/circuitmux.c \
+ src/core/or/circuitmux_ewma.c \
+ src/core/or/circuitpadding.c \
+ src/core/or/circuitpadding_machines.c \
+ src/core/or/circuitstats.c \
+ src/core/or/circuituse.c \
+ src/core/or/crypt_path.c \
+ src/core/or/command.c \
+ src/core/or/connection_edge.c \
+ src/core/or/connection_or.c \
+ src/core/or/dos.c \
+ src/core/or/onion.c \
+ src/core/or/ocirc_event.c \
+ src/core/or/or_periodic.c \
+ src/core/or/or_sys.c \
+ src/core/or/orconn_event.c \
+ src/core/or/policies.c \
+ src/core/or/protover.c \
+ src/core/or/protover_rust.c \
+ src/core/or/reasons.c \
+ src/core/or/relay.c \
+ src/core/or/scheduler.c \
+ src/core/or/scheduler_kist.c \
+ src/core/or/scheduler_vanilla.c \
+ src/core/or/sendme.c \
+ src/core/or/status.c \
+ src/core/or/versions.c
+
+# ADD_C_FILE: INSERT HEADERS HERE.
+noinst_HEADERS += \
+ src/core/or/addr_policy_st.h \
+ src/core/or/address_set.h \
+ src/core/or/cell_queue_st.h \
+ src/core/or/cell_st.h \
+ src/core/or/channel.h \
+ src/core/or/channelpadding.h \
+ src/core/or/channeltls.h \
+ src/core/or/circuit_st.h \
+ src/core/or/circuitbuild.h \
+ src/core/or/circuitlist.h \
+ src/core/or/circuitmux.h \
+ src/core/or/circuitmux_ewma.h \
+ src/core/or/circuitstats.h \
+ src/core/or/circuitpadding.h \
+ src/core/or/circuitpadding_machines.h \
+ src/core/or/circuituse.h \
+ src/core/or/command.h \
+ src/core/or/connection_edge.h \
+ src/core/or/connection_or.h \
+ src/core/or/connection_st.h \
+ src/core/or/crypt_path.h \
+ src/core/or/cpath_build_state_st.h \
+ src/core/or/crypt_path_reference_st.h \
+ src/core/or/crypt_path_st.h \
+ src/core/or/destroy_cell_queue_st.h \
+ src/core/or/dos.h \
+ src/core/or/edge_connection_st.h \
+ src/core/or/half_edge_st.h \
+ src/core/or/entry_connection_st.h \
+ src/core/or/entry_port_cfg_st.h \
+ src/core/or/extend_info_st.h \
+ src/core/or/listener_connection_st.h \
+ src/core/or/onion.h \
+ src/core/or/or.h \
+ src/core/or/or_periodic.h \
+ src/core/or/or_sys.h \
+ src/core/or/orconn_event.h \
+ src/core/or/or_circuit_st.h \
+ src/core/or/or_connection_st.h \
+ src/core/or/or_handshake_certs_st.h \
+ src/core/or/or_handshake_state_st.h \
+ src/core/or/ocirc_event.h \
+ src/core/or/origin_circuit_st.h \
+ src/core/or/policies.h \
+ src/core/or/port_cfg_st.h \
+ src/core/or/protover.h \
+ src/core/or/reasons.h \
+ src/core/or/relay.h \
+ src/core/or/relay_crypto_st.h \
+ src/core/or/scheduler.h \
+ src/core/or/sendme.h \
+ src/core/or/server_port_cfg_st.h \
+ src/core/or/socks_request_st.h \
+ src/core/or/status.h \
+ src/core/or/tor_version_st.h \
+ src/core/or/var_cell_st.h \
+ src/core/or/versions.h
diff --git a/src/core/or/listener_connection_st.h b/src/core/or/listener_connection_st.h
index 8989a39dc8..78175ea061 100644
--- a/src/core/or/listener_connection_st.h
+++ b/src/core/or/listener_connection_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file listener_connection_st.h
+ * @brief Listener connection structure.
+ **/
+
#ifndef LISTENER_CONNECTION_ST_H
#define LISTENER_CONNECTION_ST_H
@@ -21,5 +26,4 @@ struct listener_connection_t {
};
-#endif
-
+#endif /* !defined(LISTENER_CONNECTION_ST_H) */
diff --git a/src/core/or/ocirc_event.c b/src/core/or/ocirc_event.c
new file mode 100644
index 0000000000..fa16459175
--- /dev/null
+++ b/src/core/or/ocirc_event.c
@@ -0,0 +1,121 @@
+/* Copyright (c) 2007-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file ocirc_event.c
+ * \brief Publish state change messages for origin circuits
+ *
+ * Implements a basic publish-subscribe framework for messages about
+ * the state of origin circuits. The publisher calls the subscriber
+ * callback functions synchronously.
+ *
+ * Although the synchronous calls might not simplify the call graph,
+ * this approach improves data isolation because the publisher doesn't
+ * need knowledge about the internals of subscribing subsystems. It
+ * also avoids race conditions that might occur in asynchronous
+ * frameworks.
+ **/
+
+#include "core/or/or.h"
+
+#define OCIRC_EVENT_PRIVATE
+
+#include "core/or/cpath_build_state_st.h"
+#include "core/or/ocirc_event.h"
+#include "core/or/or_sys.h"
+#include "core/or/origin_circuit_st.h"
+#include "lib/subsys/subsys.h"
+
+DECLARE_PUBLISH(ocirc_state);
+DECLARE_PUBLISH(ocirc_chan);
+DECLARE_PUBLISH(ocirc_cevent);
+
+static void
+ocirc_event_free(msg_aux_data_t u)
+{
+ tor_free_(u.ptr);
+}
+
+static char *
+ocirc_state_fmt(msg_aux_data_t u)
+{
+ ocirc_state_msg_t *msg = (ocirc_state_msg_t *)u.ptr;
+ char *s = NULL;
+
+ tor_asprintf(&s, "<gid=%"PRIu32" state=%d onehop=%d>",
+ msg->gid, msg->state, msg->onehop);
+ return s;
+}
+
+static char *
+ocirc_chan_fmt(msg_aux_data_t u)
+{
+ ocirc_chan_msg_t *msg = (ocirc_chan_msg_t *)u.ptr;
+ char *s = NULL;
+
+ tor_asprintf(&s, "<gid=%"PRIu32" chan=%"PRIu64" onehop=%d>",
+ msg->gid, msg->chan, msg->onehop);
+ return s;
+}
+
+static char *
+ocirc_cevent_fmt(msg_aux_data_t u)
+{
+ ocirc_cevent_msg_t *msg = (ocirc_cevent_msg_t *)u.ptr;
+ char *s = NULL;
+
+ tor_asprintf(&s, "<gid=%"PRIu32" evtype=%d reason=%d onehop=%d>",
+ msg->gid, msg->evtype, msg->reason, msg->onehop);
+ return s;
+}
+
+static dispatch_typefns_t ocirc_state_fns = {
+ .free_fn = ocirc_event_free,
+ .fmt_fn = ocirc_state_fmt,
+};
+
+static dispatch_typefns_t ocirc_chan_fns = {
+ .free_fn = ocirc_event_free,
+ .fmt_fn = ocirc_chan_fmt,
+};
+
+static dispatch_typefns_t ocirc_cevent_fns = {
+ .free_fn = ocirc_event_free,
+ .fmt_fn = ocirc_cevent_fmt,
+};
+
+int
+ocirc_add_pubsub(struct pubsub_connector_t *connector)
+{
+ if (DISPATCH_REGISTER_TYPE(connector, ocirc_state, &ocirc_state_fns))
+ return -1;
+ if (DISPATCH_REGISTER_TYPE(connector, ocirc_chan, &ocirc_chan_fns))
+ return -1;
+ if (DISPATCH_REGISTER_TYPE(connector, ocirc_cevent, &ocirc_cevent_fns))
+ return -1;
+ if (DISPATCH_ADD_PUB(connector, ocirc, ocirc_state))
+ return -1;
+ if (DISPATCH_ADD_PUB(connector, ocirc, ocirc_chan))
+ return -1;
+ if (DISPATCH_ADD_PUB(connector, ocirc, ocirc_cevent))
+ return -1;
+ return 0;
+}
+
+void
+ocirc_state_publish(ocirc_state_msg_t *msg)
+{
+ PUBLISH(ocirc_state, msg);
+}
+
+void
+ocirc_chan_publish(ocirc_chan_msg_t *msg)
+{
+ PUBLISH(ocirc_chan, msg);
+}
+
+void
+ocirc_cevent_publish(ocirc_cevent_msg_t *msg)
+{
+ PUBLISH(ocirc_cevent, msg);
+}
diff --git a/src/core/or/ocirc_event.h b/src/core/or/ocirc_event.h
new file mode 100644
index 0000000000..10307a3664
--- /dev/null
+++ b/src/core/or/ocirc_event.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2007-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file ocirc_event.h
+ * \brief Header file for ocirc_event.c
+ **/
+
+#ifndef TOR_OCIRC_EVENT_H
+#define TOR_OCIRC_EVENT_H
+
+#include <stdbool.h>
+
+#include "lib/cc/torint.h"
+#include "lib/pubsub/pubsub.h"
+
+/** Used to indicate the type of a circuit event passed to the controller.
+ * The various types are defined in control-spec.txt */
+typedef enum circuit_status_event_t {
+ CIRC_EVENT_LAUNCHED = 0,
+ CIRC_EVENT_BUILT = 1,
+ CIRC_EVENT_EXTENDED = 2,
+ CIRC_EVENT_FAILED = 3,
+ CIRC_EVENT_CLOSED = 4,
+} circuit_status_event_t;
+
+/** Message for origin circuit state update */
+typedef struct ocirc_state_msg_t {
+ uint32_t gid; /**< global ID (only origin circuits have them) */
+ int state; /**< new circuit state */
+ bool onehop; /**< one-hop circuit? */
+} ocirc_state_msg_t;
+
+DECLARE_MESSAGE(ocirc_state, ocirc_state, ocirc_state_msg_t *);
+
+/**
+ * Message when a channel gets associated to a circuit.
+ *
+ * This doesn't always correspond to something in circuitbuild.c
+ * setting the n_chan field in the circuit. For some reason, if
+ * circuit_handle_first_hop() launches a new circuit, it doesn't set
+ * the n_chan field.
+ */
+typedef struct ocirc_chan_msg_t {
+ uint32_t gid; /**< global ID */
+ uint64_t chan; /**< channel ID */
+ bool onehop; /**< one-hop circuit? */
+} ocirc_chan_msg_t;
+
+DECLARE_MESSAGE(ocirc_chan, ocirc_chan, ocirc_chan_msg_t *);
+
+/**
+ * Message for origin circuit status event
+ *
+ * This contains information that ends up in CIRC control protocol events.
+ */
+typedef struct ocirc_cevent_msg_t {
+ uint32_t gid; /**< global ID */
+ int evtype; /**< event type */
+ int reason; /**< reason */
+ bool onehop; /**< one-hop circuit? */
+} ocirc_cevent_msg_t;
+
+DECLARE_MESSAGE(ocirc_cevent, ocirc_cevent, ocirc_cevent_msg_t *);
+
+#ifdef OCIRC_EVENT_PRIVATE
+void ocirc_state_publish(ocirc_state_msg_t *msg);
+void ocirc_chan_publish(ocirc_chan_msg_t *msg);
+void ocirc_cevent_publish(ocirc_cevent_msg_t *msg);
+#endif
+
+#endif /* !defined(TOR_OCIRC_EVENT_H) */
diff --git a/src/core/or/onion.c b/src/core/or/onion.c
index aeddedd807..a3b5c6922d 100644
--- a/src/core/or/onion.c
+++ b/src/core/or/onion.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -240,11 +240,21 @@ created_cell_parse(created_cell_t *cell_out, const cell_t *cell_in)
static int
check_extend_cell(const extend_cell_t *cell)
{
+ const bool is_extend2 = (cell->cell_type == RELAY_COMMAND_EXTEND2);
+
if (tor_digest_is_zero((const char*)cell->node_id))
return -1;
- /* We don't currently allow EXTEND2 cells without an IPv4 address */
- if (tor_addr_family(&cell->orport_ipv4.addr) == AF_UNSPEC)
- return -1;
+ if (!tor_addr_port_is_valid_ap(&cell->orport_ipv4, 0)) {
+ /* EXTEND cells must have an IPv4 address. */
+ if (!is_extend2) {
+ return -1;
+ }
+ /* EXTEND2 cells must have at least one IP address.
+ * It can be IPv4 or IPv6. */
+ if (!tor_addr_port_is_valid_ap(&cell->orport_ipv6, 0)) {
+ return -1;
+ }
+ }
if (cell->create_cell.cell_type == CELL_CREATE) {
if (cell->cell_type != RELAY_COMMAND_EXTEND)
return -1;
@@ -343,7 +353,7 @@ extend_cell_from_extend2_cell_body(extend_cell_t *cell_out,
continue;
found_ipv6 = 1;
tor_addr_from_ipv6_bytes(&cell_out->orport_ipv6.addr,
- (const char *)ls->un_ipv6_addr);
+ ls->un_ipv6_addr);
cell_out->orport_ipv6.port = ls->un_ipv6_port;
break;
case LS_LEGACY_ID:
@@ -364,7 +374,12 @@ extend_cell_from_extend2_cell_body(extend_cell_t *cell_out,
}
}
- if (!found_rsa_id || !found_ipv4) /* These are mandatory */
+ /* EXTEND2 cells must have an RSA ID */
+ if (!found_rsa_id)
+ return -1;
+
+ /* EXTEND2 cells must have at least one IP address */
+ if (!found_ipv4 && !found_ipv6)
return -1;
return create_cell_from_create2_cell_body(&cell_out->create_cell,
@@ -374,9 +389,11 @@ extend_cell_from_extend2_cell_body(extend_cell_t *cell_out,
/** Parse an EXTEND or EXTEND2 cell (according to <b>command</b>) from the
* <b>payload_length</b> bytes of <b>payload</b> into <b>cell_out</b>. Return
* 0 on success, -1 on failure. */
-int
-extend_cell_parse(extend_cell_t *cell_out, const uint8_t command,
- const uint8_t *payload, size_t payload_length)
+MOCK_IMPL(int,
+extend_cell_parse,(extend_cell_t *cell_out,
+ const uint8_t command,
+ const uint8_t *payload,
+ size_t payload_length))
{
tor_assert(cell_out);
@@ -618,12 +635,13 @@ extend_cell_format(uint8_t *command_out, uint16_t *len_out,
break;
case RELAY_COMMAND_EXTEND2:
{
- uint8_t n_specifiers = 2;
+ uint8_t n_specifiers = 1;
*command_out = RELAY_COMMAND_EXTEND2;
extend2_cell_body_t *cell = extend2_cell_body_new();
link_specifier_t *ls;
- {
- /* IPv4 specifier first. */
+ if (tor_addr_port_is_valid_ap(&cell_in->orport_ipv4, 0)) {
+ /* Maybe IPv4 specifier first. */
+ ++n_specifiers;
ls = link_specifier_new();
extend2_cell_body_add_ls(cell, ls);
ls->ls_type = LS_IPV4;
@@ -649,6 +667,17 @@ extend_cell_format(uint8_t *command_out, uint16_t *len_out,
ls->ls_len = 32;
memcpy(ls->un_ed25519_id, cell_in->ed_pubkey.pubkey, 32);
}
+ if (tor_addr_port_is_valid_ap(&cell_in->orport_ipv6, 0)) {
+ /* Then maybe IPv6 specifier. */
+ ++n_specifiers;
+ ls = link_specifier_new();
+ extend2_cell_body_add_ls(cell, ls);
+ ls->ls_type = LS_IPV6;
+ ls->ls_len = 18;
+ tor_addr_copy_ipv6_bytes(ls->un_ipv6_addr,
+ &cell_in->orport_ipv6.addr);
+ ls->un_ipv6_port = cell_in->orport_ipv6.port;
+ }
cell->n_spec = n_specifiers;
/* Now, the handshake */
diff --git a/src/core/or/onion.h b/src/core/or/onion.h
index bb0b5b8dfd..256f0a3f31 100644
--- a/src/core/or/onion.h
+++ b/src/core/or/onion.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -56,8 +56,8 @@ typedef struct extend_cell_t {
/** Ed25519 public identity key. Zero if not set. */
struct ed25519_public_key_t ed_pubkey;
/** The "create cell" embedded in this extend cell. Note that unlike the
- * create cells we generate ourself, this once can have a handshake type we
- * don't recognize. */
+ * create cells we generate ourselves, this create cell can have a handshake
+ * type we don't recognize. */
create_cell_t create_cell;
} extend_cell_t;
@@ -74,8 +74,10 @@ void create_cell_init(create_cell_t *cell_out, uint8_t cell_type,
const uint8_t *onionskin);
int create_cell_parse(create_cell_t *cell_out, const cell_t *cell_in);
int created_cell_parse(created_cell_t *cell_out, const cell_t *cell_in);
-int extend_cell_parse(extend_cell_t *cell_out, const uint8_t command,
- const uint8_t *payload_in, size_t payload_len);
+MOCK_DECL(int,extend_cell_parse,(extend_cell_t *cell_out,
+ const uint8_t command,
+ const uint8_t *payload_in,
+ size_t payload_len));
int extended_cell_parse(extended_cell_t *cell_out, const uint8_t command,
const uint8_t *payload_in, size_t payload_len);
diff --git a/src/core/or/or.h b/src/core/or/or.h
index 7c601e49b3..8758a2ec6f 100644
--- a/src/core/or/or.h
+++ b/src/core/or/or.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -24,9 +24,8 @@
#include "lib/arch/bytes.h"
#include "lib/cc/compat_compiler.h"
-#include "lib/cc/torint.h"
#include "lib/container/map.h"
-#include "lib/container/buffers.h"
+#include "lib/buf/buffers.h"
#include "lib/container/smartlist.h"
#include "lib/crypt_ops/crypto_cipher.h"
#include "lib/crypt_ops/crypto_rsa.h"
@@ -97,6 +96,8 @@ struct curve25519_public_key_t;
#define SIGNEWNYM 129
#define SIGCLEARDNSCACHE 130
#define SIGHEARTBEAT 131
+#define SIGACTIVE 132
+#define SIGDORMANT 133
#if (SIZEOF_CELL_T != 0)
/* On Irix, stdlib.h defines a cell_t type, so we need to make sure
@@ -166,12 +167,13 @@ struct curve25519_public_key_t;
#define PROXY_CONNECT 1
#define PROXY_SOCKS4 2
#define PROXY_SOCKS5 3
-/* !!!! If there is ever a PROXY_* type over 3, we must grow the proxy_type
+#define PROXY_HAPROXY 4
+/* !!!! If there is ever a PROXY_* type over 7, we must grow the proxy_type
* field in or_connection_t */
/* Pluggable transport proxy type. Don't use this in or_connection_t,
* instead use the actual underlying proxy type (see above). */
-#define PROXY_PLUGGABLE 4
+#define PROXY_PLUGGABLE 5
/** How many circuits do we want simultaneously in-progress to handle
* a given stream? */
@@ -205,6 +207,9 @@ struct curve25519_public_key_t;
#define RELAY_COMMAND_RENDEZVOUS_ESTABLISHED 39
#define RELAY_COMMAND_INTRODUCE_ACK 40
+#define RELAY_COMMAND_PADDING_NEGOTIATE 41
+#define RELAY_COMMAND_PADDING_NEGOTIATED 42
+
/* Reasons why an OR connection is closed. */
#define END_OR_CONN_REASON_DONE 1
#define END_OR_CONN_REASON_REFUSED 2 /* connection refused */
@@ -215,7 +220,8 @@ struct curve25519_public_key_t;
#define END_OR_CONN_REASON_IO_ERROR 7 /* read/write error */
#define END_OR_CONN_REASON_RESOURCE_LIMIT 8 /* sockets, buffers, etc */
#define END_OR_CONN_REASON_PT_MISSING 9 /* PT failed or not available */
-#define END_OR_CONN_REASON_MISC 10
+#define END_OR_CONN_REASON_TLS_ERROR 10 /* Problem in TLS protocol */
+#define END_OR_CONN_REASON_MISC 11
/* Reasons why we (or a remote OR) might close a stream. See tor-spec.txt for
* documentation of these. The values must match. */
@@ -604,21 +610,21 @@ typedef uint32_t circid_t;
/** Identifies a stream on a circuit */
typedef uint16_t streamid_t;
-/* channel_t typedef; struct channel_s is in channel.h */
+/* channel_t typedef; struct channel_t is in channel.h */
-typedef struct channel_s channel_t;
+typedef struct channel_t channel_t;
-/* channel_listener_t typedef; struct channel_listener_s is in channel.h */
+/* channel_listener_t typedef; struct channel_listener_t is in channel.h */
-typedef struct channel_listener_s channel_listener_t;
+typedef struct channel_listener_t channel_listener_t;
/* TLS channel stuff */
-typedef struct channel_tls_s channel_tls_t;
+typedef struct channel_tls_t channel_tls_t;
-/* circuitmux_t typedef; struct circuitmux_s is in circuitmux.h */
+/* circuitmux_t typedef; struct circuitmux_t is in circuitmux.h */
-typedef struct circuitmux_s circuitmux_t;
+typedef struct circuitmux_t circuitmux_t;
typedef struct cell_t cell_t;
typedef struct var_cell_t var_cell_t;
@@ -834,6 +840,14 @@ typedef struct protover_summary_flags_t {
* service rendezvous point supporting version 3 as seen in proposal 224.
* This requires HSRend=2. */
unsigned int supports_v3_rendezvous_point: 1;
+
+ /** True iff this router has a protocol list that allows clients to
+ * negotiate hs circuit setup padding. Requires Padding>=2. */
+ unsigned int supports_hs_setup_padding : 1;
+
+ /** True iff this router has a protocol list that allows it to support the
+ * ESTABLISH_INTRO DoS cell extension. Requires HSIntro>=5. */
+ unsigned int supports_establish_intro_dos_extension : 1;
} protover_summary_flags_t;
typedef struct routerinfo_t routerinfo_t;
@@ -982,8 +996,6 @@ typedef struct routerset_t routerset_t;
typedef struct or_options_t or_options_t;
-#define LOG_PROTOCOL_WARN (get_protocol_warning_severity_level())
-
typedef struct or_state_t or_state_t;
#define MAX_SOCKS_ADDR_LEN 256
@@ -1000,7 +1012,7 @@ typedef struct or_state_t or_state_t;
#define BW_MIN_WEIGHT_SCALE 1
#define BW_MAX_WEIGHT_SCALE INT32_MAX
-typedef struct circuit_build_times_s circuit_build_times_t;
+typedef struct circuit_build_times_t circuit_build_times_t;
/********************************* config.c ***************************/
diff --git a/src/core/or/or_circuit_st.h b/src/core/or/or_circuit_st.h
index 6b6feb9d89..4e17b1c143 100644
--- a/src/core/or/or_circuit_st.h
+++ b/src/core/or/or_circuit_st.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
#ifndef OR_CIRCUIT_ST_H
@@ -12,6 +12,8 @@
#include "core/or/circuit_st.h"
#include "core/or/crypt_path_st.h"
+#include "lib/evloop/token_bucket.h"
+
struct onion_queue_t;
/** An or_circuit_t holds information needed to implement a circuit at an
@@ -25,7 +27,7 @@ struct or_circuit_t {
/** Pointer to a workqueue entry, if this circuit has given an onionskin to
* a cpuworker and is waiting for a response. Used to decide whether it is
* safe to free a circuit or if it is still in use by a cpuworker. */
- struct workqueue_entry_s *workqueue_entry;
+ struct workqueue_entry_t *workqueue_entry;
/** The circuit_id used in the previous (backward) hop of this circuit. */
circid_t p_circ_id;
@@ -33,11 +35,6 @@ struct or_circuit_t {
cell_queue_t p_chan_cells;
/** The channel that is previous in this circuit. */
channel_t *p_chan;
- /**
- * Circuit mux associated with p_chan to which this circuit is attached;
- * NULL if we have no p_chan.
- */
- circuitmux_t *p_mux;
/** Linked list of Exit streams associated with this circuit. */
edge_connection_t *n_streams;
/** Linked list of Exit streams associated with this circuit that are
@@ -74,7 +71,20 @@ struct or_circuit_t {
* exit-ward queues of this circuit; reset every time when writing
* buffer stats to disk. */
uint64_t total_cell_waiting_time;
+
+ /** If set, the DoS defenses are enabled on this circuit meaning that the
+ * introduce2_bucket is initialized and used. */
+ unsigned int introduce2_dos_defense_enabled : 1;
+ /** If set, the DoS defenses were explicitly enabled through the
+ * ESTABLISH_INTRO cell extension. If unset, the consensus is used to learn
+ * if the defenses can be enabled or not. */
+ unsigned int introduce2_dos_defense_explicit : 1;
+
+ /** INTRODUCE2 cell bucket controlling how much can go on this circuit. Only
+ * used if this is a service introduction circuit at the intro point
+ * (purpose = CIRCUIT_PURPOSE_INTRO_POINT). */
+ token_bucket_ctr_t introduce2_bucket;
};
-#endif
+#endif /* !defined(OR_CIRCUIT_ST_H) */
diff --git a/src/core/or/or_connection_st.h b/src/core/or/or_connection_st.h
index d5db5e8694..92956c2847 100644
--- a/src/core/or/or_connection_st.h
+++ b/src/core/or/or_connection_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file or_connection_st.h
+ * @brief OR connection structure.
+ **/
+
#ifndef OR_CONNECTION_ST_H
#define OR_CONNECTION_ST_H
@@ -58,7 +63,7 @@ struct or_connection_t {
/** True iff this is an outgoing connection. */
unsigned int is_outgoing:1;
- unsigned int proxy_type:2; /**< One of PROXY_NONE...PROXY_SOCKS5 */
+ unsigned int proxy_type:3; /**< One of PROXY_NONE...PROXY_HAPROXY */
unsigned int wide_circ_ids:1;
/** True iff this connection has had its bootstrap failure logged with
* control_event_bootstrap_problem. */
@@ -67,6 +72,8 @@ struct or_connection_t {
* geoip cache and handled by the DoS mitigation subsystem. We use this to
* insure we have a coherent count of concurrent connection. */
unsigned int tracked_for_dos_mitigation : 1;
+ /** True iff this connection is using a pluggable transport */
+ unsigned int is_pt : 1;
uint16_t link_proto; /**< What protocol version are we using? 0 for
* "none negotiated yet." */
@@ -89,4 +96,4 @@ struct or_connection_t {
uint64_t bytes_xmitted, bytes_xmitted_by_tls;
};
-#endif
+#endif /* !defined(OR_CONNECTION_ST_H) */
diff --git a/src/core/or/or_handshake_certs_st.h b/src/core/or/or_handshake_certs_st.h
index a93b7104aa..31755f04a1 100644
--- a/src/core/or/or_handshake_certs_st.h
+++ b/src/core/or/or_handshake_certs_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file or_handshake_certs_st.h
+ * @brief OR handshake certs structure
+ **/
+
#ifndef OR_HANDSHAKE_CERTS_ST
#define OR_HANDSHAKE_CERTS_ST
@@ -37,4 +42,4 @@ struct or_handshake_certs_t {
size_t ed_rsa_crosscert_len;
};
-#endif
+#endif /* !defined(OR_HANDSHAKE_CERTS_ST) */
diff --git a/src/core/or/or_handshake_state_st.h b/src/core/or/or_handshake_state_st.h
index 09a8a34179..050404046d 100644
--- a/src/core/or/or_handshake_state_st.h
+++ b/src/core/or/or_handshake_state_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file or_handshake_state_st.h
+ * @brief OR handshake state structure
+ **/
+
#ifndef OR_HANDSHAKE_STATE_ST
#define OR_HANDSHAKE_STATE_ST
@@ -74,5 +79,4 @@ struct or_handshake_state_t {
or_handshake_certs_t *certs;
};
-#endif
-
+#endif /* !defined(OR_HANDSHAKE_STATE_ST) */
diff --git a/src/core/or/or_periodic.c b/src/core/or/or_periodic.c
new file mode 100644
index 0000000000..4dfdce14ca
--- /dev/null
+++ b/src/core/or/or_periodic.c
@@ -0,0 +1,67 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * @file or_periodic.c
+ * @brief Periodic callbacks for the onion routing subsystem
+ **/
+
+#include "orconfig.h"
+#include "core/or/or.h"
+
+#include "core/mainloop/periodic.h"
+
+#include "core/or/channel.h"
+#include "core/or/circuituse.h"
+#include "core/or/or_periodic.h"
+
+#include "feature/relay/routermode.h"
+
+#ifndef COCCI
+#define DECLARE_EVENT(name, roles, flags) \
+ static periodic_event_item_t name ## _event = \
+ PERIODIC_EVENT(name, \
+ PERIODIC_EVENT_ROLE_##roles, \
+ flags)
+#endif /* !defined(COCCI) */
+
+#define FL(name) (PERIODIC_EVENT_FLAG_ ## name)
+
+#define CHANNEL_CHECK_INTERVAL (60*60)
+static int
+check_canonical_channels_callback(time_t now, const or_options_t *options)
+{
+ (void)now;
+ if (public_server_mode(options))
+ channel_check_for_duplicates();
+
+ return CHANNEL_CHECK_INTERVAL;
+}
+
+DECLARE_EVENT(check_canonical_channels, RELAY, FL(NEED_NET));
+
+/**
+ * Periodic callback: as a server, see if we have any old unused circuits
+ * that should be expired */
+static int
+expire_old_circuits_serverside_callback(time_t now,
+ const or_options_t *options)
+{
+ (void)options;
+ /* every 11 seconds, so not usually the same second as other such events */
+ circuit_expire_old_circuits_serverside(now);
+ return 11;
+}
+
+DECLARE_EVENT(expire_old_circuits_serverside, ROUTER, FL(NEED_NET));
+
+void
+or_register_periodic_events(void)
+{
+ /* These are router-only events, but they're owned by the OR subsystem. */
+ periodic_events_register(&check_canonical_channels_event);
+ periodic_events_register(&expire_old_circuits_serverside_event);
+}
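The two callbacks above follow a fixed pattern: a static function taking (time_t now, const or_options_t *options) and returning the number of seconds until its next run, a DECLARE_EVENT() line naming its role and flags, and a periodic_events_register() call. As a minimal sketch (the callback name, interval, and body below are hypothetical and not part of this change), another OR-subsystem callback would look like:

/* Hypothetical example only -- not part of this patch. */
#define EXAMPLE_CHECK_INTERVAL (10*60)
static int
example_check_callback(time_t now, const or_options_t *options)
{
  (void)now;
  (void)options;
  /* ...do some periodic OR-subsystem housekeeping... */
  return EXAMPLE_CHECK_INTERVAL; /* seconds until the next invocation */
}
DECLARE_EVENT(example_check, RELAY, FL(NEED_NET));
/* ...and register &example_check_event in or_register_periodic_events(). */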
diff --git a/src/core/or/or_periodic.h b/src/core/or/or_periodic.h
new file mode 100644
index 0000000000..080573a838
--- /dev/null
+++ b/src/core/or/or_periodic.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * @file or_periodic.h
+ * @brief Header for core/or/or_periodic.c
+ **/
+
+#ifndef TOR_CORE_OR_OR_PERIODIC_H
+#define TOR_CORE_OR_OR_PERIODIC_H
+
+void or_register_periodic_events(void);
+
+#endif /* !defined(TOR_CORE_OR_OR_PERIODIC_H) */
diff --git a/src/core/or/or_sys.c b/src/core/or/or_sys.c
new file mode 100644
index 0000000000..73c6087dce
--- /dev/null
+++ b/src/core/or/or_sys.c
@@ -0,0 +1,56 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * @file or_sys.c
+ * @brief Subsystem definitions for OR module.
+ **/
+
+#include "orconfig.h"
+#include "core/or/or.h"
+#include "core/or/or_periodic.h"
+#include "core/or/or_sys.h"
+#include "core/or/policies.h"
+#include "core/or/protover.h"
+#include "core/or/versions.h"
+
+#include "lib/subsys/subsys.h"
+
+static int
+subsys_or_initialize(void)
+{
+ or_register_periodic_events();
+ return 0;
+}
+
+static void
+subsys_or_shutdown(void)
+{
+ protover_free_all();
+ protover_summary_cache_free_all();
+ policies_free_all();
+}
+
+static int
+subsys_or_add_pubsub(struct pubsub_connector_t *connector)
+{
+ int rv = 0;
+ if (orconn_add_pubsub(connector) < 0)
+ rv = -1;
+ if (ocirc_add_pubsub(connector) < 0)
+ rv = -1;
+ return rv;
+}
+
+const struct subsys_fns_t sys_or = {
+ .name = "or",
+ SUBSYS_DECLARE_LOCATION(),
+ .supported = true,
+ .level = 20,
+ .initialize = subsys_or_initialize,
+ .shutdown = subsys_or_shutdown,
+ .add_pubsub = subsys_or_add_pubsub,
+};
diff --git a/src/core/or/or_sys.h b/src/core/or/or_sys.h
new file mode 100644
index 0000000000..7ee56c8682
--- /dev/null
+++ b/src/core/or/or_sys.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * @file or_sys.h
+ * @brief Header for core/or/or_sys.c
+ **/
+
+#ifndef TOR_CORE_OR_OR_SYS_H
+#define TOR_CORE_OR_OR_SYS_H
+
+extern const struct subsys_fns_t sys_or;
+
+struct pubsub_connector_t;
+int ocirc_add_pubsub(struct pubsub_connector_t *connector);
+int orconn_add_pubsub(struct pubsub_connector_t *connector);
+
+#endif /* !defined(TOR_CORE_OR_OR_SYS_H) */
diff --git a/src/core/or/orconn_event.c b/src/core/or/orconn_event.c
new file mode 100644
index 0000000000..c30e2dd22f
--- /dev/null
+++ b/src/core/or/orconn_event.c
@@ -0,0 +1,92 @@
+/* Copyright (c) 2007-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file orconn_event.c
+ * \brief Publish state change messages for OR connections
+ *
+ * Implements a basic publish-subscribe framework for messages about
+ * the state of OR connections. The publisher calls the subscriber
+ * callback functions synchronously.
+ *
+ * Although the synchronous calls might not simplify the call graph,
+ * this approach improves data isolation because the publisher doesn't
+ * need knowledge about the internals of subscribing subsystems. It
+ * also avoids race conditions that might occur in asynchronous
+ * frameworks.
+ **/
+
+#include "core/or/or.h"
+#include "lib/pubsub/pubsub.h"
+#include "lib/subsys/subsys.h"
+
+#define ORCONN_EVENT_PRIVATE
+#include "core/or/orconn_event.h"
+#include "core/or/or_sys.h"
+
+DECLARE_PUBLISH(orconn_state);
+DECLARE_PUBLISH(orconn_status);
+
+static void
+orconn_event_free(msg_aux_data_t u)
+{
+ tor_free_(u.ptr);
+}
+
+static char *
+orconn_state_fmt(msg_aux_data_t u)
+{
+ orconn_state_msg_t *msg = (orconn_state_msg_t *)u.ptr;
+ char *s = NULL;
+
+ tor_asprintf(&s, "<gid=%"PRIu64" chan=%"PRIu64" proxy_type=%d state=%d>",
+ msg->gid, msg->chan, msg->proxy_type, msg->state);
+ return s;
+}
+
+static char *
+orconn_status_fmt(msg_aux_data_t u)
+{
+ orconn_status_msg_t *msg = (orconn_status_msg_t *)u.ptr;
+ char *s = NULL;
+
+ tor_asprintf(&s, "<gid=%"PRIu64" status=%d reason=%d>",
+ msg->gid, msg->status, msg->reason);
+ return s;
+}
+
+static dispatch_typefns_t orconn_state_fns = {
+ .free_fn = orconn_event_free,
+ .fmt_fn = orconn_state_fmt,
+};
+
+static dispatch_typefns_t orconn_status_fns = {
+ .free_fn = orconn_event_free,
+ .fmt_fn = orconn_status_fmt,
+};
+
+int
+orconn_add_pubsub(struct pubsub_connector_t *connector)
+{
+ if (DISPATCH_REGISTER_TYPE(connector, orconn_state, &orconn_state_fns))
+ return -1;
+ if (DISPATCH_REGISTER_TYPE(connector, orconn_status, &orconn_status_fns))
+ return -1;
+ if (DISPATCH_ADD_PUB(connector, orconn, orconn_state) != 0)
+ return -1;
+ if (DISPATCH_ADD_PUB(connector, orconn, orconn_status) != 0)
+ return -1;
+ return 0;
+}
+
+void
+orconn_state_publish(orconn_state_msg_t *msg)
+{
+ PUBLISH(orconn_state, msg);
+}
+
+void
+orconn_status_publish(orconn_status_msg_t *msg)
+{
+ PUBLISH(orconn_status, msg);
+}
diff --git a/src/core/or/orconn_event.h b/src/core/or/orconn_event.h
new file mode 100644
index 0000000000..2653b20d6e
--- /dev/null
+++ b/src/core/or/orconn_event.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2001 Matej Pfajfar.
+ * Copyright (c) 2001-2004, Roger Dingledine.
+ * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file orconn_event.h
+ * \brief Header file for orconn_event.c
+ *
+ * The OR_CONN_STATE_* symbols are here to make it easier for
+ * subscribers to make decisions based on the messages that they
+ * receive.
+ **/
+
+#ifndef TOR_ORCONN_EVENT_H
+#define TOR_ORCONN_EVENT_H
+
+#include "lib/pubsub/pubsub.h"
+
+/**
+ * @name States of OR connections
+ *
+ * These must be in a partial ordering such that usually no OR
+ * connection will transition from a higher-numbered state to a
+ * lower-numbered one. Code such as bto_update_best() depends on this
+ * ordering to determine the best state it's seen so far.
+ * @{ */
+#define OR_CONN_STATE_MIN_ 1
+/** State for a connection to an OR: waiting for connect() to finish. */
+#define OR_CONN_STATE_CONNECTING 1
+/** State for a connection to an OR: waiting for proxy handshake to complete */
+#define OR_CONN_STATE_PROXY_HANDSHAKING 2
+/** State for an OR connection client: SSL is handshaking, not done
+ * yet. */
+#define OR_CONN_STATE_TLS_HANDSHAKING 3
+/** State for a connection to an OR: We're doing a second SSL handshake for
+ * renegotiation purposes. (V2 handshake only.) */
+#define OR_CONN_STATE_TLS_CLIENT_RENEGOTIATING 4
+/** State for a connection at an OR: We're waiting for the client to
+ * renegotiate (to indicate a v2 handshake) or send a versions cell (to
+ * indicate a v3 handshake) */
+#define OR_CONN_STATE_TLS_SERVER_RENEGOTIATING 5
+/** State for an OR connection: We're done with our SSL handshake, we've done
+ * renegotiation, but we haven't yet negotiated link protocol versions and
+ * sent a netinfo cell. */
+#define OR_CONN_STATE_OR_HANDSHAKING_V2 6
+/** State for an OR connection: We're done with our SSL handshake, but we
+ * haven't yet negotiated link protocol versions, done a V3 handshake, and
+ * sent a netinfo cell. */
+#define OR_CONN_STATE_OR_HANDSHAKING_V3 7
+/** State for an OR connection: Ready to send/receive cells. */
+#define OR_CONN_STATE_OPEN 8
+#define OR_CONN_STATE_MAX_ 8
+/** @} */
+
+/** Used to indicate the type of an OR connection event passed to the
+ * controller. The various types are defined in control-spec.txt */
+typedef enum or_conn_status_event_t {
+ OR_CONN_EVENT_LAUNCHED = 0,
+ OR_CONN_EVENT_CONNECTED = 1,
+ OR_CONN_EVENT_FAILED = 2,
+ OR_CONN_EVENT_CLOSED = 3,
+ OR_CONN_EVENT_NEW = 4,
+} or_conn_status_event_t;
+
+/**
+ * Message for orconn state update
+ *
+ * This contains information about internal state changes of
+ * or_connection_t objects. The chan and proxy_type fields are
+ * additional information that a subscriber may need to make
+ * decisions.
+ **/
+typedef struct orconn_state_msg_t {
+ uint64_t gid; /**< connection's global ID */
+ uint64_t chan; /**< associated channel ID */
+ int proxy_type; /**< connection's proxy type */
+ uint8_t state; /**< new connection state */
+} orconn_state_msg_t;
+
+DECLARE_MESSAGE(orconn_state, orconn_state, orconn_state_msg_t *);
+
+/**
+ * Message for orconn status event
+ *
+ * This contains information that ends up in ORCONN control protocol
+ * events.
+ **/
+typedef struct orconn_status_msg_t {
+ uint64_t gid; /**< connection's global ID */
+ int status; /**< or_conn_status_event_t */
+ int reason; /**< reason */
+} orconn_status_msg_t;
+
+DECLARE_MESSAGE(orconn_status, orconn_status, orconn_status_msg_t *);
+
+#ifdef ORCONN_EVENT_PRIVATE
+void orconn_state_publish(orconn_state_msg_t *);
+void orconn_status_publish(orconn_status_msg_t *);
+#endif
+
+#endif /* !defined(TOR_ORCONN_EVENT_H) */
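To make the publish half of this interface concrete, here is a minimal sketch of a call site inside the OR subsystem (the only place orconn_state_publish() is visible, via ORCONN_EVENT_PRIVATE). The variable names and the PROXY_NONE value are illustrative assumptions; the key point is that the message is heap-allocated and ownership passes to the dispatcher, which releases it with the registered free_fn.

/* Hypothetical publisher call site (sketch only). */
orconn_state_msg_t *msg = tor_malloc_zero(sizeof(*msg));
msg->gid = conn_global_id;        /* the or_connection_t's global ID */
msg->chan = chan_global_id;       /* the associated channel's global ID */
msg->proxy_type = PROXY_NONE;     /* assumed proxy-type constant */
msg->state = OR_CONN_STATE_CONNECTING;
orconn_state_publish(msg);        /* dispatcher takes ownership of msg */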
diff --git a/src/core/or/origin_circuit_st.h b/src/core/or/origin_circuit_st.h
index f55416db14..79e250cd59 100644
--- a/src/core/or/origin_circuit_st.h
+++ b/src/core/or/origin_circuit_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file origin_circuit_st.h
+ * @brief Origin circuit structure.
+ **/
+
#ifndef ORIGIN_CIRCUIT_ST_H
#define ORIGIN_CIRCUIT_ST_H
@@ -161,6 +166,10 @@ struct origin_circuit_t {
* connections to this circuit. */
unsigned int unusable_for_new_conns : 1;
+ /* If this flag is set (due to padding negotiation failure), we should
+ * not try to negotiate further circuit padding. */
+ unsigned padding_negotiation_failed : 1;
+
/**
* Tristate variable to guard against pathbias miscounting
* due to circuit purpose transitions changing the decision
@@ -291,4 +300,4 @@ struct origin_circuit_t {
};
-#endif
+#endif /* !defined(ORIGIN_CIRCUIT_ST_H) */
diff --git a/src/core/or/policies.c b/src/core/or/policies.c
index 3ed282d785..2bf2dc7005 100644
--- a/src/core/or/policies.c
+++ b/src/core/or/policies.c
@@ -1,6 +1,6 @@
/* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -29,7 +29,9 @@
#include "feature/relay/routermode.h"
#include "lib/geoip/geoip.h"
#include "ht.h"
+#include "lib/crypt_ops/crypto_rand.h"
#include "lib/encoding/confline.h"
+#include "trunnel/ed25519_cert.h"
#include "core/or/addr_policy_st.h"
#include "feature/dirclient/dir_server_st.h"
@@ -165,7 +167,7 @@ policy_expand_unspec(smartlist_t **policy)
}
tor_addr_from_ipv4h(&newpolicy_ipv4.addr, 0);
tor_addr_from_ipv6_bytes(&newpolicy_ipv6.addr,
- "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0");
+ (const uint8_t *)"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0");
smartlist_add(tmp, addr_policy_get_canonical_entry(&newpolicy_ipv4));
smartlist_add(tmp, addr_policy_get_canonical_entry(&newpolicy_ipv6));
addr_policy_free(p);
@@ -918,49 +920,6 @@ fascist_firewall_choose_address_ipv4h(uint32_t ipv4h_addr,
pref_ipv6, ap);
}
-/* Some microdescriptor consensus methods have no IPv6 addresses in rs: they
- * are in the microdescriptors. For these consensus methods, we can't rely on
- * the node's IPv6 address until its microdescriptor is available (when using
- * microdescs).
- * But for bridges, rewrite_node_address_for_bridge() updates node->ri with
- * the configured address, so we can trust bridge addresses.
- * (Bridges could gain an IPv6 address if their microdescriptor arrives, but
- * this will never be their preferred address: that is in the config.)
- * Returns true if the node needs a microdescriptor for its IPv6 address, and
- * false if the addresses in the node are already up-to-date.
- */
-static int
-node_awaiting_ipv6(const or_options_t* options, const node_t *node)
-{
- tor_assert(node);
-
- /* There's no point waiting for an IPv6 address if we'd never use it */
- if (!fascist_firewall_use_ipv6(options)) {
- return 0;
- }
-
- /* If the node has an IPv6 address, we're not waiting */
- if (node_has_ipv6_addr(node)) {
- return 0;
- }
-
- /* If the current consensus method and flavour has IPv6 addresses, we're not
- * waiting */
- if (networkstatus_consensus_has_ipv6(options)) {
- return 0;
- }
-
- /* Bridge clients never use the address from a bridge's md, so there's no
- * need to wait for it. */
- if (node_is_a_configured_bridge(node)) {
- return 0;
- }
-
- /* We are waiting if we_use_microdescriptors_for_circuits() and we have no
- * md. */
- return (!node->md && we_use_microdescriptors_for_circuits(options));
-}
-
/** Like fascist_firewall_choose_address_base(), but takes <b>rs</b>.
* Consults the corresponding node, then falls back to rs if node is NULL.
* This should only happen when there's no valid consensus, and rs doesn't
@@ -983,7 +942,7 @@ fascist_firewall_choose_address_rs(const routerstatus_t *rs,
const or_options_t *options = get_options();
const node_t *node = node_get_by_id(rs->identity_digest);
- if (node && !node_awaiting_ipv6(options, node)) {
+ if (node) {
fascist_firewall_choose_address_node(node, fw_connection, pref_only, ap);
} else {
/* There's no node-specific IPv6 preference, so use the generic IPv6
@@ -1001,6 +960,83 @@ fascist_firewall_choose_address_rs(const routerstatus_t *rs,
}
}
+/** Like fascist_firewall_choose_address_base(), but takes in a smartlist
+ * <b>lspecs</b> consisting of one or more link specifiers. We assume
+ * fw_connection is FIREWALL_OR_CONNECTION as link specifiers cannot
+ * contain DirPorts.
+ */
+void
+fascist_firewall_choose_address_ls(const smartlist_t *lspecs,
+ int pref_only, tor_addr_port_t* ap)
+{
+ int have_v4 = 0, have_v6 = 0;
+ uint16_t port_v4 = 0, port_v6 = 0;
+ tor_addr_t addr_v4, addr_v6;
+
+ tor_assert(ap);
+
+ if (lspecs == NULL) {
+ log_warn(LD_BUG, "Unknown or missing link specifiers");
+ return;
+ }
+ if (smartlist_len(lspecs) == 0) {
+ log_warn(LD_PROTOCOL, "Link specifiers are empty");
+ return;
+ }
+
+ tor_addr_make_null(&ap->addr, AF_UNSPEC);
+ ap->port = 0;
+
+ tor_addr_make_null(&addr_v4, AF_INET);
+ tor_addr_make_null(&addr_v6, AF_INET6);
+
+ SMARTLIST_FOREACH_BEGIN(lspecs, const link_specifier_t *, ls) {
+ switch (link_specifier_get_ls_type(ls)) {
+ case LS_IPV4:
+ /* Skip if we've already seen a v4. */
+ if (have_v4) continue;
+ tor_addr_from_ipv4h(&addr_v4,
+ link_specifier_get_un_ipv4_addr(ls));
+ port_v4 = link_specifier_get_un_ipv4_port(ls);
+ have_v4 = 1;
+ break;
+ case LS_IPV6:
+ /* Skip if we've already seen a v6, or deliberately skip it if we're
+ * not a direct connection. */
+ if (have_v6) continue;
+ tor_addr_from_ipv6_bytes(&addr_v6,
+ link_specifier_getconstarray_un_ipv6_addr(ls));
+ port_v6 = link_specifier_get_un_ipv6_port(ls);
+ have_v6 = 1;
+ break;
+ default:
+ /* Ignore unknown. */
+ break;
+ }
+ } SMARTLIST_FOREACH_END(ls);
+
+ /* If we have neither an IPv4 nor an IPv6 link specifier, warn and return. */
+ if (!have_v4 && !have_v6) {
+ log_warn(LD_PROTOCOL, "None of our link specifiers have IPv4 or IPv6");
+ return;
+ }
+
+ /* Here, don't check for DirPorts as link specifiers are only used for
+ * ORPorts. */
+ const or_options_t *options = get_options();
+ int pref_ipv6 = fascist_firewall_prefer_ipv6_orport(options);
+ /* Assume that the DirPorts are zero as link specifiers only use ORPorts. */
+ fascist_firewall_choose_address_base(&addr_v4, port_v4, 0,
+ &addr_v6, port_v6, 0,
+ FIREWALL_OR_CONNECTION,
+ pref_only, pref_ipv6,
+ ap);
+}
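A hedged usage sketch for the new helper: a caller that has extracted link specifiers (for example, from an EXTEND2 cell or an onion service descriptor) asks for a reachable ORPort address and then checks whether anything was chosen. The lspecs variable is assumed to exist in the caller.

/* Hypothetical caller (sketch only). */
tor_addr_port_t ap;
fascist_firewall_choose_address_ls(lspecs, 0, &ap);
if (tor_addr_is_null(&ap.addr)) {
  /* No usable IPv4/IPv6 ORPort under the current reachability settings. */
}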
+
/** Like fascist_firewall_choose_address_base(), but takes <b>node</b>, and
* looks up the node's IPv6 preference rather than taking an argument
* for pref_ipv6. */
@@ -1019,17 +1055,6 @@ fascist_firewall_choose_address_node(const node_t *node,
}
node_assert_ok(node);
- /* Calling fascist_firewall_choose_address_node() when the node is missing
- * IPv6 information breaks IPv6-only clients.
- * If the node is a hard-coded fallback directory or authority, call
- * fascist_firewall_choose_address_rs() on the fake (hard-coded) routerstatus
- * for the node.
- * If it is not hard-coded, check that the node has a microdescriptor, full
- * descriptor (routerinfo), or is one of our configured bridges before
- * calling this function. */
- if (BUG(node_awaiting_ipv6(get_options(), node))) {
- return;
- }
const int pref_ipv6_node = (fw_connection == FIREWALL_OR_CONNECTION
? node_ipv6_or_preferred(node)
@@ -1150,6 +1175,15 @@ authdir_policy_badexit_address(uint32_t addr, uint16_t port)
#define REJECT(arg) \
STMT_BEGIN *msg = tor_strdup(arg); goto err; STMT_END
+/** Check <b>or_options</b> to determine whether or not we are using the
+ * default options for exit policy. Return true if so, false otherwise. */
+static int
+policy_using_default_exit_options(const or_options_t *or_options)
+{
+ return (or_options->ExitPolicy == NULL && or_options->ExitRelay == -1 &&
+ or_options->ReducedExitPolicy == 0 && or_options->IPv6Exit == 0);
+}
+
/** Config helper: If there's any problem with the policy configuration
* options in <b>options</b>, return -1 and set <b>msg</b> to a newly
* allocated description of the error. Else return 0. */
@@ -1168,9 +1202,8 @@ validate_addr_policies(const or_options_t *options, char **msg)
static int warned_about_nonexit = 0;
- if (public_server_mode(options) &&
- !warned_about_nonexit && options->ExitPolicy == NULL &&
- options->ExitRelay == -1 && options->ReducedExitPolicy == 0) {
+ if (public_server_mode(options) && !warned_about_nonexit &&
+ policy_using_default_exit_options(options)) {
warned_about_nonexit = 1;
log_notice(LD_CONFIG, "By default, Tor does not run as an exit relay. "
"If you want to be an exit relay, "
@@ -1359,9 +1392,9 @@ policy_hash(const policy_map_ent_t *ent)
}
HT_PROTOTYPE(policy_map, policy_map_ent_t, node, policy_hash,
- policy_eq)
+ policy_eq);
HT_GENERATE2(policy_map, policy_map_ent_t, node, policy_hash,
- policy_eq, 0.6, tor_reallocarray_, tor_free_)
+ policy_eq, 0.6, tor_reallocarray_, tor_free_);
/** Given a pointer to an addr_policy_t, return a copy of the pointer to the
* "canonical" copy of that addr_policy_t; the canonical copy is a single
@@ -2127,9 +2160,9 @@ policies_parse_exit_policy_from_options(const or_options_t *or_options,
int rv = 0;
/* Short-circuit for non-exit relays, or for relays where we didn't specify
- * ExitPolicy or ReducedExitPolicy and ExitRelay is auto. */
- if (or_options->ExitRelay == 0 || (or_options->ExitPolicy == NULL &&
- or_options->ExitRelay == -1 && or_options->ReducedExitPolicy == 0)) {
+ * ExitPolicy or ReducedExitPolicy or IPv6Exit and ExitRelay is auto. */
+ if (or_options->ExitRelay == 0 ||
+ policy_using_default_exit_options(or_options)) {
append_exit_policy_string(result, "reject *4:*");
append_exit_policy_string(result, "reject *6:*");
return 0;
@@ -2706,7 +2739,7 @@ parse_short_policy(const char *summary)
int is_accept;
int n_entries;
short_policy_entry_t entries[MAX_EXITPOLICY_SUMMARY_LEN]; /* overkill */
- const char *next;
+ char *next;
if (!strcmpstart(summary, "accept ")) {
is_accept = 1;
@@ -2721,57 +2754,56 @@ parse_short_policy(const char *summary)
n_entries = 0;
for ( ; *summary; summary = next) {
- const char *comma = strchr(summary, ',');
- unsigned low, high;
- char dummy;
- char ent_buf[32];
- size_t len;
-
- next = comma ? comma+1 : strchr(summary, '\0');
- len = comma ? (size_t)(comma - summary) : strlen(summary);
-
if (n_entries == MAX_EXITPOLICY_SUMMARY_LEN) {
log_fn(LOG_PROTOCOL_WARN, LD_DIR, "Impossibly long policy summary %s",
escaped(orig_summary));
return NULL;
}
- if (! TOR_ISDIGIT(*summary) || len > (sizeof(ent_buf)-1)) {
- /* unrecognized entry format. skip it. */
- continue;
- }
- if (len < 1) {
- /* empty; skip it. */
- /* XXX This happens to be unreachable, since if len==0, then *summary is
- * ',' or '\0', and the TOR_ISDIGIT test above would have failed. */
- continue;
+ unsigned low, high;
+ int ok;
+ low = (unsigned) tor_parse_ulong(summary, 10, 1, 65535, &ok, &next);
+ if (!ok) {
+ if (! TOR_ISDIGIT(*summary) || *summary == ',') {
+ /* Unrecognized format: skip it. */
+ goto skip_ent;
+ } else {
+ goto bad_ent;
+ }
}
- memcpy(ent_buf, summary, len);
- ent_buf[len] = '\0';
+ switch (*next) {
+ case ',':
+ ++next;
+ FALLTHROUGH;
+ case '\0':
+ high = low;
+ break;
+ case '-':
+ high = (unsigned) tor_parse_ulong(next+1, 10, low, 65535, &ok, &next);
+ if (!ok)
+ goto bad_ent;
- if (tor_sscanf(ent_buf, "%u-%u%c", &low, &high, &dummy) == 2) {
- if (low<1 || low>65535 || high<1 || high>65535 || low>high) {
- log_fn(LOG_PROTOCOL_WARN, LD_DIR,
- "Found bad entry in policy summary %s", escaped(orig_summary));
- return NULL;
- }
- } else if (tor_sscanf(ent_buf, "%u%c", &low, &dummy) == 1) {
- if (low<1 || low>65535) {
- log_fn(LOG_PROTOCOL_WARN, LD_DIR,
- "Found bad entry in policy summary %s", escaped(orig_summary));
- return NULL;
- }
- high = low;
- } else {
- log_fn(LOG_PROTOCOL_WARN, LD_DIR,"Found bad entry in policy summary %s",
- escaped(orig_summary));
- return NULL;
+ if (*next == ',')
+ ++next;
+ else if (*next != '\0')
+ goto bad_ent;
+
+ break;
+ default:
+ goto bad_ent;
}
entries[n_entries].min_port = low;
entries[n_entries].max_port = high;
n_entries++;
+
+ continue;
+ skip_ent:
+ next = strchr(next, ',');
+ if (!next)
+ break;
+ ++next;
}
if (n_entries == 0) {
@@ -2792,6 +2824,11 @@ parse_short_policy(const char *summary)
result->n_entries = n_entries;
memcpy(result->entries, entries, sizeof(short_policy_entry_t)*n_entries);
return result;
+
+ bad_ent:
+ log_fn(LOG_PROTOCOL_WARN, LD_DIR,"Found bad entry in policy summary %s",
+ escaped(orig_summary));
+ return NULL;
}
/** Write <b>policy</b> back out into a string. */
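For reference, the summaries handled by parse_short_policy() are the port-summary strings from consensus documents, such as "accept 80,443,8000-8999" or "reject 1-65535". A minimal sketch of what the rewritten parser produces for a typical input (field names as declared in short_policy_t):

/* Illustrative only. */
short_policy_t *sp = parse_short_policy("accept 53,80,443,8000-8999");
/* sp->is_accept == 1, sp->n_entries == 4,
 * sp->entries[3].min_port == 8000, sp->entries[3].max_port == 8999 */
short_policy_free(sp);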
diff --git a/src/core/or/policies.h b/src/core/or/policies.h
index 2c38de362f..72a37d62b0 100644
--- a/src/core/or/policies.h
+++ b/src/core/or/policies.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -91,6 +91,8 @@ int fascist_firewall_allows_dir_server(const dir_server_t *ds,
void fascist_firewall_choose_address_rs(const routerstatus_t *rs,
firewall_connection_t fw_connection,
int pref_only, tor_addr_port_t* ap);
+void fascist_firewall_choose_address_ls(const smartlist_t *lspecs,
+ int pref_only, tor_addr_port_t* ap);
void fascist_firewall_choose_address_node(const node_t *node,
firewall_connection_t fw_connection,
int pref_only, tor_addr_port_t* ap);
diff --git a/src/core/or/port_cfg_st.h b/src/core/or/port_cfg_st.h
index b67091ce32..064e679d78 100644
--- a/src/core/or/port_cfg_st.h
+++ b/src/core/or/port_cfg_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file port_cfg_st.h
+ * @brief Listener port configuration structure.
+ **/
+
#ifndef PORT_CFG_ST_H
#define PORT_CFG_ST_H
@@ -31,5 +36,4 @@ struct port_cfg_t {
char unix_addr[FLEXIBLE_ARRAY_MEMBER];
};
-#endif
-
+#endif /* !defined(PORT_CFG_ST_H) */
diff --git a/src/core/or/protover.c b/src/core/or/protover.c
index dfb0e9e303..0d03e9a06b 100644
--- a/src/core/or/protover.c
+++ b/src/core/or/protover.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2019, The Tor Project, Inc. */
+/* Copyright (c) 2016-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -39,6 +39,9 @@ static int protocol_list_contains(const smartlist_t *protos,
static const struct {
protocol_type_t protover_type;
const char *name;
+/* If you add a new protocol here, you probably also want to add
+ * parsing for it in summarize_protover_flags(), so that it has a
+ * summary flag in routerstatus_t */
} PROTOCOL_NAMES[] = {
{ PRT_LINK, "Link" },
{ PRT_LINKAUTH, "LinkAuth" },
@@ -49,7 +52,9 @@ static const struct {
{ PRT_HSREND, "HSRend" },
{ PRT_DESC, "Desc" },
{ PRT_MICRODESC, "Microdesc"},
- { PRT_CONS, "Cons" }
+ { PRT_PADDING, "Padding"},
+ { PRT_CONS, "Cons" },
+ { PRT_FLOWCTRL, "FlowCtrl"},
};
#define N_PROTOCOL_NAMES ARRAY_LENGTH(PROTOCOL_NAMES)
@@ -386,8 +391,9 @@ protover_get_supported_protocols(void)
"Cons=1-2 "
"Desc=1-2 "
"DirCache=1-2 "
+ "FlowCtrl=1 "
"HSDir=1-2 "
- "HSIntro=3-4 "
+ "HSIntro=3-5 "
"HSRend=1-2 "
"Link=1-5 "
#ifdef HAVE_WORKING_TOR_TLS_GET_TLSSECRETS
@@ -396,6 +402,7 @@ protover_get_supported_protocols(void)
"LinkAuth=3 "
#endif
"Microdesc=1-2 "
+ "Padding=2 "
"Relay=1-2";
}
@@ -815,6 +822,8 @@ protover_all_supported(const char *s, char **missing_out)
* ones and, if so, add them to unsupported->ranges. */
if (versions->low != 0 && versions->high != 0) {
smartlist_add(unsupported->ranges, versions);
+ } else {
+ tor_free(versions);
}
/* Finally, if we had something unsupported, add it to the list of
* missing_some things and mark that there was something missing. */
@@ -823,7 +832,6 @@ protover_all_supported(const char *s, char **missing_out)
all_supported = 0;
} else {
proto_entry_free(unsupported);
- tor_free(versions);
}
} SMARTLIST_FOREACH_END(range);
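As a usage sketch, the checks above are typically driven by protocol-version strings like the one returned by protover_get_supported_protocols(); for example, a caller can ask whether the running Tor supports every version listed in a consensus "required" line. The literal string below is illustrative only.

/* Sketch: does this Tor support all of the listed protocol versions? */
char *missing = NULL;
if (!protover_all_supported("Link=4-5 Padding=2 FlowCtrl=1", &missing)) {
  log_warn(LD_PROTOCOL, "Missing required protocols: %s", missing);
  tor_free(missing);
}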
diff --git a/src/core/or/protover.h b/src/core/or/protover.h
index 7e181ba97a..9509f3e8a3 100644
--- a/src/core/or/protover.h
+++ b/src/core/or/protover.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2019, The Tor Project, Inc. */
+/* Copyright (c) 2016-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -28,21 +28,25 @@ struct smartlist_t;
#define PROTOVER_HS_INTRO_V3 4
/** The protover version number that signifies HSv3 rendezvous point support */
#define PROTOVER_HS_RENDEZVOUS_POINT_V3 2
+/** The protover that signals support for HS circuit setup padding machines */
+#define PROTOVER_HS_SETUP_PADDING 2
/** List of recognized subprotocols. */
/// C_RUST_COUPLED: src/rust/protover/ffi.rs `translate_to_rust`
/// C_RUST_COUPLED: src/rust/protover/protover.rs `Proto`
typedef enum protocol_type_t {
- PRT_LINK,
- PRT_LINKAUTH,
- PRT_RELAY,
- PRT_DIRCACHE,
- PRT_HSDIR,
- PRT_HSINTRO,
- PRT_HSREND,
- PRT_DESC,
- PRT_MICRODESC,
- PRT_CONS,
+ PRT_LINK = 0,
+ PRT_LINKAUTH = 1,
+ PRT_RELAY = 2,
+ PRT_DIRCACHE = 3,
+ PRT_HSDIR = 4,
+ PRT_HSINTRO = 5,
+ PRT_HSREND = 6,
+ PRT_DESC = 7,
+ PRT_MICRODESC = 8,
+ PRT_CONS = 9,
+ PRT_PADDING = 10,
+ PRT_FLOWCTRL = 11,
} protocol_type_t;
bool protover_contains_long_protocol_names(const char *s);
diff --git a/src/core/or/protover_rust.c b/src/core/or/protover_rust.c
index bc56ea11d0..f44746b6da 100644
--- a/src/core/or/protover_rust.c
+++ b/src/core/or/protover_rust.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2019, The Tor Project, Inc. */
+/* Copyright (c) 2016-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/*
diff --git a/src/core/or/reasons.c b/src/core/or/reasons.c
index e21bfa670a..708f43a689 100644
--- a/src/core/or/reasons.c
+++ b/src/core/or/reasons.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -244,6 +244,8 @@ orconn_end_reason_to_control_string(int r)
return "IOERROR";
case END_OR_CONN_REASON_RESOURCE_LIMIT:
return "RESOURCELIMIT";
+ case END_OR_CONN_REASON_TLS_ERROR:
+ return "TLS_ERROR";
case END_OR_CONN_REASON_MISC:
return "MISC";
case END_OR_CONN_REASON_PT_MISSING:
@@ -276,6 +278,8 @@ tls_error_to_orconn_end_reason(int e)
case TOR_TLS_CLOSE:
case TOR_TLS_DONE:
return END_OR_CONN_REASON_DONE;
+ case TOR_TLS_ERROR_MISC:
+ return END_OR_CONN_REASON_TLS_ERROR;
default:
return END_OR_CONN_REASON_MISC;
}
diff --git a/src/core/or/reasons.h b/src/core/or/reasons.h
index c45a8bc38d..2e534aab73 100644
--- a/src/core/or/reasons.h
+++ b/src/core/or/reasons.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
diff --git a/src/core/or/relay.c b/src/core/or/relay.c
index 00353f47a9..892dbe2d0a 100644
--- a/src/core/or/relay.c
+++ b/src/core/or/relay.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -49,22 +49,24 @@
#include "core/or/or.h"
#include "feature/client/addressmap.h"
#include "lib/err/backtrace.h"
-#include "lib/container/buffers.h"
+#include "lib/buf/buffers.h"
#include "core/or/channel.h"
#include "feature/client/circpathbias.h"
#include "core/or/circuitbuild.h"
#include "core/or/circuitlist.h"
#include "core/or/circuituse.h"
+#include "core/or/circuitpadding.h"
#include "lib/compress/compress.h"
#include "app/config/config.h"
#include "core/mainloop/connection.h"
#include "core/or/connection_edge.h"
#include "core/or/connection_or.h"
-#include "feature/control/control.h"
+#include "feature/control/control_events.h"
#include "lib/crypt_ops/crypto_rand.h"
#include "lib/crypt_ops/crypto_util.h"
#include "feature/dircommon/directory.h"
#include "feature/relay/dns.h"
+#include "feature/relay/circuitbuild_relay.h"
#include "feature/stats/geoip_stats.h"
#include "feature/hs/hs_cache.h"
#include "core/mainloop/mainloop.h"
@@ -80,7 +82,6 @@
#include "feature/nodelist/describe.h"
#include "feature/nodelist/routerlist.h"
#include "core/or/scheduler.h"
-#include "feature/stats/rephist.h"
#include "core/or/cell_st.h"
#include "core/or/cell_queue_st.h"
@@ -93,15 +94,12 @@
#include "core/or/origin_circuit_st.h"
#include "feature/nodelist/routerinfo_st.h"
#include "core/or/socks_request_st.h"
-
-#include "lib/intmath/weakrng.h"
+#include "core/or/sendme.h"
static edge_connection_t *relay_lookup_conn(circuit_t *circ, cell_t *cell,
cell_direction_t cell_direction,
crypt_path_t *layer_hint);
-static void circuit_consider_sending_sendme(circuit_t *circ,
- crypt_path_t *layer_hint);
static void circuit_resume_edge_reading(circuit_t *circ,
crypt_path_t *layer_hint);
static int circuit_resume_edge_reading_helper(edge_connection_t *conn,
@@ -134,9 +132,6 @@ uint64_t stats_n_relay_cells_delivered = 0;
* reached (see append_cell_to_circuit_queue()) */
uint64_t stats_n_circ_max_cell_reached = 0;
-/** Used to tell which stream to read from first on a circuit. */
-static tor_weak_rng_t stream_choice_rng = TOR_WEAK_RNG_INIT;
-
/**
* Update channel usage state based on the type of relay cell and
* circuit properties.
@@ -253,6 +248,10 @@ circuit_receive_relay_cell(cell_t *cell, circuit_t *circ,
if (recognized) {
edge_connection_t *conn = NULL;
+ /* The cell is recognized and its digest has been updated; record the
+ * digest for the SENDME subsystem if need be. */
+ sendme_record_received_cell_digest(circ, layer_hint);
+
if (circ->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING) {
if (pathbias_check_probe_response(circ, cell) == -1) {
pathbias_count_valid_cells(circ, cell);
@@ -267,8 +266,8 @@ circuit_receive_relay_cell(cell_t *cell, circuit_t *circ,
if (cell_direction == CELL_DIRECTION_OUT) {
++stats_n_relay_cells_delivered;
log_debug(LD_OR,"Sending away from origin.");
- if ((reason=connection_edge_process_relay_cell(cell, circ, conn, NULL))
- < 0) {
+ reason = connection_edge_process_relay_cell(cell, circ, conn, NULL);
+ if (reason < 0) {
log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
"connection_edge_process_relay_cell (away from origin) "
"failed.");
@@ -278,8 +277,9 @@ circuit_receive_relay_cell(cell_t *cell, circuit_t *circ,
if (cell_direction == CELL_DIRECTION_IN) {
++stats_n_relay_cells_delivered;
log_debug(LD_OR,"Sending to origin.");
- if ((reason = connection_edge_process_relay_cell(cell, circ, conn,
- layer_hint)) < 0) {
+ reason = connection_edge_process_relay_cell(cell, circ, conn,
+ layer_hint);
+ if (reason < 0) {
/* If a client is trying to connect to unknown hidden service port,
* END_CIRC_AT_ORIGIN is sent back so we can then close the circuit.
* Do not log warn as this is an expected behavior for a service. */
@@ -293,7 +293,9 @@ circuit_receive_relay_cell(cell_t *cell, circuit_t *circ,
return 0;
}
- /* not recognized. pass it on. */
+ /* not recognized. inform circpad and pass it on. */
+ circpad_deliver_unrecognized_cell_events(circ, cell_direction);
+
if (cell_direction == CELL_DIRECTION_OUT) {
cell->circ_id = circ->n_circ_id; /* switch it */
chan = circ->n_chan;
@@ -353,11 +355,11 @@ circuit_receive_relay_cell(cell_t *cell, circuit_t *circ,
* - Encrypt it to the right layer
* - Append it to the appropriate cell_queue on <b>circ</b>.
*/
-static int
-circuit_package_relay_cell(cell_t *cell, circuit_t *circ,
+MOCK_IMPL(int,
+circuit_package_relay_cell, (cell_t *cell, circuit_t *circ,
cell_direction_t cell_direction,
crypt_path_t *layer_hint, streamid_t on_stream,
- const char *filename, int lineno)
+ const char *filename, int lineno))
{
channel_t *chan; /* where to send the cell */
@@ -524,6 +526,8 @@ relay_command_to_string(uint8_t command)
case RELAY_COMMAND_INTRODUCE_ACK: return "INTRODUCE_ACK";
case RELAY_COMMAND_EXTEND2: return "EXTEND2";
case RELAY_COMMAND_EXTENDED2: return "EXTENDED2";
+ case RELAY_COMMAND_PADDING_NEGOTIATE: return "PADDING_NEGOTIATE";
+ case RELAY_COMMAND_PADDING_NEGOTIATED: return "PADDING_NEGOTIATED";
default:
tor_snprintf(buf, sizeof(buf), "Unrecognized relay command %u",
(unsigned)command);
@@ -531,6 +535,64 @@ relay_command_to_string(uint8_t command)
}
}
+/** When padding a cell with randomness, leave this many zeros after the
+ * payload. */
+#define CELL_PADDING_GAP 4
+
+/** Return the offset where the padding should start. The <b>data_len</b> is
+ * the relay payload length expected to be put in the cell. It cannot be
+ * bigger than RELAY_PAYLOAD_SIZE, or else this function asserts.
+ *
+ * The value will always be smaller than CELL_PAYLOAD_SIZE because this
+ * offset is for the entire cell length, not just the data payload length.
+ * Zero is returned if there is no room for padding.
+ *
+ * This function always skips the first 4 bytes after the payload because
+ * having some unused zero bytes has saved us many times in the past. */
+
+STATIC size_t
+get_pad_cell_offset(size_t data_len)
+{
+ /* This is never supposed to happen, but if it does, stop right away:
+ * if tor were somehow tricked into not adding random bytes to the
+ * payload (because this function returned 0 for a bad data_len), the
+ * entire authenticated SENDME design could be bypassed, leading to
+ * serious denial of service attacks. */
+ tor_assert(data_len <= RELAY_PAYLOAD_SIZE);
+
+ /* If the offset reaches or passes the end of the cell payload, return
+ * zero to indicate that no padding can be added. */
+ size_t offset = RELAY_HEADER_SIZE + data_len + CELL_PADDING_GAP;
+ if (offset >= CELL_PAYLOAD_SIZE) {
+ return 0;
+ }
+ return offset;
+}
+
+/* Add random bytes to the unused portion of the payload, to foil attacks
+ * where the other side can predict all of the bytes in the payload and thus
+ * compute the authenticated SENDME cells without seeing the traffic. See
+ * proposal 289. */
+static void
+pad_cell_payload(uint8_t *cell_payload, size_t data_len)
+{
+ size_t pad_offset, pad_len;
+
+ tor_assert(cell_payload);
+
+ pad_offset = get_pad_cell_offset(data_len);
+ if (pad_offset == 0) {
+ /* We can't add padding so we are done. */
+ return;
+ }
+
+ /* Remember that cell_payload covers both the relay header and the data
+ * payload, so the padding length is computed from the full cell size. */
+ pad_len = CELL_PAYLOAD_SIZE - pad_offset;
+ crypto_fast_rng_getbytes(get_thread_fast_rng(),
+ cell_payload + pad_offset, pad_len);
+}
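A quick worked example of the offset arithmetic above, using the standard cell constants (CELL_PAYLOAD_SIZE is 509 bytes, RELAY_HEADER_SIZE is 11, and the gap is CELL_PADDING_GAP = 4):

/* data_len = 100 -> offset = 11 + 100 + 4 = 115, pad_len = 509 - 115 = 394
 * data_len = 493 -> offset = 508,                pad_len = 1
 * data_len = 495 -> offset = 510 >= 509          -> no padding added
 * data_len = 498 (RELAY_PAYLOAD_SIZE) is the maximum accepted value. */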
+
/** Make a relay cell out of <b>relay_command</b> and <b>payload</b>, and send
* it onto the open circuit <b>circ</b>. <b>stream_id</b> is the ID on
* <b>circ</b> for the stream that's sending the relay cell, or 0 if it's a
@@ -574,11 +636,14 @@ relay_send_command_from_edge_,(streamid_t stream_id, circuit_t *circ,
if (payload_len)
memcpy(cell.payload+RELAY_HEADER_SIZE, payload, payload_len);
+ /* Add random padding to the cell if we can. */
+ pad_cell_payload(cell.payload, payload_len);
+
log_debug(LD_OR,"delivering %d cell %s.", relay_command,
cell_direction == CELL_DIRECTION_OUT ? "forward" : "backward");
- if (relay_command == RELAY_COMMAND_DROP)
- rep_hist_padding_count_write(PADDING_TYPE_DROP);
+ /* Tell circpad we're sending a relay cell */
+ circpad_deliver_sent_relay_cell_events(circ, relay_command);
/* If we are sending an END cell and this circuit is used for a tunneled
* directory request, advance its state. */
@@ -602,7 +667,9 @@ relay_send_command_from_edge_,(streamid_t stream_id, circuit_t *circ,
* one of them. Don't worry about the conn protocol version:
* append_cell_to_circuit_queue will fix it up. */
cell.command = CELL_RELAY_EARLY;
- --origin_circ->remaining_relay_early_cells;
+ /* If we're out of relay early cells, tell circpad */
+ if (--origin_circ->remaining_relay_early_cells == 0)
+ circpad_machine_event_circ_has_no_relay_early(origin_circ);
log_debug(LD_OR, "Sending a RELAY_EARLY cell; %d remaining.",
(int)origin_circ->remaining_relay_early_cells);
/* Memorize the command that is sent as RELAY_EARLY cell; helps debug
@@ -639,6 +706,14 @@ relay_send_command_from_edge_,(streamid_t stream_id, circuit_t *circ,
circuit_mark_for_close(circ, END_CIRC_REASON_INTERNAL);
return -1;
}
+
+ /* If applicable, record the cell digest for SENDME version 1 purposes.
+ * This call needs to come after circuit_package_relay_cell() because the
+ * cell digest is set within that function. */
+ if (relay_command == RELAY_COMMAND_DATA) {
+ sendme_record_cell_digest_on_circ(circ, cpath_layer);
+ }
+
return 0;
}
@@ -792,7 +867,7 @@ connection_ap_process_end_not_open(
ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+5));
} else if (rh->length == 17 || rh->length == 21) {
tor_addr_from_ipv6_bytes(&addr,
- (char*)(cell->payload+RELAY_HEADER_SIZE+1));
+ (cell->payload+RELAY_HEADER_SIZE+1));
if (rh->length == 21)
ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+17));
}
@@ -1017,7 +1092,7 @@ connected_cell_parse(const relay_header_t *rh, const cell_t *cell,
return -1;
if (get_uint8(payload + 4) != 6)
return -1;
- tor_addr_from_ipv6_bytes(addr_out, (char*)(payload + 5));
+ tor_addr_from_ipv6_bytes(addr_out, (payload + 5));
bytes = ntohl(get_uint32(payload + 21));
if (bytes <= INT32_MAX)
*ttl_out = (int) bytes;
@@ -1090,7 +1165,7 @@ resolved_cell_parse(const cell_t *cell, const relay_header_t *rh,
if (answer_len != 16)
goto err;
addr = tor_malloc_zero(sizeof(*addr));
- tor_addr_from_ipv6_bytes(&addr->addr, (const char*) cp);
+ tor_addr_from_ipv6_bytes(&addr->addr, cp);
cp += 16;
addr->ttl = ntohl(get_uint32(cp));
cp += 4;
@@ -1447,85 +1522,108 @@ relay_crypt_from_last_hop(origin_circuit_t *circ, crypt_path_t *layer_hint)
return true;
}
-/** An incoming relay cell has arrived on circuit <b>circ</b>. If
- * <b>conn</b> is NULL this is a control cell, else <b>cell</b> is
- * destined for <b>conn</b>.
+/** Process a SENDME cell that arrived on <b>circ</b>. If it is a stream level
+ * cell, it is destined for the given <b>conn</b>. If it is a circuit level
+ * cell, it is destined for the <b>layer_hint</b>. The <b>domain</b> is the
+ * logging domain that should be used.
*
- * If <b>layer_hint</b> is defined, then we're the origin of the
- * circuit, and it specifies the hop that packaged <b>cell</b>.
- *
- * Return -reason if you want to warn and tear down the circuit, else 0.
- */
-STATIC int
-connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
- edge_connection_t *conn,
- crypt_path_t *layer_hint)
+ * Return 0 if everything went well or a negative value representing a circuit
+ * end reason on error for which the caller is responsible for closing it. */
+static int
+process_sendme_cell(const relay_header_t *rh, const cell_t *cell,
+ circuit_t *circ, edge_connection_t *conn,
+ crypt_path_t *layer_hint, int domain)
{
- static int num_seen=0;
- relay_header_t rh;
- unsigned domain = layer_hint?LD_APP:LD_EXIT;
- int reason;
- int optimistic_data = 0; /* Set to 1 if we receive data on a stream
- * that's in the EXIT_CONN_STATE_RESOLVING
- * or EXIT_CONN_STATE_CONNECTING states. */
-
- tor_assert(cell);
- tor_assert(circ);
+ int ret;
- relay_header_unpack(&rh, cell->payload);
-// log_fn(LOG_DEBUG,"command %d stream %d", rh.command, rh.stream_id);
- num_seen++;
- log_debug(domain, "Now seen %d relay cells here (command %d, stream %d).",
- num_seen, rh.command, rh.stream_id);
+ tor_assert(rh);
- if (rh.length > RELAY_PAYLOAD_SIZE) {
- log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
- "Relay cell length field too long. Closing circuit.");
- return - END_CIRC_REASON_TORPROTOCOL;
+ if (!rh->stream_id) {
+ /* Circuit level SENDME cell. */
+ ret = sendme_process_circuit_level(layer_hint, circ,
+ cell->payload + RELAY_HEADER_SIZE,
+ rh->length);
+ if (ret < 0) {
+ return ret;
+ }
+ /* Resume reading on any streams now that we've processed a valid
+ * SENDME cell that updated our package window. */
+ circuit_resume_edge_reading(circ, layer_hint);
+ /* We are done, the rest of the code is for the stream level. */
+ return 0;
}
- if (rh.stream_id == 0) {
- switch (rh.command) {
- case RELAY_COMMAND_BEGIN:
- case RELAY_COMMAND_CONNECTED:
- case RELAY_COMMAND_END:
- case RELAY_COMMAND_RESOLVE:
- case RELAY_COMMAND_RESOLVED:
- case RELAY_COMMAND_BEGIN_DIR:
- log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay command %d with zero "
- "stream_id. Dropping.", (int)rh.command);
- return 0;
- default:
- ;
+ /* No connection, might be half edge state. We are done if so. */
+ if (!conn) {
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
+ if (connection_half_edge_is_valid_sendme(ocirc->half_streams,
+ rh->stream_id)) {
+ circuit_read_valid_data(ocirc, rh->length);
+ log_info(domain, "Sendme cell on circ %u valid on half-closed "
+ "stream id %d",
+ ocirc->global_identifier, rh->stream_id);
+ }
}
+
+ log_info(domain, "SENDME cell dropped, unknown stream (streamid %d).",
+ rh->stream_id);
+ return 0;
}
- /* either conn is NULL, in which case we've got a control cell, or else
- * conn points to the recognized stream. */
+ /* Stream level SENDME cell. */
+ ret = sendme_process_stream_level(conn, circ, rh->length);
+ if (ret < 0) {
+ /* Means we need to close the circuit with reason ret. */
+ return ret;
+ }
- if (conn && !connection_state_is_open(TO_CONN(conn))) {
- if (conn->base_.type == CONN_TYPE_EXIT &&
- (conn->base_.state == EXIT_CONN_STATE_CONNECTING ||
- conn->base_.state == EXIT_CONN_STATE_RESOLVING) &&
- rh.command == RELAY_COMMAND_DATA) {
- /* Allow DATA cells to be delivered to an exit node in state
- * EXIT_CONN_STATE_CONNECTING or EXIT_CONN_STATE_RESOLVING.
- * This speeds up HTTP, for example. */
- optimistic_data = 1;
- } else if (rh.stream_id == 0 && rh.command == RELAY_COMMAND_DATA) {
- log_warn(LD_BUG, "Somehow I had a connection that matched a "
- "data cell with stream ID 0.");
- } else {
- return connection_edge_process_relay_cell_not_open(
- &rh, cell, circ, conn, layer_hint);
- }
+ /* We have now properly processed a SENDME cell: all windows have been
+ * updated, so read from the edge connection to see whether we can push
+ * data out towards the endpoint (exit or client), since we are now
+ * allowed to deliver more cells. */
+
+ if (circuit_queue_streams_are_blocked(circ)) {
+ /* Still waiting for queue to flush; don't touch conn */
+ return 0;
+ }
+ connection_start_reading(TO_CONN(conn));
+ /* handle whatever might still be on the inbuf */
+ if (connection_edge_package_raw_inbuf(conn, 1, NULL) < 0) {
+ /* (We already sent an end cell if possible) */
+ connection_mark_for_close(TO_CONN(conn));
+ return 0;
}
+ return 0;
+}
- switch (rh.command) {
- case RELAY_COMMAND_DROP:
- rep_hist_padding_count_read(PADDING_TYPE_DROP);
-// log_info(domain,"Got a relay-level padding cell. Dropping.");
- return 0;
+/** A helper for connection_edge_process_relay_cell(): Actually handles the
+ * cell that we received on the connection.
+ *
+ * The arguments are the same as in the parent function
+ * connection_edge_process_relay_cell(), plus the relay header <b>rh</b> as
+ * unpacked by the parent function, and <b>optimistic_data</b> as set by the
+ * parent function.
+ */
+STATIC int
+handle_relay_cell_command(cell_t *cell, circuit_t *circ,
+ edge_connection_t *conn, crypt_path_t *layer_hint,
+ relay_header_t *rh, int optimistic_data)
+{
+ unsigned domain = layer_hint?LD_APP:LD_EXIT;
+ int reason;
+
+ tor_assert(rh);
+
+ /* First pass the cell to the circuit padding subsystem, in case it's a
+ * padding cell or circuit that should be handled there. */
+ if (circpad_check_received_cell(cell, circ, layer_hint, rh) == 0) {
+ log_debug(domain, "Cell handled as circuit padding");
+ return 0;
+ }
+
+ /* Now handle all the other commands */
+ switch (rh->command) {
case RELAY_COMMAND_BEGIN:
case RELAY_COMMAND_BEGIN_DIR:
if (layer_hint &&
@@ -1546,7 +1644,7 @@ connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
"Begin cell for known stream. Dropping.");
return 0;
}
- if (rh.command == RELAY_COMMAND_BEGIN_DIR &&
+ if (rh->command == RELAY_COMMAND_BEGIN_DIR &&
circ->purpose != CIRCUIT_PURPOSE_S_REND_JOINED) {
/* Assign this circuit and its app-ward OR connection a unique ID,
* so that we can measure download times. The local edge and dir
@@ -1559,24 +1657,21 @@ connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
return connection_exit_begin_conn(cell, circ);
case RELAY_COMMAND_DATA:
++stats_n_data_cells_received;
- if (( layer_hint && --layer_hint->deliver_window < 0) ||
- (!layer_hint && --circ->deliver_window < 0)) {
+
+ /* Update our circuit-level deliver window to account for the DATA cell we
+ * just received. If the deliver window goes below 0, we end the circuit
+ * and stream due to a protocol failure. */
+ if (sendme_circuit_data_received(circ, layer_hint) < 0) {
log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
"(relay data) circ deliver_window below 0. Killing.");
- if (conn) {
- /* XXXX Do we actually need to do this? Will killing the circuit
- * not send an END and mark the stream for close as appropriate? */
- connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
- connection_mark_for_close(TO_CONN(conn));
- }
+ connection_edge_end_close(conn, END_STREAM_REASON_TORPROTOCOL);
return -END_CIRC_REASON_TORPROTOCOL;
}
- log_debug(domain,"circ deliver_window now %d.", layer_hint ?
- layer_hint->deliver_window : circ->deliver_window);
- circuit_consider_sending_sendme(circ, layer_hint);
+ /* Consider sending a circuit-level SENDME cell. */
+ sendme_circuit_consider_sending(circ, layer_hint);
- if (rh.stream_id == 0) {
+ if (rh->stream_id == 0) {
log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay data cell with zero "
"stream_id. Dropping.");
return 0;
@@ -1584,32 +1679,37 @@ connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
if (CIRCUIT_IS_ORIGIN(circ)) {
origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
if (connection_half_edge_is_valid_data(ocirc->half_streams,
- rh.stream_id)) {
- circuit_read_valid_data(ocirc, rh.length);
+ rh->stream_id)) {
+ circuit_read_valid_data(ocirc, rh->length);
log_info(domain,
"data cell on circ %u valid on half-closed "
- "stream id %d", ocirc->global_identifier, rh.stream_id);
+ "stream id %d", ocirc->global_identifier, rh->stream_id);
}
}
log_info(domain,"data cell dropped, unknown stream (streamid %d).",
- rh.stream_id);
+ rh->stream_id);
return 0;
}
- if (--conn->deliver_window < 0) { /* is it below 0 after decrement? */
+ /* Update our stream-level deliver window to account for the DATA cell we
+ * just received. Going below 0 means we have a protocol-level error, so
+ * the stream and circuit are closed. */
+
+ if (sendme_stream_data_received(conn) < 0) {
log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
"(relay data) conn deliver_window below 0. Killing.");
+ connection_edge_end_close(conn, END_STREAM_REASON_TORPROTOCOL);
return -END_CIRC_REASON_TORPROTOCOL;
}
/* Total all valid application bytes delivered */
- if (CIRCUIT_IS_ORIGIN(circ) && rh.length > 0) {
- circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), rh.length);
+ if (CIRCUIT_IS_ORIGIN(circ) && rh->length > 0) {
+ circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), rh->length);
}
- stats_n_data_bytes_received += rh.length;
+ stats_n_data_bytes_received += rh->length;
connection_buf_add((char*)(cell->payload + RELAY_HEADER_SIZE),
- rh.length, TO_CONN(conn));
+ rh->length, TO_CONN(conn));
#ifdef MEASUREMENTS_21206
/* Count number of RELAY_DATA cells received on a linked directory
@@ -1625,26 +1725,26 @@ connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
/* Only send a SENDME if we're not getting optimistic data; otherwise
* a SENDME could arrive before the CONNECTED.
*/
- connection_edge_consider_sending_sendme(conn);
+ sendme_connection_edge_consider_sending(conn);
}
return 0;
case RELAY_COMMAND_END:
- reason = rh.length > 0 ?
+ reason = rh->length > 0 ?
get_uint8(cell->payload+RELAY_HEADER_SIZE) : END_STREAM_REASON_MISC;
if (!conn) {
if (CIRCUIT_IS_ORIGIN(circ)) {
origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
if (relay_crypt_from_last_hop(ocirc, layer_hint) &&
connection_half_edge_is_valid_end(ocirc->half_streams,
- rh.stream_id)) {
+ rh->stream_id)) {
- circuit_read_valid_data(ocirc, rh.length);
+ circuit_read_valid_data(ocirc, rh->length);
log_info(domain,
"end cell (%s) on circ %u valid on half-closed "
"stream id %d",
stream_end_reason_to_string(reason),
- ocirc->global_identifier, rh.stream_id);
+ ocirc->global_identifier, rh->stream_id);
return 0;
}
}
@@ -1676,7 +1776,7 @@ connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
/* Total all valid application bytes delivered */
if (CIRCUIT_IS_ORIGIN(circ)) {
- circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), rh.length);
+ circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), rh->length);
}
}
return 0;
@@ -1684,7 +1784,7 @@ connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
case RELAY_COMMAND_EXTEND2: {
static uint64_t total_n_extend=0, total_nonearly=0;
total_n_extend++;
- if (rh.stream_id) {
+ if (rh->stream_id) {
log_fn(LOG_PROTOCOL_WARN, domain,
"'extend' cell received for non-zero stream. Dropping.");
return 0;
@@ -1725,9 +1825,9 @@ connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
log_debug(domain,"Got an extended cell! Yay.");
{
extended_cell_t extended_cell;
- if (extended_cell_parse(&extended_cell, rh.command,
+ if (extended_cell_parse(&extended_cell, rh->command,
(const uint8_t*)cell->payload+RELAY_HEADER_SIZE,
- rh.length)<0) {
+ rh->length)<0) {
log_warn(LD_PROTOCOL,
"Can't parse EXTENDED cell; killing circuit.");
return -END_CIRC_REASON_TORPROTOCOL;
@@ -1745,7 +1845,7 @@ connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
}
/* Total all valid bytes delivered. */
if (CIRCUIT_IS_ORIGIN(circ)) {
- circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), rh.length);
+ circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), rh->length);
}
return 0;
case RELAY_COMMAND_TRUNCATE:
@@ -1789,7 +1889,7 @@ connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
* circuit is being torn down anyway, though. */
if (CIRCUIT_IS_ORIGIN(circ)) {
circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ),
- rh.length);
+ rh->length);
}
circuit_truncated(TO_ORIGIN_CIRCUIT(circ),
get_uint8(cell->payload + RELAY_HEADER_SIZE));
@@ -1804,11 +1904,11 @@ connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
if (CIRCUIT_IS_ORIGIN(circ)) {
origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
if (connection_half_edge_is_valid_connected(ocirc->half_streams,
- rh.stream_id)) {
- circuit_read_valid_data(ocirc, rh.length);
+ rh->stream_id)) {
+ circuit_read_valid_data(ocirc, rh->length);
log_info(domain,
"connected cell on circ %u valid on half-closed "
- "stream id %d", ocirc->global_identifier, rh.stream_id);
+ "stream id %d", ocirc->global_identifier, rh->stream_id);
return 0;
}
}
@@ -1816,102 +1916,10 @@ connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
log_info(domain,
"'connected' received on circid %u for streamid %d, "
"no conn attached anymore. Ignoring.",
- (unsigned)circ->n_circ_id, rh.stream_id);
+ (unsigned)circ->n_circ_id, rh->stream_id);
return 0;
case RELAY_COMMAND_SENDME:
- if (!rh.stream_id) {
- if (layer_hint) {
- if (layer_hint->package_window + CIRCWINDOW_INCREMENT >
- CIRCWINDOW_START_MAX) {
- static struct ratelim_t exit_warn_ratelim = RATELIM_INIT(600);
- log_fn_ratelim(&exit_warn_ratelim, LOG_WARN, LD_PROTOCOL,
- "Unexpected sendme cell from exit relay. "
- "Closing circ.");
- return -END_CIRC_REASON_TORPROTOCOL;
- }
- layer_hint->package_window += CIRCWINDOW_INCREMENT;
- log_debug(LD_APP,"circ-level sendme at origin, packagewindow %d.",
- layer_hint->package_window);
- circuit_resume_edge_reading(circ, layer_hint);
-
- /* We count circuit-level sendme's as valid delivered data because
- * they are rate limited.
- */
- if (CIRCUIT_IS_ORIGIN(circ)) {
- circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ),
- rh.length);
- }
-
- } else {
- if (circ->package_window + CIRCWINDOW_INCREMENT >
- CIRCWINDOW_START_MAX) {
- static struct ratelim_t client_warn_ratelim = RATELIM_INIT(600);
- log_fn_ratelim(&client_warn_ratelim,LOG_PROTOCOL_WARN, LD_PROTOCOL,
- "Unexpected sendme cell from client. "
- "Closing circ (window %d).",
- circ->package_window);
- return -END_CIRC_REASON_TORPROTOCOL;
- }
- circ->package_window += CIRCWINDOW_INCREMENT;
- log_debug(LD_APP,
- "circ-level sendme at non-origin, packagewindow %d.",
- circ->package_window);
- circuit_resume_edge_reading(circ, layer_hint);
- }
- return 0;
- }
- if (!conn) {
- if (CIRCUIT_IS_ORIGIN(circ)) {
- origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
- if (connection_half_edge_is_valid_sendme(ocirc->half_streams,
- rh.stream_id)) {
- circuit_read_valid_data(ocirc, rh.length);
- log_info(domain,
- "sendme cell on circ %u valid on half-closed "
- "stream id %d", ocirc->global_identifier, rh.stream_id);
- }
- }
-
- log_info(domain,"sendme cell dropped, unknown stream (streamid %d).",
- rh.stream_id);
- return 0;
- }
-
- /* Don't allow the other endpoint to request more than our maximum
- * (i.e. initial) stream SENDME window worth of data. Well-behaved
- * stock clients will not request more than this max (as per the check
- * in the while loop of connection_edge_consider_sending_sendme()).
- */
- if (conn->package_window + STREAMWINDOW_INCREMENT >
- STREAMWINDOW_START_MAX) {
- static struct ratelim_t stream_warn_ratelim = RATELIM_INIT(600);
- log_fn_ratelim(&stream_warn_ratelim, LOG_PROTOCOL_WARN, LD_PROTOCOL,
- "Unexpected stream sendme cell. Closing circ (window %d).",
- conn->package_window);
- return -END_CIRC_REASON_TORPROTOCOL;
- }
-
- /* At this point, the stream sendme is valid */
- if (CIRCUIT_IS_ORIGIN(circ)) {
- circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ),
- rh.length);
- }
-
- conn->package_window += STREAMWINDOW_INCREMENT;
- log_debug(domain,"stream-level sendme, packagewindow now %d.",
- conn->package_window);
- if (circuit_queue_streams_are_blocked(circ)) {
- /* Still waiting for queue to flush; don't touch conn */
- return 0;
- }
- connection_start_reading(TO_CONN(conn));
- /* handle whatever might still be on the inbuf */
- if (connection_edge_package_raw_inbuf(conn, 1, NULL) < 0) {
- /* (We already sent an end cell if possible) */
- connection_mark_for_close(TO_CONN(conn));
- return 0;
- }
- return 0;
+ return process_sendme_cell(rh, cell, circ, conn, layer_hint, domain);
case RELAY_COMMAND_RESOLVE:
if (layer_hint) {
log_fn(LOG_PROTOCOL_WARN, LD_APP,
@@ -1940,11 +1948,11 @@ connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
origin_circuit_t *ocirc = TO_ORIGIN_CIRCUIT(circ);
if (relay_crypt_from_last_hop(ocirc, layer_hint) &&
connection_half_edge_is_valid_resolved(ocirc->half_streams,
- rh.stream_id)) {
- circuit_read_valid_data(ocirc, rh.length);
+ rh->stream_id)) {
+ circuit_read_valid_data(ocirc, rh->length);
log_info(domain,
"resolved cell on circ %u valid on half-closed "
- "stream id %d", ocirc->global_identifier, rh.stream_id);
+ "stream id %d", ocirc->global_identifier, rh->stream_id);
return 0;
}
}
@@ -1962,17 +1970,96 @@ connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
case RELAY_COMMAND_INTRO_ESTABLISHED:
case RELAY_COMMAND_RENDEZVOUS_ESTABLISHED:
rend_process_relay_cell(circ, layer_hint,
- rh.command, rh.length,
+ rh->command, rh->length,
cell->payload+RELAY_HEADER_SIZE);
return 0;
}
log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
"Received unknown relay command %d. Perhaps the other side is using "
"a newer version of Tor? Dropping.",
- rh.command);
+ rh->command);
return 0; /* for forward compatibility, don't kill the circuit */
}
+/** An incoming relay cell has arrived on circuit <b>circ</b>. If
+ * <b>conn</b> is NULL this is a control cell, else <b>cell</b> is
+ * destined for <b>conn</b>.
+ *
+ * If <b>layer_hint</b> is defined, then we're the origin of the
+ * circuit, and it specifies the hop that packaged <b>cell</b>.
+ *
+ * Return -reason if you want to warn and tear down the circuit, else 0.
+ */
+STATIC int
+connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
+ edge_connection_t *conn,
+ crypt_path_t *layer_hint)
+{
+ static int num_seen=0;
+ relay_header_t rh;
+ unsigned domain = layer_hint?LD_APP:LD_EXIT;
+ int optimistic_data = 0; /* Set to 1 if we receive data on a stream
+ * that's in the EXIT_CONN_STATE_RESOLVING
+ * or EXIT_CONN_STATE_CONNECTING states. */
+
+ tor_assert(cell);
+ tor_assert(circ);
+
+ relay_header_unpack(&rh, cell->payload);
+// log_fn(LOG_DEBUG,"command %d stream %d", rh.command, rh.stream_id);
+ num_seen++;
+ log_debug(domain, "Now seen %d relay cells here (command %d, stream %d).",
+ num_seen, rh.command, rh.stream_id);
+
+ if (rh.length > RELAY_PAYLOAD_SIZE) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Relay cell length field too long. Closing circuit.");
+ return - END_CIRC_REASON_TORPROTOCOL;
+ }
+
+ if (rh.stream_id == 0) {
+ switch (rh.command) {
+ case RELAY_COMMAND_BEGIN:
+ case RELAY_COMMAND_CONNECTED:
+ case RELAY_COMMAND_END:
+ case RELAY_COMMAND_RESOLVE:
+ case RELAY_COMMAND_RESOLVED:
+ case RELAY_COMMAND_BEGIN_DIR:
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay command %d with zero "
+ "stream_id. Dropping.", (int)rh.command);
+ return 0;
+ default:
+ ;
+ }
+ }
+
+ /* Tell circpad that we've received a recognized cell */
+ circpad_deliver_recognized_relay_cell_events(circ, rh.command, layer_hint);
+
+ /* either conn is NULL, in which case we've got a control cell, or else
+ * conn points to the recognized stream. */
+ if (conn && !connection_state_is_open(TO_CONN(conn))) {
+ if (conn->base_.type == CONN_TYPE_EXIT &&
+ (conn->base_.state == EXIT_CONN_STATE_CONNECTING ||
+ conn->base_.state == EXIT_CONN_STATE_RESOLVING) &&
+ rh.command == RELAY_COMMAND_DATA) {
+ /* Allow DATA cells to be delivered to an exit node in state
+ * EXIT_CONN_STATE_CONNECTING or EXIT_CONN_STATE_RESOLVING.
+ * This speeds up HTTP, for example. */
+ optimistic_data = 1;
+ } else if (rh.stream_id == 0 && rh.command == RELAY_COMMAND_DATA) {
+ log_warn(LD_BUG, "Somehow I had a connection that matched a "
+ "data cell with stream ID 0.");
+ } else {
+ return connection_edge_process_relay_cell_not_open(
+ &rh, cell, circ, conn, layer_hint);
+ }
+ }
+
+ return handle_relay_cell_command(cell, circ, conn, layer_hint,
+ &rh, optimistic_data);
+}
+
/** How many relay_data cells have we built, ever? */
uint64_t stats_n_data_cells_packaged = 0;
/** How many bytes of data have we put in relay_data cells have we built,
@@ -1986,6 +2073,84 @@ uint64_t stats_n_data_cells_received = 0;
* ever received were completely full of data. */
uint64_t stats_n_data_bytes_received = 0;
+/**
+ * Called when initializing a circuit, or when we have reached the end of the
+ * window in which we need to send some randomness so that incoming sendme
+ * cells will be unpredictable. Resets the flags and picks a new window.
+ */
+void
+circuit_reset_sendme_randomness(circuit_t *circ)
+{
+ circ->have_sent_sufficiently_random_cell = 0;
+ circ->send_randomness_after_n_cells = CIRCWINDOW_INCREMENT / 2 +
+ crypto_fast_rng_get_uint(get_thread_fast_rng(), CIRCWINDOW_INCREMENT / 2);
+}
+
+/**
+ * Any relay data payload containing fewer than this many real bytes is
+ * considered to have enough randomness in its padding to make a future
+ * SENDME cell unpredictable.
+ **/
+#define RELAY_PAYLOAD_LENGTH_FOR_RANDOM_SENDMES \
+ (RELAY_PAYLOAD_SIZE - CELL_PADDING_GAP - 16)
+
+/**
+ * Helper. Return the number of bytes that should be put into a cell from a
+ * given edge connection on which <b>n_available</b> bytes are available.
+ */
+STATIC size_t
+connection_edge_get_inbuf_bytes_to_package(size_t n_available,
+ int package_partial,
+ circuit_t *on_circuit)
+{
+ if (!n_available)
+ return 0;
+
+ /* Do we need to force this payload to have space for randomness? */
+ const bool force_random_bytes =
+ (on_circuit->send_randomness_after_n_cells == 0) &&
+ (! on_circuit->have_sent_sufficiently_random_cell);
+
+ /* At most how much would we like to send in this cell? */
+ size_t target_length;
+ if (force_random_bytes) {
+ target_length = RELAY_PAYLOAD_LENGTH_FOR_RANDOM_SENDMES;
+ } else {
+ target_length = RELAY_PAYLOAD_SIZE;
+ }
+
+ /* Decide how many bytes we will actually put into this cell. */
+ size_t package_length;
+ if (n_available >= target_length) { /* A full payload is available. */
+ package_length = target_length;
+ } else { /* not a full payload available */
+ if (package_partial)
+ package_length = n_available; /* just take whatever's available now */
+ else
+ return 0; /* nothing to do until we have a full payload */
+ }
+
+ /* If we reach this point, we will definitely be sending the cell. */
+ tor_assert_nonfatal(package_length > 0);
+
+ if (package_length <= RELAY_PAYLOAD_LENGTH_FOR_RANDOM_SENDMES) {
+ /* This cell will have enough randomness in the padding to make a future
+ * sendme cell unpredictable. */
+ on_circuit->have_sent_sufficiently_random_cell = 1;
+ }
+
+ if (on_circuit->send_randomness_after_n_cells == 0) {
+ /* Either this cell, or some previous cell, had enough padding to
+ * ensure sendme unpredictability. */
+ tor_assert_nonfatal(on_circuit->have_sent_sufficiently_random_cell);
+ /* Pick a new interval in which we need to send randomness. */
+ circuit_reset_sendme_randomness(on_circuit);
+ }
+
+ --on_circuit->send_randomness_after_n_cells;
+
+ return package_length;
+}
+
/** If <b>conn</b> has an entire relay payload of bytes on its inbuf (or
* <b>package_partial</b> is true), and the appropriate package windows aren't
* empty, grab a cell and send it down the circuit.
@@ -2058,17 +2223,14 @@ connection_edge_package_raw_inbuf(edge_connection_t *conn, int package_partial,
bytes_to_process = connection_get_inbuf_len(TO_CONN(conn));
}
- if (!bytes_to_process)
+ length = connection_edge_get_inbuf_bytes_to_package(bytes_to_process,
+ package_partial, circ);
+ if (!length)
return 0;
- if (!package_partial && bytes_to_process < RELAY_PAYLOAD_SIZE)
- return 0;
+ /* If we reach this point, we will definitely be packaging bytes into
+ * a cell. */
- if (bytes_to_process > RELAY_PAYLOAD_SIZE) {
- length = RELAY_PAYLOAD_SIZE;
- } else {
- length = bytes_to_process;
- }
stats_n_data_bytes_packaged += length;
stats_n_data_cells_packaged += 1;
@@ -2103,15 +2265,17 @@ connection_edge_package_raw_inbuf(edge_connection_t *conn, int package_partial,
return 0;
}
- if (!cpath_layer) { /* non-rendezvous exit */
- tor_assert(circ->package_window > 0);
- circ->package_window--;
- } else { /* we're an AP, or an exit on a rendezvous circ */
- tor_assert(cpath_layer->package_window > 0);
- cpath_layer->package_window--;
+ /* Handle the circuit-level SENDME package window. */
+ if (sendme_note_circuit_data_packaged(circ, cpath_layer) < 0) {
+ /* Package window has gone under 0. Protocol issue. */
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Circuit package window is below 0. Closing circuit.");
+ conn->end_reason = END_STREAM_REASON_TORPROTOCOL;
+ return -1;
}
- if (--conn->package_window <= 0) { /* is it 0 after decrement? */
+ /* Handle the stream-level SENDME package window. */
+ if (sendme_note_stream_data_packaged(conn) < 0) {
connection_stop_reading(TO_CONN(conn));
log_debug(domain,"conn->package_window reached 0.");
circuit_consider_stop_edge_reading(circ, cpath_layer);
@@ -2129,42 +2293,6 @@ connection_edge_package_raw_inbuf(edge_connection_t *conn, int package_partial,
goto repeat_connection_edge_package_raw_inbuf;
}
-/** Called when we've just received a relay data cell, when
- * we've just finished flushing all bytes to stream <b>conn</b>,
- * or when we've flushed *some* bytes to the stream <b>conn</b>.
- *
- * If conn->outbuf is not too full, and our deliver window is
- * low, send back a suitable number of stream-level sendme cells.
- */
-void
-connection_edge_consider_sending_sendme(edge_connection_t *conn)
-{
- circuit_t *circ;
-
- if (connection_outbuf_too_full(TO_CONN(conn)))
- return;
-
- circ = circuit_get_by_edge_conn(conn);
- if (!circ) {
- /* this can legitimately happen if the destroy has already
- * arrived and torn down the circuit */
- log_info(LD_APP,"No circuit associated with conn. Skipping.");
- return;
- }
-
- while (conn->deliver_window <= STREAMWINDOW_START - STREAMWINDOW_INCREMENT) {
- log_debug(conn->base_.type == CONN_TYPE_AP ?LD_APP:LD_EXIT,
- "Outbuf %d, Queuing stream sendme.",
- (int)conn->base_.outbuf_flushlen);
- conn->deliver_window += STREAMWINDOW_INCREMENT;
- if (connection_edge_send_command(conn, RELAY_COMMAND_SENDME,
- NULL, 0) < 0) {
- log_warn(LD_APP,"connection_edge_send_command failed. Skipping.");
- return; /* the circuit's closed, don't continue */
- }
- }
-}
-
/** The circuit <b>circ</b> has received a circuit-level sendme
* (on hop <b>layer_hint</b>, if we're the OP). Go through all the
* attached streams and let them resume reading and packaging, if
@@ -2187,12 +2315,6 @@ circuit_resume_edge_reading(circuit_t *circ, crypt_path_t *layer_hint)
circ, layer_hint);
}
-void
-stream_choice_seed_weak_rng(void)
-{
- crypto_seed_weak_rng(&stream_choice_rng);
-}
-
/** A helper function for circuit_resume_edge_reading() above.
* The arguments are the same, except that <b>conn</b> is the head
* of a linked list of edge streams that should each be considered.
@@ -2244,7 +2366,8 @@ circuit_resume_edge_reading_helper(edge_connection_t *first_conn,
int num_streams = 0;
for (conn = first_conn; conn; conn = conn->next_stream) {
num_streams++;
- if (tor_weak_random_one_in_n(&stream_choice_rng, num_streams)) {
+
+ if (crypto_fast_rng_one_in_n(get_thread_fast_rng(), num_streams)) {
chosen_stream = conn;
}
/* Invariant: chosen_stream has been chosen uniformly at random from
@@ -2386,33 +2509,6 @@ circuit_consider_stop_edge_reading(circuit_t *circ, crypt_path_t *layer_hint)
return 0;
}
-/** Check if the deliver_window for circuit <b>circ</b> (at hop
- * <b>layer_hint</b> if it's defined) is low enough that we should
- * send a circuit-level sendme back down the circuit. If so, send
- * enough sendmes that the window would be overfull if we sent any
- * more.
- */
-static void
-circuit_consider_sending_sendme(circuit_t *circ, crypt_path_t *layer_hint)
-{
-// log_fn(LOG_INFO,"Considering: layer_hint is %s",
-// layer_hint ? "defined" : "null");
- while ((layer_hint ? layer_hint->deliver_window : circ->deliver_window) <=
- CIRCWINDOW_START - CIRCWINDOW_INCREMENT) {
- log_debug(LD_CIRC,"Queuing circuit sendme.");
- if (layer_hint)
- layer_hint->deliver_window += CIRCWINDOW_INCREMENT;
- else
- circ->deliver_window += CIRCWINDOW_INCREMENT;
- if (relay_send_command_from_edge(0, circ, RELAY_COMMAND_SENDME,
- NULL, 0, layer_hint) < 0) {
- log_warn(LD_CIRC,
- "relay_send_command_from_edge failed. Circuit's closed.");
- return; /* the circuit's closed, don't continue */
- }
- }
-}
-
/** The total number of cells we have allocated. */
static size_t total_cells_allocated = 0;
@@ -2787,7 +2883,7 @@ set_streams_blocked_on_circ(circuit_t *circ, channel_t *chan,
}
/** Extract the command from a packed cell. */
-static uint8_t
+uint8_t
packed_cell_get_command(const packed_cell_t *cell, int wide_circ_ids)
{
if (wide_circ_ids) {
@@ -3142,7 +3238,7 @@ decode_address_from_payload(tor_addr_t *addr_out, const uint8_t *payload,
case RESOLVED_TYPE_IPV6:
if (payload[1] != 16)
return NULL;
- tor_addr_from_ipv6_bytes(addr_out, (char*)(payload+2));
+ tor_addr_from_ipv6_bytes(addr_out, (payload+2));
break;
default:
tor_addr_make_unspec(addr_out);
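
Note on the packaging change above: every DATA cell's length now goes through connection_edge_get_inbuf_bytes_to_package(), which, once per randomly chosen window, caps the payload so that the cell's random padding makes the next authenticated SENDME unpredictable. The following is a self-contained sketch of that rule using stand-in names and constants (not Tor's own types), with package_partial assumed true; it is illustrative only.

#include <stddef.h>
#include <stdlib.h>

#define DEMO_PAYLOAD_MAX 498        /* stands in for RELAY_PAYLOAD_SIZE */
#define DEMO_PAYLOAD_MAX_RANDOM 478 /* stands in for the _FOR_RANDOM_SENDMES cap */
#define DEMO_WINDOW_INCREMENT 100   /* stands in for CIRCWINDOW_INCREMENT */

struct demo_circ {
  int have_sent_sufficiently_random_cell;
  int send_randomness_after_n_cells;
};

static void
demo_reset_randomness(struct demo_circ *c)
{
  c->have_sent_sufficiently_random_cell = 0;
  c->send_randomness_after_n_cells =
    DEMO_WINDOW_INCREMENT / 2 + rand() % (DEMO_WINDOW_INCREMENT / 2);
}

/* Mirrors connection_edge_get_inbuf_bytes_to_package(), simplified. */
static size_t
demo_bytes_to_package(struct demo_circ *c, size_t n_available)
{
  if (!n_available)
    return 0;
  /* Force a short payload when the window has expired and no sufficiently
   * short (i.e. padded) cell has been sent in it yet. */
  int force_random = c->send_randomness_after_n_cells == 0 &&
                     !c->have_sent_sufficiently_random_cell;
  size_t target = force_random ? DEMO_PAYLOAD_MAX_RANDOM : DEMO_PAYLOAD_MAX;
  size_t len = n_available < target ? n_available : target;
  if (len <= DEMO_PAYLOAD_MAX_RANDOM)
    c->have_sent_sufficiently_random_cell = 1;  /* padding carries randomness */
  if (c->send_randomness_after_n_cells == 0)
    demo_reset_randomness(c);   /* pick the next random window */
  c->send_randomness_after_n_cells--;
  return len;
}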
diff --git a/src/core/or/relay.h b/src/core/or/relay.h
index 7cc3c43e43..31bed0e01b 100644
--- a/src/core/or/relay.h
+++ b/src/core/or/relay.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -42,6 +42,7 @@ int connection_edge_package_raw_inbuf(edge_connection_t *conn,
int package_partial,
int *max_cells);
void connection_edge_consider_sending_sendme(edge_connection_t *conn);
+void circuit_reset_sendme_randomness(circuit_t *circ);
extern uint64_t stats_n_data_cells_packaged;
extern uint64_t stats_n_data_bytes_packaged;
@@ -78,6 +79,11 @@ void destroy_cell_queue_append(destroy_cell_queue_t *queue,
void channel_unlink_all_circuits(channel_t *chan, smartlist_t *detached_out);
MOCK_DECL(int, channel_flush_from_first_active_circuit,
(channel_t *chan, int max));
+MOCK_DECL(int, circuit_package_relay_cell, (cell_t *cell, circuit_t *circ,
+ cell_direction_t cell_direction,
+ crypt_path_t *layer_hint, streamid_t on_stream,
+ const char *filename, int lineno));
+
void update_circuit_on_cmux_(circuit_t *circ, cell_direction_t direction,
const char *file, int lineno);
#define update_circuit_on_cmux(circ, direction) \
@@ -89,15 +95,19 @@ const uint8_t *decode_address_from_payload(tor_addr_t *addr_out,
int payload_len);
void circuit_clear_cell_queue(circuit_t *circ, channel_t *chan);
-void stream_choice_seed_weak_rng(void);
-
circid_t packed_cell_get_circid(const packed_cell_t *cell, int wide_circ_ids);
+uint8_t packed_cell_get_command(const packed_cell_t *cell, int wide_circ_ids);
#ifdef RELAY_PRIVATE
+STATIC int
+handle_relay_cell_command(cell_t *cell, circuit_t *circ,
+ edge_connection_t *conn, crypt_path_t *layer_hint,
+ relay_header_t *rh, int optimistic_data);
+
STATIC int connected_cell_parse(const relay_header_t *rh, const cell_t *cell,
tor_addr_t *addr_out, int *ttl_out);
/** An address-and-ttl tuple as yielded by resolved_cell_parse */
-typedef struct address_ttl_s {
+typedef struct address_ttl_t {
tor_addr_t addr;
char *hostname;
int ttl;
@@ -117,8 +127,11 @@ STATIC int cell_queues_check_size(void);
STATIC int connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
edge_connection_t *conn,
crypt_path_t *layer_hint);
+STATIC size_t get_pad_cell_offset(size_t payload_len);
+STATIC size_t connection_edge_get_inbuf_bytes_to_package(size_t n_available,
+ int package_partial,
+ circuit_t *on_circuit);
#endif /* defined(RELAY_PRIVATE) */
#endif /* !defined(TOR_RELAY_H) */
-
diff --git a/src/core/or/relay_crypto_st.h b/src/core/or/relay_crypto_st.h
index dafce257c7..d92f486a90 100644
--- a/src/core/or/relay_crypto_st.h
+++ b/src/core/or/relay_crypto_st.h
@@ -1,13 +1,18 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file relay_crypto_st.h
+ * @brief Relay-cell encryption state structure.
+ **/
+
#ifndef RELAY_CRYPTO_ST_H
#define RELAY_CRYPTO_ST_H
-#define crypto_cipher_t aes_cnt_cipher
+#define crypto_cipher_t aes_cnt_cipher_t
struct crypto_cipher_t;
struct crypto_digest_t;
@@ -25,7 +30,9 @@ struct relay_crypto_t {
/** Digest state for cells heading away from the OR at this step. */
struct crypto_digest_t *b_digest;
+ /** Digest used for the next SENDME cell if any. */
+ uint8_t sendme_digest[DIGEST_LEN];
};
#undef crypto_cipher_t
-#endif
+#endif /* !defined(RELAY_CRYPTO_ST_H) */
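
The new sendme_digest field caches a snapshot of the running relay digest; sendme.c (later in this patch) reads it back through relay_crypto_get_sendme_digest(). The helper below only illustrates that snapshotting, under the assumption that relay_crypto_t also carries its usual f_digest member; the real implementation lives in relay_crypto.c, whose hunk is not shown here.

/* Illustrative only: copy the digest of the data seen so far into
 * sendme_digest without finalizing the running digest object. */
#include "core/or/relay_crypto_st.h"
#include "lib/crypt_ops/crypto_digest.h"

static void
demo_record_sendme_digest(struct relay_crypto_t *crypto, int record_forward)
{
  struct crypto_digest_t *d = record_forward ? crypto->f_digest
                                             : crypto->b_digest;
  /* crypto_digest_get_digest() works on an internal copy, so the running
   * digest state itself is left untouched. */
  crypto_digest_get_digest(d, (char *)crypto->sendme_digest, DIGEST_LEN);
}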
diff --git a/src/core/or/scheduler.c b/src/core/or/scheduler.c
index 9f1a27d501..072d78128b 100644
--- a/src/core/or/scheduler.c
+++ b/src/core/or/scheduler.c
@@ -1,18 +1,17 @@
-/* Copyright (c) 2013-2019, The Tor Project, Inc. */
+/* Copyright (c) 2013-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
#include "core/or/or.h"
#include "app/config/config.h"
#include "lib/evloop/compat_libevent.h"
-#define SCHEDULER_PRIVATE_
+#define SCHEDULER_PRIVATE
#define SCHEDULER_KIST_PRIVATE
#include "core/or/scheduler.h"
#include "core/mainloop/mainloop.h"
-#include "lib/container/buffers.h"
-#define TOR_CHANNEL_INTERNAL_
+#include "lib/buf/buffers.h"
+#define CHANNEL_OBJECT_PRIVATE
#include "core/or/channeltls.h"
-#include "lib/evloop/compat_libevent.h"
#include "core/or/or_connection_st.h"
@@ -43,7 +42,7 @@
* circuit scheduler. It was supposed to prioritize circuits across many
* channels, but wasn't effective. It is preserved in scheduler_vanilla.c.
*
- * [0]: http://www.robgjansen.com/publications/kist-sec2014.pdf
+ * [0]: https://www.robgjansen.com/publications/kist-sec2014.pdf
*
* Then we actually got around to implementing KIST for real. We decided to
* modularize the scheduler so new ones can be implemented. You can find KIST
@@ -267,7 +266,7 @@ select_scheduler(void)
log_notice(LD_SCHED, "Scheduler type KIST has been disabled by "
"the consensus or no kernel support.");
}
-#else /* !(defined(HAVE_KIST_SUPPORT)) */
+#else /* !defined(HAVE_KIST_SUPPORT) */
log_info(LD_SCHED, "Scheduler type KIST not built in");
#endif /* defined(HAVE_KIST_SUPPORT) */
continue;
@@ -503,7 +502,12 @@ scheduler_free_all(void)
the_scheduler = NULL;
}
-/** Mark a channel as no longer ready to accept writes. */
+/** Mark a channel as no longer ready to accept writes.
+ *
+ * Possible state changes:
+ * - SCHED_CHAN_PENDING -> SCHED_CHAN_WAITING_TO_WRITE
+ * - SCHED_CHAN_WAITING_FOR_CELLS -> SCHED_CHAN_IDLE
+ */
MOCK_IMPL(void,
scheduler_channel_doesnt_want_writes,(channel_t *chan))
{
@@ -514,31 +518,32 @@ scheduler_channel_doesnt_want_writes,(channel_t *chan))
return;
}
- /* If it's already in pending, we can put it in waiting_to_write */
if (chan->scheduler_state == SCHED_CHAN_PENDING) {
/*
- * It's in channels_pending, so it shouldn't be in any of
- * the other lists. It can't write any more, so it goes to
- * channels_waiting_to_write.
+ * It has cells but no longer can write, so it becomes
+ * SCHED_CHAN_WAITING_TO_WRITE. It's in channels_pending, so we
+ * should remove it from the list.
*/
smartlist_pqueue_remove(channels_pending,
scheduler_compare_channels,
offsetof(channel_t, sched_heap_idx),
chan);
scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_TO_WRITE);
- } else {
+ } else if (chan->scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS) {
/*
- * It's not in pending, so it can't become waiting_to_write; it's
- * either not in any of the lists (nothing to do) or it's already in
- * waiting_for_cells (remove it, can't write any more).
+ * It does not have cells and no longer can write, so it becomes
+ * SCHED_CHAN_IDLE.
*/
- if (chan->scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS) {
- scheduler_set_channel_state(chan, SCHED_CHAN_IDLE);
- }
+ scheduler_set_channel_state(chan, SCHED_CHAN_IDLE);
}
}
-/** Mark a channel as having waiting cells. */
+/** Mark a channel as having waiting cells.
+ *
+ * Possible state changes:
+ * - SCHED_CHAN_WAITING_FOR_CELLS -> SCHED_CHAN_PENDING
+ * - SCHED_CHAN_IDLE -> SCHED_CHAN_WAITING_TO_WRITE
+ */
MOCK_IMPL(void,
scheduler_channel_has_waiting_cells,(channel_t *chan))
{
@@ -549,12 +554,11 @@ scheduler_channel_has_waiting_cells,(channel_t *chan))
return;
}
- /* First, check if it's also writeable */
if (chan->scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS) {
/*
- * It's in channels_waiting_for_cells, so it shouldn't be in any of
- * the other lists. It has waiting cells now, so it goes to
- * channels_pending.
+ * It is able to write and now has cells, so it becomes
+ * SCHED_CHAN_PENDING. It must be added to the channels_pending
+ * list.
*/
scheduler_set_channel_state(chan, SCHED_CHAN_PENDING);
if (!SCHED_BUG(chan->sched_heap_idx != -1, chan)) {
@@ -566,16 +570,12 @@ scheduler_channel_has_waiting_cells,(channel_t *chan))
/* If we made a channel pending, we potentially have scheduling work to
* do. */
the_scheduler->schedule();
- } else {
+ } else if (chan->scheduler_state == SCHED_CHAN_IDLE) {
/*
- * It's not in waiting_for_cells, so it can't become pending; it's
- * either not in any of the lists (we add it to waiting_to_write)
- * or it's already in waiting_to_write or pending (we do nothing)
+ * It is not able to write but now has cells, so it becomes
+ * SCHED_CHAN_WAITING_TO_WRITE.
*/
- if (!(chan->scheduler_state == SCHED_CHAN_WAITING_TO_WRITE ||
- chan->scheduler_state == SCHED_CHAN_PENDING)) {
- scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_TO_WRITE);
- }
+ scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_TO_WRITE);
}
}
@@ -663,8 +663,12 @@ scheduler_release_channel,(channel_t *chan))
scheduler_set_channel_state(chan, SCHED_CHAN_IDLE);
}
-/** Mark a channel as ready to accept writes */
-
+/** Mark a channel as ready to accept writes.
+ * Possible state changes:
+ *
+ * - SCHED_CHAN_WAITING_TO_WRITE -> SCHED_CHAN_PENDING
+ * - SCHED_CHAN_IDLE -> SCHED_CHAN_WAITING_FOR_CELLS
+ */
void
scheduler_channel_wants_writes(channel_t *chan)
{
@@ -675,10 +679,11 @@ scheduler_channel_wants_writes(channel_t *chan)
return;
}
- /* If it's already in waiting_to_write, we can put it in pending */
if (chan->scheduler_state == SCHED_CHAN_WAITING_TO_WRITE) {
/*
- * It can write now, so it goes to channels_pending.
+ * It has cells and can now write, so it becomes
+ * SCHED_CHAN_PENDING. It must be added to the channels_pending
+ * list.
*/
scheduler_set_channel_state(chan, SCHED_CHAN_PENDING);
if (!SCHED_BUG(chan->sched_heap_idx != -1, chan)) {
@@ -689,15 +694,12 @@ scheduler_channel_wants_writes(channel_t *chan)
}
/* We just made a channel pending, we have scheduling work to do. */
the_scheduler->schedule();
- } else {
+ } else if (chan->scheduler_state == SCHED_CHAN_IDLE) {
/*
- * It's not in SCHED_CHAN_WAITING_TO_WRITE, so it can't become pending;
- * it's either idle and goes to WAITING_FOR_CELLS, or it's a no-op.
+ * It does not have cells but can now write, so it becomes
+ * SCHED_CHAN_WAITING_FOR_CELLS.
*/
- if (!(chan->scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS ||
- chan->scheduler_state == SCHED_CHAN_PENDING)) {
- scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_FOR_CELLS);
- }
+ scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_FOR_CELLS);
}
}
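
The three scheduler entry points above now document their legal state transitions explicitly. Read together they form a small per-channel state machine; the enum and function below restate it compactly for illustration and are not part of the patch.

/* Simplified restatement of the channel state machine documented in the
 * scheduler.c comments above; names mirror the patch, table is illustrative. */
typedef enum { CH_IDLE, CH_WAITING_FOR_CELLS, CH_WAITING_TO_WRITE,
               CH_PENDING } ch_state_t;
typedef enum { EV_WANTS_WRITES, EV_DOESNT_WANT_WRITES,
               EV_HAS_WAITING_CELLS } ch_event_t;

static ch_state_t
channel_next_state(ch_state_t s, ch_event_t ev)
{
  switch (ev) {
  case EV_WANTS_WRITES:            /* scheduler_channel_wants_writes() */
    if (s == CH_WAITING_TO_WRITE)  return CH_PENDING;
    if (s == CH_IDLE)              return CH_WAITING_FOR_CELLS;
    break;
  case EV_DOESNT_WANT_WRITES:      /* scheduler_channel_doesnt_want_writes() */
    if (s == CH_PENDING)           return CH_WAITING_TO_WRITE;
    if (s == CH_WAITING_FOR_CELLS) return CH_IDLE;
    break;
  case EV_HAS_WAITING_CELLS:       /* scheduler_channel_has_waiting_cells() */
    if (s == CH_WAITING_FOR_CELLS) return CH_PENDING;
    if (s == CH_IDLE)              return CH_WAITING_TO_WRITE;
    break;
  }
  return s; /* all other combinations are no-ops */
}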
diff --git a/src/core/or/scheduler.h b/src/core/or/scheduler.h
index 843be2603c..82df2b0b0f 100644
--- a/src/core/or/scheduler.h
+++ b/src/core/or/scheduler.h
@@ -1,4 +1,4 @@
-/* * Copyright (c) 2017-2019, The Tor Project, Inc. */
+/* * Copyright (c) 2017-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -40,7 +40,7 @@ typedef enum {
* doesn't create any state for itself, thus it has nothing to free when Tor
* is shutting down), then set that function pointer to NULL.
*/
-typedef struct scheduler_s {
+typedef struct scheduler_t {
/* Scheduler type. This is used for logging when the scheduler is switched
* during runtime. */
scheduler_types_t type;
@@ -136,7 +136,9 @@ MOCK_DECL(void, scheduler_channel_has_waiting_cells, (channel_t *chan));
* These functions are only visible to the scheduling system, the current
* scheduler implementation, and tests.
*****************************************************************************/
-#ifdef SCHEDULER_PRIVATE_
+#ifdef SCHEDULER_PRIVATE
+
+#include "ext/ht.h"
/*********************************
* Defined in scheduler.c
@@ -173,8 +175,8 @@ void scheduler_touch_channel(channel_t *chan);
/* Socket table entry which holds information of a channel's socket and kernel
* TCP information. Only used by KIST. */
-typedef struct socket_table_ent_s {
- HT_ENTRY(socket_table_ent_s) node;
+typedef struct socket_table_ent_t {
+ HT_ENTRY(socket_table_ent_t) node;
const channel_t *chan;
/* Amount written this scheduling run */
uint64_t written;
@@ -187,7 +189,7 @@ typedef struct socket_table_ent_s {
uint32_t notsent;
} socket_table_ent_t;
-typedef HT_HEAD(outbuf_table_s, outbuf_table_ent_s) outbuf_table_t;
+typedef HT_HEAD(outbuf_table_s, outbuf_table_ent_t) outbuf_table_t;
MOCK_DECL(int, channel_should_write_to_kernel,
(outbuf_table_t *table, channel_t *chan));
@@ -212,7 +214,6 @@ extern int32_t sched_run_interval;
scheduler_t *get_vanilla_scheduler(void);
-#endif /* defined(SCHEDULER_PRIVATE_) */
+#endif /* defined(SCHEDULER_PRIVATE) */
#endif /* !defined(TOR_SCHEDULER_H) */
-
diff --git a/src/core/or/scheduler_kist.c b/src/core/or/scheduler_kist.c
index 79ecb0bc7e..c73d768f88 100644
--- a/src/core/or/scheduler_kist.c
+++ b/src/core/or/scheduler_kist.c
@@ -1,17 +1,22 @@
-/* Copyright (c) 2017-2019, The Tor Project, Inc. */
+/* Copyright (c) 2017-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file scheduler_kist.c
+ * @brief Implements the KIST cell scheduler.
+ **/
+
#define SCHEDULER_KIST_PRIVATE
#include "core/or/or.h"
-#include "lib/container/buffers.h"
+#include "lib/buf/buffers.h"
#include "app/config/config.h"
#include "core/mainloop/connection.h"
#include "feature/nodelist/networkstatus.h"
-#define TOR_CHANNEL_INTERNAL_
+#define CHANNEL_OBJECT_PRIVATE
#include "core/or/channel.h"
#include "core/or/channeltls.h"
-#define SCHEDULER_PRIVATE_
+#define SCHEDULER_PRIVATE
#include "core/or/scheduler.h"
#include "lib/math/fp.h"
@@ -46,22 +51,22 @@ socket_table_ent_eq(const socket_table_ent_t *a, const socket_table_ent_t *b)
return a->chan == b->chan;
}
-typedef HT_HEAD(socket_table_s, socket_table_ent_s) socket_table_t;
+typedef HT_HEAD(socket_table_s, socket_table_ent_t) socket_table_t;
static socket_table_t socket_table = HT_INITIALIZER();
-HT_PROTOTYPE(socket_table_s, socket_table_ent_s, node, socket_table_ent_hash,
- socket_table_ent_eq)
-HT_GENERATE2(socket_table_s, socket_table_ent_s, node, socket_table_ent_hash,
- socket_table_ent_eq, 0.6, tor_reallocarray, tor_free_)
+HT_PROTOTYPE(socket_table_s, socket_table_ent_t, node, socket_table_ent_hash,
+ socket_table_ent_eq);
+HT_GENERATE2(socket_table_s, socket_table_ent_t, node, socket_table_ent_hash,
+ socket_table_ent_eq, 0.6, tor_reallocarray, tor_free_);
/* outbuf_table hash table stuff. The outbuf_table keeps track of which
* channels have data sitting in their outbuf so the kist scheduler can force
* a write from outbuf to kernel periodically during a run and at the end of a
* run. */
-typedef struct outbuf_table_ent_s {
- HT_ENTRY(outbuf_table_ent_s) node;
+typedef struct outbuf_table_ent_t {
+ HT_ENTRY(outbuf_table_ent_t) node;
channel_t *chan;
} outbuf_table_ent_t;
@@ -77,10 +82,10 @@ outbuf_table_ent_eq(const outbuf_table_ent_t *a, const outbuf_table_ent_t *b)
return a->chan->global_identifier == b->chan->global_identifier;
}
-HT_PROTOTYPE(outbuf_table_s, outbuf_table_ent_s, node, outbuf_table_ent_hash,
- outbuf_table_ent_eq)
-HT_GENERATE2(outbuf_table_s, outbuf_table_ent_s, node, outbuf_table_ent_hash,
- outbuf_table_ent_eq, 0.6, tor_reallocarray, tor_free_)
+HT_PROTOTYPE(outbuf_table_s, outbuf_table_ent_t, node, outbuf_table_ent_hash,
+ outbuf_table_ent_eq);
+HT_GENERATE2(outbuf_table_s, outbuf_table_ent_t, node, outbuf_table_ent_hash,
+ outbuf_table_ent_eq, 0.6, tor_reallocarray, tor_free_);
/*****************************************************************************
* Other internal data
@@ -104,7 +109,7 @@ static unsigned int kist_lite_mode = 0;
* changed and it doesn't recognized the values passed to the syscalls needed
* by KIST. In that case, fallback to the naive approach. */
static unsigned int kist_no_kernel_support = 0;
-#else /* !(defined(HAVE_KIST_SUPPORT)) */
+#else /* !defined(HAVE_KIST_SUPPORT) */
static unsigned int kist_lite_mode = 1;
#endif /* defined(HAVE_KIST_SUPPORT) */
@@ -298,7 +303,7 @@ update_socket_info_impl, (socket_table_ent_t *ent))
}
return;
-#else /* !(defined(HAVE_KIST_SUPPORT)) */
+#else /* !defined(HAVE_KIST_SUPPORT) */
goto fallback;
#endif /* defined(HAVE_KIST_SUPPORT) */
@@ -458,6 +463,13 @@ MOCK_IMPL(void, channel_write_to_kernel, (channel_t *chan))
log_debug(LD_SCHED, "Writing %lu bytes to kernel for chan %" PRIu64,
(unsigned long)channel_outbuf_length(chan),
chan->global_identifier);
+ /* Note that 'connection_handle_write()' may change the scheduler state of
+ * the channel during the scheduling loop with
+ * 'connection_or_flushed_some()' -> 'scheduler_channel_wants_writes()'.
+ * This side-effect will only occur if the channel is currently in the
+ * 'SCHED_CHAN_WAITING_TO_WRITE' or 'SCHED_CHAN_IDLE' states, which KIST
+ * rarely uses, so it should be fine unless KIST begins using these states
+ * in the future. */
connection_handle_write(TO_CONN(BASE_CHAN_TO_TLS(chan)->conn), 0);
}
@@ -724,7 +736,7 @@ kist_scheduler_run(void)
SMARTLIST_FOREACH_BEGIN(to_readd, channel_t *, readd_chan) {
scheduler_set_channel_state(readd_chan, SCHED_CHAN_PENDING);
if (!smartlist_contains(cp, readd_chan)) {
- if (!SCHED_BUG(chan->sched_heap_idx != -1, chan)) {
+ if (!SCHED_BUG(readd_chan->sched_heap_idx != -1, readd_chan)) {
/* XXXX Note that the check above is in theory redundant with
* the smartlist_contains check. But let's make sure we're
* not messing anything up, and leave them both for now. */
@@ -833,7 +845,7 @@ scheduler_can_use_kist(void)
return run_interval > 0;
}
-#else /* !(defined(HAVE_KIST_SUPPORT)) */
+#else /* !defined(HAVE_KIST_SUPPORT) */
int
scheduler_can_use_kist(void)
diff --git a/src/core/or/scheduler_vanilla.c b/src/core/or/scheduler_vanilla.c
index 33536ae04b..d862ff8710 100644
--- a/src/core/or/scheduler_vanilla.c
+++ b/src/core/or/scheduler_vanilla.c
@@ -1,11 +1,16 @@
-/* Copyright (c) 2017-2019, The Tor Project, Inc. */
+/* Copyright (c) 2017-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file scheduler_vanilla.c
+ * @brief "Vanilla" (pre-KIST) cell scheduler code.
+ **/
+
#include "core/or/or.h"
#include "app/config/config.h"
-#define TOR_CHANNEL_INTERNAL_
+#define CHANNEL_OBJECT_PRIVATE
#include "core/or/channel.h"
-#define SCHEDULER_PRIVATE_
+#define SCHEDULER_PRIVATE
#include "core/or/scheduler.h"
/*****************************************************************************
@@ -172,4 +177,3 @@ get_vanilla_scheduler(void)
{
return &vanilla_scheduler;
}
-
diff --git a/src/core/or/sendme.c b/src/core/or/sendme.c
new file mode 100644
index 0000000000..788f56088c
--- /dev/null
+++ b/src/core/or/sendme.c
@@ -0,0 +1,710 @@
+/* Copyright (c) 2019-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file sendme.c
+ * \brief Code related to SENDME cells: creating and parsing them, and
+ * handling their content.
+ */
+
+#define SENDME_PRIVATE
+
+#include "core/or/or.h"
+
+#include "app/config/config.h"
+#include "core/crypto/relay_crypto.h"
+#include "core/mainloop/connection.h"
+#include "core/or/cell_st.h"
+#include "core/or/crypt_path.h"
+#include "core/or/circuitlist.h"
+#include "core/or/circuituse.h"
+#include "core/or/or_circuit_st.h"
+#include "core/or/relay.h"
+#include "core/or/sendme.h"
+#include "feature/nodelist/networkstatus.h"
+#include "lib/ctime/di_ops.h"
+#include "trunnel/sendme_cell.h"
+
+/* Return the minimum version given by the consensus (if any) that should be
+ * used when emitting a SENDME cell. */
+STATIC int
+get_emit_min_version(void)
+{
+ return networkstatus_get_param(NULL, "sendme_emit_min_version",
+ SENDME_EMIT_MIN_VERSION_DEFAULT,
+ SENDME_EMIT_MIN_VERSION_MIN,
+ SENDME_EMIT_MIN_VERSION_MAX);
+}
+
+/* Return the minimum version given by the consensus (if any) that should be
+ * accepted when receiving a SENDME cell. */
+STATIC int
+get_accept_min_version(void)
+{
+ return networkstatus_get_param(NULL, "sendme_accept_min_version",
+ SENDME_ACCEPT_MIN_VERSION_DEFAULT,
+ SENDME_ACCEPT_MIN_VERSION_MIN,
+ SENDME_ACCEPT_MIN_VERSION_MAX);
+}
+
+/* Pop the first cell digest on the given circuit from the SENDME last digests
+ * list. NULL is returned if the list is uninitialized or empty.
+ *
+ * The caller gets ownership of the returned digest and is thus responsible
+ * for freeing the memory. */
+static uint8_t *
+pop_first_cell_digest(const circuit_t *circ)
+{
+ uint8_t *circ_digest;
+
+ tor_assert(circ);
+
+ if (circ->sendme_last_digests == NULL ||
+ smartlist_len(circ->sendme_last_digests) == 0) {
+ return NULL;
+ }
+
+ /* Having more cell digests than the SENDME window allows is never supposed
+ * to happen: the cell should have been rejected before reaching this point,
+ * because its package_window would have dropped to 0 and closed the circuit.
+ * Scream loudly but still pop the element so we don't leak memory. */
+ tor_assert_nonfatal(smartlist_len(circ->sendme_last_digests) <=
+ CIRCWINDOW_START_MAX / CIRCWINDOW_INCREMENT);
+
+ circ_digest = smartlist_get(circ->sendme_last_digests, 0);
+ smartlist_del_keeporder(circ->sendme_last_digests, 0);
+ return circ_digest;
+}
+
+/* Return true iff the given cell digest matches the first digest in the
+ * circuit sendme list. */
+static bool
+v1_digest_matches(const uint8_t *circ_digest, const uint8_t *cell_digest)
+{
+ tor_assert(circ_digest);
+ tor_assert(cell_digest);
+
+ /* Compare the digest with the one in the SENDME. This cell is invalid
+ * without a perfect match. */
+ if (tor_memneq(circ_digest, cell_digest, TRUNNEL_SENDME_V1_DIGEST_LEN)) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "SENDME v1 cell digest do not match.");
+ return false;
+ }
+
+ /* Digests match! */
+ return true;
+}
+
+/* Return true iff the given decoded SENDME version 1 cell is valid and
+ * matches the expected digest on the circuit.
+ *
+ * Validation is done by comparing the digest carried in the SENDME against
+ * the digest we recorded for an earlier cell, which tells us that the other
+ * side has in fact seen that cell.
+ * See proposal 289 for more details. */
+static bool
+cell_v1_is_valid(const sendme_cell_t *cell, const uint8_t *circ_digest)
+{
+ tor_assert(cell);
+ tor_assert(circ_digest);
+
+ const uint8_t *cell_digest = sendme_cell_getconstarray_data_v1_digest(cell);
+ return v1_digest_matches(circ_digest, cell_digest);
+}
+
+/* Return true iff we can handle the given cell version: the minimum version
+ * accepted by the consensus must be one we support, and the cell version
+ * must be neither below that minimum nor above the maximum we support. */
+STATIC bool
+cell_version_can_be_handled(uint8_t cell_version)
+{
+ int accept_version = get_accept_min_version();
+
+ /* We will first check if the consensus minimum accepted version can be
+ * handled by us and if not, regardless of the cell version we got, we can't
+ * continue. */
+ if (accept_version > SENDME_MAX_SUPPORTED_VERSION) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Unable to accept SENDME version %u (from consensus). "
+ "We only support <= %u. Probably your tor is too old?",
+ accept_version, SENDME_MAX_SUPPORTED_VERSION);
+ goto invalid;
+ }
+
+ /* Then, is this version below the accepted version from the consensus? If
+ * yes, we must not handle it. */
+ if (cell_version < accept_version) {
+ log_info(LD_PROTOCOL, "Unacceptable SENDME version %u. Only "
+ "accepting %u (from consensus). Closing circuit.",
+ cell_version, accept_version);
+ goto invalid;
+ }
+
+ /* Is this cell version supported by us? */
+ if (cell_version > SENDME_MAX_SUPPORTED_VERSION) {
+ log_info(LD_PROTOCOL, "SENDME cell version %u is not supported by us. "
+ "We only support <= %u",
+ cell_version, SENDME_MAX_SUPPORTED_VERSION);
+ goto invalid;
+ }
+
+ return true;
+ invalid:
+ return false;
+}
+
+/* Return true iff the encoded SENDME cell in cell_payload of length
+ * cell_payload_len is valid. For each version:
+ *
+ * 0: No validation
+ * 1: Authenticated with last cell digest.
+ *
+ * This is the main critical function to make sure we can continue to
+ * send/recv cells on a circuit. If the SENDME is invalid, the circuit should
+ * be marked for close by the caller. */
+STATIC bool
+sendme_is_valid(const circuit_t *circ, const uint8_t *cell_payload,
+ size_t cell_payload_len)
+{
+ uint8_t cell_version;
+ uint8_t *circ_digest = NULL;
+ sendme_cell_t *cell = NULL;
+
+ tor_assert(circ);
+ tor_assert(cell_payload);
+
+ /* An empty payload means version 0 so skip trunnel parsing. We won't be
+ * able to parse a 0 length buffer into a valid SENDME cell. */
+ if (cell_payload_len == 0) {
+ cell_version = 0;
+ } else {
+ /* First we'll decode the cell so we can get the version. */
+ if (sendme_cell_parse(&cell, cell_payload, cell_payload_len) < 0) {
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Unparseable SENDME cell received. Closing circuit.");
+ goto invalid;
+ }
+ cell_version = sendme_cell_get_version(cell);
+ }
+
+ /* Validate that we can handle this cell version. */
+ if (!cell_version_can_be_handled(cell_version)) {
+ goto invalid;
+ }
+
+ /* Pop the first element that was added (FIFO). We do that regardless of the
+ * version so we don't accumulate on the circuit if v0 is used by the other
+ * end point. */
+ circ_digest = pop_first_cell_digest(circ);
+ if (circ_digest == NULL) {
+ /* We shouldn't have received a SENDME if we have no digests. Log at
+ * protocol-warning level because a peer can trigger this by sending many
+ * SENDMEs without any prior data cells. */
+ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "We received a SENDME but we have no cell digests to match. "
+ "Closing circuit.");
+ goto invalid;
+ }
+
+ /* Validate depending on the version now. */
+ switch (cell_version) {
+ case 0x01:
+ if (!cell_v1_is_valid(cell, circ_digest)) {
+ goto invalid;
+ }
+ break;
+ case 0x00:
+ /* Version 0, there is no work to be done on the payload so it is
+ * necessarily valid if we pass the version validation. */
+ break;
+ default:
+ log_warn(LD_PROTOCOL, "Unknown SENDME cell version %d received.",
+ cell_version);
+ tor_assert_nonfatal_unreached();
+ break;
+ }
+
+ /* Valid cell. */
+ sendme_cell_free(cell);
+ tor_free(circ_digest);
+ return true;
+ invalid:
+ sendme_cell_free(cell);
+ tor_free(circ_digest);
+ return false;
+}
+
+/* Build and encode a version 1 SENDME cell into payload, which must be at
+ * least RELAY_PAYLOAD_SIZE bytes long, using the digest for the cell data.
+ *
+ * Return the size in bytes of the encoded cell in payload. A negative value
+ * is returned on encoding failure. */
+STATIC ssize_t
+build_cell_payload_v1(const uint8_t *cell_digest, uint8_t *payload)
+{
+ ssize_t len = -1;
+ sendme_cell_t *cell = NULL;
+
+ tor_assert(cell_digest);
+ tor_assert(payload);
+
+ cell = sendme_cell_new();
+
+ /* Building a payload for version 1. */
+ sendme_cell_set_version(cell, 0x01);
+ /* Set the data length field for v1. */
+ sendme_cell_set_data_len(cell, TRUNNEL_SENDME_V1_DIGEST_LEN);
+
+ /* Copy the digest into the data payload. */
+ memcpy(sendme_cell_getarray_data_v1_digest(cell), cell_digest,
+ sendme_cell_get_data_len(cell));
+
+ /* Finally, encode the cell into the payload. */
+ len = sendme_cell_encode(payload, RELAY_PAYLOAD_SIZE, cell);
+
+ sendme_cell_free(cell);
+ return len;
+}
+
+/* Send a circuit-level SENDME on the given circuit using the layer_hint if
+ * not NULL. The digest is only used for version 1.
+ *
+ * Return 0 on success. On failure, return a negative value; in that case
+ * the circuit has already been closed because we failed to send the cell
+ * on it. */
+static int
+send_circuit_level_sendme(circuit_t *circ, crypt_path_t *layer_hint,
+ const uint8_t *cell_digest)
+{
+ uint8_t emit_version;
+ uint8_t payload[RELAY_PAYLOAD_SIZE];
+ ssize_t payload_len;
+
+ tor_assert(circ);
+ tor_assert(cell_digest);
+
+ emit_version = get_emit_min_version();
+ switch (emit_version) {
+ case 0x01:
+ payload_len = build_cell_payload_v1(cell_digest, payload);
+ if (BUG(payload_len < 0)) {
+ /* Unable to encode the cell, abort. We can recover from this by closing
+ * the circuit but in theory it should never happen. */
+ return -1;
+ }
+ log_debug(LD_PROTOCOL, "Emitting SENDME version 1 cell.");
+ break;
+ case 0x00:
+ FALLTHROUGH;
+ default:
+ /* Unknown version; fall back to version 0, meaning no payload. */
+ payload_len = 0;
+ log_debug(LD_PROTOCOL, "Emitting SENDME version 0 cell. "
+ "Consensus emit version is %d", emit_version);
+ break;
+ }
+
+ if (relay_send_command_from_edge(0, circ, RELAY_COMMAND_SENDME,
+ (char *) payload, payload_len,
+ layer_hint) < 0) {
+ log_warn(LD_CIRC,
+ "SENDME relay_send_command_from_edge failed. Circuit's closed.");
+ return -1; /* the circuit's closed, don't continue */
+ }
+ return 0;
+}
+
+/* Record the cell digest only if the next cell is expected to be a SENDME. */
+static void
+record_cell_digest_on_circ(circuit_t *circ, const uint8_t *sendme_digest)
+{
+ tor_assert(circ);
+ tor_assert(sendme_digest);
+
+ /* Add the digest to the last seen list in the circuit. */
+ if (circ->sendme_last_digests == NULL) {
+ circ->sendme_last_digests = smartlist_new();
+ }
+ smartlist_add(circ->sendme_last_digests,
+ tor_memdup(sendme_digest, DIGEST_LEN));
+}
+
+/*
+ * Public API
+ */
+
+/** Return true iff the next cell for the given cell window is expected to be
+ * a SENDME.
+ *
+ * We know this because the package or deliver window value, minus the one
+ * cell currently being processed, is a multiple of the window increment
+ * whenever a SENDME is due. */
+static bool
+circuit_sendme_cell_is_next(int window)
+{
+ /* At the start of the window, no SENDME will be expected. */
+ if (window == CIRCWINDOW_START) {
+ return false;
+ }
+
+ /* Are we at an increment boundary? If not, we don't expect the next cell
+ * to be a SENDME.
+ *
+ * We test against the window minus 1 because, when we check whether the
+ * next cell is a SENDME, the window (either package or deliver) hasn't been
+ * decremented yet, so at this point we are still processing the
+ * "window - 1" cell.
+ *
+ * This function is used when recording a cell digest, which happens quite
+ * low in the stack while decrypting or encrypting a cell. The window is only
+ * updated once the cell is actually put in the outbuf. */
+ if (((window - 1) % CIRCWINDOW_INCREMENT) != 0) {
+ return false;
+ }
+
+ /* Next cell is expected to be a SENDME. */
+ return true;
+}
+
+/** Called when we've just received a relay data cell, when we've just
+ * finished flushing all bytes to stream <b>conn</b>, or when we've flushed
+ * *some* bytes to the stream <b>conn</b>.
+ *
+ * If conn->outbuf is not too full, and our deliver window is low, send back a
+ * suitable number of stream-level sendme cells.
+ */
+void
+sendme_connection_edge_consider_sending(edge_connection_t *conn)
+{
+ tor_assert(conn);
+
+ int log_domain = TO_CONN(conn)->type == CONN_TYPE_AP ? LD_APP : LD_EXIT;
+
+ /* Don't send it if we still have data to deliver. */
+ if (connection_outbuf_too_full(TO_CONN(conn))) {
+ goto end;
+ }
+
+ if (circuit_get_by_edge_conn(conn) == NULL) {
+ /* This can legitimately happen if the destroy has already arrived and
+ * torn down the circuit. */
+ log_info(log_domain, "No circuit associated with edge connection. "
+ "Skipping sending SENDME.");
+ goto end;
+ }
+
+ while (conn->deliver_window <=
+ (STREAMWINDOW_START - STREAMWINDOW_INCREMENT)) {
+ log_debug(log_domain, "Outbuf %" TOR_PRIuSZ ", queuing stream SENDME.",
+ TO_CONN(conn)->outbuf_flushlen);
+ conn->deliver_window += STREAMWINDOW_INCREMENT;
+ if (connection_edge_send_command(conn, RELAY_COMMAND_SENDME,
+ NULL, 0) < 0) {
+ log_warn(LD_BUG, "connection_edge_send_command failed while sending "
+ "a SENDME. Circuit probably closed, skipping.");
+ goto end; /* The circuit's closed, don't continue */
+ }
+ }
+
+ end:
+ return;
+}
+
+/** Check if the deliver_window for circuit <b>circ</b> (at hop
+ * <b>layer_hint</b> if it's defined) is low enough that we should
+ * send a circuit-level sendme back down the circuit. If so, send
+ * enough sendmes that the window would be overfull if we sent any
+ * more.
+ */
+void
+sendme_circuit_consider_sending(circuit_t *circ, crypt_path_t *layer_hint)
+{
+ bool sent_one_sendme = false;
+ const uint8_t *digest;
+
+ while ((layer_hint ? layer_hint->deliver_window : circ->deliver_window) <=
+ CIRCWINDOW_START - CIRCWINDOW_INCREMENT) {
+ log_debug(LD_CIRC,"Queuing circuit sendme.");
+ if (layer_hint) {
+ layer_hint->deliver_window += CIRCWINDOW_INCREMENT;
+ digest = cpath_get_sendme_digest(layer_hint);
+ } else {
+ circ->deliver_window += CIRCWINDOW_INCREMENT;
+ digest = relay_crypto_get_sendme_digest(&TO_OR_CIRCUIT(circ)->crypto);
+ }
+ if (send_circuit_level_sendme(circ, layer_hint, digest) < 0) {
+ return; /* The circuit's closed, don't continue */
+ }
+ /* The current implementation is not supposed to send multiple SENDMEs at
+ * once, because that would reuse the same relay crypto digest for each
+ * SENDME, leading to a mismatch on the other side and a collapsed circuit.
+ * Scream loudly if it ever happens so we can address it. */
+ tor_assert_nonfatal(!sent_one_sendme);
+ sent_one_sendme = true;
+ }
+}
+
+/* Process a circuit-level SENDME cell that we just received. The layer_hint,
+ * if not NULL, is the Exit hop of the connection which means that we are a
+ * client. In that case, circ must be an origin circuit. The cell_body_len is
+ * the length of the SENDME cell payload (excluding the header). The
+ * cell_payload is the payload.
+ *
+ * Return 0 on success (the SENDME is valid and the package window has
+ * been updated properly).
+ *
+ * On error, a negative value is returned, which indicates that the
+ * circuit must be closed using the value as the reason for it. */
+int
+sendme_process_circuit_level(crypt_path_t *layer_hint,
+ circuit_t *circ, const uint8_t *cell_payload,
+ uint16_t cell_payload_len)
+{
+ tor_assert(circ);
+ tor_assert(cell_payload);
+
+ /* Validate the SENDME cell. Depending on the version, different validation
+ * can be done. An invalid SENDME requires us to close the circuit. */
+ if (!sendme_is_valid(circ, cell_payload, cell_payload_len)) {
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+
+ /* If we are the origin of the circuit, we are the Client so we use the
+ * layer hint (the Exit hop) for the package window tracking. */
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ /* If we are the origin of the circuit, it is impossible to not have a
+ * cpath. Just in case, bug on it and close the circuit. */
+ if (BUG(layer_hint == NULL)) {
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+ if ((layer_hint->package_window + CIRCWINDOW_INCREMENT) >
+ CIRCWINDOW_START_MAX) {
+ static struct ratelim_t exit_warn_ratelim = RATELIM_INIT(600);
+ log_fn_ratelim(&exit_warn_ratelim, LOG_WARN, LD_PROTOCOL,
+ "Unexpected sendme cell from exit relay. "
+ "Closing circ.");
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+ layer_hint->package_window += CIRCWINDOW_INCREMENT;
+ log_debug(LD_APP, "circ-level sendme at origin, packagewindow %d.",
+ layer_hint->package_window);
+
+ /* We count circuit-level sendme's as valid delivered data because they
+ * are rate limited. */
+ circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), cell_payload_len);
+ } else {
+ /* We aren't the origin of this circuit so we are the Exit and thus we
+ * track the package window with the circuit object. */
+ if ((circ->package_window + CIRCWINDOW_INCREMENT) >
+ CIRCWINDOW_START_MAX) {
+ static struct ratelim_t client_warn_ratelim = RATELIM_INIT(600);
+ log_fn_ratelim(&client_warn_ratelim, LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Unexpected sendme cell from client. "
+ "Closing circ (window %d).", circ->package_window);
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+ circ->package_window += CIRCWINDOW_INCREMENT;
+ log_debug(LD_EXIT, "circ-level sendme at non-origin, packagewindow %d.",
+ circ->package_window);
+ }
+
+ return 0;
+}
+
+/* Process a stream-level SENDME cell that we just received. The conn is the
+ * edge connection (stream) that the circuit circ is associated with. The
+ * cell_body_len is the length of the payload (excluding the header).
+ *
+ * Return 0 on success (the SENDME is valid and the package window has
+ * been updated properly).
+ *
+ * On error, a negative value is returned, which indicates that the
+ * circuit must be closed using the value as the reason for it. */
+int
+sendme_process_stream_level(edge_connection_t *conn, circuit_t *circ,
+ uint16_t cell_body_len)
+{
+ tor_assert(conn);
+ tor_assert(circ);
+
+ /* Don't allow the other endpoint to request more than our maximum (i.e.
+ * initial) stream SENDME window worth of data. Well-behaved stock clients
+ * will not request more than this max (as per the check in the while loop
+ * of sendme_connection_edge_consider_sending()). */
+ if ((conn->package_window + STREAMWINDOW_INCREMENT) >
+ STREAMWINDOW_START_MAX) {
+ static struct ratelim_t stream_warn_ratelim = RATELIM_INIT(600);
+ log_fn_ratelim(&stream_warn_ratelim, LOG_PROTOCOL_WARN, LD_PROTOCOL,
+ "Unexpected stream sendme cell. Closing circ (window %d).",
+ conn->package_window);
+ return -END_CIRC_REASON_TORPROTOCOL;
+ }
+ /* At this point, the stream sendme is valid */
+ conn->package_window += STREAMWINDOW_INCREMENT;
+
+ /* We count circuit-level sendme's as valid delivered data because they are
+ * rate limited. */
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), cell_body_len);
+ }
+
+ log_debug(CIRCUIT_IS_ORIGIN(circ) ? LD_APP : LD_EXIT,
+ "stream-level sendme, package_window now %d.",
+ conn->package_window);
+ return 0;
+}
+
+/* Called when a relay DATA cell is received on the given circuit. If
+ * layer_hint is NULL, we are the Exit endpoint; otherwise we are the
+ * Client. Update the deliver window and return its new value. */
+int
+sendme_circuit_data_received(circuit_t *circ, crypt_path_t *layer_hint)
+{
+ int deliver_window, domain;
+
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ tor_assert(layer_hint);
+ --layer_hint->deliver_window;
+ deliver_window = layer_hint->deliver_window;
+ domain = LD_APP;
+ } else {
+ tor_assert(!layer_hint);
+ --circ->deliver_window;
+ deliver_window = circ->deliver_window;
+ domain = LD_EXIT;
+ }
+
+ log_debug(domain, "Circuit deliver_window now %d.", deliver_window);
+ return deliver_window;
+}
+
+/* Called when a relay DATA cell is received for the given edge connection
+ * conn. Update the deliver window and return its new value. */
+int
+sendme_stream_data_received(edge_connection_t *conn)
+{
+ tor_assert(conn);
+ return --conn->deliver_window;
+}
+
+/* Called when a relay DATA cell is packaged on the given circuit. If
+ * layer_hint is NULL, we are the Exit endpoint; otherwise we are the
+ * Client. Update the package window and return its new value. */
+int
+sendme_note_circuit_data_packaged(circuit_t *circ, crypt_path_t *layer_hint)
+{
+ int package_window, domain;
+
+ tor_assert(circ);
+
+ if (CIRCUIT_IS_ORIGIN(circ)) {
+ /* Client side. */
+ tor_assert(layer_hint);
+ --layer_hint->package_window;
+ package_window = layer_hint->package_window;
+ domain = LD_APP;
+ } else {
+ /* Exit side. */
+ tor_assert(!layer_hint);
+ --circ->package_window;
+ package_window = circ->package_window;
+ domain = LD_EXIT;
+ }
+
+ log_debug(domain, "Circuit package_window now %d.", package_window);
+ return package_window;
+}
+
+/* Called when a relay DATA cell is packaged for the given edge connection
+ * conn. Update the package window and return its new value. */
+int
+sendme_note_stream_data_packaged(edge_connection_t *conn)
+{
+ tor_assert(conn);
+
+ --conn->package_window;
+ log_debug(LD_APP, "Stream package_window now %d.", conn->package_window);
+ return conn->package_window;
+}
+
+/* Record the cell digest into the circuit sendme digest list depending on
+ * which edge we are. The digest is recorded only if we expect the next cell
+ * we receive to be a SENDME, so that we can match the digest. */
+void
+sendme_record_cell_digest_on_circ(circuit_t *circ, crypt_path_t *cpath)
+{
+ int package_window;
+ uint8_t *sendme_digest;
+
+ tor_assert(circ);
+
+ package_window = circ->package_window;
+ if (cpath) {
+ package_window = cpath->package_window;
+ }
+
+ /* Is this the last cell before a SENDME? The idea is that if the
+ * package_window reaches a multiple of the increment, after this cell, we
+ * should expect a SENDME. */
+ if (!circuit_sendme_cell_is_next(package_window)) {
+ return;
+ }
+
+ /* Getting the digest is expensive so we only do it once we are certain to
+ * record it on the circuit. */
+ if (cpath) {
+ sendme_digest = cpath_get_sendme_digest(cpath);
+ } else {
+ sendme_digest =
+ relay_crypto_get_sendme_digest(&TO_OR_CIRCUIT(circ)->crypto);
+ }
+
+ record_cell_digest_on_circ(circ, sendme_digest);
+}
+
+/* Called once we decrypted a cell and recognized it. Record the cell digest
+ * as the next sendme digest only if the next cell we'll send on the circuit
+ * is expected to be a SENDME. */
+void
+sendme_record_received_cell_digest(circuit_t *circ, crypt_path_t *cpath)
+{
+ tor_assert(circ);
+
+ /* Only record if the next cell is expected to be a SENDME. */
+ if (!circuit_sendme_cell_is_next(cpath ? cpath->deliver_window :
+ circ->deliver_window)) {
+ return;
+ }
+
+ if (cpath) {
+ /* Record incoming digest. */
+ cpath_sendme_record_cell_digest(cpath, false);
+ } else {
+ /* Record forward digest. */
+ relay_crypto_record_sendme_digest(&TO_OR_CIRCUIT(circ)->crypto, true);
+ }
+}
+
+/* Called once we encrypted a cell. Record the cell digest as the next sendme
+ * digest only if the next cell we expect to receive is a SENDME so we can
+ * match the digests. */
+void
+sendme_record_sending_cell_digest(circuit_t *circ, crypt_path_t *cpath)
+{
+ tor_assert(circ);
+
+ /* Only record if the next cell is expected to be a SENDME. */
+ if (!circuit_sendme_cell_is_next(cpath ? cpath->package_window :
+ circ->package_window)) {
+ goto end;
+ }
+
+ if (cpath) {
+ /* Record the forward digest. */
+ cpath_sendme_record_cell_digest(cpath, true);
+ } else {
+ /* Record the incoming digest. */
+ relay_crypto_record_sendme_digest(&TO_OR_CIRCUIT(circ)->crypto, false);
+ }
+
+ end:
+ return;
+}
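
The authenticated (v1) SENDME flow in this new file comes down to digest bookkeeping: the data sender records the running digest whenever the cell it just sent is the last one before a SENDME is due, and a later SENDME must carry exactly the oldest recorded digest. The sketch below restates that bookkeeping with a fixed array instead of a smartlist and plain memcmp instead of tor_memneq; it is illustrative only.

/* Illustrative v1 SENDME digest bookkeeping, mirroring sendme.c above. */
#include <stdbool.h>
#include <string.h>

#define DEMO_DIGEST_LEN 20
#define DEMO_MAX_PENDING 10  /* CIRCWINDOW_START_MAX / CIRCWINDOW_INCREMENT */

struct demo_circ {
  unsigned char pending[DEMO_MAX_PENDING][DEMO_DIGEST_LEN];
  int n_pending;
};

/* Data sender: called when the cell just sent is the last one before a
 * SENDME is expected, i.e. (package_window - 1) % increment == 0. */
static void
demo_record_expected_digest(struct demo_circ *c, const unsigned char *digest)
{
  if (c->n_pending < DEMO_MAX_PENDING)
    memcpy(c->pending[c->n_pending++], digest, DEMO_DIGEST_LEN);
}

/* Data sender: called when a SENDME arrives; pop the oldest digest (FIFO)
 * and require an exact match, as sendme_is_valid()/cell_v1_is_valid() do. */
static bool
demo_sendme_digest_ok(struct demo_circ *c, const unsigned char *sendme_digest)
{
  if (c->n_pending == 0)
    return false;  /* a SENDME arrived with nothing to acknowledge */
  bool ok = memcmp(c->pending[0], sendme_digest, DEMO_DIGEST_LEN) == 0;
  memmove(c->pending[0], c->pending[1],
          (size_t)(c->n_pending - 1) * DEMO_DIGEST_LEN);
  c->n_pending--;
  return ok;
}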
diff --git a/src/core/or/sendme.h b/src/core/or/sendme.h
new file mode 100644
index 0000000000..05d37ec3bb
--- /dev/null
+++ b/src/core/or/sendme.h
@@ -0,0 +1,80 @@
+/* Copyright (c) 2019-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+/**
+ * \file sendme.h
+ * \brief Header file for sendme.c.
+ **/
+
+#ifndef TOR_SENDME_H
+#define TOR_SENDME_H
+
+#include "core/or/edge_connection_st.h"
+#include "core/or/crypt_path_st.h"
+#include "core/or/circuit_st.h"
+
+/* Sending SENDME cell. */
+void sendme_connection_edge_consider_sending(edge_connection_t *edge_conn);
+void sendme_circuit_consider_sending(circuit_t *circ,
+ crypt_path_t *layer_hint);
+
+/* Processing SENDME cell. */
+int sendme_process_circuit_level(crypt_path_t *layer_hint,
+ circuit_t *circ, const uint8_t *cell_payload,
+ uint16_t cell_payload_len);
+int sendme_process_stream_level(edge_connection_t *conn, circuit_t *circ,
+ uint16_t cell_body_len);
+
+/* Update deliver window functions. */
+int sendme_stream_data_received(edge_connection_t *conn);
+int sendme_circuit_data_received(circuit_t *circ, crypt_path_t *layer_hint);
+
+/* Update package window functions. */
+int sendme_note_circuit_data_packaged(circuit_t *circ,
+ crypt_path_t *layer_hint);
+int sendme_note_stream_data_packaged(edge_connection_t *conn);
+
+/* Record cell digest on circuit. */
+void sendme_record_cell_digest_on_circ(circuit_t *circ, crypt_path_t *cpath);
+/* Record cell digest as the SENDME digest. */
+void sendme_record_received_cell_digest(circuit_t *circ, crypt_path_t *cpath);
+void sendme_record_sending_cell_digest(circuit_t *circ, crypt_path_t *cpath);
+
+/* Private section starts. */
+#ifdef SENDME_PRIVATE
+
+/* The maximum supported version. Above that value, the cell can't be
+ * recognized as a valid SENDME. */
+#define SENDME_MAX_SUPPORTED_VERSION 1
+
+/* The cell version constants for when emitting a cell. */
+#define SENDME_EMIT_MIN_VERSION_DEFAULT 1
+#define SENDME_EMIT_MIN_VERSION_MIN 0
+#define SENDME_EMIT_MIN_VERSION_MAX UINT8_MAX
+
+/* The cell version constants for when accepting a cell. */
+#define SENDME_ACCEPT_MIN_VERSION_DEFAULT 0
+#define SENDME_ACCEPT_MIN_VERSION_MIN 0
+#define SENDME_ACCEPT_MIN_VERSION_MAX UINT8_MAX
+
+/*
+ * Unit test declarations.
+ */
+#ifdef TOR_UNIT_TESTS
+
+STATIC int get_emit_min_version(void);
+STATIC int get_accept_min_version(void);
+
+STATIC bool cell_version_can_be_handled(uint8_t cell_version);
+
+STATIC ssize_t build_cell_payload_v1(const uint8_t *cell_digest,
+ uint8_t *payload);
+STATIC bool sendme_is_valid(const circuit_t *circ,
+ const uint8_t *cell_payload,
+ size_t cell_payload_len);
+
+#endif /* defined(TOR_UNIT_TESTS) */
+
+#endif /* defined(SENDME_PRIVATE) */
+
+#endif /* !defined(TOR_SENDME_H) */
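
The private constants bound the consensus parameters that control which SENDME versions this tor emits and accepts. A hedged sketch of the gate they imply for a received cell, assuming the accept minimum comes from the consensus as get_accept_min_version() suggests (this is not the patch's cell_version_can_be_handled()):

/* Illustrative only. */
static bool
example_sendme_version_usable(uint8_t cell_version, int accept_min_version)
{
  return cell_version <= SENDME_MAX_SUPPORTED_VERSION &&
         cell_version >= accept_min_version;
}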
diff --git a/src/core/or/server_port_cfg_st.h b/src/core/or/server_port_cfg_st.h
index bd026af7ee..9a005eccdf 100644
--- a/src/core/or/server_port_cfg_st.h
+++ b/src/core/or/server_port_cfg_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file server_port_cfg_st.h
+ * @brief Configuration structure for server ports.
+ **/
+
#ifndef SERVER_PORT_CFG_ST_H
#define SERVER_PORT_CFG_ST_H
@@ -16,5 +21,4 @@ struct server_port_cfg_t {
unsigned int bind_ipv6_only : 1;
};
-#endif
-
+#endif /* !defined(SERVER_PORT_CFG_ST_H) */
diff --git a/src/core/or/socks_request_st.h b/src/core/or/socks_request_st.h
index 5922870c61..4bcdb48b92 100644
--- a/src/core/or/socks_request_st.h
+++ b/src/core/or/socks_request_st.h
@@ -1,12 +1,19 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file socks_request_st.h
+ * @brief Client request structure.
+ **/
+
#ifndef SOCKS_REQUEST_ST_H
#define SOCKS_REQUEST_ST_H
+#include "lib/net/socks5_status.h"
+
#define MAX_SOCKS_REPLY_LEN 1024
#define SOCKS_NO_AUTH 0x00
@@ -58,6 +65,11 @@ struct socks_request_t {
* "username/password" authentication if both are offered. Used as input to
* parse_socks. */
unsigned int socks_prefer_no_auth : 1;
+ /** If set, we can send back the extended error code in the reply. */
+ unsigned int socks_use_extended_errors : 1;
+ /** If non-zero, this contains the extended error code that should be used
+ * if the port was configured to use extended error codes. */
+ socks5_reply_status_t socks_extended_error_code;
/** Number of bytes in username; 0 if username is NULL */
size_t usernamelen;
@@ -74,4 +86,4 @@ struct socks_request_t {
uint8_t socks5_atyp; /* SOCKS5 address type */
};
-#endif
+#endif /* !defined(SOCKS_REQUEST_ST_H) */
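
A hedged sketch of how the two new fields presumably combine on the reply path; the helper below is illustrative, not code from this patch:

/* Illustrative only: prefer the extended code when the port opted in. */
static socks5_reply_status_t
example_pick_socks5_reply(const struct socks_request_t *req,
                          socks5_reply_status_t generic_status)
{
  if (req->socks_use_extended_errors && req->socks_extended_error_code != 0)
    return req->socks_extended_error_code;
  return generic_status;
}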
diff --git a/src/core/or/status.c b/src/core/or/status.c
index 46494ca76c..ed8448883c 100644
--- a/src/core/or/status.c
+++ b/src/core/or/status.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2019, The Tor Project, Inc. */
+/* Copyright (c) 2010-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -17,6 +17,7 @@
#include "core/or/or.h"
#include "core/or/circuituse.h"
#include "app/config/config.h"
+#include "feature/dirclient/dirclient.h"
#include "core/or/status.h"
#include "feature/nodelist/nodelist.h"
#include "core/or/relay.h"
@@ -146,6 +147,8 @@ log_heartbeat(time_t now)
uptime, count_circuits(), bw_sent, bw_rcvd,
hibernating?" We are currently hibernating.":"");
+ dirclient_dump_total_dls();
+
if (server_mode(options) && accounting_is_enabled(options) && !hibernating) {
log_accounting(now, options);
}
diff --git a/src/core/or/status.h b/src/core/or/status.h
index 3467501ebb..639f8cdf51 100644
--- a/src/core/or/status.h
+++ b/src/core/or/status.h
@@ -1,6 +1,11 @@
-/* Copyright (c) 2010-2019, The Tor Project, Inc. */
+/* Copyright (c) 2010-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file status.h
+ * @brief Header for status.c
+ **/
+
#ifndef TOR_STATUS_H
#define TOR_STATUS_H
@@ -15,4 +20,3 @@ STATIC char *bytes_to_usage(uint64_t bytes);
#endif
#endif /* !defined(TOR_STATUS_H) */
-
diff --git a/src/core/or/tor_version_st.h b/src/core/or/tor_version_st.h
index 716429bd32..46644c5eb8 100644
--- a/src/core/or/tor_version_st.h
+++ b/src/core/or/tor_version_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file tor_version_st.h
+ * @brief Parsed Tor version structure.
+ **/
+
#ifndef TOR_VERSION_ST_H
#define TOR_VERSION_ST_H
@@ -28,5 +33,4 @@ struct tor_version_t {
char git_tag[DIGEST_LEN];
};
-#endif
-
+#endif /* !defined(TOR_VERSION_ST_H) */
diff --git a/src/core/or/var_cell_st.h b/src/core/or/var_cell_st.h
index 4287c83f6d..caf64c63a5 100644
--- a/src/core/or/var_cell_st.h
+++ b/src/core/or/var_cell_st.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file var_cell_st.h
+ * @brief Variable-length cell structure.
+ **/
+
#ifndef VAR_CELL_ST_H
#define VAR_CELL_ST_H
@@ -19,5 +24,4 @@ struct var_cell_t {
uint8_t payload[FLEXIBLE_ARRAY_MEMBER];
};
-#endif
-
+#endif /* !defined(VAR_CELL_ST_H) */
diff --git a/src/core/or/versions.c b/src/core/or/versions.c
index 33273a5294..31f1f5b997 100644
--- a/src/core/or/versions.c
+++ b/src/core/or/versions.c
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -16,6 +16,25 @@
#include "core/or/tor_version_st.h"
+/**
+ * Return the approximate date when this release came out, or was
+ * scheduled to come out, according to the APPROX_RELEASE_DATE set in
+ * configure.ac
+ **/
+time_t
+tor_get_approx_release_date(void)
+{
+ char tbuf[ISO_TIME_LEN+1];
+ tor_snprintf(tbuf, sizeof(tbuf),
+ "%s 00:00:00", APPROX_RELEASE_DATE);
+ time_t result = 0;
+ int r = parse_iso_time(tbuf, &result);
+ if (BUG(r < 0)) {
+ result = 0;
+ }
+ return result;
+}
+
/** Return VS_RECOMMENDED if <b>myversion</b> is contained in
* <b>versionlist</b>. Else, return VS_EMPTY if versionlist has no
* entries. Else, return VS_OLD if every member of
@@ -277,7 +296,7 @@ tor_version_parse(const char *s, tor_version_t *out)
return -1;
hexlen = (int)(close_paren-cp);
memwipe(digest, 0, sizeof(digest));
- if ( hexlen == 0 || (hexlen % 2) == 1)
+ if (hexlen == 0 || (hexlen % 2) == 1)
return -1;
if (base16_decode(digest, hexlen/2, cp, hexlen) != hexlen/2)
return -1;
@@ -377,6 +396,69 @@ sort_version_list(smartlist_t *versions, int remove_duplicates)
smartlist_uniq(versions, compare_tor_version_str_ptr_, tor_free_);
}
+/** If there are more than this many entries, we're probably under
+ * some kind of weird DoS. */
+static const int MAX_PROTOVER_SUMMARY_MAP_LEN = 1024;
+
+/**
+ * Map from protover string to protover_summary_flags_t.
+ */
+static strmap_t *protover_summary_map = NULL;
+
+/**
+ * Helper. Given a non-NULL protover string <b>protocols</b>, set <b>out</b>
+ * to its summary, and memoize the result in <b>protover_summary_map</b>.
+ */
+static void
+memoize_protover_summary(protover_summary_flags_t *out,
+ const char *protocols)
+{
+ if (!protover_summary_map)
+ protover_summary_map = strmap_new();
+
+ if (strmap_size(protover_summary_map) >= MAX_PROTOVER_SUMMARY_MAP_LEN) {
+ protover_summary_cache_free_all();
+ tor_assert(protover_summary_map == NULL);
+ protover_summary_map = strmap_new();
+ }
+
+ const protover_summary_flags_t *cached =
+ strmap_get(protover_summary_map, protocols);
+
+ if (cached != NULL) {
+ /* We found a cached entry; no need to parse this one. */
+ memcpy(out, cached, sizeof(protover_summary_flags_t));
+ tor_assert(out->protocols_known);
+ return;
+ }
+
+ memset(out, 0, sizeof(*out));
+ out->protocols_known = 1;
+ out->supports_extend2_cells =
+ protocol_list_supports_protocol(protocols, PRT_RELAY, 2);
+ out->supports_ed25519_link_handshake_compat =
+ protocol_list_supports_protocol(protocols, PRT_LINKAUTH, 3);
+ out->supports_ed25519_link_handshake_any =
+ protocol_list_supports_protocol_or_later(protocols, PRT_LINKAUTH, 3);
+ out->supports_ed25519_hs_intro =
+ protocol_list_supports_protocol(protocols, PRT_HSINTRO, 4);
+ out->supports_v3_hsdir =
+ protocol_list_supports_protocol(protocols, PRT_HSDIR,
+ PROTOVER_HSDIR_V3);
+ out->supports_v3_rendezvous_point =
+ protocol_list_supports_protocol(protocols, PRT_HSREND,
+ PROTOVER_HS_RENDEZVOUS_POINT_V3);
+ out->supports_hs_setup_padding =
+ protocol_list_supports_protocol(protocols, PRT_PADDING,
+ PROTOVER_HS_SETUP_PADDING);
+ out->supports_establish_intro_dos_extension =
+ protocol_list_supports_protocol(protocols, PRT_HSINTRO, 5);
+
+ protover_summary_flags_t *new_cached = tor_memdup(out, sizeof(*out));
+ cached = strmap_set(protover_summary_map, protocols, new_cached);
+ tor_assert(!cached);
+}
+
/** Summarize the protocols listed in <b>protocols</b> into <b>out</b>,
* falling back or correcting them based on <b>version</b> as appropriate.
*/
@@ -388,21 +470,7 @@ summarize_protover_flags(protover_summary_flags_t *out,
tor_assert(out);
memset(out, 0, sizeof(*out));
if (protocols) {
- out->protocols_known = 1;
- out->supports_extend2_cells =
- protocol_list_supports_protocol(protocols, PRT_RELAY, 2);
- out->supports_ed25519_link_handshake_compat =
- protocol_list_supports_protocol(protocols, PRT_LINKAUTH, 3);
- out->supports_ed25519_link_handshake_any =
- protocol_list_supports_protocol_or_later(protocols, PRT_LINKAUTH, 3);
- out->supports_ed25519_hs_intro =
- protocol_list_supports_protocol(protocols, PRT_HSINTRO, 4);
- out->supports_v3_hsdir =
- protocol_list_supports_protocol(protocols, PRT_HSDIR,
- PROTOVER_HSDIR_V3);
- out->supports_v3_rendezvous_point =
- protocol_list_supports_protocol(protocols, PRT_HSREND,
- PROTOVER_HS_RENDEZVOUS_POINT_V3);
+ memoize_protover_summary(out, protocols);
}
if (version && !strcmpstart(version, "Tor ")) {
if (!out->protocols_known) {
@@ -420,3 +488,13 @@ summarize_protover_flags(protover_summary_flags_t *out,
}
}
}
+
+/**
+ * Free all space held in the protover_summary_map.
+ */
+void
+protover_summary_cache_free_all(void)
+{
+ strmap_free(protover_summary_map, tor_free_);
+ protover_summary_map = NULL;
+}
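
With memoize_protover_summary() in place, identical protover strings (which dominate any real consensus) are parsed once and afterwards served from protover_summary_map; the map is capped at MAX_PROTOVER_SUMMARY_MAP_LEN entries and released by protover_summary_cache_free_all(). A small usage sketch:

/* Illustrative only: the second call should be answered from the cache. */
static void
example_protover_summary_usage(void)
{
  protover_summary_flags_t flags;

  summarize_protover_flags(&flags, "Relay=2 HSDir=2", "Tor 0.4.3.1");
  summarize_protover_flags(&flags, "Relay=2 HSDir=2", "Tor 0.4.3.1");

  /* At shutdown, drop the memoization map. */
  protover_summary_cache_free_all();
}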
diff --git a/src/core/or/versions.h b/src/core/or/versions.h
index 22f3be176f..75dc17f9c7 100644
--- a/src/core/or/versions.h
+++ b/src/core/or/versions.h
@@ -1,7 +1,7 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
@@ -26,6 +26,8 @@ typedef enum version_status_t {
VS_UNKNOWN, /**< We have no idea. */
} version_status_t;
+time_t tor_get_approx_release_date(void);
+
version_status_t tor_version_is_obsolete(const char *myversion,
const char *versionlist);
int tor_version_parse_platform(const char *platform,
@@ -41,4 +43,6 @@ void summarize_protover_flags(protover_summary_flags_t *out,
const char *protocols,
const char *version);
+void protover_summary_cache_free_all(void);
+
#endif /* !defined(TOR_VERSIONS_H) */
diff --git a/src/core/proto/.may_include b/src/core/proto/.may_include
new file mode 100644
index 0000000000..a66c3f83a6
--- /dev/null
+++ b/src/core/proto/.may_include
@@ -0,0 +1,14 @@
+!advisory
+
+orconfig.h
+
+lib/crypt_ops/*.h
+lib/buf/*.h
+lib/malloc/*.h
+lib/string/*.h
+
+lib/net/address.h
+
+trunnel/*.h
+
+core/proto/*.h
diff --git a/src/core/proto/core_proto.md b/src/core/proto/core_proto.md
new file mode 100644
index 0000000000..ad43bc5846
--- /dev/null
+++ b/src/core/proto/core_proto.md
@@ -0,0 +1,6 @@
+@dir /core/proto
+@brief core/proto: Protocol encoding/decoding
+
+These functions should (but do not always) exist at a lower level than most
+of the rest of core.
+
diff --git a/src/core/proto/include.am b/src/core/proto/include.am
new file mode 100644
index 0000000000..726ef924cf
--- /dev/null
+++ b/src/core/proto/include.am
@@ -0,0 +1,18 @@
+
+# ADD_C_FILE: INSERT SOURCES HERE.
+LIBTOR_APP_A_SOURCES += \
+ src/core/proto/proto_cell.c \
+ src/core/proto/proto_control0.c \
+ src/core/proto/proto_ext_or.c \
+ src/core/proto/proto_haproxy.c \
+ src/core/proto/proto_http.c \
+ src/core/proto/proto_socks.c
+
+# ADD_C_FILE: INSERT HEADERS HERE.
+noinst_HEADERS += \
+ src/core/proto/proto_cell.h \
+ src/core/proto/proto_control0.h \
+ src/core/proto/proto_ext_or.h \
+ src/core/proto/proto_haproxy.h \
+ src/core/proto/proto_http.h \
+ src/core/proto/proto_socks.h
diff --git a/src/core/proto/proto_cell.c b/src/core/proto/proto_cell.c
index 0442e2c6ee..5c1a2c24d7 100644
--- a/src/core/proto/proto_cell.c
+++ b/src/core/proto/proto_cell.c
@@ -1,11 +1,18 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file proto_cell.c
+ * @brief Decodes Tor cells from buffers.
+ **/
+/* Right now it only handles variable-length cells, but eventually
+ * we should refactor other cell-reading code into here. */
+
#include "core/or/or.h"
-#include "lib/container/buffers.h"
+#include "lib/buf/buffers.h"
#include "core/proto/proto_cell.h"
#include "core/or/connection_or.h"
@@ -83,4 +90,3 @@ fetch_var_cell_from_buf(buf_t *buf, var_cell_t **out, int linkproto)
*out = result;
return 1;
}
-
diff --git a/src/core/proto/proto_cell.h b/src/core/proto/proto_cell.h
index 4f3982ea43..0e9cfbfed3 100644
--- a/src/core/proto/proto_cell.h
+++ b/src/core/proto/proto_cell.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file proto_cell.h
+ * @brief Header for proto_cell.c
+ **/
+
#ifndef TOR_PROTO_CELL_H
#define TOR_PROTO_CELL_H
@@ -14,4 +19,3 @@ int fetch_var_cell_from_buf(struct buf_t *buf, struct var_cell_t **out,
int linkproto);
#endif /* !defined(TOR_PROTO_CELL_H) */
-
diff --git a/src/core/proto/proto_control0.c b/src/core/proto/proto_control0.c
index 21fa328f02..323b37c539 100644
--- a/src/core/proto/proto_control0.c
+++ b/src/core/proto/proto_control0.c
@@ -1,11 +1,16 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file proto_control0.c
+ * @brief Code to detect the obsolete v0 control protocol.
+ **/
+
#include "core/or/or.h"
-#include "lib/container/buffers.h"
+#include "lib/buf/buffers.h"
#include "core/proto/proto_control0.h"
/** Return 1 iff buf looks more like it has an (obsolete) v0 controller
@@ -23,4 +28,3 @@ peek_buf_has_control0_command(buf_t *buf)
}
return 0;
}
-
diff --git a/src/core/proto/proto_control0.h b/src/core/proto/proto_control0.h
index 162e513a1b..561bf00d19 100644
--- a/src/core/proto/proto_control0.h
+++ b/src/core/proto/proto_control0.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file proto_control0.h
+ * @brief Header for proto_control0.c
+ **/
+
#ifndef TOR_PROTO_CONTROL0_H
#define TOR_PROTO_CONTROL0_H
@@ -11,4 +16,3 @@ struct buf_t;
int peek_buf_has_control0_command(struct buf_t *buf);
#endif /* !defined(TOR_PROTO_CONTROL0_H) */
-
diff --git a/src/core/proto/proto_ext_or.c b/src/core/proto/proto_ext_or.c
index edbc51b10c..23fc2393b7 100644
--- a/src/core/proto/proto_ext_or.c
+++ b/src/core/proto/proto_ext_or.c
@@ -1,11 +1,16 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file proto_ext_or.c
+ * @brief Parsing/encoding for the extended OR protocol.
+ **/
+
#include "core/or/or.h"
-#include "lib/container/buffers.h"
+#include "lib/buf/buffers.h"
#include "feature/relay/ext_orport.h"
#include "core/proto/proto_ext_or.h"
@@ -37,4 +42,3 @@ fetch_ext_or_command_from_buf(buf_t *buf, ext_or_cmd_t **out)
buf_get_bytes(buf, (*out)->body, len);
return 1;
}
-
diff --git a/src/core/proto/proto_ext_or.h b/src/core/proto/proto_ext_or.h
index b2bc64af85..3408599fb7 100644
--- a/src/core/proto/proto_ext_or.h
+++ b/src/core/proto/proto_ext_or.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file proto_ext_or.h
+ * @brief Header for proto_ext_or.c
+ **/
+
#ifndef TOR_PROTO_EXT_OR_H
#define TOR_PROTO_EXT_OR_H
@@ -19,4 +24,11 @@ struct ext_or_cmd_t {
int fetch_ext_or_command_from_buf(struct buf_t *buf,
struct ext_or_cmd_t **out);
+ext_or_cmd_t *ext_or_cmd_new(uint16_t len);
+
+#define ext_or_cmd_free(cmd) \
+ FREE_AND_NULL(ext_or_cmd_t, ext_or_cmd_free_, (cmd))
+
+void ext_or_cmd_free_(ext_or_cmd_t *cmd);
+
#endif /* !defined(TOR_PROTO_EXT_OR_H) */
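
The ext_or_cmd_new()/ext_or_cmd_free() pair moved into this header follows the usual FREE_AND_NULL pattern, so freeing also nulls the caller's pointer. A tiny illustrative sketch, assuming (as fetch_ext_or_command_from_buf() suggests) that the argument to ext_or_cmd_new() is the body length:

/* Illustrative only. */
static void
example_ext_or_cmd_lifecycle(void)
{
  ext_or_cmd_t *cmd = ext_or_cmd_new(4); /* body of 4 bytes */
  ext_or_cmd_free(cmd);                  /* frees and sets cmd to NULL */
  tor_assert(cmd == NULL);
}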
diff --git a/src/core/proto/proto_haproxy.c b/src/core/proto/proto_haproxy.c
new file mode 100644
index 0000000000..9129fc55bf
--- /dev/null
+++ b/src/core/proto/proto_haproxy.c
@@ -0,0 +1,45 @@
+/* Copyright (c) 2019-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#define PROTO_HAPROXY_PRIVATE
+#include "lib/malloc/malloc.h"
+#include "lib/net/address.h"
+#include "lib/string/printf.h"
+#include "core/proto/proto_haproxy.h"
+
+/** Return a newly allocated, NUL-terminated PROXY header line. Return NULL
+ * if addr_port->addr uses an address family that the PROXY protocol cannot
+ * express.
+ */
+char *
+haproxy_format_proxy_header_line(const tor_addr_port_t *addr_port)
+{
+ tor_assert(addr_port);
+
+ sa_family_t family = tor_addr_family(&addr_port->addr);
+ const char *family_string = NULL;
+ const char *src_addr_string = NULL;
+
+ switch (family) {
+ case AF_INET:
+ family_string = "TCP4";
+ src_addr_string = "0.0.0.0";
+ break;
+ case AF_INET6:
+ family_string = "TCP6";
+ src_addr_string = "::";
+ break;
+ default:
+ /* Unknown family. */
+ return NULL;
+ }
+
+ char *buf;
+ char addrbuf[TOR_ADDR_BUF_LEN];
+
+ tor_addr_to_str(addrbuf, &addr_port->addr, sizeof(addrbuf), 0);
+
+ tor_asprintf(&buf, "PROXY %s %s %s 0 %d\r\n", family_string, src_addr_string,
+ addrbuf, addr_port->port);
+
+ return buf;
+}
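
With an IPv4 destination of 192.0.2.10:443, the function above produces "PROXY TCP4 0.0.0.0 192.0.2.10 0 443\r\n". A minimal usage sketch (the wrapper is illustrative, not part of the patch):

/* Illustrative only: build a header line and free it when done. */
static void
example_haproxy_header(void)
{
  tor_addr_port_t dest;
  if (tor_addr_parse(&dest.addr, "192.0.2.10") < 0)
    return;
  dest.port = 443;

  char *line = haproxy_format_proxy_header_line(&dest);
  /* line == "PROXY TCP4 0.0.0.0 192.0.2.10 0 443\r\n" */
  tor_free(line);
}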
diff --git a/src/core/proto/proto_haproxy.h b/src/core/proto/proto_haproxy.h
new file mode 100644
index 0000000000..63c164e1a1
--- /dev/null
+++ b/src/core/proto/proto_haproxy.h
@@ -0,0 +1,12 @@
+/* Copyright (c) 2019-2020, The Tor Project, Inc. */
+/* See LICENSE for licensing information */
+
+#ifndef TOR_PROTO_HAPROXY_H
+#define TOR_PROTO_HAPROXY_H
+
+struct tor_addr_port_t;
+
+char *haproxy_format_proxy_header_line(
+ const struct tor_addr_port_t *addr_port);
+
+#endif /* !defined(TOR_PROTO_HAPROXY_H) */
diff --git a/src/core/proto/proto_http.c b/src/core/proto/proto_http.c
index 5c86fc4979..ef4b897fcc 100644
--- a/src/core/proto/proto_http.c
+++ b/src/core/proto/proto_http.c
@@ -1,12 +1,17 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file proto_http.c
+ * @brief Parse a subset of the HTTP protocol.
+ **/
+
#define PROTO_HTTP_PRIVATE
#include "core/or/or.h"
-#include "lib/container/buffers.h"
+#include "lib/buf/buffers.h"
#include "core/proto/proto_http.h"
/** Return true if <b>cmd</b> looks like a HTTP (proxy) request. */
@@ -168,4 +173,3 @@ buf_http_find_content_length(const char *headers, size_t headerlen,
return ok ? 1 : -1;
}
-
diff --git a/src/core/proto/proto_http.h b/src/core/proto/proto_http.h
index cd70050205..e0c5135346 100644
--- a/src/core/proto/proto_http.h
+++ b/src/core/proto/proto_http.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file proto_http.h
+ * @brief Header for proto_http.c
+ **/
+
#ifndef TOR_PROTO_HTTP_H
#define TOR_PROTO_HTTP_H
@@ -21,4 +26,3 @@ STATIC int buf_http_find_content_length(const char *headers, size_t headerlen,
#endif
#endif /* !defined(TOR_PROTO_HTTP_H) */
-
diff --git a/src/core/proto/proto_socks.c b/src/core/proto/proto_socks.c
index 5a7d7ac9be..bcb0c2b2f9 100644
--- a/src/core/proto/proto_socks.c
+++ b/src/core/proto/proto_socks.c
@@ -1,14 +1,19 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file proto_socks.c
+ * @brief Implementations for SOCKS4 and SOCKS5 protocols.
+ **/
+
#include "core/or/or.h"
#include "feature/client/addressmap.h"
-#include "lib/container/buffers.h"
+#include "lib/buf/buffers.h"
#include "core/mainloop/connection.h"
-#include "feature/control/control.h"
+#include "feature/control/control_events.h"
#include "app/config/config.h"
#include "lib/crypt_ops/crypto_util.h"
#include "feature/relay/ext_orport.h"
@@ -105,7 +110,7 @@ socks_request_free_(socks_request_t *req)
/**
* Parse a single SOCKS4 request from buffer <b>raw_data</b> of length
* <b>datalen</b> and update relevant fields of <b>req</b>. If SOCKS4a
- * request is detected, set <b>*is_socks4a<b> to true. Set <b>*drain_out</b>
+ * request is detected, set <b>*is_socks4a</b> to true. Set <b>*drain_out</b>
* to number of bytes we parsed so far.
*
* Return SOCKS_RESULT_DONE if parsing succeeded, SOCKS_RESULT_INVALID if
@@ -584,9 +589,8 @@ parse_socks5_client_request(const uint8_t *raw_data, socks_request_t *req,
strlcpy(req->address, hostname, sizeof(req->address));
} break;
case 4: {
- const char *ipv6 =
- (const char *)socks5_client_request_getarray_dest_addr_ipv6(
- trunnel_req);
+ const uint8_t *ipv6 =
+ socks5_client_request_getarray_dest_addr_ipv6(trunnel_req);
tor_addr_from_ipv6_bytes(&destaddr, ipv6);
tor_addr_to_str(req->address, &destaddr, sizeof(req->address), 1);
@@ -618,6 +622,7 @@ process_socks5_client_request(socks_request_t *req,
int safe_socks)
{
socks_result_t res = SOCKS_RESULT_DONE;
+ tor_addr_t tmpaddr;
if (req->command != SOCKS_COMMAND_CONNECT &&
req->command != SOCKS_COMMAND_RESOLVE &&
@@ -628,11 +633,10 @@ process_socks5_client_request(socks_request_t *req,
}
if (req->command == SOCKS_COMMAND_RESOLVE_PTR &&
- !string_is_valid_ipv4_address(req->address) &&
- !string_is_valid_ipv6_address(req->address)) {
+ tor_addr_parse(&tmpaddr, req->address) < 0) {
socks_request_set_socks5_error(req, SOCKS5_ADDRESS_TYPE_NOT_SUPPORTED);
log_warn(LD_APP, "socks5 received RESOLVE_PTR command with "
- "hostname type. Rejecting.");
+ "a malformed address. Rejecting.");
res = SOCKS_RESULT_INVALID;
goto end;
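
The RESOLVE_PTR check now relies on tor_addr_parse(), which handles both address families, instead of two separate string validators. Illustratively (a hedged sketch, not code from the patch):

/* Illustrative only: both literal families parse; other strings do not. */
static void
example_resolve_ptr_check(void)
{
  tor_addr_t a;
  tor_assert(tor_addr_parse(&a, "192.0.2.1") >= 0);
  tor_assert(tor_addr_parse(&a, "2001:db8::1") >= 0);
  tor_assert(tor_addr_parse(&a, "not-an-address") < 0);
}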
diff --git a/src/core/proto/proto_socks.h b/src/core/proto/proto_socks.h
index 2a387bf848..f3af0d988e 100644
--- a/src/core/proto/proto_socks.h
+++ b/src/core/proto/proto_socks.h
@@ -1,9 +1,14 @@
/* Copyright (c) 2001 Matej Pfajfar.
* Copyright (c) 2001-2004, Roger Dingledine.
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
- * Copyright (c) 2007-2019, The Tor Project, Inc. */
+ * Copyright (c) 2007-2020, The Tor Project, Inc. */
/* See LICENSE for licensing information */
+/**
+ * @file proto_socks.h
+ * @brief Header for proto_socks.c
+ **/
+
#ifndef TOR_PROTO_SOCKS_H
#define TOR_PROTO_SOCKS_H
diff --git a/src/core/stA1RajU b/src/core/stA1RajU
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/src/core/stA1RajU
diff --git a/src/core/stiysZND b/src/core/stiysZND
new file mode 100644
index 0000000000..faa365b769
--- /dev/null
+++ b/src/core/stiysZND
Binary files differ