author     Nick Mathewson <nickm@torproject.org>  2024-01-12 14:34:27 +0000
committer  Nick Mathewson <nickm@torproject.org>  2024-01-12 14:34:27 +0000
commit     508a1dd2ddde4fd5f3e29538f24535014a60e613 (patch)
tree       91cf5f767be351c648a6c061b0845463506b5de2
parent     182ad93c2525662dd256640c484eabd6f6c3be8f (diff)
parent     8c1ccecab23fe5239658b074f879fd224a163447 (diff)
Merge branch 'publisher-todo-triage' into 'main'
(branch: 1232-resolve-dead_code-and-unused_variables-in-svc-publish)

tor-hsservice: Triage descriptor publisher TODOs

See merge request tpo/core/arti!1875
-rw-r--r--  crates/tor-hsservice/src/svc.rs                       1
-rw-r--r--  crates/tor-hsservice/src/svc/publish.rs              34
-rw-r--r--  crates/tor-hsservice/src/svc/publish/backoff.rs       2
-rw-r--r--  crates/tor-hsservice/src/svc/publish/descriptor.rs    8
-rw-r--r--  crates/tor-hsservice/src/svc/publish/reactor.rs      72
5 files changed, 39 insertions(+), 78 deletions(-)
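
The main functional change in this merge removes the dedicated shutdown_rx broadcast channel from Publisher and its Reactor: instead of listening on a separate channel, the reactor now treats the closing of its existing input channels as the shutdown signal (see the removed select_biased! arm in reactor.rs below). A minimal sketch of that pattern, with an illustrative channel standing in for the reactor's real inputs:

// Sketch only: shutdown is inferred from a closed input channel rather
// than from a dedicated broadcast::Receiver<Void>.
use futures::{channel::mpsc, StreamExt};

enum ShutdownStatus {
    Continue,
    Terminate,
}

/// One iteration of a simplified event loop. `events` stands in for the
/// reactor's real inputs (e.g. the upload-completion channel).
async fn run_once(events: &mut mpsc::Receiver<()>) -> ShutdownStatus {
    match events.next().await {
        // The sender side was dropped: interpret that as "shut down".
        None => ShutdownStatus::Terminate,
        Some(()) => ShutdownStatus::Continue,
    }
}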
diff --git a/crates/tor-hsservice/src/svc.rs b/crates/tor-hsservice/src/svc.rs
index 08f1bc39c..a540daf2e 100644
--- a/crates/tor-hsservice/src/svc.rs
+++ b/crates/tor-hsservice/src/svc.rs
@@ -221,7 +221,6 @@ impl OnionService {
circ_pool,
publisher_view,
config_rx,
- shutdown_rx.clone(),
Arc::clone(&keymgr),
);
diff --git a/crates/tor-hsservice/src/svc/publish.rs b/crates/tor-hsservice/src/svc/publish.rs
index 3df885b66..19b912bf6 100644
--- a/crates/tor-hsservice/src/svc/publish.rs
+++ b/crates/tor-hsservice/src/svc/publish.rs
@@ -5,11 +5,10 @@ mod descriptor;
mod reactor;
use futures::task::SpawnExt;
-use postage::{broadcast, watch};
+use postage::watch;
use std::sync::Arc;
use tor_keymgr::KeyMgr;
use tracing::warn;
-use void::Void;
use tor_error::warn_report;
use tor_netdir::NetDirProvider;
@@ -46,8 +45,6 @@ pub(crate) struct Publisher<R: Runtime, M: Mockable> {
ipt_watcher: IptsPublisherView,
/// A channel for receiving onion service config change notifications.
config_rx: watch::Receiver<Arc<OnionServiceConfig>>,
- /// A channel for receiving the signal to shut down.
- shutdown_rx: broadcast::Receiver<Void>,
/// The key manager.
keymgr: Arc<KeyMgr>,
}
@@ -67,7 +64,6 @@ impl<R: Runtime, M: Mockable> Publisher<R, M> {
mockable: impl Into<M>,
ipt_watcher: IptsPublisherView,
config_rx: watch::Receiver<Arc<OnionServiceConfig>>,
- shutdown_rx: broadcast::Receiver<Void>,
keymgr: Arc<KeyMgr>,
) -> Self {
let config = config_rx.borrow().clone();
@@ -79,7 +75,6 @@ impl<R: Runtime, M: Mockable> Publisher<R, M> {
config,
ipt_watcher,
config_rx,
- shutdown_rx,
keymgr,
}
}
@@ -94,7 +89,6 @@ impl<R: Runtime, M: Mockable> Publisher<R, M> {
config,
ipt_watcher,
config_rx,
- shutdown_rx,
keymgr,
} = self;
@@ -106,7 +100,6 @@ impl<R: Runtime, M: Mockable> Publisher<R, M> {
config,
ipt_watcher,
config_rx,
- shutdown_rx,
keymgr,
);
@@ -127,7 +120,7 @@ impl<R: Runtime, M: Mockable> Publisher<R, M> {
/// Inform this publisher that its set of keys has changed.
///
- /// TODO HSS: Either this needs to take new keys as an argument, or there
+ /// TODO (#1217): Either this needs to take new keys as an argument, or there
/// needs to be a source of keys (including public keys) in Publisher.
pub(crate) fn new_hs_keys(&self, keys: ()) {
todo!()
@@ -135,20 +128,17 @@ impl<R: Runtime, M: Mockable> Publisher<R, M> {
/// Return our current status.
//
- // TODO HSS: There should also be a postage::Watcher -based stream of status
+ // TODO (#1083): There should also be a postage::Watcher-based stream of status
// change events.
pub(crate) fn status(&self) -> PublisherStatus {
todo!()
}
-
- // TODO HSS: We may also need to update descriptors based on configuration
- // or authentication changes.
}
/// Current status of our attempts to publish an onion service descriptor.
#[derive(Debug, Clone)]
pub(crate) struct PublisherStatus {
- // TODO HSS add fields
+ // TODO (#1083) add fields
}
//
@@ -315,7 +305,7 @@ mod test {
async fn begin_dir_stream(self: Arc<Self>) -> Result<Self::DataStream, tor_proto::Error> {
Ok(MockDataStream {
publish_count: Arc::clone(&self.publish_count),
- // TODO HSS: this will need to change when we start reusing circuits (currently,
+ // TODO: this will need to change when we start reusing circuits (currently,
// we only ever create one data stream per circuit).
poll_read_responses: Arc::clone(&self.poll_read_responses),
})
@@ -470,7 +460,6 @@ mod test {
keymgr: Arc<KeyMgr>,
pv: IptsPublisherView,
config_rx: watch::Receiver<Arc<OnionServiceConfig>>,
- shutdown_rx: broadcast::Receiver<Void>,
netdir: NetDir,
reactor_event: impl FnOnce(),
poll_read_responses: I,
@@ -493,7 +482,6 @@ mod test {
circpool,
pv,
config_rx,
- shutdown_rx,
keymgr,
);
@@ -561,7 +549,6 @@ mod test {
// If any of the uploads fail, they will be retried. Note that the upload failure will
// affect _each_ hsdir, so the expected number of uploads is a multiple of hsdir_count.
let expected_upload_count = hsdir_count * multiplier;
- let (_shutdown_tx, shutdown_rx) = broadcast::channel(0);
run_test(
runtime.clone(),
@@ -570,7 +557,6 @@ mod test {
keymgr,
pv,
config_rx,
- shutdown_rx,
netdir,
update_ipts,
poll_read_responses,
@@ -611,15 +597,15 @@ mod test {
}
}
- // TODO HSS: test that the descriptor is republished when the config changes
+ // TODO (#1120): test that the descriptor is republished when the config changes
- // TODO HSS: test that the descriptor is reuploaded only to the HSDirs that need it (i.e. the
+ // TODO (#1120): test that the descriptor is reuploaded only to the HSDirs that need it (i.e. the
// ones for which it's dirty)
- // TODO HSS: test that rate-limiting works correctly
+ // TODO (#1120): test that rate-limiting works correctly
- // TODO HSS: test that the uploaded descriptor contains the expected values
+ // TODO (#1120): test that the uploaded descriptor contains the expected values
- // TODO HSS: test that the publisher stops publishing if the IPT manager sets the IPTs to
+ // TODO (#1120): test that the publisher stops publishing if the IPT manager sets the IPTs to
// `None`.
}
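
The TODO triaged as #1083 above asks for a postage::Watcher-based stream of status change events. As a rough illustration of what such a status channel could look like (the PublisherStatus field is hypothetical, and tokio's watch channel is used here as a stand-in for the postage-based one the TODO has in mind):

use tokio::sync::watch;

/// Hypothetical status payload; the real PublisherStatus fields are still TODO (#1083).
#[derive(Debug, Clone, Default)]
struct PublisherStatus {
    /// Number of HsDirs the descriptor was most recently uploaded to.
    uploaded_to: usize,
}

/// Create a status channel: the sender publishes updates, and any number of
/// receivers can read the latest value or await changes.
fn status_channel() -> (watch::Sender<PublisherStatus>, watch::Receiver<PublisherStatus>) {
    watch::channel(PublisherStatus::default())
}

fn note_upload(tx: &watch::Sender<PublisherStatus>, uploaded_to: usize) {
    // send_replace stores the new value and notifies all subscribed receivers.
    tx.send_replace(PublisherStatus { uploaded_to });
}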
diff --git a/crates/tor-hsservice/src/svc/publish/backoff.rs b/crates/tor-hsservice/src/svc/publish/backoff.rs
index 9df03964f..2a7da7c5f 100644
--- a/crates/tor-hsservice/src/svc/publish/backoff.rs
+++ b/crates/tor-hsservice/src/svc/publish/backoff.rs
@@ -391,5 +391,5 @@ mod tests {
);
}
- // TODO HSS: needs tests for the remaining corner cases
+ // TODO (#1120): needs tests for the remaining corner cases
}
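
The remaining backoff.rs test work (#1120) concerns corner cases of the retry schedule. The publisher's actual delays come from tor_basic_utils::retry::RetryDelay (imported in reactor.rs below); as a generic illustration of the backoff idea only, not the crate's BackoffSchedule API:

use std::time::Duration;

/// Illustrative exponential backoff with a maximum delay.
struct Backoff {
    next: Duration,
    max: Duration,
}

impl Backoff {
    fn new(initial: Duration, max: Duration) -> Self {
        Self { next: initial, max }
    }

    /// Return the current delay and double it (up to max) for the next attempt.
    fn next_delay(&mut self) -> Duration {
        let delay = self.next;
        self.next = (self.next * 2).min(self.max);
        delay
    }
}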
diff --git a/crates/tor-hsservice/src/svc/publish/descriptor.rs b/crates/tor-hsservice/src/svc/publish/descriptor.rs
index d7c489546..4e40fcc4e 100644
--- a/crates/tor-hsservice/src/svc/publish/descriptor.rs
+++ b/crates/tor-hsservice/src/svc/publish/descriptor.rs
@@ -85,19 +85,19 @@ pub(super) fn build_sign<Rng: RngCore + CryptoRng>(
rng,
)?;
- // TODO HSS: support introduction-layer authentication.
+ // TODO #1028: support introduction-layer authentication.
let auth_required = None;
let is_single_onion_service =
matches!(config.anonymity, crate::Anonymity::DangerouslyNonAnonymous);
- // TODO HSS: perhaps the certificates should be read from the keystore, rather than created
+ // TODO (#955): perhaps the certificates should be read from the keystore, rather than created
// when building the descriptor. See #1048
let intro_auth_key_cert_expiry = now + HS_DESC_CERT_LIFETIME_SEC;
let intro_enc_key_cert_expiry = now + HS_DESC_CERT_LIFETIME_SEC;
let hs_desc_sign_cert_expiry = now + HS_DESC_CERT_LIFETIME_SEC;
- // TODO HSS: Temporarily disabled while we figure out how we want the client auth config to
+ // TODO (#1206): Temporarily disabled while we figure out how we want the client auth config to
// work; see #1028
/*
let auth_clients: Option<Vec<curve25519::PublicKey>> = config.encrypt_descriptor
@@ -156,7 +156,7 @@ fn decode_curve25519_str(key: &str) -> Result<curve25519::PublicKey, AuthorizedC
fn read_key_dir(
dir: &std::path::Path,
) -> Result<Vec<curve25519::PublicKey>, AuthorizedClientConfigError> {
- // TODO HSS: We will eventually need to validate the key file names and
+ // TODO (#1206): We will eventually need to validate the key file names and
// extensions.
std::fs::read_dir(dir)
.map_err(|e| AuthorizedClientConfigError::KeyDir {
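
The read_key_dir hunk above iterates over a directory of authorized-client key files; the #1206 note is that file names and extensions are not yet validated. A standalone sketch of the directory-walking part only (this is not the crate's function, and it leaves key parsing to the caller):

use std::fs;
use std::io;
use std::path::Path;

/// Collect the text contents of every regular file in dir.
fn read_key_files(dir: &Path) -> io::Result<Vec<String>> {
    let mut keys = Vec::new();
    for entry in fs::read_dir(dir)? {
        let entry = entry?;
        // As the TODO (#1206) notes, names and extensions are not validated yet:
        // every regular file is treated as a candidate key.
        if entry.file_type()?.is_file() {
            keys.push(fs::read_to_string(entry.path())?.trim().to_owned());
        }
    }
    Ok(keys)
}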
diff --git a/crates/tor-hsservice/src/svc/publish/reactor.rs b/crates/tor-hsservice/src/svc/publish/reactor.rs
index 15f0ccf99..96bde6b6b 100644
--- a/crates/tor-hsservice/src/svc/publish/reactor.rs
+++ b/crates/tor-hsservice/src/svc/publish/reactor.rs
@@ -1,6 +1,6 @@
//! The onion service publisher reactor.
//!
-//! TODO HSS: write the docs
+//! TODO (#1216): write the docs
use std::fmt::Debug;
use std::iter;
@@ -13,13 +13,13 @@ use futures::channel::mpsc::{self, Receiver, Sender};
use futures::task::SpawnExt;
use futures::{select_biased, AsyncRead, AsyncWrite, FutureExt, SinkExt, StreamExt, TryStreamExt};
use postage::sink::SendError;
-use postage::{broadcast, watch};
+use postage::watch;
use tor_basic_utils::retry::RetryDelay;
use tor_hscrypto::ope::AesOpeKey;
use tor_hscrypto::RevisionCounter;
use tor_keymgr::KeyMgr;
use tor_llcrypto::pk::ed25519;
-use tracing::{debug, error, info, trace, warn};
+use tracing::{debug, error, trace, warn};
use tor_circmgr::hspool::{HsCircKind, HsCircPool};
use tor_dirclient::request::HsDescUploadRequest;
@@ -34,7 +34,6 @@ use tor_linkspec::{CircTarget, HasRelayIds, OwnedCircTarget, RelayIds};
use tor_netdir::{NetDir, NetDirProvider, Relay, Timeliness};
use tor_proto::circuit::ClientCirc;
use tor_rtcompat::{Runtime, SleepProviderExt};
-use void::Void;
use crate::config::OnionServiceConfig;
use crate::ipt_set::{IptsPublisherUploadView, IptsPublisherView};
@@ -54,12 +53,12 @@ use crate::{
/// need it. If not, it schedules the upload to happen `UPLOAD_RATE_LIM_THRESHOLD` seconds from the
/// current time.
//
-// TODO HSS: this value is probably not right.
+// TODO (#1121): this value is probably not right.
const UPLOAD_RATE_LIM_THRESHOLD: Duration = Duration::from_secs(60);
/// The maximum number of concurrent upload tasks per time period.
//
-// TODO HSS: this value was arbitrarily chosen and may not be optimal.
+// TODO (#1121): this value was arbitrarily chosen and may not be optimal.
//
// The uploads for all TPs happen in parallel. As a result, the actual limit for the maximum
// number of concurrent upload tasks is multiplied by a number which depends on the TP parameters
@@ -70,7 +69,7 @@ const MAX_CONCURRENT_UPLOADS: usize = 16;
/// The maximum time allowed for uploading a descriptor to a single HSDir.
//
-// TODO HSS: this value is probably not right.
+// TODO (#1121): this value is probably not right.
const UPLOAD_TIMEOUT: Duration = Duration::from_secs(5 * 60);
/// A reactor for the HsDir [`Publisher`](super::Publisher).
@@ -89,8 +88,6 @@ pub(super) struct Reactor<R: Runtime, M: Mockable> {
ipt_watcher: IptsPublisherView,
/// A channel for receiving onion service config change notifications.
config_rx: watch::Receiver<Arc<OnionServiceConfig>>,
- /// A channel for receiving the signal to shut down.
- shutdown_rx: broadcast::Receiver<Void>,
/// A channel for receiving updates regarding our [`PublishStatus`].
///
/// The main loop of the reactor watches for updates on this channel.
@@ -124,7 +121,7 @@ pub(super) struct Reactor<R: Runtime, M: Mockable> {
///
/// This field is initialized in [`Reactor::run`].
///
- // TODO HSS: decide if this is the right approach for implementing rate-limiting
+ // TODO: decide if this is the right approach for implementing rate-limiting
reattempt_upload_tx: Option<watch::Sender<Option<Instant>>>,
/// A channel for sending upload completion notifications.
///
@@ -162,7 +159,7 @@ impl<R: Runtime, M: Mockable> Immutable<R, M> {
/// Returns an error if the service is running in offline mode and the descriptor signing
/// keypair of the specified `period` is not available.
//
- // TODO HSS: we don't support "offline" mode (yet), so this always returns an AesOpeKey
+ // TODO (#1194): we don't support "offline" mode (yet), so this always returns an AesOpeKey
// built from the blinded id key
fn create_ope_key(&self, period: TimePeriod) -> Result<AesOpeKey, FatalError> {
let ope_key = match read_blind_id_keypair(&self.keymgr, &self.nickname, period)? {
@@ -173,7 +170,7 @@ impl<R: Runtime, M: Mockable> Immutable<R, M> {
.expect("Wrong length on slice")
}
None => {
- // TODO HSS: we don't support externally provisioned keys (yet), so this branch
+ // TODO (#1194): we don't support externally provisioned keys (yet), so this branch
// is unreachable (for now).
let desc_sign_key_spec =
DescSigningKeypairSpecifier::new(self.nickname.clone(), period);
@@ -473,7 +470,6 @@ impl<R: Runtime, M: Mockable> Reactor<R, M> {
config: Arc<OnionServiceConfig>,
ipt_watcher: IptsPublisherView,
config_rx: watch::Receiver<Arc<OnionServiceConfig>>,
- shutdown_rx: broadcast::Receiver<Void>,
keymgr: Arc<KeyMgr>,
) -> Self {
/// The maximum size of the upload completion notifier channel.
@@ -508,7 +504,6 @@ impl<R: Runtime, M: Mockable> Reactor<R, M> {
dir_provider,
ipt_watcher,
config_rx,
- shutdown_rx,
publish_status_rx,
publish_status_tx,
reattempt_upload_tx: None,
@@ -565,7 +560,7 @@ impl<R: Runtime, M: Mockable> Reactor<R, M> {
// Enough time has elapsed. Remind the reactor to retry the upload.
if let Err(e) = schedule_upload_tx.send(()).await {
- // TODO HSS: update publisher state
+ // TODO (#1083): update publisher state
debug!(nickname=%nickname, "failed to notify reactor to reattempt upload");
}
}
@@ -584,7 +579,7 @@ impl<R: Runtime, M: Mockable> Reactor<R, M> {
self.imm.nickname
);
- // TODO HSS: Set status to Shutdown.
+ // TODO (#1083): Set status to Shutdown.
return Err(e);
}
}
@@ -599,20 +594,6 @@ impl<R: Runtime, M: Mockable> Reactor<R, M> {
let mut netdir_events = self.dir_provider.events();
select_biased! {
- // TODO HSS: Stop waiting for the shutdown signal
- // (instead, let the sender of the ipt_watcher being dropped
- // be our shutdown signal)
- //
- // See https://gitlab.torproject.org/tpo/core/arti/-/merge_requests/1812#note_2976757
- shutdown = self.shutdown_rx.next().fuse() => {
- info!(
- nickname=%self.imm.nickname,
- "descriptor publisher terminating due to shutdown signal"
- );
-
- assert!(shutdown.is_none());
- return Ok(ShutdownStatus::Terminate);
- },
res = self.upload_task_complete_rx.next().fuse() => {
let Some(upload_res) = res else {
return Ok(ShutdownStatus::Terminate);
@@ -629,7 +610,7 @@ impl<R: Runtime, M: Mockable> Reactor<R, M> {
// Hopefully a netdir will appear in the future.
// In the meantime, suspend operations.
//
- // TODO HSS there is a bug here: we stop reading on our inputs
+ // TODO (#1218): there is a bug here: we stop reading on our inputs
// including eg publish_status_rx, but it is our job to log some of
// these things. While we are waiting for a netdir, all those messages
// are "stuck"; they'll appear later, with misleading timestamps.
@@ -718,7 +699,7 @@ impl<R: Runtime, M: Mockable> Reactor<R, M> {
if update_last_successful {
period.last_successful = Some(upload_res.revision_counter);
- // TODO HSS: Is it possible that this won't update the statuses promptly
+ // TODO (#1098): Is it possible that this won't update the statuses promptly
// enough. For example, it's possible for the reactor to see a Dirty descriptor
// and start an upload task for a descriptor that has already been uploaded (or is
// being uploaded) in another task, but whose upload results have not yet been
@@ -732,8 +713,6 @@ impl<R: Runtime, M: Mockable> Reactor<R, M> {
*status = DescriptorStatus::Clean;
}
}
-
- // TODO HSS: maybe the failed uploads should be rescheduled at some point.
}
}
@@ -790,7 +769,7 @@ impl<R: Runtime, M: Mockable> Reactor<R, M> {
.ok_or_else(|| FatalError::MissingHsIdKeypair(self.imm.nickname.clone()))?;
let svc_key_spec = BlindIdKeypairSpecifier::new(self.imm.nickname.clone(), *period);
- // TODO HSS: make this configurable
+ // TODO (#1106): make this configurable
let keystore_selector = Default::default();
let blind_id_kp = self
.imm
@@ -854,7 +833,7 @@ impl<R: Runtime, M: Mockable> Reactor<R, M> {
// `DescriptorConfigView` as described in
// https://gitlab.torproject.org/tpo/core/arti/-/merge_requests/1603#note_2944902
- // TODO HSS: Temporarily disabled while we figure out how we want the client auth config to
+ // TODO (#1206): Temporarily disabled while we figure out how we want the client auth config to
// work; see #1028
/*
if old_config.anonymity == new_config.anonymity
@@ -972,10 +951,10 @@ impl<R: Runtime, M: Mockable> Reactor<R, M> {
/// If we've recently uploaded some descriptors, we return immediately and schedule the upload
/// to happen N minutes from now.
///
- /// Any failed uploads are retried (TODO HSS: document the retry logic when we implement it, as
- /// well as in what cases this will return an error).
+ /// Any failed uploads are retried (TODO (#1216, #1098): document the retry logic when we
+ /// implement it, as well as in what cases this will return an error).
//
- // TODO HSS: what is N?
+ // TODO (#1121): what is N?
async fn upload_all(&mut self) -> Result<(), FatalError> {
trace!("starting descriptor upload task...");
@@ -1088,8 +1067,8 @@ impl<R: Runtime, M: Mockable> Reactor<R, M> {
/// Upload the descriptor for the specified time period.
///
- /// Any failed uploads are retried (TODO HSS: document the retry logic when we implement it, as
- /// well as in what cases this will return an error).
+ /// Any failed uploads are retried (TODO (#1216, #1098): document the retry logic when we
+ /// implement it, as well as in what cases this will return an error).
async fn upload_for_time_period(
hs_dirs: Vec<RelayIds>,
netdir: &Arc<NetDir>,
@@ -1223,7 +1202,7 @@ impl<R: Runtime, M: Mockable> Reactor<R, M> {
ipt_set.note_publication_attempt(&imm.runtime, worst_case_end)
{
let wait = e.log_retry_max(&imm.nickname)?;
- // TODO HSS retry instead of this
+ // TODO (#1219): retry instead of this
return Err(FatalError::Bug(internal!(
"ought to retry after {wait:?}, crashing instead"
))
@@ -1260,9 +1239,6 @@ impl<R: Runtime, M: Mockable> Reactor<R, M> {
}
};
- // TODO HSS: add a mechanism for rescheduling uploads that have
- // UploadStatus::Failure.
- //
// Note: UploadStatus::Failure is only returned when
// upload_descriptor_with_retries fails, i.e. if all our retry
// attempts have failed
@@ -1367,7 +1343,7 @@ impl<R: Runtime, M: Mockable> Reactor<R, M> {
/// Upload a descriptor to the specified HSDir, retrying if appropriate.
///
- /// TODO HSS: document the retry logic when we implement it.
+ /// TODO (#1216): document the retry logic when we implement it.
async fn upload_descriptor_with_retries(
hsdesc: String,
netdir: &Arc<NetDir>,
@@ -1423,7 +1399,7 @@ impl<R: Runtime, M: Mockable> Reactor<R, M> {
///
/// Returns `None` if the service is running in "offline" mode.
///
-// TODO HSS: we don't currently have support for "offline" mode so this can never return
+// TODO (#1194): we don't currently have support for "offline" mode so this can never return
// `Ok(None)`.
pub(super) fn read_blind_id_keypair(
keymgr: &Arc<KeyMgr>,
@@ -1487,7 +1463,7 @@ impl<M: Mockable> BackoffSchedule for PublisherBackoffSchedule<M> {
}
fn timeout(&self) -> Option<Duration> {
- // TODO HSS: pick a less arbitrary timeout
+ // TODO (#1121): pick a less arbitrary timeout
Some(Duration::from_secs(30))
}