rad:z3gqcJUoA1n9HaHKufZs5FCSGazv5 heartwood a06db3fa022d083d5c5987ae0b79eae96b64b272
{
  "request": "trigger",
  "version": 1,
  "event_type": "patch",
  "repository": {
    "id": "rad:z3gqcJUoA1n9HaHKufZs5FCSGazv5",
    "name": "heartwood",
    "description": "Radicle Heartwood Protocol & Stack",
    "private": false,
    "default_branch": "master",
    "delegates": [
      "did:key:z6MksFqXN3Yhqk8pTJdUGLwATkRfQvwZXPqR2qMEhbS9wzpT",
      "did:key:z6MktaNvN1KVFMkSRAiN4qK5yvX1zuEEaseeX5sffhzPZRZW",
      "did:key:z6MkireRatUThvd3qzfKht1S44wpm4FEWSSa4PRMTSQZ3voM",
      "did:key:z6MkgFq6z5fkF2hioLLSNu1zP2qEL1aHXHZzGH1FLFGAnBGz",
      "did:key:z6MkkPvBfjP4bQmco5Dm7UGsX2ruDBieEHi8n9DVJWX5sTEz"
    ]
  },
  "action": "Created",
  "patch": {
    "id": "9f920f5d4639168302ffd9f2eef2c9e14bc5ea8d",
    "author": {
      "id": "did:key:z6Mkgi3EW1eVVb4REBZspeU9cHrJev8GEZae7uACZJkxCKMz",
      "alias": "shadowbane"
    },
    "title": "stability/perf: reduce churn, add backoff/debounce, throttle GC, safer shutdown",
    "state": {
      "status": "open",
      "conflicts": []
    },
    "before": "d2ab7b1b46935c95a46d0e7ddac3130b595eb15a",
    "after": "a06db3fa022d083d5c5987ae0b79eae96b64b272",
    "commits": [
      "a06db3fa022d083d5c5987ae0b79eae96b64b272",
      "c0498425e43428e4eb027551813d82feb39674f2",
      "af4b32cbbf06801522c630f817a8b140b81bd345",
      "b63ad6bb709427648eaa6ebcd4ef19851c902d39",
      "9f57ab940ef166b826d562906894bf3344cbd794",
      "8efa29a00f26c7e6297a1248c1b069aea9aaf33b",
      "b896746f33195c77bf98c6e090b158195c220eff",
      "57988f0f7655595f93ecf91fb2a3f40243ae2989",
      "b241f0cbdce8f1f86dedfe05044eec6aec8f289f",
      "b332c4bfc4c41a4a2d8d7e7dcf1be537342949cc",
      "dd1af881731945840df147af94993168423545ee",
      "b25bb21b1438ebf205f58f689fad05ac6984c2c4",
      "e25268eea2014236c2ab25163fa3ba9e0217ed9e"
    ],
    "target": "d2ab7b1b46935c95a46d0e7ddac3130b595eb15a",
    "labels": [],
    "assignees": [],
    "revisions": [
      {
        "id": "9f920f5d4639168302ffd9f2eef2c9e14bc5ea8d",
        "author": {
          "id": "did:key:z6Mkgi3EW1eVVb4REBZspeU9cHrJev8GEZae7uACZJkxCKMz",
          "alias": "shadowbane"
        },
        "description": "",
        "base": "d2ab7b1b46935c95a46d0e7ddac3130b595eb15a",
        "oid": "a06db3fa022d083d5c5987ae0b79eae96b64b272",
        "timestamp": 1769377581
      }
    ]
  }
}
{
  "response": "triggered",
  "run_id": {
    "id": "43554064-0252-4357-bd53-1509266e4f22"
  },
  "info_url": "https://cci.rad.levitte.org//43554064-0252-4357-bd53-1509266e4f22.html"
}
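For readers integrating with this webhook, a minimal sketch of how the trigger payload above could be deserialized follows. It is not part of this run; the struct and field names are hypothetical and it assumes the serde (with the derive feature) and serde_json crates. Serde ignores unknown fields by default, so only a subset of the payload is modelled here.

use serde::Deserialize;

// Hypothetical types mirroring the fields shown in the payload above.
#[derive(Debug, Deserialize)]
struct Trigger {
    request: String,
    version: u32,
    event_type: String,
    repository: Repository,
    action: String,
    patch: Patch,
}

#[derive(Debug, Deserialize)]
struct Repository {
    id: String,
    name: String,
    default_branch: String,
    delegates: Vec<String>,
}

#[derive(Debug, Deserialize)]
struct Patch {
    id: String,
    title: String,
    before: String,
    after: String,
    commits: Vec<String>,
}

fn parse_trigger(input: &str) -> Result<Trigger, serde_json::Error> {
    serde_json::from_str(input)
}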
Started at: 2026-01-25 22:46:40.350146+01:00
Commands:
$ rad clone rad:z3gqcJUoA1n9HaHKufZs5FCSGazv5 .
✓ Creating checkout in ./...
✓ Remote cloudhead@z6MksFqXN3Yhqk8pTJdUGLwATkRfQvwZXPqR2qMEhbS9wzpT added
✓ Remote-tracking branch cloudhead@z6MksFqXN3Yhqk8pTJdUGLwATkRfQvwZXPqR2qMEhbS9wzpT/master created for z6MksFqXN3Yhqk8pTJdUGLwATkRfQvwZXPqR2qMEhbS9wzpT
✓ Remote cloudhead@z6MktaNvN1KVFMkSRAiN4qK5yvX1zuEEaseeX5sffhzPZRZW added
✓ Remote-tracking branch cloudhead@z6MktaNvN1KVFMkSRAiN4qK5yvX1zuEEaseeX5sffhzPZRZW/master created for z6MktaNvN1KVFMkSRAiN4qK5yvX1zuEEaseeX5sffhzPZRZW
✓ Remote fintohaps@z6MkireRatUThvd3qzfKht1S44wpm4FEWSSa4PRMTSQZ3voM added
✓ Remote-tracking branch fintohaps@z6MkireRatUThvd3qzfKht1S44wpm4FEWSSa4PRMTSQZ3voM/master created for z6MkireRatUThvd3qzfKht1S44wpm4FEWSSa4PRMTSQZ3voM
✓ Remote erikli@z6MkgFq6z5fkF2hioLLSNu1zP2qEL1aHXHZzGH1FLFGAnBGz added
✓ Remote-tracking branch erikli@z6MkgFq6z5fkF2hioLLSNu1zP2qEL1aHXHZzGH1FLFGAnBGz/master created for z6MkgFq6z5fkF2hioLLSNu1zP2qEL1aHXHZzGH1FLFGAnBGz
✓ Remote lorenz@z6MkkPvBfjP4bQmco5Dm7UGsX2ruDBieEHi8n9DVJWX5sTEz added
✓ Remote-tracking branch lorenz@z6MkkPvBfjP4bQmco5Dm7UGsX2ruDBieEHi8n9DVJWX5sTEz/master created for z6MkkPvBfjP4bQmco5Dm7UGsX2ruDBieEHi8n9DVJWX5sTEz
✓ Repository successfully cloned under /opt/radcis/ci.rad.levitte.org/cci/state/43554064-0252-4357-bd53-1509266e4f22/w/
╭────────────────────────────────────╮
│ heartwood │
│ Radicle Heartwood Protocol & Stack │
│ 135 issues · 17 patches │
╰────────────────────────────────────╯
Run `cd ./.` to go to the repository directory.
Exit code: 0
$ rad patch checkout 9f920f5d4639168302ffd9f2eef2c9e14bc5ea8d
✓ Switched to branch patch/9f920f5 at revision 9f920f5
✓ Branch patch/9f920f5 setup to track rad/patches/9f920f5d4639168302ffd9f2eef2c9e14bc5ea8d
Exit code: 0
$ git config advice.detachedHead false
Exit code: 0
$ git checkout a06db3fa022d083d5c5987ae0b79eae96b64b272
HEAD is now at a06db3fa meta: summarize stability/perf fixes (lower churn, safer shutdown, configurable intervals)
Exit code: 0
$ rad patch show 9f920f5d4639168302ffd9f2eef2c9e14bc5ea8d -p
╭────────────────────────────────────────────────────────────────────────────────────────────────────╮
│ Title stability/perf: reduce churn, add backoff/debounce, throttle GC, safer shutdown │
│ Patch 9f920f5d4639168302ffd9f2eef2c9e14bc5ea8d │
│ Author shadowbane z6Mkgi3…JkxCKMz │
│ Head a06db3fa022d083d5c5987ae0b79eae96b64b272 │
│ Base d2ab7b1b46935c95a46d0e7ddac3130b595eb15a │
│ Branches patch/9f920f5 │
│ Commits ahead 13, behind 0 │
│ Status open │
├────────────────────────────────────────────────────────────────────────────────────────────────────┤
│ a06db3f meta: summarize stability/perf fixes (lower churn, safer shutdown, configurable intervals) │
│ c049842 reactor: shutdown gracefully on control disconnect │
│ af4b32c config: add gc interval limit │
│ b63ad6b node: throttle git gc during fetch │
│ 9f57ab9 protocol: debounce inventory announcements │
│ 8efa29a protocol: add adaptive backoff for gossip/sync │
│ b896746 protocol: fix borrow issues in keepalive/reconnect │
│ 57988f0 node: use configured fetch timeout │
│ b241f0c protocol: use configurable network intervals │
│ b332c4b config: add network scheduling limits │
│ dd1af88 protocol: defer inventory scan to idle │
│ b25bb21 protocol: defer refs db populate to idle │
│ e25268e protocol: skip PoW work when target is zero │
├────────────────────────────────────────────────────────────────────────────────────────────────────┤
│ ● Revision 9f920f5 @ a06db3f by shadowbane z6Mkgi3…JkxCKMz 21 seconds ago │
╰────────────────────────────────────────────────────────────────────────────────────────────────────╯
commit a06db3fa022d083d5c5987ae0b79eae96b64b272
Author: Quaylyn Rimer <quaylynrimer11@gmail.com>
Date: Sun Jan 25 14:35:59 2026 -0700
meta: summarize stability/perf fixes (lower churn, safer shutdown, configurable intervals)
commit c0498425e43428e4eb027551813d82feb39674f2
Author: Quaylyn Rimer <quaylynrimer11@gmail.com>
Date: Sun Jan 25 14:17:50 2026 -0700
reactor: shutdown gracefully on control disconnect
diff --git a/crates/radicle-node/src/reactor.rs b/crates/radicle-node/src/reactor.rs
index a9d099833..7f6e82dfc 100644
--- a/crates/radicle-node/src/reactor.rs
+++ b/crates/radicle-node/src/reactor.rs
@@ -413,7 +413,8 @@ impl<H: ReactionHandler> Runtime<H> {
match self.receiver.try_recv() {
Err(TryRecvError::Empty) => break,
Err(TryRecvError::Disconnected) => {
- panic!("control channel disconnected unexpectedly")
+ log::error!(target: "reactor", "Control channel disconnected; shutting down reactor");
+ return self.handle_shutdown();
}
Ok(ControlMessage::Shutdown) => return self.handle_shutdown(),
Ok(ControlMessage::Command(cmd)) => self.service.handle_command(*cmd),
commit af4b32cbbf06801522c630f817a8b140b81bd345
Author: Quaylyn Rimer <quaylynrimer11@gmail.com>
Date: Sun Jan 25 13:55:10 2026 -0700
config: add gc interval limit
diff --git a/crates/radicle-node/src/runtime.rs b/crates/radicle-node/src/runtime.rs
index b0cc2635a..1c62592d5 100644
--- a/crates/radicle-node/src/runtime.rs
+++ b/crates/radicle-node/src/runtime.rs
@@ -242,7 +242,7 @@ impl Runtime {
limit: FetchLimit::default(),
local: nid,
expiry: worker::garbage::Expiry::default(),
- gc_interval: LocalDuration::from_mins(10),
+ gc_interval: config.limits.gc_interval.into(),
};
let pool = worker::Pool::with(
worker_recv,
diff --git a/crates/radicle/src/node/config.rs b/crates/radicle/src/node/config.rs
index 1efdbbe83..a7233173f 100644
--- a/crates/radicle/src/node/config.rs
+++ b/crates/radicle/src/node/config.rs
@@ -140,6 +140,9 @@ pub struct Limits {
/// Network scheduling limits.
pub network: NetworkLimits,
+ /// Minimum interval between `git gc` runs per repository.
+ pub gc_interval: LimitGcInterval,
+
/// Channel limits.
pub fetch_pack_receive: FetchPackSizeLimit,
}
@@ -755,6 +758,29 @@ impl From<LocalDuration> for LimitKeepAliveDelta {
}
}
+#[derive(Clone, Copy, Debug, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(transparent)]
+#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+pub struct LimitGcInterval(localtime::LocalDuration);
+
+impl Default for LimitGcInterval {
+ fn default() -> Self {
+ Self(localtime::LocalDuration::from_mins(10))
+ }
+}
+
+impl From<LimitGcInterval> for LocalDuration {
+ fn from(value: LimitGcInterval) -> Self {
+ value.0
+ }
+}
+
+impl From<LocalDuration> for LimitGcInterval {
+ fn from(value: LocalDuration) -> Self {
+ Self(value)
+ }
+}
+
#[derive(Clone, Copy, Debug, Deserialize, Serialize, Eq, PartialEq)]
#[serde(transparent)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
commit b63ad6bb709427648eaa6ebcd4ef19851c902d39
Author: Quaylyn Rimer <quaylynrimer11@gmail.com>
Date: Sun Jan 25 13:49:10 2026 -0700
node: throttle git gc during fetch
diff --git a/crates/radicle-node/src/runtime.rs b/crates/radicle-node/src/runtime.rs
index 115ef587a..b0cc2635a 100644
--- a/crates/radicle-node/src/runtime.rs
+++ b/crates/radicle-node/src/runtime.rs
@@ -36,7 +36,7 @@ use crate::reactor::Reactor;
use crate::service::gossip;
use crate::wire::Wire;
use crate::worker;
-use crate::{service, LocalTime};
+use crate::{service, LocalDuration, LocalTime};
pub use handle::Error as HandleError;
pub use handle::Handle;
@@ -242,6 +242,7 @@ impl Runtime {
limit: FetchLimit::default(),
local: nid,
expiry: worker::garbage::Expiry::default(),
+ gc_interval: LocalDuration::from_mins(10),
};
let pool = worker::Pool::with(
worker_recv,
diff --git a/crates/radicle-node/src/worker.rs b/crates/radicle-node/src/worker.rs
index 0449b526f..113c05a3e 100644
--- a/crates/radicle-node/src/worker.rs
+++ b/crates/radicle-node/src/worker.rs
@@ -5,6 +5,7 @@ mod upload_pack;
pub mod fetch;
pub mod garbage;
+use std::collections::HashMap;
use std::path::PathBuf;
use crossbeam_channel as chan;
@@ -25,6 +26,7 @@ pub use radicle_protocol::worker::{
use crate::runtime::{thread, Handle};
use crate::wire::StreamId;
+use crate::{LocalDuration, LocalTime};
pub use channels::{ChannelEvent, Channels, ChannelsConfig};
@@ -67,6 +69,8 @@ pub struct FetchConfig {
/// Configuration for `git gc` garbage collection. Defaults to `1
/// hour ago`.
pub expiry: garbage::Expiry,
+ /// Minimum interval between `git gc` runs per repository.
+ pub gc_interval: LocalDuration,
}
/// A worker that replicates git objects.
@@ -80,6 +84,7 @@ struct Worker {
notifications: notifications::StoreWriter,
cache: cob::cache::StoreWriter,
db: radicle::node::Database,
+ last_gc: HashMap<RepoId, LocalTime>,
}
impl Worker {
@@ -210,6 +215,7 @@ impl Worker {
limit,
local,
expiry,
+ gc_interval,
} = &self.fetch_config;
// N.b. if the `rid` is blocked this will return an error, so
// we won't continue with any further set up of the fetch.
@@ -236,11 +242,22 @@ impl Worker {
refs_at,
)?;
- if let Err(e) = garbage::collect(&self.storage, rid, *expiry) {
- // N.b. ensure that `git gc` works in debug mode.
- debug_assert!(false, "`git gc` failed: {e}");
-
- log::debug!(target: "worker", "Failed to run `git gc`: {e}");
+ let now = LocalTime::now();
+ let should_gc = self
+ .last_gc
+ .get(&rid)
+ .map(|last| now - *last >= *gc_interval)
+ .unwrap_or(true);
+ if should_gc {
+ if let Err(e) = garbage::collect(&self.storage, rid, *expiry) {
+ // N.b. ensure that `git gc` works in debug mode.
+ debug_assert!(false, "`git gc` failed: {e}");
+
+ log::debug!(target: "worker", "Failed to run `git gc`: {e}");
+ }
+ self.last_gc.insert(rid, now);
+ } else {
+ log::debug!(target: "worker", "Skipping `git gc` for {rid}; recently ran");
}
Ok(result)
}
@@ -276,6 +293,7 @@ impl Pool {
notifications: notifications.clone(),
cache: cache.clone(),
db: db.clone(),
+ last_gc: HashMap::new(),
};
let thread = thread::spawn(&nid, format!("worker#{i}"), || worker.run());
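As a reading aid, the throttle predicate introduced above can be expressed in isolation roughly as below. This is a sketch, not code from the patch, assuming the localtime crate's LocalTime and LocalDuration as used in the diff (subtracting two LocalTime values yields a LocalDuration).

use localtime::{LocalDuration, LocalTime};

// True when no `git gc` has been recorded for the repository yet, or when the
// configured interval has elapsed since the last recorded run.
fn should_gc(last: Option<LocalTime>, now: LocalTime, interval: LocalDuration) -> bool {
    last.map(|t| now - t >= interval).unwrap_or(true)
}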
commit 9f57ab940ef166b826d562906894bf3344cbd794
Author: Quaylyn Rimer <quaylynrimer11@gmail.com>
Date: Sun Jan 25 13:02:27 2026 -0700
protocol: debounce inventory announcements
diff --git a/crates/radicle-protocol/src/service.rs b/crates/radicle-protocol/src/service.rs
index 25308f0b2..24150d03c 100644
--- a/crates/radicle-protocol/src/service.rs
+++ b/crates/radicle-protocol/src/service.rs
@@ -471,6 +471,10 @@ pub struct Service<D, S, G> {
refs_populate_pending: bool,
/// Whether inventory scan is pending.
inventory_scan_pending: bool,
+ /// Whether inventory announce is pending (debounced).
+ inventory_announce_pending: bool,
+ /// Last time inventory announcement was sent.
+ last_inventory_sent_at: LocalTime,
/// Gossip backoff multiplier.
gossip_backoff: u64,
/// Sync backoff multiplier.
@@ -597,6 +601,8 @@ where
metrics: Metrics::default(),
refs_populate_pending: false,
inventory_scan_pending: false,
+ inventory_announce_pending: false,
+ last_inventory_sent_at: LocalTime::default(),
gossip_backoff: 1,
sync_backoff: 1,
}
@@ -813,6 +819,11 @@ where
}
}
}
+ if self.inventory_announce_pending
+ && now - self.last_inventory_sent_at >= self.idle_interval()
+ {
+ self.announce_inventory();
+ }
self.keep_alive(&now);
self.disconnect_unresponsive_peers(&now);
@@ -2522,6 +2533,11 @@ where
/// Announce our inventory to all connected peers, unless it was already announced.
fn announce_inventory(&mut self) {
+ if self.clock - self.last_inventory_sent_at < self.idle_interval() {
+ debug!(target: "service", "Debouncing inventory announcement");
+ self.inventory_announce_pending = true;
+ return;
+ }
let timestamp = self.inventory.timestamp.to_local_time();
if self.last_inventory == timestamp {
@@ -2536,6 +2552,8 @@ where
self.db.gossip_mut(),
);
self.last_inventory = timestamp;
+ self.last_inventory_sent_at = self.clock;
+ self.inventory_announce_pending = false;
}
fn prune_routing_entries(&mut self, now: &LocalTime) -> Result<(), routing::Error> {
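The debounce above follows the same pattern: an announcement requested within idle_interval of the previous send only sets the pending flag, and the wake handler flushes it later. A sketch of the decision, not code from the patch, again assuming localtime's types:

use localtime::{LocalDuration, LocalTime};

// True means defer (set `inventory_announce_pending`); false means announce now.
fn should_debounce(now: LocalTime, last_sent: LocalTime, idle_interval: LocalDuration) -> bool {
    now - last_sent < idle_interval
}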
commit 8efa29a00f26c7e6297a1248c1b069aea9aaf33b
Author: Quaylyn Rimer <quaylynrimer11@gmail.com>
Date: Sun Jan 25 11:35:20 2026 -0700
protocol: add adaptive backoff for gossip/sync
diff --git a/crates/radicle-protocol/src/service.rs b/crates/radicle-protocol/src/service.rs
index 95925023d..25308f0b2 100644
--- a/crates/radicle-protocol/src/service.rs
+++ b/crates/radicle-protocol/src/service.rs
@@ -471,6 +471,10 @@ pub struct Service<D, S, G> {
refs_populate_pending: bool,
/// Whether inventory scan is pending.
inventory_scan_pending: bool,
+ /// Gossip backoff multiplier.
+ gossip_backoff: u64,
+ /// Sync backoff multiplier.
+ sync_backoff: u64,
}
impl<D, S, G> Service<D, S, G> {
@@ -500,6 +504,14 @@ impl<D, S, G> Service<D, S, G> {
self.config.limits.network.sync_interval.into()
}
+ fn gossip_interval_effective(&self) -> LocalDuration {
+ self.gossip_interval() * self.gossip_backoff
+ }
+
+ fn sync_interval_effective(&self) -> LocalDuration {
+ self.sync_interval() * self.sync_backoff
+ }
+
fn announce_interval(&self) -> LocalDuration {
self.config.limits.network.announce_interval.into()
}
@@ -585,6 +597,8 @@ where
metrics: Metrics::default(),
refs_populate_pending: false,
inventory_scan_pending: false,
+ gossip_backoff: 1,
+ sync_backoff: 1,
}
}
@@ -742,7 +756,7 @@ where
self.maintain_connections();
// Start periodic tasks.
self.outbox.wakeup(self.idle_interval());
- self.outbox.wakeup(self.gossip_interval());
+ self.outbox.wakeup(self.gossip_interval_effective());
Ok(())
}
@@ -770,6 +784,8 @@ where
pub fn wake(&mut self) {
let now = self.clock;
+ const MAX_BACKOFF: u64 = 10;
+
trace!(
target: "service",
"Wake +{}",
@@ -806,22 +822,32 @@ where
self.outbox.wakeup(self.idle_interval());
self.last_idle = now;
}
- if now - self.last_gossip >= self.gossip_interval() {
+ if now - self.last_gossip >= self.gossip_interval_effective() {
trace!(target: "service", "Running 'gossip' task...");
- if let Err(e) = self.relay_announcements() {
- warn!(target: "service", "Failed to relay stored announcements: {e}");
+ if self.is_online() {
+ if let Err(e) = self.relay_announcements() {
+ warn!(target: "service", "Failed to relay stored announcements: {e}");
+ }
+ self.gossip_backoff = 1;
+ } else {
+ self.gossip_backoff = (self.gossip_backoff * 2).min(MAX_BACKOFF);
}
- self.outbox.wakeup(self.gossip_interval());
+ self.outbox.wakeup(self.gossip_interval_effective());
self.last_gossip = now;
}
- if now - self.last_sync >= self.sync_interval() {
+ if now - self.last_sync >= self.sync_interval_effective() {
trace!(target: "service", "Running 'sync' task...");
- if let Err(e) = self.fetch_missing_repositories() {
- warn!(target: "service", "Failed to fetch missing inventory: {e}");
+ if self.is_online() {
+ if let Err(e) = self.fetch_missing_repositories() {
+ warn!(target: "service", "Failed to fetch missing inventory: {e}");
+ }
+ self.sync_backoff = 1;
+ } else {
+ self.sync_backoff = (self.sync_backoff * 2).min(MAX_BACKOFF);
}
- self.outbox.wakeup(self.sync_interval());
+ self.outbox.wakeup(self.sync_interval_effective());
self.last_sync = now;
}
if now - self.last_announce >= self.announce_interval() {
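The backoff above doubles the multiplier while the node is offline, caps it at MAX_BACKOFF = 10, and resets it to 1 once the task runs online again. A sketch of the progression, not code from the patch:

// Starting from 1, successive offline wakeups yield 2, 4, 8, 10, 10, ...
fn next_backoff(current: u64) -> u64 {
    const MAX_BACKOFF: u64 = 10;
    (current * 2).min(MAX_BACKOFF)
}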
commit b896746f33195c77bf98c6e090b158195c220eff
Author: Quaylyn Rimer <quaylynrimer11@gmail.com>
Date: Sat Jan 24 22:25:06 2026 -0700
protocol: fix borrow issues in keepalive/reconnect
diff --git a/crates/radicle-protocol/src/service.rs b/crates/radicle-protocol/src/service.rs
index 2953be45f..95925023d 100644
--- a/crates/radicle-protocol/src/service.rs
+++ b/crates/radicle-protocol/src/service.rs
@@ -702,8 +702,6 @@ where
debug!(target: "service", "Init @{}", time.as_millis());
assert_ne!(time, LocalTime::default());
- let nid = self.node_id();
-
self.clock = time;
self.started_at = Some(time);
self.last_online_at = match self.db.gossip().last() {
@@ -1407,6 +1405,8 @@ where
pub fn disconnected(&mut self, remote: NodeId, link: Link, reason: &DisconnectReason) {
let since = self.local_time();
+ let min_reconnect = self.min_reconnection_delta();
+ let max_reconnect = self.max_reconnection_delta();
let Some(session) = self.sessions.get_mut(&remote) else {
// Since we sometimes disconnect the service eagerly, it's not unusual to get a second
// disconnection event once the transport is dropped.
@@ -1445,7 +1445,7 @@ where
// Attempt to re-connect to persistent peers.
if self.config.peer(&remote).is_some() {
let delay = LocalDuration::from_secs(2u64.saturating_pow(session.attempts() as u32))
- .clamp(self.min_reconnection_delta(), self.max_reconnection_delta());
+ .clamp(min_reconnect, max_reconnect);
// Nb. We always try to reconnect to persistent peers, even when the error appears
// to not be transient.
@@ -2549,13 +2549,17 @@ where
/// Ensure connection health by pinging connected peers.
fn keep_alive(&mut self, now: &LocalTime) {
- let inactive_sessions = self
+ let keep_alive_delta = self.keep_alive_delta();
+ let to_ping = self
.sessions
- .connected_mut()
- .filter(|(_, session)| *now - session.last_active >= self.keep_alive_delta())
- .map(|(_, session)| session);
- for session in inactive_sessions {
- session.ping(self.clock, &mut self.outbox).ok();
+ .connected()
+ .filter(|(_, session)| *now - session.last_active >= keep_alive_delta)
+ .map(|(nid, _)| *nid)
+ .collect::<Vec<_>>();
+ for nid in to_ping {
+ if let Some(session) = self.sessions.get_mut(&nid) {
+ session.ping(self.clock, &mut self.outbox).ok();
+ }
}
}
commit 57988f0f7655595f93ecf91fb2a3f40243ae2989
Author: Quaylyn Rimer <quaylynrimer11@gmail.com>
Date: Sat Jan 24 22:11:21 2026 -0700
node: use configured fetch timeout
diff --git a/crates/radicle-node/src/wire.rs b/crates/radicle-node/src/wire.rs
index 67e87588f..db3178f6c 100644
--- a/crates/radicle-node/src/wire.rs
+++ b/crates/radicle-node/src/wire.rs
@@ -35,7 +35,6 @@ use crate::reactor::{NoiseSession, ProtocolArtifact, SessionEvent, Socks5Session
use crate::reactor::{Token, Tokens};
use crate::service;
use crate::service::io::Io;
-use crate::service::FETCH_TIMEOUT;
use crate::service::{session, DisconnectReason, Metrics, Service};
use crate::worker;
use crate::worker::{ChannelEvent, ChannelsConfig};
@@ -722,9 +721,10 @@ where
metrics.streams_opened += 1;
metrics.received_fetch_requests += 1;
let reader_limit = self.service.config().limits.fetch_pack_receive;
+ let timeout = self.service.fetch_timeout();
let Some(channels) = streams.register(
stream,
- ChannelsConfig::new(FETCH_TIMEOUT)
+ ChannelsConfig::new(timeout)
.with_reader_limit(reader_limit),
) else {
log::debug!(target: "wire", "Peer attempted to open already-open stream stream {stream}");
commit b241f0cbdce8f1f86dedfe05044eec6aec8f289f
Author: Quaylyn Rimer <quaylynrimer11@gmail.com>
Date: Sat Jan 24 22:11:13 2026 -0700
protocol: use configurable network intervals
diff --git a/crates/radicle-protocol/src/service.rs b/crates/radicle-protocol/src/service.rs
index 069821b71..2953be45f 100644
--- a/crates/radicle-protocol/src/service.rs
+++ b/crates/radicle-protocol/src/service.rs
@@ -487,6 +487,50 @@ impl<D, S, G> Service<D, S, G> {
pub fn emitter(&self) -> Emitter<Event> {
self.emitter.clone()
}
+
+ fn idle_interval(&self) -> LocalDuration {
+ self.config.limits.network.idle_interval.into()
+ }
+
+ fn gossip_interval(&self) -> LocalDuration {
+ self.config.limits.network.gossip_interval.into()
+ }
+
+ fn sync_interval(&self) -> LocalDuration {
+ self.config.limits.network.sync_interval.into()
+ }
+
+ fn announce_interval(&self) -> LocalDuration {
+ self.config.limits.network.announce_interval.into()
+ }
+
+ fn prune_interval(&self) -> LocalDuration {
+ self.config.limits.network.prune_interval.into()
+ }
+
+ fn keep_alive_delta(&self) -> LocalDuration {
+ self.config.limits.network.keep_alive_delta.into()
+ }
+
+ pub fn fetch_timeout(&self) -> time::Duration {
+ self.config.limits.network.fetch_timeout_secs.into()
+ }
+
+ fn target_outbound_peers(&self) -> usize {
+ self.config.limits.network.target_outbound_peers.into()
+ }
+
+ fn min_reconnection_delta(&self) -> LocalDuration {
+ self.config.limits.network.min_reconnection_delta.into()
+ }
+
+ fn max_reconnection_delta(&self) -> LocalDuration {
+ self.config.limits.network.max_reconnection_delta.into()
+ }
+
+ fn connection_retry_delta(&self) -> LocalDuration {
+ self.config.limits.network.connection_retry_delta.into()
+ }
}
impl<D, S, G> Service<D, S, G>
@@ -699,8 +743,8 @@ where
// Try to establish some connections.
self.maintain_connections();
// Start periodic tasks.
- self.outbox.wakeup(IDLE_INTERVAL);
- self.outbox.wakeup(GOSSIP_INTERVAL);
+ self.outbox.wakeup(self.idle_interval());
+ self.outbox.wakeup(self.gossip_interval());
Ok(())
}
@@ -734,7 +778,7 @@ where
now - self.started_at.expect("Service::wake: service must be initialized")
);
- if now - self.last_idle >= IDLE_INTERVAL {
+ if now - self.last_idle >= self.idle_interval() {
trace!(target: "service", "Running 'idle' task...");
if self.refs_populate_pending {
@@ -761,35 +805,35 @@ where
self.idle_connections();
self.maintain_connections();
self.dequeue_fetches();
- self.outbox.wakeup(IDLE_INTERVAL);
+ self.outbox.wakeup(self.idle_interval());
self.last_idle = now;
}
- if now - self.last_gossip >= GOSSIP_INTERVAL {
+ if now - self.last_gossip >= self.gossip_interval() {
trace!(target: "service", "Running 'gossip' task...");
if let Err(e) = self.relay_announcements() {
warn!(target: "service", "Failed to relay stored announcements: {e}");
}
- self.outbox.wakeup(GOSSIP_INTERVAL);
+ self.outbox.wakeup(self.gossip_interval());
self.last_gossip = now;
}
- if now - self.last_sync >= SYNC_INTERVAL {
+ if now - self.last_sync >= self.sync_interval() {
trace!(target: "service", "Running 'sync' task...");
if let Err(e) = self.fetch_missing_repositories() {
warn!(target: "service", "Failed to fetch missing inventory: {e}");
}
- self.outbox.wakeup(SYNC_INTERVAL);
+ self.outbox.wakeup(self.sync_interval());
self.last_sync = now;
}
- if now - self.last_announce >= ANNOUNCE_INTERVAL {
+ if now - self.last_announce >= self.announce_interval() {
trace!(target: "service", "Running 'announce' task...");
self.announce_inventory();
- self.outbox.wakeup(ANNOUNCE_INTERVAL);
+ self.outbox.wakeup(self.announce_interval());
self.last_announce = now;
}
- if now - self.last_prune >= PRUNE_INTERVAL {
+ if now - self.last_prune >= self.prune_interval() {
trace!(target: "service", "Running 'prune' task...");
if let Err(err) = self.prune_routing_entries(&now) {
@@ -803,7 +847,7 @@ where
warn!(target: "service", "Failed to prune gossip entries: {err}");
}
- self.outbox.wakeup(PRUNE_INTERVAL);
+ self.outbox.wakeup(self.prune_interval());
self.last_prune = now;
}
@@ -1401,7 +1445,7 @@ where
// Attempt to re-connect to persistent peers.
if self.config.peer(&remote).is_some() {
let delay = LocalDuration::from_secs(2u64.saturating_pow(session.attempts() as u32))
- .clamp(MIN_RECONNECTION_DELTA, MAX_RECONNECTION_DELTA);
+ .clamp(self.min_reconnection_delta(), self.max_reconnection_delta());
// Nb. We always try to reconnect to persistent peers, even when the error appears
// to not be transient.
@@ -1520,7 +1564,7 @@ where
log::debug!(
target: "service",
"Stored announcement from {announcer} to be broadcast in {} (t={timestamp})",
- (self.last_gossip + GOSSIP_INTERVAL) - self.clock
+ (self.last_gossip + self.gossip_interval()) - self.clock
);
// Keep track of who relayed the message for later.
self.relayed_by.entry(id).or_default().push(*relayer);
@@ -1613,7 +1657,7 @@ where
for rid in missing {
debug!(target: "service", "Missing seeded inventory {rid}; initiating fetch..");
- self.fetch(rid, *announcer, FETCH_TIMEOUT, None);
+ self.fetch(rid, *announcer, self.fetch_timeout(), None);
}
return Ok(relay);
}
@@ -1694,7 +1738,7 @@ where
return Ok(relay);
};
// Finally, start the fetch.
- self.fetch_refs_at(message.rid, remote.id, refs, scope, FETCH_TIMEOUT, None);
+ self.fetch_refs_at(message.rid, remote.id, refs, scope, self.fetch_timeout(), None);
return Ok(relay);
}
@@ -1985,7 +2029,9 @@ where
fn is_online(&self) -> bool {
self.sessions
.connected()
- .filter(|(_, s)| s.addr.is_routable() && s.last_active >= self.clock - IDLE_INTERVAL)
+ .filter(|(_, s)| {
+ s.addr.is_routable() && s.last_active >= self.clock - self.idle_interval()
+ })
.count()
> 0
}
@@ -2506,7 +2552,7 @@ where
let inactive_sessions = self
.sessions
.connected_mut()
- .filter(|(_, session)| *now - session.last_active >= KEEP_ALIVE_DELTA)
+ .filter(|(_, session)| *now - session.last_active >= self.keep_alive_delta())
.map(|(_, session)| session);
for session in inactive_sessions {
session.ping(self.clock, &mut self.outbox).ok();
@@ -2581,7 +2627,7 @@ where
Ok(seeds) => {
if let Some(connected) = NonEmpty::from_vec(seeds.connected().collect()) {
for seed in connected {
- self.fetch(rid, seed.nid, FETCH_TIMEOUT, None);
+ self.fetch(rid, seed.nid, self.fetch_timeout(), None);
}
} else {
// TODO: We should make sure that this fetch is retried later, either
@@ -2629,7 +2675,7 @@ where
};
trace!(target: "service", "Maintaining connections..");
- let target = TARGET_OUTBOUND_PEERS;
+ let target = self.target_outbound_peers();
let now = self.clock;
let outbound = self
.sessions
@@ -2655,10 +2701,10 @@ where
// If we succeeded the last time we tried, this is a good address.
// If it's been long enough that we failed to connect, we also try again.
(Some(success), Some(attempt)) => {
- success >= attempt || now - attempt >= CONNECTION_RETRY_DELTA
+ success >= attempt || now - attempt >= self.connection_retry_delta()
}
// If we haven't succeeded yet, and we waited long enough, we can try this address.
- (None, Some(attempt)) => now - attempt >= CONNECTION_RETRY_DELTA,
+ (None, Some(attempt)) => now - attempt >= self.connection_retry_delta(),
// If we have no failed attempts for this address, it's worth a try.
(_, None) => true,
})
commit b332c4bfc4c41a4a2d8d7e7dcf1be537342949cc
Author: Quaylyn Rimer <quaylynrimer11@gmail.com>
Date: Sat Jan 24 22:11:01 2026 -0700
config: add network scheduling limits
diff --git a/crates/radicle/src/node/config.rs b/crates/radicle/src/node/config.rs
index 6d289e78e..1efdbbe83 100644
--- a/crates/radicle/src/node/config.rs
+++ b/crates/radicle/src/node/config.rs
@@ -1,7 +1,7 @@
use std::collections::HashSet;
use std::ops::Deref;
use std::str::FromStr;
-use std::{fmt, net};
+use std::{fmt, net, time};
use cyphernet::addr::PeerAddr;
use localtime::LocalDuration;
@@ -137,6 +137,9 @@ pub struct Limits {
/// Connection limits.
pub connection: ConnectionLimits,
+ /// Network scheduling limits.
+ pub network: NetworkLimits,
+
/// Channel limits.
pub fetch_pack_receive: FetchPackSizeLimit,
}
@@ -263,6 +266,35 @@ pub struct RateLimits {
pub outbound: LimitRateOutbound,
}
+/// Network scheduling limits.
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+#[serde(default, rename_all = "camelCase")]
+#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+pub struct NetworkLimits {
+ /// How often to run the "idle" task.
+ pub idle_interval: LimitIdleInterval,
+ /// How often to run the "gossip" task.
+ pub gossip_interval: LimitGossipInterval,
+ /// How often to run the "sync" task.
+ pub sync_interval: LimitSyncInterval,
+ /// How often to run the "announce" task.
+ pub announce_interval: LimitAnnounceInterval,
+ /// How often to run the "prune" task.
+ pub prune_interval: LimitPruneInterval,
+ /// How long before a keep-alive ping is sent.
+ pub keep_alive_delta: LimitKeepAliveDelta,
+ /// Fetch timeout, in seconds.
+ pub fetch_timeout_secs: LimitFetchTimeoutSecs,
+ /// Target number of outbound peers.
+ pub target_outbound_peers: TargetOutboundPeers,
+ /// Minimum amount of time to wait before reconnecting to a peer.
+ pub min_reconnection_delta: LimitMinReconnectionDelta,
+ /// Maximum amount of time to wait before reconnecting to a peer.
+ pub max_reconnection_delta: LimitMaxReconnectionDelta,
+ /// Connection retry delta used for peers that failed to connect previously.
+ pub connection_retry_delta: LimitConnectionRetryDelta,
+}
+
/// Full address used to connect to a remote node.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[cfg_attr(
@@ -585,6 +617,213 @@ impl From<LimitGossipMaxAge> for LocalDuration {
}
}
+#[derive(Clone, Copy, Debug, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(transparent)]
+#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+pub struct LimitIdleInterval(localtime::LocalDuration);
+
+impl Default for LimitIdleInterval {
+ fn default() -> Self {
+ Self(localtime::LocalDuration::from_secs(30))
+ }
+}
+
+impl From<LimitIdleInterval> for LocalDuration {
+ fn from(value: LimitIdleInterval) -> Self {
+ value.0
+ }
+}
+
+impl From<LocalDuration> for LimitIdleInterval {
+ fn from(value: LocalDuration) -> Self {
+ Self(value)
+ }
+}
+
+#[derive(Clone, Copy, Debug, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(transparent)]
+#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+pub struct LimitGossipInterval(localtime::LocalDuration);
+
+impl Default for LimitGossipInterval {
+ fn default() -> Self {
+ Self(localtime::LocalDuration::from_secs(6))
+ }
+}
+
+impl From<LimitGossipInterval> for LocalDuration {
+ fn from(value: LimitGossipInterval) -> Self {
+ value.0
+ }
+}
+
+impl From<LocalDuration> for LimitGossipInterval {
+ fn from(value: LocalDuration) -> Self {
+ Self(value)
+ }
+}
+
+#[derive(Clone, Copy, Debug, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(transparent)]
+#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+pub struct LimitSyncInterval(localtime::LocalDuration);
+
+impl Default for LimitSyncInterval {
+ fn default() -> Self {
+ Self(localtime::LocalDuration::from_secs(60))
+ }
+}
+
+impl From<LimitSyncInterval> for LocalDuration {
+ fn from(value: LimitSyncInterval) -> Self {
+ value.0
+ }
+}
+
+impl From<LocalDuration> for LimitSyncInterval {
+ fn from(value: LocalDuration) -> Self {
+ Self(value)
+ }
+}
+
+#[derive(Clone, Copy, Debug, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(transparent)]
+#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+pub struct LimitAnnounceInterval(localtime::LocalDuration);
+
+impl Default for LimitAnnounceInterval {
+ fn default() -> Self {
+ Self(localtime::LocalDuration::from_mins(60))
+ }
+}
+
+impl From<LimitAnnounceInterval> for LocalDuration {
+ fn from(value: LimitAnnounceInterval) -> Self {
+ value.0
+ }
+}
+
+impl From<LocalDuration> for LimitAnnounceInterval {
+ fn from(value: LocalDuration) -> Self {
+ Self(value)
+ }
+}
+
+#[derive(Clone, Copy, Debug, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(transparent)]
+#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+pub struct LimitPruneInterval(localtime::LocalDuration);
+
+impl Default for LimitPruneInterval {
+ fn default() -> Self {
+ Self(localtime::LocalDuration::from_mins(30))
+ }
+}
+
+impl From<LimitPruneInterval> for LocalDuration {
+ fn from(value: LimitPruneInterval) -> Self {
+ value.0
+ }
+}
+
+impl From<LocalDuration> for LimitPruneInterval {
+ fn from(value: LocalDuration) -> Self {
+ Self(value)
+ }
+}
+
+#[derive(Clone, Copy, Debug, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(transparent)]
+#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+pub struct LimitKeepAliveDelta(localtime::LocalDuration);
+
+impl Default for LimitKeepAliveDelta {
+ fn default() -> Self {
+ Self(localtime::LocalDuration::from_mins(1))
+ }
+}
+
+impl From<LimitKeepAliveDelta> for LocalDuration {
+ fn from(value: LimitKeepAliveDelta) -> Self {
+ value.0
+ }
+}
+
+impl From<LocalDuration> for LimitKeepAliveDelta {
+ fn from(value: LocalDuration) -> Self {
+ Self(value)
+ }
+}
+
+#[derive(Clone, Copy, Debug, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(transparent)]
+#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+pub struct LimitMinReconnectionDelta(localtime::LocalDuration);
+
+impl Default for LimitMinReconnectionDelta {
+ fn default() -> Self {
+ Self(localtime::LocalDuration::from_secs(3))
+ }
+}
+
+impl From<LimitMinReconnectionDelta> for LocalDuration {
+ fn from(value: LimitMinReconnectionDelta) -> Self {
+ value.0
+ }
+}
+
+impl From<LocalDuration> for LimitMinReconnectionDelta {
+ fn from(value: LocalDuration) -> Self {
+ Self(value)
+ }
+}
+
+#[derive(Clone, Copy, Debug, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(transparent)]
+#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+pub struct LimitMaxReconnectionDelta(localtime::LocalDuration);
+
+impl Default for LimitMaxReconnectionDelta {
+ fn default() -> Self {
+ Self(localtime::LocalDuration::from_mins(60))
+ }
+}
+
+impl From<LimitMaxReconnectionDelta> for LocalDuration {
+ fn from(value: LimitMaxReconnectionDelta) -> Self {
+ value.0
+ }
+}
+
+impl From<LocalDuration> for LimitMaxReconnectionDelta {
+ fn from(value: LocalDuration) -> Self {
+ Self(value)
+ }
+}
+
+#[derive(Clone, Copy, Debug, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(transparent)]
+#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
+pub struct LimitConnectionRetryDelta(localtime::LocalDuration);
+
+impl Default for LimitConnectionRetryDelta {
+ fn default() -> Self {
+ Self(localtime::LocalDuration::from_mins(10))
+ }
+}
+
+impl From<LimitConnectionRetryDelta> for LocalDuration {
+ fn from(value: LimitConnectionRetryDelta) -> Self {
+ value.0
+ }
+}
+
+impl From<LocalDuration> for LimitConnectionRetryDelta {
+ fn from(value: LocalDuration) -> Self {
+ Self(value)
+ }
+}
+
/// Create a new type (`$name`) around a given type (`$type`), with a provided
/// default (`$default`).
///
@@ -642,6 +881,14 @@ wrapper!(
},
Copy
);
+wrapper!(LimitFetchTimeoutSecs, u64, 3, Copy);
+wrapper!(TargetOutboundPeers, usize, 8, Copy);
+
+impl From<LimitFetchTimeoutSecs> for time::Duration {
+ fn from(value: LimitFetchTimeoutSecs) -> Self {
+ time::Duration::from_secs(value.0)
+ }
+}
#[cfg(test)]
#[allow(clippy::unwrap_used)]
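The defaults above reproduce the intervals that were previously hard-coded constants. Below is a sketch, not code from the patch, of how the new limit types convert back into LocalDuration; the module path radicle::node::config is assumed from the file being edited.

use localtime::LocalDuration;
use radicle::node::config::NetworkLimits;

fn network_defaults() {
    let limits = NetworkLimits::default();
    // Each wrapper converts into the plain duration it wraps.
    let gossip: LocalDuration = limits.gossip_interval.into();
    let sync: LocalDuration = limits.sync_interval.into();
    assert_eq!(gossip, LocalDuration::from_secs(6));
    assert_eq!(sync, LocalDuration::from_secs(60));
}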
commit dd1af881731945840df147af94993168423545ee
Author: Quaylyn Rimer <quaylynrimer11@gmail.com>
Date: Sat Jan 24 22:00:28 2026 -0700
protocol: defer inventory scan to idle
diff --git a/crates/radicle-protocol/src/service.rs b/crates/radicle-protocol/src/service.rs
index e48ed4d3d..069821b71 100644
--- a/crates/radicle-protocol/src/service.rs
+++ b/crates/radicle-protocol/src/service.rs
@@ -469,6 +469,8 @@ pub struct Service<D, S, G> {
metrics: Metrics,
/// Whether refs DB population is pending.
refs_populate_pending: bool,
+ /// Whether inventory scan is pending.
+ inventory_scan_pending: bool,
}
impl<D, S, G> Service<D, S, G> {
@@ -538,6 +540,7 @@ where
listening: vec![],
metrics: Metrics::default(),
refs_populate_pending: false,
+ inventory_scan_pending: false,
}
}
@@ -681,69 +684,8 @@ where
}
}
- let announced = self
- .db
- .seeds()
- .seeded_by(&nid)?
- .collect::<Result<HashMap<_, _>, _>>()?;
- let mut inventory = BTreeSet::new();
- let mut private = BTreeSet::new();
-
- for repo in self.storage.repositories()? {
- let rid = repo.rid;
-
- // If we're not seeding this repo, just skip it.
- if !self.policies.is_seeding(&rid)? {
- debug!(target: "service", "Local repository {rid} is not seeded");
- continue;
- }
- // Add public repositories to inventory.
- if repo.doc.is_public() {
- inventory.insert(rid);
- } else {
- private.insert(rid);
- }
- // If we have no owned refs for this repo, then there's nothing to announce.
- let Some(updated_at) = repo.synced_at else {
- continue;
- };
- // Skip this repo if the sync status matches what we have in storage.
- if let Some(announced) = announced.get(&rid) {
- if updated_at.oid == announced.oid {
- continue;
- }
- }
- // Make sure our local node's sync status is up to date with storage.
- if self.db.seeds_mut().synced(
- &rid,
- &nid,
- updated_at.oid,
- updated_at.timestamp.into(),
- )? {
- debug!(target: "service", "Saved local sync status for {rid}..");
- }
- // If we got here, it likely means a repo was updated while the node was stopped.
- // Therefore, we pre-load a refs announcement for this repo, so that it is included in
- // the historical gossip messages when a node connects and subscribes to this repo.
- if let Ok((ann, _)) = self.refs_announcement_for(rid, [nid]) {
- debug!(target: "service", "Adding refs announcement for {rid} to historical gossip messages..");
- self.db.gossip_mut().announced(&nid, &ann)?;
- }
- }
-
- // Ensure that our inventory is recorded in our routing table, and we are seeding
- // all of it. It can happen that inventory is not properly seeded if for eg. the
- // user creates a new repository while the node is stopped.
- self.db
- .routing_mut()
- .add_inventory(inventory.iter(), nid, time.into())?;
- self.inventory = gossip::inventory(self.timestamp(), inventory);
-
- // Ensure that private repositories are not in our inventory. It's possible that
- // a repository was public and then it was made private.
- self.db
- .routing_mut()
- .remove_inventories(private.iter(), &nid)?;
+ info!(target: "service", "Deferring local inventory scan until idle..");
+ self.inventory_scan_pending = true;
// Setup subscription filter for seeded repos.
self.filter = Filter::allowed_by(self.policies.seed_policies()?);
@@ -803,6 +745,16 @@ where
self.refs_populate_pending = true;
}
}
+ if self.inventory_scan_pending {
+ info!(target: "service", "Scanning local inventory from storage..");
+ match self.refresh_inventory_from_storage() {
+ Ok(()) => self.inventory_scan_pending = false,
+ Err(e) => {
+ warn!(target: "service", "Failed to scan local inventory: {e}");
+ self.inventory_scan_pending = true;
+ }
+ }
+ }
self.keep_alive(&now);
self.disconnect_unresponsive_peers(&now);
@@ -2096,6 +2048,78 @@ where
.map_err(Error::from)
}
+ fn refresh_inventory_from_storage(&mut self) -> Result<(), Error> {
+ let nid = self.node_id();
+ let time = self.clock;
+ let announced = self
+ .db
+ .seeds()
+ .seeded_by(&nid)?
+ .collect::<Result<HashMap<_, _>, _>>()?;
+ let mut inventory = BTreeSet::new();
+ let mut private = BTreeSet::new();
+
+ for repo in self.storage.repositories()? {
+ let rid = repo.rid;
+
+ // If we're not seeding this repo, just skip it.
+ if !self.policies.is_seeding(&rid)? {
+ debug!(target: "service", "Local repository {rid} is not seeded");
+ continue;
+ }
+ // Add public repositories to inventory.
+ if repo.doc.is_public() {
+ inventory.insert(rid);
+ } else {
+ private.insert(rid);
+ }
+ // If we have no owned refs for this repo, then there's nothing to announce.
+ let Some(updated_at) = repo.synced_at else {
+ continue;
+ };
+ // Skip this repo if the sync status matches what we have in storage.
+ if let Some(announced) = announced.get(&rid) {
+ if updated_at.oid == announced.oid {
+ continue;
+ }
+ }
+ // Make sure our local node's sync status is up to date with storage.
+ if self.db.seeds_mut().synced(
+ &rid,
+ &nid,
+ updated_at.oid,
+ updated_at.timestamp.into(),
+ )? {
+ debug!(target: "service", "Saved local sync status for {rid}..");
+ }
+ // If we got here, it likely means a repo was updated while the node was stopped.
+ // Therefore, we pre-load a refs announcement for this repo, so that it is included in
+ // the historical gossip messages when a node connects and subscribes to this repo.
+ if let Ok((ann, _)) = self.refs_announcement_for(rid, [nid]) {
+ debug!(target: "service", "Adding refs announcement for {rid} to historical gossip messages..");
+ self.db.gossip_mut().announced(&nid, &ann)?;
+ }
+ }
+
+ // Ensure that our inventory is recorded in our routing table, and we are seeding
+ // all of it. It can happen that inventory is not properly seeded if for eg. the
+ // user creates a new repository while the node is stopped.
+ self.db
+ .routing_mut()
+ .add_inventory(inventory.iter(), nid, time.into())?;
+ self.inventory = gossip::inventory(self.timestamp(), inventory);
+
+ // Ensure that private repositories are not in our inventory. It's possible that
+ // a repository was public and then it was made private.
+ self.db
+ .routing_mut()
+ .remove_inventories(private.iter(), &nid)?;
+
+ self.announce_inventory();
+
+ Ok(())
+ }
+
/// Process a peer inventory announcement by updating our routing table.
/// This function expects the peer's full inventory, and prunes entries that are not in the
/// given inventory.
commit b25bb21b1438ebf205f58f689fad05ac6984c2c4
Author: Quaylyn Rimer <quaylynrimer11@gmail.com>
Date: Sat Jan 24 21:58:51 2026 -0700
protocol: defer refs db populate to idle
diff --git a/crates/radicle-protocol/src/service.rs b/crates/radicle-protocol/src/service.rs
index 42fc27d2c..e48ed4d3d 100644
--- a/crates/radicle-protocol/src/service.rs
+++ b/crates/radicle-protocol/src/service.rs
@@ -467,6 +467,8 @@ pub struct Service<D, S, G> {
listening: Vec<net::SocketAddr>,
/// Latest metrics for all nodes connected to since the last start.
metrics: Metrics,
+ /// Whether refs DB population is pending.
+ refs_populate_pending: bool,
}
impl<D, S, G> Service<D, S, G> {
@@ -535,6 +537,7 @@ where
emitter,
listening: vec![],
metrics: Metrics::default(),
+ refs_populate_pending: false,
}
}
@@ -669,10 +672,8 @@ where
// that have been online since before the refs database was created.
match self.db.refs().count() {
Ok(0) => {
- info!(target: "service", "Empty refs database, populating from storage..");
- if let Err(e) = self.db.refs_mut().populate(&self.storage) {
- warn!(target: "service", "Failed to populate refs database: {e}");
- }
+ info!(target: "service", "Empty refs database, deferring populate from storage..");
+ self.refs_populate_pending = true;
}
Ok(n) => debug!(target: "service", "Refs database has {n} cached references"),
Err(e) => {
@@ -794,6 +795,15 @@ where
if now - self.last_idle >= IDLE_INTERVAL {
trace!(target: "service", "Running 'idle' task...");
+ if self.refs_populate_pending {
+ info!(target: "service", "Populating refs database from storage..");
+ self.refs_populate_pending = false;
+ if let Err(e) = self.db.refs_mut().populate(&self.storage) {
+ warn!(target: "service", "Failed to populate refs database: {e}");
+ self.refs_populate_pending = true;
+ }
+ }
+
self.keep_alive(&now);
self.disconnect_unresponsive_peers(&now);
self.idle_connections();
commit e25268eea2014236c2ab25163fa3ba9e0217ed9e
Author: Quaylyn Rimer <quaylynrimer11@gmail.com>
Date: Sat Jan 24 21:54:30 2026 -0700
protocol: skip PoW work when target is zero
diff --git a/crates/radicle-protocol/src/service/message.rs b/crates/radicle-protocol/src/service/message.rs
index afcea6317..f5d305c35 100644
--- a/crates/radicle-protocol/src/service/message.rs
+++ b/crates/radicle-protocol/src/service/message.rs
@@ -105,6 +105,9 @@ impl NodeAnnouncement {
/// If the given difficulty target is too high, there may not be a result. In that case, `None`
/// is returned.
pub fn solve(mut self, target: u32) -> Option<Self> {
+ if target == 0 {
+ return Some(self);
+ }
loop {
if let Some(nonce) = self.nonce.checked_add(1) {
self.nonce = nonce;
Exit code: 0
shell: |
  export RUSTDOCFLAGS='-D warnings'
  cargo --version
  rustc --version
  cargo fmt --check
  cargo clippy --all-targets --workspace -- --deny warnings
  cargo build --all-targets --workspace
  cargo doc --workspace --no-deps --all-features
  cargo test --workspace --no-fail-fast
Commands:
$ podman run --name 43554064-0252-4357-bd53-1509266e4f22 -v /opt/radcis/ci.rad.levitte.org/cci/state/43554064-0252-4357-bd53-1509266e4f22/s:/43554064-0252-4357-bd53-1509266e4f22/s:ro -v /opt/radcis/ci.rad.levitte.org/cci/state/43554064-0252-4357-bd53-1509266e4f22/w:/43554064-0252-4357-bd53-1509266e4f22/w -w /43554064-0252-4357-bd53-1509266e4f22/w -v /opt/radcis/ci.rad.levitte.org/.radicle:/${id}/.radicle:ro -e RAD_HOME=/${id}/.radicle rust:trixie bash /43554064-0252-4357-bd53-1509266e4f22/s/script.sh
+ export 'RUSTDOCFLAGS=-D warnings'
+ RUSTDOCFLAGS='-D warnings'
+ cargo --version
info: syncing channel updates for '1.90-x86_64-unknown-linux-gnu'
info: latest update on 2025-09-18, rust version 1.90.0 (1159e78c4 2025-09-14)
info: downloading component 'cargo'
info: downloading component 'clippy'
info: downloading component 'rust-docs'
info: downloading component 'rust-src'
info: downloading component 'rust-std'
info: downloading component 'rustc'
info: downloading component 'rustfmt'
info: installing component 'cargo'
info: installing component 'clippy'
info: installing component 'rust-docs'
info: installing component 'rust-src'
info: installing component 'rust-std'
info: installing component 'rustc'
info: installing component 'rustfmt'
cargo 1.90.0 (840b83a10 2025-07-30)
+ rustc --version
rustc 1.90.0 (1159e78c4 2025-09-14)
+ cargo fmt --check
Diff in /43554064-0252-4357-bd53-1509266e4f22/w/crates/radicle-node/src/wire.rs:724:
let timeout = self.service.fetch_timeout();
let Some(channels) = streams.register(
stream,
- ChannelsConfig::new(timeout)
- .with_reader_limit(reader_limit),
+ ChannelsConfig::new(timeout).with_reader_limit(reader_limit),
) else {
log::debug!(target: "wire", "Peer attempted to open already-open stream stream {stream}");
continue;
Diff in /43554064-0252-4357-bd53-1509266e4f22/w/crates/radicle-protocol/src/service.rs:1775:
return Ok(relay);
};
// Finally, start the fetch.
- self.fetch_refs_at(message.rid, remote.id, refs, scope, self.fetch_timeout(), None);
+ self.fetch_refs_at(
+ message.rid,
+ remote.id,
+ refs,
+ scope,
+ self.fetch_timeout(),
+ None,
+ );
return Ok(relay);
}
Exit code: 1
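The run failed at cargo fmt --check: the two diffs above are the only deviations it reports (in crates/radicle-node/src/wire.rs and crates/radicle-protocol/src/service.rs), so the later clippy, build, doc, and test steps never ran. A likely remedy, not verified here, is to format the workspace locally and publish an updated patch revision:
$ cargo fmt --all
$ cargo fmt --check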
{
  "response": "finished",
  "result": "failure"
}