Mirror of https://github.com/LemmyNet/lemmy.git, synced 2024-11-14 19:35:23 +00:00
federation: parallel sending
This commit is contained in:
parent 3a0c1dca90
commit 539f06af97
Cargo.lock (generated): 4 lines changed
@@ -5439,9 +5439,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"

 [[package]]
 name = "tokio"
-version = "1.36.0"
+version = "1.37.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931"
+checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787"
 dependencies = [
  "backtrace",
  "bytes",
@@ -139,7 +139,7 @@ anyhow = { version = "1.0.81", features = [
 diesel_ltree = "0.3.1"
 typed-builder = "0.18.1"
 serial_test = "2.0.0"
-tokio = { version = "1.36.0", features = ["full"] }
+tokio = { version = "1.37.0", features = ["full"] }
 regex = "1.10.3"
 once_cell = "1.19.0"
 diesel-derive-newtype = "2.1.0"
@@ -73,19 +73,16 @@ async fn start_stop_federation_workers(
       // create new worker
       let config = federation_config.clone();
       let stats_sender = stats_sender.clone();
-      let pool = pool.clone();
       workers.insert(
         instance.id,
         CancellableTask::spawn(WORKER_EXIT_TIMEOUT, move |stop| {
           let instance = instance.clone();
-          let req_data = config.clone().to_request_data();
+          let config = config.clone();
           let stats_sender = stats_sender.clone();
-          let pool = pool.clone();
           async move {
             InstanceWorker::init_and_loop(
               instance,
-              req_data,
-              &mut DbPool::Pool(&pool),
+              config,
               stop,
               stats_sender,
             )
@@ -7,7 +7,7 @@ use crate::util::{
 };
 use activitypub_federation::{
   activity_sending::SendActivityTask,
-  config::Data,
+  config::{Data, FederationConfig},
   protocol::context::WithContext,
 };
 use anyhow::{Context, Result};
@@ -22,22 +22,22 @@ use lemmy_db_schema::{
     instance::{Instance, InstanceForm},
     site::Site,
   },
-  utils::{naive_now, DbPool},
+  utils::{naive_now, ActualDbPool, DbPool},
 };
 use lemmy_db_views_actor::structs::CommunityFollowerView;
 use once_cell::sync::Lazy;
 use reqwest::Url;
 use std::{
-  collections::{HashMap, HashSet},
+  collections::{BinaryHeap, HashMap, HashSet},
   ops::{Add, Deref},
   time::Duration,
 };
-use tokio::{sync::mpsc::UnboundedSender, time::sleep};
+use tokio::{
+  sync::mpsc::{self, UnboundedSender},
+  time::sleep,
+};
 use tokio_util::sync::CancellationToken;

-/// Check whether to save state to db every n sends if there's no failures (during failures state is saved after every attempt)
-/// This determines the batch size for loop_batch. After a batch ends and SAVE_STATE_EVERY_TIME has passed, the federation_queue_state is updated in the DB.
-static CHECK_SAVE_STATE_EVERY_IT: i64 = 100;
 /// Save state to db after this time has passed since the last state (so if the server crashes or is SIGKILLed, less than X seconds of activities are resent)
 static SAVE_STATE_EVERY_TIME: Duration = Duration::from_secs(60);
 /// interval with which new additions to community_followers are queried.
@@ -57,6 +57,16 @@ static FOLLOW_ADDITIONS_RECHECK_DELAY: Lazy<chrono::TimeDelta> = Lazy::new(|| {
 /// This is expected to happen pretty rarely and updating it in a timely manner is not too important.
 static FOLLOW_REMOVALS_RECHECK_DELAY: Lazy<chrono::TimeDelta> =
   Lazy::new(|| chrono::TimeDelta::try_hours(1).expect("TimeDelta out of bounds"));
+
+static CONCURRENT_SENDS: Lazy<i64> = Lazy::new(|| {
+  std::env::var("LEMMY_FEDERATION_CONCURRENT_SENDS_PER_INSTANCE")
+    .ok()
+    .and_then(|s| s.parse().ok())
+    .unwrap_or(8)
+});
+/// Maximum number of successful sends to allow out of order
+const MAX_SUCCESSFULS: usize = 1000;
+
 pub(crate) struct InstanceWorker {
   instance: Instance,
   // load site lazily because if an instance is first seen due to being on allowlist,
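The two new constants above are the knobs for the parallel sender: CONCURRENT_SENDS caps how many activities may be in flight per instance (overridable via an environment variable), and MAX_SUCCESSFULS bounds how far ahead of the last persisted id the worker may run. Below is a standalone sketch of the env-var pattern; the static is copied from the hunk above, while the `main` function is only added here to show how the override behaves.

```rust
// Standalone sketch of the tunable introduced above: an env var read once,
// parsed, and falling back to a default when unset or unparsable.
use once_cell::sync::Lazy;

static CONCURRENT_SENDS: Lazy<i64> = Lazy::new(|| {
  std::env::var("LEMMY_FEDERATION_CONCURRENT_SENDS_PER_INSTANCE")
    .ok()
    .and_then(|s| s.parse().ok())
    .unwrap_or(8)
});

fn main() {
  // e.g. LEMMY_FEDERATION_CONCURRENT_SENDS_PER_INSTANCE=16 ./demo
  println!("will keep up to {} sends in flight per instance", *CONCURRENT_SENDS);
}
```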
@@ -67,60 +77,116 @@ pub(crate) struct InstanceWorker {
   site: Option<Site>,
   followed_communities: HashMap<CommunityId, HashSet<Url>>,
   stop: CancellationToken,
-  context: Data<LemmyContext>,
+  config: FederationConfig<LemmyContext>,
   stats_sender: UnboundedSender<(String, FederationQueueState)>,
   last_full_communities_fetch: DateTime<Utc>,
   last_incremental_communities_fetch: DateTime<Utc>,
   state: FederationQueueState,
   last_state_insert: DateTime<Utc>,
+  pool: ActualDbPool,
+}
+
+#[derive(Debug, PartialEq, Eq)]
+struct SendSuccessInfo {
+  activity_id: ActivityId,
+  published: Option<DateTime<Utc>>,
+  was_skipped: bool,
+}
+impl PartialOrd for SendSuccessInfo {
+  fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+    other.activity_id.partial_cmp(&self.activity_id)
+  }
+}
+impl Ord for SendSuccessInfo {
+  fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+    other.activity_id.cmp(&self.activity_id)
+  }
+}
+enum SendActivityResult {
+  Success(SendSuccessInfo),
+  Failure {
+    fail_count: i32,
+    // activity_id: ActivityId,
+  },
 }

 impl InstanceWorker {
   pub(crate) async fn init_and_loop(
     instance: Instance,
-    context: Data<LemmyContext>,
-    pool: &mut DbPool<'_>, // in theory there's a ref to the pool in context, but i couldn't get that to work wrt lifetimes
+    config: FederationConfig<LemmyContext>,
+    // pool: ActualDbPool, // in theory there's a ref to the pool in context, but i couldn't get that to work wrt lifetimes
     stop: CancellationToken,
     stats_sender: UnboundedSender<(String, FederationQueueState)>,
   ) -> Result<(), anyhow::Error> {
-    let state = FederationQueueState::load(pool, instance.id).await?;
+    let state =
+      FederationQueueState::load(&mut config.to_request_data().pool(), instance.id).await?;
+    let pool = config.to_request_data().inner_pool().clone();
     let mut worker = InstanceWorker {
       instance,
       site_loaded: false,
       site: None,
       followed_communities: HashMap::new(),
       stop,
-      context,
+      config,
       stats_sender,
       last_full_communities_fetch: Utc.timestamp_nanos(0),
       last_incremental_communities_fetch: Utc.timestamp_nanos(0),
       state,
       last_state_insert: Utc.timestamp_nanos(0),
+      pool,
     };
-    worker.loop_until_stopped(pool).await
+    worker.loop_until_stopped().await
   }
   /// loop fetch new activities from db and send them to the inboxes of the given instances
   /// this worker only returns if (a) there is an internal error or (b) the cancellation token is cancelled (graceful exit)
-  pub(crate) async fn loop_until_stopped(
-    &mut self,
-    pool: &mut DbPool<'_>,
-  ) -> Result<(), anyhow::Error> {
-    let save_state_every = chrono::Duration::from_std(SAVE_STATE_EVERY_TIME).expect("not negative");
-
-    self.update_communities(pool).await?;
+  async fn loop_until_stopped(&mut self) -> Result<()> {
     self.initial_fail_sleep().await?;
+    let mut latest_id = self.get_latest_id().await?;
+
+    // activities that have been successfully sent but
+    // that are not the lowest number and thus can't be written to the database yet
+    let mut successfuls = BinaryHeap::<SendSuccessInfo>::new();
+    let mut in_flight: i64 = 0;
+
+    let (report_inbox_result, mut receive_inbox_result) =
+      tokio::sync::mpsc::unbounded_channel::<SendActivityResult>();
     while !self.stop.is_cancelled() {
-      self.loop_batch(pool).await?;
-      if self.stop.is_cancelled() {
-        break;
-      }
-      if (Utc::now() - self.last_state_insert) > save_state_every {
-        self.save_and_send_state(pool).await?;
-      }
-      self.update_communities(pool).await?;
+      // check if we need to wait for a send to finish before sending the next one
+      let need_wait_for_event = (in_flight != 0 && self.state.fail_count > 0)
+        || successfuls.len() > MAX_SUCCESSFULS
+        || in_flight >= *CONCURRENT_SENDS;
+      if need_wait_for_event || receive_inbox_result.len() > 4 {
+        self
+          .handle_send_results(&mut receive_inbox_result, &mut successfuls, &mut in_flight)
+          .await?;
+      } else {
+        self.update_communities().await?;
+        let last_successful_id = self
+          .state
+          .last_successful_id
+          .map(|e| e.0)
+          .expect("set above");
+        let next_id = ActivityId(last_successful_id + (successfuls.len() as i64) + in_flight + 1);
+        if next_id > latest_id {
+          latest_id = self.get_latest_id().await?;
+          if next_id > latest_id {
+            // no more work to be done, wait before rechecking
+            tokio::select! {
+              () = sleep(*WORK_FINISHED_RECHECK_DELAY) => {},
+              () = self.stop.cancelled() => {}
+            }
+            continue;
+          }
+        }
+        in_flight += 1;
+        self
+          .spawn_send_if_needed(next_id, report_inbox_result.clone())
+          .await?;
+      }
     }
-    // final update of state in db
-    self.save_and_send_state(pool).await?;
+    // final update of state in db on shutdown
+    self.save_and_send_state().await?;
     Ok(())
   }

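The reversed comparisons on SendSuccessInfo are what make the out-of-order bookkeeping work: std's BinaryHeap is a max-heap, so comparing `other` against `self` turns it into a min-heap keyed on activity_id, letting the worker always peek at the lowest completed id when deciding whether the contiguous prefix can be persisted. A minimal sketch of that trick, using a simplified stand-in type with plain i64 ids instead of ActivityId:

```rust
// Demonstrates the reversed-Ord min-heap used for out-of-order send results.
use std::collections::BinaryHeap;

#[derive(Debug, PartialEq, Eq)]
struct SendSuccessInfo {
  activity_id: i64,
}
impl PartialOrd for SendSuccessInfo {
  fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
    Some(self.cmp(other))
  }
}
impl Ord for SendSuccessInfo {
  fn cmp(&self, other: &Self) -> std::cmp::Ordering {
    // reversed on purpose: the smallest activity_id surfaces first
    other.activity_id.cmp(&self.activity_id)
  }
}

fn main() {
  let mut successfuls = BinaryHeap::new();
  for id in [5, 2, 9, 3] {
    successfuls.push(SendSuccessInfo { activity_id: id });
  }
  // pops 2, 3, 5, 9, i.e. the lowest outstanding id first
  while let Some(next) = successfuls.pop() {
    println!("{:?}", next);
  }
}
```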
@@ -137,6 +203,11 @@ impl InstanceWorker {
       return Ok(());
     }
     let remaining = required - elapsed;
+    tracing::debug!(
+      "{}: fail-sleeping for {:?} before starting queue",
+      self.instance.domain,
+      remaining
+    );
     tokio::select! {
       () = sleep(remaining) => {},
       () = self.stop.cancelled() => {}
@@ -144,78 +215,174 @@
     }
     Ok(())
   }
-  /// send out a batch of CHECK_SAVE_STATE_EVERY_IT activities
-  async fn loop_batch(&mut self, pool: &mut DbPool<'_>) -> Result<()> {
-    let latest_id = get_latest_activity_id(pool).await?;
-    let mut id = if let Some(id) = self.state.last_successful_id {
-      id
-    } else {
+  /// get newest activity id and set it as last_successful_id if it's the first time this instance is seen
+  async fn get_latest_id(&mut self) -> Result<ActivityId> {
+    let latest_id = get_latest_activity_id(&mut self.pool()).await?;
+    if let None = self.state.last_successful_id {
       // this is the initial creation (instance first seen) of the federation queue for this instance
       // skip all past activities:
       self.state.last_successful_id = Some(latest_id);
       // save here to ensure it's not read as 0 again later if no activities have happened
-      self.save_and_send_state(pool).await?;
-      latest_id
-    };
-    if id >= latest_id {
-      // no more work to be done, wait before rechecking
-      tokio::select! {
-        () = sleep(*WORK_FINISHED_RECHECK_DELAY) => {},
-        () = self.stop.cancelled() => {}
-      }
+      self.save_and_send_state().await?;
+    }
+    Ok(latest_id)
+  }
+
+  async fn handle_send_results(
+    &mut self,
+    receive_inbox_result: &mut mpsc::UnboundedReceiver<SendActivityResult>,
+    successfuls: &mut BinaryHeap<SendSuccessInfo>,
+    in_flight: &mut i64,
+  ) -> Result<(), anyhow::Error> {
+    let force_write = false;
+    let mut events = Vec::new();
+    // wait for at least one event but if there's multiple handle them all
+    receive_inbox_result.recv_many(&mut events, 1000).await;
+    for event in events {
+      match event {
+        SendActivityResult::Success(s) => {
+          self.state.fail_count = 0;
+          *in_flight -= 1;
+          if !s.was_skipped {
+            self.mark_instance_alive().await?;
+          }
+          successfuls.push(s);
+        }
+        SendActivityResult::Failure { fail_count, .. } => {
+          if fail_count > self.state.fail_count {
+            // override fail count - if multiple activities are currently sending this value may get conflicting info but that's fine
+            self.state.fail_count = fail_count;
+            self.state.last_retry = Some(Utc::now());
+          }
+        }
+      }
+    }
+    self
+      .pop_successfuls_and_write(successfuls, force_write)
+      .await?;
+    Ok(())
+  }
+  async fn mark_instance_alive(&mut self) -> Result<()> {
+    // Activity send successful, mark instance as alive if it hasn't been updated in a while.
+    let updated = self.instance.updated.unwrap_or(self.instance.published);
+    if updated.add(Days::new(1)) < Utc::now() {
+      self.instance.updated = Some(Utc::now());
+
+      let form = InstanceForm::builder()
+        .domain(self.instance.domain.clone())
+        .updated(Some(naive_now()))
+        .build();
+      Instance::update(&mut self.pool(), self.instance.id, form).await?;
+    }
+    Ok(())
+  }
+  /// checks whether the highest successful id can be updated and writes to db if so
+  async fn pop_successfuls_and_write(
+    &mut self,
+    successfuls: &mut BinaryHeap<SendSuccessInfo>,
+    force_write: bool,
+  ) -> Result<()> {
+    let Some(mut last_id) = self.state.last_successful_id else {
+      tracing::warn!("should be impossible: last successful id is None");
+      return Ok(());
+    };
+    tracing::debug!(
+      "last: {:?}, next: {:?}, currently in successfuls: {:?}",
+      last_id,
+      successfuls.peek(),
+      successfuls.iter()
+    );
+    while successfuls
+      .peek()
+      .map(|a| &a.activity_id == &ActivityId(last_id.0 + 1))
+      .unwrap_or(false)
+    {
+      let next = successfuls.pop().unwrap();
+      last_id = next.activity_id;
+      self.state.last_successful_id = Some(next.activity_id);
+      self.state.last_successful_published_time = next.published;
+    }
+
+    let save_state_every = chrono::Duration::from_std(SAVE_STATE_EVERY_TIME).expect("not negative");
+    if force_write || (Utc::now() - self.last_state_insert) > save_state_every {
+      self.save_and_send_state().await?;
+    }
+    Ok(())
+  }
+
+  async fn spawn_send_if_needed(
+    &mut self,
+    activity_id: ActivityId,
+    report: UnboundedSender<SendActivityResult>,
+  ) -> Result<()> {
+    let Some(ele) = get_activity_cached(&mut self.pool(), activity_id)
+      .await
+      .context("failed reading activity from db")?
+    else {
+      tracing::debug!("{}: {:?} does not exist", self.instance.domain, activity_id);
+      report.send(SendActivityResult::Success(SendSuccessInfo {
+        activity_id,
+        published: None,
+        was_skipped: true,
+      }))?;
+      return Ok(());
+    };
+    let activity = &ele.0;
+    let inbox_urls = self
+      .get_inbox_urls(activity)
+      .await
+      .context("failed figuring out inbox urls")?;
+    if inbox_urls.is_empty() {
+      tracing::debug!("{}: {:?} no inboxes", self.instance.domain, activity.id);
+      report.send(SendActivityResult::Success(SendSuccessInfo {
+        activity_id,
+        published: Some(activity.published),
+        was_skipped: true,
+      }))?;
       return Ok(());
     }
-    let mut processed_activities = 0;
-    while id < latest_id
-      && processed_activities < CHECK_SAVE_STATE_EVERY_IT
-      && !self.stop.is_cancelled()
-    {
-      id = ActivityId(id.0 + 1);
-      processed_activities += 1;
-      let Some(ele) = get_activity_cached(pool, id)
-        .await
-        .context("failed reading activity from db")?
-      else {
-        tracing::debug!("{}: {:?} does not exist", self.instance.domain, id);
-        self.state.last_successful_id = Some(id);
-        continue;
-      };
-      if let Err(e) = self.send_retry_loop(pool, &ele.0, &ele.1).await {
+    let inbox_urls = inbox_urls.into_iter().collect();
+    let initial_fail_count = self.state.fail_count;
+    let data = self.config.to_request_data();
+    let stop = self.stop.clone();
+    let domain = self.instance.domain.clone();
+    tokio::spawn(async move {
+      if let Err(e) = InstanceWorker::send_retry_loop(
+        &ele.0,
+        &ele.1,
+        inbox_urls,
+        report,
+        initial_fail_count,
+        domain,
+        data,
+        stop,
+      )
+      .await
+      {
         tracing::warn!(
           "sending {} errored internally, skipping activity: {:?}",
           ele.0.ap_id,
           e
         );
       }
-      if self.stop.is_cancelled() {
-        return Ok(());
-      }
-      // send success!
-      self.state.last_successful_id = Some(id);
-      self.state.last_successful_published_time = Some(ele.0.published);
-      self.state.fail_count = 0;
-    }
+    });
     Ok(())
   }

   // this function will return successfully when (a) send succeeded or (b) worker cancelled
   // and will return an error if an internal error occurred (send errors cause an infinite loop)
   async fn send_retry_loop(
-    &mut self,
-    pool: &mut DbPool<'_>,
     activity: &SentActivity,
     object: &SharedInboxActivities,
+    inbox_urls: Vec<Url>,
+    report: UnboundedSender<SendActivityResult>,
+    initial_fail_count: i32,
+    domain: String,
+    context: Data<LemmyContext>,
+    stop: CancellationToken,
   ) -> Result<()> {
-    let inbox_urls = self
-      .get_inbox_urls(pool, activity)
-      .await
-      .context("failed figuring out inbox urls")?;
-    if inbox_urls.is_empty() {
-      tracing::debug!("{}: {:?} no inboxes", self.instance.domain, activity.id);
-      self.state.last_successful_id = Some(activity.id);
-      self.state.last_successful_published_time = Some(activity.published);
-      return Ok(());
-    }
+    let pool = &mut context.pool();
     let Some(actor_apub_id) = &activity.actor_apub_id else {
       return Ok(()); // activity was inserted before persistent queue was activated
     };
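handle_send_results() blocks on UnboundedReceiver::recv_many(), which waits for at least one result and then drains up to 1000 in one call; that method is presumably the reason for the tokio 1.36 to 1.37 bump earlier in this diff. A self-contained sketch of the same batching pattern, using a simplified u32 message type in place of SendActivityResult:

```rust
// Batch-drain an unbounded channel: wait for at least one message,
// then pull everything currently queued (up to a limit) at once.
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
  let (tx, mut rx) = mpsc::unbounded_channel::<u32>();

  tokio::spawn(async move {
    for i in 0..10 {
      tx.send(i).expect("receiver alive");
    }
  });

  let mut events = Vec::new();
  // blocks until at least one event is available, then grabs up to 1000
  let received = rx.recv_many(&mut events, 1000).await;
  println!("handled {received} results in one batch: {events:?}");
}
```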
@@ -224,61 +391,50 @@
       .context("failed getting actor instance (was it marked deleted / removed?)")?;

     let object = WithContext::new(object.clone(), FEDERATION_CONTEXT.deref().clone());
-    let inbox_urls = inbox_urls.into_iter().collect();
-    let requests =
-      SendActivityTask::prepare(&object, actor.as_ref(), inbox_urls, &self.context).await?;
+    let requests = SendActivityTask::prepare(&object, actor.as_ref(), inbox_urls, &context).await?;
     for task in requests {
       // usually only one due to shared inbox
       tracing::debug!("sending out {}", task);
-      while let Err(e) = task.sign_and_send(&self.context).await {
-        self.state.fail_count += 1;
-        self.state.last_retry = Some(Utc::now());
-        let retry_delay: Duration = federate_retry_sleep_duration(self.state.fail_count);
+      let mut fail_count = initial_fail_count;
+      while let Err(e) = task.sign_and_send(&context).await {
+        fail_count += 1;
+        report.send(SendActivityResult::Failure {
+          fail_count,
+          // activity_id: activity.id,
+        })?;
+        let retry_delay: Duration = federate_retry_sleep_duration(fail_count);
         tracing::info!(
           "{}: retrying {:?} attempt {} with delay {retry_delay:.2?}. ({e})",
-          self.instance.domain,
+          domain,
           activity.id,
-          self.state.fail_count
+          fail_count
         );
-        self.save_and_send_state(pool).await?;
         tokio::select! {
           () = sleep(retry_delay) => {},
-          () = self.stop.cancelled() => {
+          () = stop.cancelled() => {
             // save state to db and exit
             return Ok(());
           }
         }
       }
-
-      // Activity send successful, mark instance as alive if it hasn't been updated in a while.
-      let updated = self.instance.updated.unwrap_or(self.instance.published);
-      if updated.add(Days::new(1)) < Utc::now() {
-        self.instance.updated = Some(Utc::now());
-
-        let form = InstanceForm::builder()
-          .domain(self.instance.domain.clone())
-          .updated(Some(naive_now()))
-          .build();
-        Instance::update(pool, self.instance.id, form).await?;
-      }
     }
+    report.send(SendActivityResult::Success(SendSuccessInfo {
+      activity_id: activity.id,
+      published: Some(activity.published),
+      was_skipped: false,
+    }))?;
     Ok(())
   }

   /// get inbox urls of sending the given activity to the given instance
   /// most often this will return 0 values (if instance doesn't care about the activity)
   /// or 1 value (the shared inbox)
   /// > 1 values only happens for non-lemmy software
-  async fn get_inbox_urls(
-    &mut self,
-    pool: &mut DbPool<'_>,
-    activity: &SentActivity,
-  ) -> Result<HashSet<Url>> {
+  async fn get_inbox_urls(&mut self, activity: &SentActivity) -> Result<HashSet<Url>> {
     let mut inbox_urls: HashSet<Url> = HashSet::new();

     if activity.send_all_instances {
       if !self.site_loaded {
-        self.site = Site::read_from_instance_id(pool, self.instance.id).await?;
+        self.site = Site::read_from_instance_id(&mut self.pool(), self.instance.id).await?;
         self.site_loaded = true;
       }
       if let Some(site) = &self.site {
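send_retry_loop() is now a free-standing task: it retries with federate_retry_sleep_duration() backoff, reports each failure over the channel instead of mutating self.state directly, and bails out promptly when the worker's CancellationToken fires. A hedged sketch of that retry shape follows; try_send() and the doubling backoff are placeholders for illustration, not the real send path or delay function.

```rust
// Retry a fallible send with growing delay, aborting when cancelled.
use std::time::Duration;
use tokio::time::sleep;
use tokio_util::sync::CancellationToken;

async fn try_send(attempt: i32) -> Result<(), String> {
  // pretend the first two attempts fail
  if attempt < 3 { Err(format!("attempt {attempt} failed")) } else { Ok(()) }
}

#[tokio::main]
async fn main() {
  let stop = CancellationToken::new();
  let mut fail_count = 0;
  while let Err(e) = try_send(fail_count + 1).await {
    fail_count += 1;
    let retry_delay = Duration::from_millis(100 * 2u64.pow(fail_count as u32));
    println!("retrying after {retry_delay:?} ({e})");
    tokio::select! {
      () = sleep(retry_delay) => {},
      // the real worker saves state and exits here
      () = stop.cancelled() => { return; }
    }
  }
  println!("sent after {fail_count} failures");
}
```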
@@ -302,23 +458,30 @@
     Ok(inbox_urls)
   }

-  async fn update_communities(&mut self, pool: &mut DbPool<'_>) -> Result<()> {
+  async fn update_communities(&mut self) -> Result<()> {
     if (Utc::now() - self.last_full_communities_fetch) > *FOLLOW_REMOVALS_RECHECK_DELAY {
+      tracing::debug!(
+        "{}: fetching full list of communities",
+        self.instance.domain
+      );
       // process removals every hour
       (self.followed_communities, self.last_full_communities_fetch) = self
-        .get_communities(pool, self.instance.id, Utc.timestamp_nanos(0))
+        .get_communities(self.instance.id, Utc.timestamp_nanos(0))
         .await?;
       self.last_incremental_communities_fetch = self.last_full_communities_fetch;
     }
     if (Utc::now() - self.last_incremental_communities_fetch) > *FOLLOW_ADDITIONS_RECHECK_DELAY {
       // process additions every minute
       let (news, time) = self
-        .get_communities(
-          pool,
-          self.instance.id,
-          self.last_incremental_communities_fetch,
-        )
+        .get_communities(self.instance.id, self.last_incremental_communities_fetch)
         .await?;
+      if !news.is_empty() {
+        tracing::debug!(
+          "{}: fetched {} incremental new followed communities",
+          self.instance.domain,
+          news.len()
+        );
+      }
       self.followed_communities.extend(news);
       self.last_incremental_communities_fetch = time;
     }
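update_communities() keeps its two-tier schedule: a full reload of followed communities roughly every hour (FOLLOW_REMOVALS_RECHECK_DELAY, which also picks up unfollows) and an incremental reload of rows newer than the last fetch roughly every minute (FOLLOW_ADDITIONS_RECHECK_DELAY), now with debug logging. A trimmed-down sketch of that scheduling decision; the one-hour and one-minute thresholds mirror the constants above, but the types and names here are illustrative only.

```rust
// Decide whether a full or incremental refresh of followed communities is due.
use chrono::{DateTime, TimeDelta, Utc};

struct RefreshClock {
  last_full: DateTime<Utc>,
  last_incremental: DateTime<Utc>,
}

enum Refresh {
  Full,
  Incremental { since: DateTime<Utc> },
  None,
}

impl RefreshClock {
  fn next(&self, now: DateTime<Utc>) -> Refresh {
    let hour = TimeDelta::try_hours(1).expect("TimeDelta out of bounds");
    let minute = TimeDelta::try_minutes(1).expect("TimeDelta out of bounds");
    if now - self.last_full > hour {
      Refresh::Full
    } else if now - self.last_incremental > minute {
      Refresh::Incremental { since: self.last_incremental }
    } else {
      Refresh::None
    }
  }
}

fn main() {
  let now = Utc::now();
  let clock = RefreshClock {
    last_full: now - TimeDelta::try_hours(2).expect("TimeDelta out of bounds"),
    last_incremental: now,
  };
  match clock.next(now) {
    Refresh::Full => println!("do a full refetch"),
    Refresh::Incremental { since } => println!("fetch follows newer than {since}"),
    Refresh::None => println!("nothing to do yet"),
  }
}
```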
@@ -328,29 +491,38 @@
   /// get a list of local communities with the remote inboxes on the given instance that cares about them
   async fn get_communities(
     &mut self,
-    pool: &mut DbPool<'_>,
     instance_id: InstanceId,
     last_fetch: DateTime<Utc>,
   ) -> Result<(HashMap<CommunityId, HashSet<Url>>, DateTime<Utc>)> {
     let new_last_fetch =
       Utc::now() - chrono::TimeDelta::try_seconds(10).expect("TimeDelta out of bounds"); // update to time before fetch to ensure overlap. subtract 10s to ensure overlap even if published date is not exact
     Ok((
-      CommunityFollowerView::get_instance_followed_community_inboxes(pool, instance_id, last_fetch)
-        .await?
-        .into_iter()
-        .fold(HashMap::new(), |mut map, (c, u)| {
-          map.entry(c).or_default().insert(u.into());
-          map
-        }),
+      CommunityFollowerView::get_instance_followed_community_inboxes(
+        &mut self.pool(),
+        instance_id,
+        last_fetch,
+      )
+      .await?
+      .into_iter()
+      .fold(HashMap::new(), |mut map, (c, u)| {
+        map.entry(c).or_default().insert(u.into());
+        map
+      }),
       new_last_fetch,
     ))
   }
-  async fn save_and_send_state(&mut self, pool: &mut DbPool<'_>) -> Result<()> {
+  async fn save_and_send_state(&mut self) -> Result<()> {
+    tracing::debug!("{}: saving and sending state", self.instance.domain);
     self.last_state_insert = Utc::now();
-    FederationQueueState::upsert(pool, &self.state).await?;
+    FederationQueueState::upsert(&mut self.pool(), &self.state).await?;
     self
       .stats_sender
       .send((self.instance.domain.clone(), self.state.clone()))?;
     Ok(())
   }
+
+  fn pool(&self) -> DbPool<'_> {
+    //self.config.to_request_data()
+    DbPool::Pool(&self.pool)
+  }
 }