101
crates/sq-server/src/shipper.rs
Normal file
101
crates/sq-server/src/shipper.rs
Normal file
@@ -0,0 +1,101 @@
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use notmad::{Component, ComponentInfo, MadError};
|
||||
use sq_sim::fs::RealFileSystem;
|
||||
use sq_storage::object_store::s3::S3ObjectStore;
|
||||
use sq_storage::object_store::shipper::{SegmentShipper, ShippedSegments};
|
||||
use tokio::sync::Mutex;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::state::State;
|
||||
|
||||
/// Background component that periodically ships closed WAL segments to S3
/// and trims local files after successful upload.
pub struct BackgroundShipper {
    // Shared server state; provides the engine whose WAL segments are
    // closed each cycle before shipping.
    state: State,
    // Uploads closed segments to the object store and tracks which
    // segments have already been shipped.
    shipper: SegmentShipper<RealFileSystem, S3ObjectStore>,
    // Pause between shipping cycles.
    interval: Duration,
}
|
||||
|
||||
impl BackgroundShipper {
|
||||
pub fn new(
|
||||
state: State,
|
||||
object_store: Arc<S3ObjectStore>,
|
||||
cluster_id: String,
|
||||
interval: Duration,
|
||||
) -> Self {
|
||||
let fs = Arc::new(sq_sim::fs::RealFileSystem);
|
||||
let shipped = Arc::new(Mutex::new(ShippedSegments::new()));
|
||||
let shipper = SegmentShipper::new(fs, object_store, cluster_id, shipped);
|
||||
|
||||
Self {
|
||||
state,
|
||||
shipper,
|
||||
interval,
|
||||
}
|
||||
}
|
||||
|
||||
async fn cycle(&self) {
|
||||
let closed = match self.state.engine.close_all_segments() {
|
||||
Ok(segments) => segments,
|
||||
Err(e) => {
|
||||
tracing::warn!(error = %e, "failed to close segments for shipping");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
if closed.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let count = self.shipper.ship_all(&closed).await;
|
||||
if count > 0 {
|
||||
tracing::info!(shipped = count, total = closed.len(), "shipped segments to S3");
|
||||
}
|
||||
|
||||
// Trim local WAL files for successfully shipped segments.
|
||||
// The shipper tracks which segments were shipped; we delete local copies.
|
||||
// For now, we only delete if all segments were shipped successfully.
|
||||
if count == closed.len() {
|
||||
let fs = sq_sim::fs::RealFileSystem;
|
||||
for seg in &closed {
|
||||
if let Err(e) = sq_sim::fs::FileSystem::remove_file(&fs, &seg.path) {
|
||||
tracing::warn!(
|
||||
path = %seg.path.display(),
|
||||
error = %e,
|
||||
"failed to trim shipped segment"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Component for BackgroundShipper {
|
||||
fn info(&self) -> ComponentInfo {
|
||||
"sq-server/shipper".into()
|
||||
}
|
||||
|
||||
async fn run(&self, cancellation_token: CancellationToken) -> Result<(), MadError> {
|
||||
tracing::info!(
|
||||
interval_secs = self.interval.as_secs(),
|
||||
"background shipper started"
|
||||
);
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
() = cancellation_token.cancelled() => {
|
||||
// Final flush on shutdown.
|
||||
self.cycle().await;
|
||||
break;
|
||||
}
|
||||
() = tokio::time::sleep(self.interval) => {
|
||||
self.cycle().await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user