use std::path::PathBuf;
use std::sync::Arc;

use drop_queue::DropQueue;
use sq_sim::fs::RealFileSystem;
use sq_sim::RealClock;
use sq_storage::engine::StorageEngine;
use sq_storage::object_store::reader::ObjectStoreReader;
use sq_storage::object_store::s3::S3ObjectStore;

use crate::pipeline::{self, PipelineHandle, WritePipeline};

/// Shared node state: storage engine, write pipeline handle, optional S3
/// reader, and the configuration it was built from.
#[derive(Clone)]
pub struct State {
    pub engine: Arc<StorageEngine<RealFileSystem, RealClock>>,
    pub pipeline: PipelineHandle,
    pub s3_reader: Option<Arc<ObjectStoreReader<S3ObjectStore>>>,
    pub drop_queue: DropQueue,
    pub config: Config,
}

/// Node configuration supplied at startup.
#[derive(Clone)]
pub struct Config {
    pub node_id: String,
    pub data_dir: PathBuf,
    pub seeds: Vec<String>,
    pub grpc_address: String,
    pub cluster_id: String,
    pub s3_bucket: Option<String>,
    pub s3_endpoint: Option<String>,
    pub s3_region: Option<String>,
    pub sync_policy: sq_models::SyncPolicy,
}

impl State {
    pub fn new(config: Config) -> anyhow::Result<(Self, WritePipeline)> {
        let fs = Arc::new(RealFileSystem);
        let clock = Arc::new(RealClock);

        let wal_config = sq_models::WalConfig {
            data_dir: config.data_dir.clone(),
            sync_policy: config.sync_policy.clone(),
            ..Default::default()
        };

        // Open the storage engine and replay the WAL before serving traffic.
        let engine = StorageEngine::new(fs, clock, wal_config)?;
        engine.recover()?;
        let engine = Arc::new(engine);

        // Writes flow through a bounded pipeline with room for 10_000
        // in-flight entries; the caller is responsible for driving `writer`.
        let (handle, writer) = pipeline::create_pipeline(engine.clone(), 10_000);

        Ok((
            Self {
                engine,
                pipeline: handle,
                s3_reader: None, // starts without an S3 reader
                drop_queue: DropQueue::new(),
                config,
            },
            writer,
        ))
    }
}
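
// ---------------------------------------------------------------------------
// Illustrative construction sketch, not part of the original module. The
// placeholder addresses and paths, and the assumption that
// `sq_models::SyncPolicy` implements `Default`, are mine; check the real
// crates before relying on them. Driving the returned `WritePipeline` is the
// caller's job and is out of scope here.
// ---------------------------------------------------------------------------
#[cfg(test)]
mod state_construction_sketch {
    use super::*;

    #[test]
    fn builds_state_and_pipeline() -> anyhow::Result<()> {
        // Throwaway data directory so the WAL has somewhere to live.
        let data_dir = std::env::temp_dir().join("sq-state-sketch");
        std::fs::create_dir_all(&data_dir)?;

        let config = Config {
            node_id: "node-1".into(),
            data_dir,
            seeds: vec!["127.0.0.1:7001".into()],
            grpc_address: "127.0.0.1:7000".into(),
            cluster_id: "local-dev".into(),
            s3_bucket: None, // S3 tiering disabled in this sketch
            s3_endpoint: None,
            s3_region: None,
            sync_policy: sq_models::SyncPolicy::default(), // assumes a Default impl
        };

        // `State::new` opens the engine, replays the WAL, and returns the
        // write pipeline alongside the shared state.
        let (state, _writer) = State::new(config)?;
        assert!(state.s3_reader.is_none());
        Ok(())
    }
}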