feat: add post3 S3 proxy for PostgreSQL
Signed-off-by: kjuulh <contact@kjuulh.io>
This commit is contained in:
161
crates/post3-sdk/examples/large_upload.rs
Normal file
161
crates/post3-sdk/examples/large_upload.rs
Normal file
@@ -0,0 +1,161 @@
|
||||
//! Stress test: upload and verify large files.
|
||||
//!
|
||||
//! Tests progressively larger files to find limits and measure performance.
|
||||
//! Generates deterministic pseudo-random data so we can verify integrity
|
||||
//! without keeping the full payload in memory twice.
|
||||
//!
|
||||
//! Prerequisites: post3-server running on localhost:9000
|
||||
//! mise run up && mise run dev
|
||||
//!
|
||||
//! Run:
|
||||
//! cargo run -p post3-sdk --example large_upload --release
|
||||
//!
|
||||
//! Or with custom sizes (in MB):
|
||||
//! POST3_SIZES=10,50,100,500,1024 cargo run -p post3-sdk --example large_upload --release
|
||||
|
||||
use post3_sdk::Post3Client;
|
||||
use std::time::Instant;
|
||||
|
||||
/// Produce `size_bytes` of deterministic pseudo-random bytes.
///
/// An xorshift64 generator seeded with a fixed constant emits 8-byte
/// words, so repeated calls yield identical output and a shorter
/// request is always a prefix of a longer one — which lets the caller
/// verify round-trip integrity without holding the payload twice.
fn generate_data(size_bytes: usize) -> Vec<u8> {
    let mut buf = Vec::with_capacity(size_bytes);
    let mut word: u64 = 0xdeadbeef;
    // Number of full 8-byte words needed to cover the request.
    let words = (size_bytes + 7) / 8;
    for _ in 0..words {
        // xorshift64 step: cheap, deterministic, good-enough spread.
        word ^= word << 13;
        word ^= word >> 7;
        word ^= word << 17;
        buf.extend_from_slice(&word.to_le_bytes());
    }
    // Drop the overshoot from the final word, if any.
    buf.truncate(size_bytes);
    buf
}
|
||||
|
||||
/// Render a byte count as a human-readable string.
///
/// Values at or above each binary threshold are shown with one decimal
/// place in GiB, MiB, or KiB; anything below 1 KiB is printed in
/// plain bytes.
fn format_size(bytes: usize) -> String {
    const KIB: f64 = 1024.0;
    let b = bytes as f64;
    match bytes {
        n if n >= 1 << 30 => format!("{:.1} GiB", b / (KIB * KIB * KIB)),
        n if n >= 1 << 20 => format!("{:.1} MiB", b / (KIB * KIB)),
        n if n >= 1 << 10 => format!("{:.1} KiB", b / KIB),
        _ => format!("{} B", bytes),
    }
}
|
||||
|
||||
/// Format a transfer rate as `"X.X MiB/s"` for `bytes` moved over
/// `duration`. A zero-length duration (rate undefined) renders as "∞".
fn format_throughput(bytes: usize, duration: std::time::Duration) -> String {
    let elapsed = duration.as_secs_f64();
    if elapsed > 0.0 {
        // 1 MiB = 1_048_576 bytes.
        let rate = bytes as f64 / 1_048_576.0 / elapsed;
        format!("{:.1} MiB/s", rate)
    } else {
        "∞".to_string()
    }
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let endpoint = std::env::var("POST3_ENDPOINT")
|
||||
.unwrap_or_else(|_| "http://localhost:9000".to_string());
|
||||
let client = Post3Client::new(&endpoint);
|
||||
|
||||
// Parse sizes from env or use defaults
|
||||
let sizes_mb: Vec<usize> = std::env::var("POST3_SIZES")
|
||||
.unwrap_or_else(|_| "1,10,50,100,500,1024,2048".to_string())
|
||||
.split(',')
|
||||
.filter_map(|s| s.trim().parse().ok())
|
||||
.collect();
|
||||
|
||||
println!("=== post3 Large File Stress Test ===");
|
||||
println!("Endpoint: {}", endpoint);
|
||||
println!("Sizes: {:?} MB", sizes_mb);
|
||||
println!();
|
||||
|
||||
client.create_bucket("stress-test").await?;
|
||||
|
||||
for size_mb in &sizes_mb {
|
||||
let size_bytes = size_mb * 1024 * 1024;
|
||||
let key = format!("test-{}mb.bin", size_mb);
|
||||
|
||||
println!("--- {} ---", format_size(size_bytes));
|
||||
|
||||
// Generate data
|
||||
print!(" Generating data... ");
|
||||
let gen_start = Instant::now();
|
||||
let data = generate_data(size_bytes);
|
||||
println!("done ({:.1}s)", gen_start.elapsed().as_secs_f64());
|
||||
|
||||
// Upload
|
||||
print!(" Uploading... ");
|
||||
let upload_start = Instant::now();
|
||||
match client.put_object("stress-test", &key, &data).await {
|
||||
Ok(()) => {
|
||||
let upload_dur = upload_start.elapsed();
|
||||
println!(
|
||||
"done ({:.1}s, {})",
|
||||
upload_dur.as_secs_f64(),
|
||||
format_throughput(size_bytes, upload_dur)
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
println!("FAILED: {}", e);
|
||||
println!(" Skipping remaining sizes (hit server limit)");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Head (verify metadata)
|
||||
let head = client.head_object("stress-test", &key).await?;
|
||||
if let Some(info) = &head {
|
||||
println!(
|
||||
" Head: size={}, etag={:?}",
|
||||
format_size(info.size as usize),
|
||||
info.etag
|
||||
);
|
||||
}
|
||||
|
||||
// Download
|
||||
print!(" Downloading... ");
|
||||
let download_start = Instant::now();
|
||||
match client.get_object("stress-test", &key).await {
|
||||
Ok(downloaded) => {
|
||||
let download_dur = download_start.elapsed();
|
||||
println!(
|
||||
"done ({:.1}s, {})",
|
||||
download_dur.as_secs_f64(),
|
||||
format_throughput(size_bytes, download_dur)
|
||||
);
|
||||
|
||||
// Verify integrity
|
||||
print!(" Verifying... ");
|
||||
if downloaded.len() != data.len() {
|
||||
println!(
|
||||
"FAILED: size mismatch (expected {}, got {})",
|
||||
data.len(),
|
||||
downloaded.len()
|
||||
);
|
||||
} else if downloaded.as_ref() == data.as_slice() {
|
||||
println!("OK (byte-for-byte match)");
|
||||
} else {
|
||||
// Find first mismatch
|
||||
let pos = data
|
||||
.iter()
|
||||
.zip(downloaded.iter())
|
||||
.position(|(a, b)| a != b)
|
||||
.unwrap_or(0);
|
||||
println!("FAILED: mismatch at byte {}", pos);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
println!("FAILED: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup this object
|
||||
client.delete_object("stress-test", &key).await?;
|
||||
println!();
|
||||
}
|
||||
|
||||
client.delete_bucket("stress-test").await?;
|
||||
println!("=== Done ===");
|
||||
Ok(())
|
||||
}
|
||||
Reference in New Issue
Block a user