feat: add post3 s3 proxy for postgresql

Signed-off-by: kjuulh <contact@kjuulh.io>
This commit is contained in:
2026-02-27 11:37:48 +01:00
commit 21bac4a33f
67 changed files with 14403 additions and 0 deletions

View File

@@ -0,0 +1,107 @@
//! Use aws-sdk-s3 directly against post3 (without the post3-sdk wrapper).
//! Shows the raw configuration needed.
//!
//! Prerequisites: post3-server running on localhost:9000
//! mise run up && mise run dev
//!
//! Run:
//! cargo run -p post3-sdk --example aws_sdk_direct
use post3_sdk::aws_sdk_s3;
/// Exercise post3 through a hand-built aws-sdk-s3 client (no SDK wrapper):
/// create a bucket, round-trip an object, list, head, and clean up.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // POST3_ENDPOINT overrides the default local dev server address.
    let endpoint = std::env::var("POST3_ENDPOINT")
        .unwrap_or_else(|_| "http://localhost:9000".to_string());

    // Static credentials built by hand; the values are irrelevant when the
    // server runs with authentication disabled.
    let credentials = aws_sdk_s3::config::Credentials::new(
        "test",    // access key (any value works when auth is disabled)
        "test",    // secret key
        None,      // session token
        None,      // expiry
        "example", // provider name
    );

    // Manual aws-sdk-s3 configuration pointed at post3. Path-style addressing
    // is mandatory: post3 does not resolve virtual-hosted bucket subdomains.
    let s3_config = aws_sdk_s3::Config::builder()
        .behavior_version_latest()
        .region(aws_sdk_s3::config::Region::new("us-east-1"))
        .endpoint_url(&endpoint)
        .credentials_provider(credentials)
        .force_path_style(true)
        .build();
    let client = aws_sdk_s3::Client::from_conf(s3_config);

    // Bucket setup.
    println!("Creating bucket...");
    client.create_bucket().bucket("direct-bucket").send().await?;

    // Write one small object.
    println!("Putting object...");
    client
        .put_object()
        .bucket("direct-bucket")
        .key("greeting.txt")
        .body(b"Hello from aws-sdk-s3!".to_vec().into())
        .send()
        .await?;

    // Read the object back and print its contents.
    let get_resp = client
        .get_object()
        .bucket("direct-bucket")
        .key("greeting.txt")
        .send()
        .await?;
    let payload = get_resp.body.collect().await?.into_bytes();
    println!("Got: {}", String::from_utf8_lossy(&payload));

    // Enumerate the bucket.
    let listing = client
        .list_objects_v2()
        .bucket("direct-bucket")
        .send()
        .await?;
    println!("Objects:");
    for entry in listing.contents() {
        println!(
            " {} ({} bytes)",
            entry.key().unwrap_or("?"),
            entry.size().unwrap_or(0)
        );
    }

    // Inspect metadata without fetching the body.
    let head_resp = client
        .head_object()
        .bucket("direct-bucket")
        .key("greeting.txt")
        .send()
        .await?;
    println!(
        "Head: size={}, etag={:?}",
        head_resp.content_length().unwrap_or(0),
        head_resp.e_tag()
    );

    // Remove the object, then the now-empty bucket.
    client
        .delete_object()
        .bucket("direct-bucket")
        .key("greeting.txt")
        .send()
        .await?;
    client.delete_bucket().bucket("direct-bucket").send().await?;

    println!("Done!");
    Ok(())
}

View File

@@ -0,0 +1,76 @@
//! Basic post3 usage: create a bucket, put/get/delete objects, list objects.
//!
//! Prerequisites: post3-server running on localhost:9000
//! mise run up && mise run dev
//!
//! Run:
//! cargo run -p post3-sdk --example basic
use post3_sdk::Post3Client;
/// Walk the basic post3 SDK surface: bucket create/list, object
/// put/get/list/delete, and bucket delete.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Endpoint defaults to the local dev server; override with POST3_ENDPOINT.
    let endpoint = std::env::var("POST3_ENDPOINT")
        .unwrap_or_else(|_| "http://localhost:9000".to_string());
    let client = Post3Client::new(&endpoint);

    // Bucket lifecycle: create it, then show it in the listing.
    println!("Creating bucket 'example-bucket'...");
    client.create_bucket("example-bucket").await?;
    let bucket_names = client.list_buckets().await?;
    println!("Buckets: {:?}", bucket_names);

    // Round-trip a single object.
    println!("Putting 'hello.txt'...");
    client
        .put_object("example-bucket", "hello.txt", b"Hello, post3!")
        .await?;
    let fetched = client.get_object("example-bucket", "hello.txt").await?;
    println!("Got: {}", String::from_utf8_lossy(&fetched));

    // Add two objects under a shared prefix so listing has something to show.
    client
        .put_object("example-bucket", "docs/readme.md", b"# README")
        .await?;
    client
        .put_object("example-bucket", "docs/guide.md", b"# Guide")
        .await?;

    // Unfiltered listing...
    let all_objects = client.list_objects("example-bucket", None).await?;
    println!("All objects:");
    for entry in &all_objects {
        println!(" {} ({} bytes)", entry.key, entry.size);
    }

    // ...and a prefix-filtered one.
    let doc_objects = client
        .list_objects("example-bucket", Some("docs/"))
        .await?;
    println!("Objects under docs/:");
    for entry in &doc_objects {
        println!(" {} ({} bytes)", entry.key, entry.size);
    }

    // Remove every object (same order as they were created), then the bucket.
    println!("Cleaning up...");
    for key in ["hello.txt", "docs/readme.md", "docs/guide.md"] {
        client.delete_object("example-bucket", key).await?;
    }
    client.delete_bucket("example-bucket").await?;

    println!("Done!");
    Ok(())
}

View File

@@ -0,0 +1,161 @@
//! Stress test: upload and verify large files.
//!
//! Tests progressively larger files to find limits and measure performance.
//! Generates deterministic pseudo-random data so we can verify integrity
//! without keeping the full payload in memory twice.
//!
//! Prerequisites: post3-server running on localhost:9000
//! mise run up && mise run dev
//!
//! Run:
//! cargo run -p post3-sdk --example large_upload --release
//!
//! Or with custom sizes (in MB):
//! POST3_SIZES=10,50,100,500,1024 cargo run -p post3-sdk --example large_upload --release
use post3_sdk::Post3Client;
use std::time::Instant;
/// Produce exactly `size_bytes` of deterministic pseudo-random data.
///
/// An xorshift64 generator seeded with a fixed constant emits 8-byte
/// little-endian words; the buffer is truncated to the requested length,
/// so equal lengths always yield byte-identical output (which is what the
/// post-download integrity check relies on).
fn generate_data(size_bytes: usize) -> Vec<u8> {
    let mut state: u64 = 0xdeadbeef;
    // Ceiling-divide so the final partial word is covered before truncation.
    let mut data: Vec<u8> = std::iter::repeat_with(|| {
        state ^= state << 13;
        state ^= state >> 7;
        state ^= state << 17;
        state.to_le_bytes()
    })
    .take((size_bytes + 7) / 8)
    .flatten()
    .collect();
    data.truncate(size_bytes);
    data
}
/// Render a byte count as a human-readable string: B, KiB, MiB, or GiB,
/// choosing the largest unit the value reaches (one decimal place).
fn format_size(bytes: usize) -> String {
    const KIB: f64 = 1024.0;
    let b = bytes as f64;
    match bytes {
        _ if bytes >= 1024 * 1024 * 1024 => format!("{:.1} GiB", b / (KIB * KIB * KIB)),
        _ if bytes >= 1024 * 1024 => format!("{:.1} MiB", b / (KIB * KIB)),
        _ if bytes >= 1024 => format!("{:.1} KiB", b / KIB),
        _ => format!("{} B", bytes),
    }
}
/// Express `bytes` transferred over `duration` as "X.X MiB/s".
/// A zero-length duration yields an empty string (the rate is undefined).
fn format_throughput(bytes: usize, duration: std::time::Duration) -> String {
    let secs = duration.as_secs_f64();
    if secs == 0.0 {
        String::new()
    } else {
        format!("{:.1} MiB/s", bytes as f64 / (1024.0 * 1024.0) / secs)
    }
}
/// Entry point for the large-file stress test.
///
/// For each size in POST3_SIZES (MB): generate a deterministic payload,
/// upload it, head it, download it, compare byte-for-byte, and delete it.
/// A failed upload aborts the remaining (larger) sizes on the assumption
/// they would hit the same server limit.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // POST3_ENDPOINT overrides the default local dev server address.
    let endpoint = std::env::var("POST3_ENDPOINT")
        .unwrap_or_else(|_| "http://localhost:9000".to_string());
    let client = Post3Client::new(&endpoint);
    // Parse sizes from env or use defaults
    // (entries that fail to parse are dropped silently).
    let sizes_mb: Vec<usize> = std::env::var("POST3_SIZES")
        .unwrap_or_else(|_| "1,10,50,100,500,1024,2048".to_string())
        .split(',')
        .filter_map(|s| s.trim().parse().ok())
        .collect();
    println!("=== post3 Large File Stress Test ===");
    println!("Endpoint: {}", endpoint);
    println!("Sizes: {:?} MB", sizes_mb);
    println!();
    client.create_bucket("stress-test").await?;
    for size_mb in &sizes_mb {
        let size_bytes = size_mb * 1024 * 1024;
        let key = format!("test-{}mb.bin", size_mb);
        println!("--- {} ---", format_size(size_bytes));
        // Generate data
        print!(" Generating data... ");
        let gen_start = Instant::now();
        let data = generate_data(size_bytes);
        println!("done ({:.1}s)", gen_start.elapsed().as_secs_f64());
        // Upload
        print!(" Uploading... ");
        let upload_start = Instant::now();
        match client.put_object("stress-test", &key, &data).await {
            Ok(()) => {
                let upload_dur = upload_start.elapsed();
                println!(
                    "done ({:.1}s, {})",
                    upload_dur.as_secs_f64(),
                    format_throughput(size_bytes, upload_dur)
                );
            }
            Err(e) => {
                // Larger sizes are assumed to fail for the same reason,
                // so stop iterating rather than waste time.
                println!("FAILED: {}", e);
                println!(" Skipping remaining sizes (hit server limit)");
                break;
            }
        }
        // Head (verify metadata)
        // NOTE(review): head_object appears to return an Option — None here
        // would mean the object vanished right after upload; confirm against
        // the SDK's contract.
        let head = client.head_object("stress-test", &key).await?;
        if let Some(info) = &head {
            println!(
                " Head: size={}, etag={:?}",
                format_size(info.size as usize),
                info.etag
            );
        }
        // Download
        print!(" Downloading... ");
        let download_start = Instant::now();
        match client.get_object("stress-test", &key).await {
            Ok(downloaded) => {
                let download_dur = download_start.elapsed();
                println!(
                    "done ({:.1}s, {})",
                    download_dur.as_secs_f64(),
                    format_throughput(size_bytes, download_dur)
                );
                // Verify integrity
                print!(" Verifying... ");
                if downloaded.len() != data.len() {
                    println!(
                        "FAILED: size mismatch (expected {}, got {})",
                        data.len(),
                        downloaded.len()
                    );
                } else if downloaded.as_ref() == data.as_slice() {
                    println!("OK (byte-for-byte match)");
                } else {
                    // Find first mismatch
                    // (the unwrap_or(0) fallback is unreachable here: the
                    // lengths match and the slices are known to differ).
                    let pos = data
                        .iter()
                        .zip(downloaded.iter())
                        .position(|(a, b)| a != b)
                        .unwrap_or(0);
                    println!("FAILED: mismatch at byte {}", pos);
                }
            }
            Err(e) => {
                println!("FAILED: {}", e);
            }
        }
        // Cleanup this object
        // (runs even after a failed download so delete_bucket can succeed).
        client.delete_object("stress-test", &key).await?;
        println!();
    }
    client.delete_bucket("stress-test").await?;
    println!("=== Done ===");
    Ok(())
}

View File

@@ -0,0 +1,78 @@
//! Demonstrate custom metadata (x-amz-meta-*) with post3.
//!
//! Prerequisites: post3-server running on localhost:9000
//! mise run up && mise run dev
//!
//! Run:
//! cargo run -p post3-sdk --example metadata
use post3_sdk::Post3Client;
/// Demonstrate custom x-amz-meta-* metadata: write an object with metadata
/// through the raw aws-sdk-s3 client, read it back via head and get, then
/// clean up through the wrapper client.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Endpoint defaults to the local dev server; override with POST3_ENDPOINT.
    let endpoint = std::env::var("POST3_ENDPOINT")
        .unwrap_or_else(|_| "http://localhost:9000".to_string());
    let client = Post3Client::new(&endpoint);
    client.create_bucket("meta-bucket").await?;

    // Custom metadata needs the underlying aws-sdk-s3 client, which the
    // wrapper exposes via inner().
    let s3 = client.inner();
    println!("Putting object with custom metadata...");
    s3.put_object()
        .bucket("meta-bucket")
        .key("report.pdf")
        .body(b"fake pdf content".to_vec().into())
        .content_type("application/pdf")
        .metadata("author", "alice")
        .metadata("department", "engineering")
        .metadata("version", "2")
        .send()
        .await?;

    // head_object returns the metadata without transferring the body.
    let head_resp = s3
        .head_object()
        .bucket("meta-bucket")
        .key("report.pdf")
        .send()
        .await?;
    println!("Content-Type: {:?}", head_resp.content_type());
    println!("Content-Length: {:?}", head_resp.content_length());
    println!("ETag: {:?}", head_resp.e_tag());
    if let Some(meta) = head_resp.metadata() {
        println!("Custom metadata:");
        for (name, value) in meta {
            println!(" x-amz-meta-{}: {}", name, value);
        }
    }

    // get_object carries the same metadata alongside the body.
    let get_resp = s3
        .get_object()
        .bucket("meta-bucket")
        .key("report.pdf")
        .send()
        .await?;
    println!("\nGet object response:");
    println!(" Content-Type: {:?}", get_resp.content_type());
    if let Some(meta) = get_resp.metadata() {
        println!(" Metadata:");
        for (name, value) in meta {
            println!(" x-amz-meta-{}: {}", name, value);
        }
    }
    let bytes = get_resp.body.collect().await?.into_bytes();
    println!(" Body: {}", String::from_utf8_lossy(&bytes));

    // Cleanup via the wrapper client.
    client.delete_object("meta-bucket", "report.pdf").await?;
    client.delete_bucket("meta-bucket").await?;
    println!("\nDone!");
    Ok(())
}

View File

@@ -0,0 +1,177 @@
//! Stress test: multipart upload and verify huge files (4–16 GiB).
//!
//! Uses the SDK's multipart_upload convenience method which splits data into
//! parts and uploads them sequentially via CreateMultipartUpload / UploadPart /
//! CompleteMultipartUpload.
//!
//! Prerequisites: post3-server running on localhost:9000
//! mise run up && mise run dev
//!
//! Run:
//! cargo run -p post3-sdk --example multipart_upload --release
//!
//! Or with custom sizes (in MB) and part size:
//! POST3_SIZES=4096,8192,16384 POST3_PART_SIZE=64 cargo run -p post3-sdk --example multipart_upload --release
use post3_sdk::Post3Client;
use std::time::Instant;
/// Deterministic pseudo-random payload of exactly `size_bytes` bytes.
///
/// Runs an xorshift64 generator from a fixed seed and appends its output
/// in 8-byte little-endian words, then truncates; identical lengths always
/// produce identical bytes, which the download verification depends on.
fn generate_data(size_bytes: usize) -> Vec<u8> {
    let mut data = Vec::with_capacity(size_bytes);
    let mut state: u64 = 0xdeadbeef;
    // Ceiling division: one extra word covers any trailing partial chunk.
    for _ in 0..(size_bytes + 7) / 8 {
        state ^= state << 13;
        state ^= state >> 7;
        state ^= state << 17;
        data.extend_from_slice(&state.to_le_bytes());
    }
    data.truncate(size_bytes);
    data
}
/// Human-readable rendering of a byte count, using the largest binary
/// unit (B, KiB, MiB, GiB) the value reaches, with one decimal place.
fn format_size(bytes: usize) -> String {
    const GIB: usize = 1024 * 1024 * 1024;
    const MIB: usize = 1024 * 1024;
    const KIB: usize = 1024;
    if bytes >= GIB {
        format!("{:.1} GiB", bytes as f64 / GIB as f64)
    } else if bytes >= MIB {
        format!("{:.1} MiB", bytes as f64 / MIB as f64)
    } else if bytes >= KIB {
        format!("{:.1} KiB", bytes as f64 / KIB as f64)
    } else {
        format!("{} B", bytes)
    }
}
/// Format a transfer rate as "X.X MiB/s" for `bytes` moved in `duration`.
/// Returns an empty string when the duration is exactly zero seconds.
fn format_throughput(bytes: usize, duration: std::time::Duration) -> String {
    match duration.as_secs_f64() {
        s if s == 0.0 => String::new(),
        s => format!("{:.1} MiB/s", bytes as f64 / (1024.0 * 1024.0) / s),
    }
}
/// Entry point for the multipart-upload stress test.
///
/// Sizes (MB) come from POST3_SIZES and the part size (MiB) from
/// POST3_PART_SIZE, both falling back to defaults when unset or invalid.
/// Each size is generated deterministically, uploaded via the SDK's
/// multipart helper, verified by a byte-for-byte download comparison,
/// and deleted again.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // POST3_ENDPOINT overrides the default local dev server address.
    let endpoint = std::env::var("POST3_ENDPOINT")
        .unwrap_or_else(|_| "http://localhost:9000".to_string());
    let client = Post3Client::new(&endpoint);
    // Sizes to test, in MB; entries that fail to parse are dropped silently.
    let sizes_mb: Vec<usize> = std::env::var("POST3_SIZES")
        .unwrap_or_else(|_| "100,1024,4096,8192,16384".to_string())
        .split(',')
        .filter_map(|s| s.trim().parse().ok())
        .collect();
    // Part size in MiB (default 64 MiB — good balance of part count vs memory).
    // FIX: POST3_PART_SIZE=0 used to parse successfully and trigger a
    // divide-by-zero panic when computing num_parts below; non-positive
    // values now fall back to the default instead.
    let part_size_mb: usize = std::env::var("POST3_PART_SIZE")
        .unwrap_or_else(|_| "64".to_string())
        .parse()
        .ok()
        .filter(|&mb| mb > 0)
        .unwrap_or(64);
    let part_size = part_size_mb * 1024 * 1024;
    println!("=== post3 Multipart Upload Stress Test ===");
    println!("Endpoint: {}", endpoint);
    println!("Sizes: {:?} MB", sizes_mb);
    println!("Part size: {} MiB", part_size_mb);
    println!();
    client.create_bucket("mp-stress").await?;
    for size_mb in &sizes_mb {
        let size_bytes = size_mb * 1024 * 1024;
        let key = format!("mp-test-{}mb.bin", size_mb);
        // Ceiling division; part_size is guaranteed non-zero by the guard above.
        let num_parts = (size_bytes + part_size - 1) / part_size;
        println!("--- {} ({} parts of {} each) ---",
            format_size(size_bytes),
            num_parts,
            format_size(part_size.min(size_bytes)),
        );
        // Generate data
        print!(" Generating data... ");
        let gen_start = Instant::now();
        let data = generate_data(size_bytes);
        println!("done ({:.1}s)", gen_start.elapsed().as_secs_f64());
        // Multipart upload
        print!(" Uploading (multipart)... ");
        let upload_start = Instant::now();
        match client.multipart_upload("mp-stress", &key, &data, part_size).await {
            Ok(()) => {
                let upload_dur = upload_start.elapsed();
                println!(
                    "done ({:.1}s, {})",
                    upload_dur.as_secs_f64(),
                    format_throughput(size_bytes, upload_dur)
                );
            }
            Err(e) => {
                // Larger sizes are assumed to fail for the same reason.
                println!("FAILED: {}", e);
                println!(" Skipping remaining sizes");
                break;
            }
        }
        // Head (verify metadata)
        let head = client.head_object("mp-stress", &key).await?;
        if let Some(info) = &head {
            println!(
                " Head: size={}, etag={:?}",
                format_size(info.size as usize),
                info.etag
            );
            // Verify the compound ETag format (md5-N)
            if let Some(ref etag) = info.etag {
                let stripped = etag.trim_matches('"');
                if stripped.contains('-') {
                    let parts_str = stripped.split('-').last().unwrap_or("?");
                    println!(" ETag format: compound ({} parts)", parts_str);
                }
            }
        }
        // Download and verify
        print!(" Downloading... ");
        let download_start = Instant::now();
        match client.get_object("mp-stress", &key).await {
            Ok(downloaded) => {
                let download_dur = download_start.elapsed();
                println!(
                    "done ({:.1}s, {})",
                    download_dur.as_secs_f64(),
                    format_throughput(size_bytes, download_dur)
                );
                print!(" Verifying... ");
                if downloaded.len() != data.len() {
                    println!(
                        "FAILED: size mismatch (expected {}, got {})",
                        data.len(),
                        downloaded.len()
                    );
                } else if downloaded.as_ref() == data.as_slice() {
                    println!("OK (byte-for-byte match)");
                } else {
                    // Locate the first differing byte for diagnostics.
                    let pos = data
                        .iter()
                        .zip(downloaded.iter())
                        .position(|(a, b)| a != b)
                        .unwrap_or(0);
                    println!("FAILED: mismatch at byte {}", pos);
                }
            }
            Err(e) => {
                println!("FAILED: {}", e);
            }
        }
        // Cleanup (runs even after a failed download so delete_bucket succeeds).
        client.delete_object("mp-stress", &key).await?;
        println!();
    }
    client.delete_bucket("mp-stress").await?;
    println!("=== Done ===");
    Ok(())
}