feat: add post3 s3 proxy for postgresql
Signed-off-by: kjuulh <contact@kjuulh.io>
This commit is contained in:
17
crates/post3-sdk/Cargo.toml
Normal file
17
crates/post3-sdk/Cargo.toml
Normal file
@@ -0,0 +1,17 @@
[package]
name = "post3-sdk"
# Version and edition are inherited from the workspace root.
version.workspace = true
edition.workspace = true

[dependencies]
# S3 client the SDK wraps; post3 speaks the S3 wire protocol.
aws-sdk-s3 = "1"
# "hardcoded-credentials" enables Credentials::new for static test credentials.
aws-credential-types = { version = "1", features = ["hardcoded-credentials"] }
aws-types = "1"
aws-config = "1"
bytes.workspace = true
thiserror.workspace = true
chrono.workspace = true

# Needed only by examples and tests (async runtime + ad-hoc errors in mains).
[dev-dependencies]
tokio.workspace = true
anyhow.workspace = true
107
crates/post3-sdk/examples/aws_sdk_direct.rs
Normal file
107
crates/post3-sdk/examples/aws_sdk_direct.rs
Normal file
@@ -0,0 +1,107 @@
|
||||
//! Use aws-sdk-s3 directly against post3 (without the post3-sdk wrapper).
|
||||
//! Shows the raw configuration needed.
|
||||
//!
|
||||
//! Prerequisites: post3-server running on localhost:9000
|
||||
//! mise run up && mise run dev
|
||||
//!
|
||||
//! Run:
|
||||
//! cargo run -p post3-sdk --example aws_sdk_direct
|
||||
|
||||
use post3_sdk::aws_sdk_s3;
|
||||
|
||||
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // POST3_ENDPOINT overrides the default local dev server address.
    let endpoint = std::env::var("POST3_ENDPOINT")
        .unwrap_or_else(|_| "http://localhost:9000".to_string());

    // Configure aws-sdk-s3 manually for post3
    let creds = aws_sdk_s3::config::Credentials::new(
        "test", // access key (any value works when auth is disabled)
        "test", // secret key
        None, // session token
        None, // expiry
        "example", // provider name
    );

    let config = aws_sdk_s3::Config::builder()
        .behavior_version_latest()
        // Region is required by the SDK builder even for a local endpoint.
        .region(aws_sdk_s3::config::Region::new("us-east-1"))
        .endpoint_url(&endpoint)
        .credentials_provider(creds)
        .force_path_style(true) // Required: post3 uses path-style, not virtual-hosted
        .build();

    let client = aws_sdk_s3::Client::from_conf(config);

    // Create bucket
    println!("Creating bucket...");
    client
        .create_bucket()
        .bucket("direct-bucket")
        .send()
        .await?;

    // Put object
    println!("Putting object...");
    client
        .put_object()
        .bucket("direct-bucket")
        .key("greeting.txt")
        .body(Vec::from(&b"Hello from aws-sdk-s3!"[..]).into())
        .send()
        .await?;

    // Get object and read the streamed body fully into memory.
    let resp = client
        .get_object()
        .bucket("direct-bucket")
        .key("greeting.txt")
        .send()
        .await?;
    let body = resp.body.collect().await?.into_bytes();
    println!("Got: {}", String::from_utf8_lossy(&body));

    // List objects
    let list = client
        .list_objects_v2()
        .bucket("direct-bucket")
        .send()
        .await?;
    println!("Objects:");
    for obj in list.contents() {
        println!(
            " {} ({} bytes)",
            obj.key().unwrap_or("?"),
            obj.size().unwrap_or(0)
        );
    }

    // Head object
    let head = client
        .head_object()
        .bucket("direct-bucket")
        .key("greeting.txt")
        .send()
        .await?;
    println!(
        "Head: size={}, etag={:?}",
        head.content_length().unwrap_or(0),
        head.e_tag()
    );

    // Cleanup: object first, then the (now empty) bucket.
    client
        .delete_object()
        .bucket("direct-bucket")
        .key("greeting.txt")
        .send()
        .await?;
    client
        .delete_bucket()
        .bucket("direct-bucket")
        .send()
        .await?;
    println!("Done!");

    Ok(())
}
|
||||
76
crates/post3-sdk/examples/basic.rs
Normal file
76
crates/post3-sdk/examples/basic.rs
Normal file
@@ -0,0 +1,76 @@
|
||||
//! Basic post3 usage: create a bucket, put/get/delete objects, list objects.
|
||||
//!
|
||||
//! Prerequisites: post3-server running on localhost:9000
|
||||
//! mise run up && mise run dev
|
||||
//!
|
||||
//! Run:
|
||||
//! cargo run -p post3-sdk --example basic
|
||||
|
||||
use post3_sdk::Post3Client;
|
||||
|
||||
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // POST3_ENDPOINT overrides the default local dev server address.
    let endpoint = std::env::var("POST3_ENDPOINT")
        .unwrap_or_else(|_| "http://localhost:9000".to_string());
    let client = Post3Client::new(&endpoint);

    // Create a bucket
    println!("Creating bucket 'example-bucket'...");
    client.create_bucket("example-bucket").await?;

    // List buckets
    let buckets = client.list_buckets().await?;
    println!("Buckets: {:?}", buckets);

    // Put an object
    println!("Putting 'hello.txt'...");
    client
        .put_object("example-bucket", "hello.txt", b"Hello, post3!")
        .await?;

    // Get the object back
    let data = client.get_object("example-bucket", "hello.txt").await?;
    println!("Got: {}", String::from_utf8_lossy(&data));

    // Put a few more objects (keys with a "docs/" prefix for the filter demo below)
    client
        .put_object("example-bucket", "docs/readme.md", b"# README")
        .await?;
    client
        .put_object("example-bucket", "docs/guide.md", b"# Guide")
        .await?;

    // List all objects
    let objects = client.list_objects("example-bucket", None).await?;
    println!("All objects:");
    for obj in &objects {
        println!(" {} ({} bytes)", obj.key, obj.size);
    }

    // List with prefix filter
    let docs = client
        .list_objects("example-bucket", Some("docs/"))
        .await?;
    println!("Objects under docs/:");
    for obj in &docs {
        println!(" {} ({} bytes)", obj.key, obj.size);
    }

    // Delete objects (the bucket must be empty before delete_bucket)
    println!("Cleaning up...");
    client
        .delete_object("example-bucket", "hello.txt")
        .await?;
    client
        .delete_object("example-bucket", "docs/readme.md")
        .await?;
    client
        .delete_object("example-bucket", "docs/guide.md")
        .await?;

    // Delete the bucket
    client.delete_bucket("example-bucket").await?;
    println!("Done!");

    Ok(())
}
|
||||
161
crates/post3-sdk/examples/large_upload.rs
Normal file
161
crates/post3-sdk/examples/large_upload.rs
Normal file
@@ -0,0 +1,161 @@
|
||||
//! Stress test: upload and verify large files.
|
||||
//!
|
||||
//! Tests progressively larger files to find limits and measure performance.
|
||||
//! Generates deterministic pseudo-random data so we can verify integrity
|
||||
//! without keeping the full payload in memory twice.
|
||||
//!
|
||||
//! Prerequisites: post3-server running on localhost:9000
|
||||
//! mise run up && mise run dev
|
||||
//!
|
||||
//! Run:
|
||||
//! cargo run -p post3-sdk --example large_upload --release
|
||||
//!
|
||||
//! Or with custom sizes (in MB):
|
||||
//! POST3_SIZES=10,50,100,500,1024 cargo run -p post3-sdk --example large_upload --release
|
||||
|
||||
use post3_sdk::Post3Client;
|
||||
use std::time::Instant;
|
||||
|
||||
/// Build `size_bytes` of deterministic pseudo-random bytes.
///
/// Uses a fixed-seed xorshift64 generator emitted 8 bytes at a time, so the
/// same length always yields the same payload and integrity can be verified
/// without storing a second copy.
fn generate_data(size_bytes: usize) -> Vec<u8> {
    let mut state: u64 = 0xdeadbeef;
    // One xorshift64 step per call; the stream is a pure function of the seed.
    let mut next_word = move || {
        state ^= state << 13;
        state ^= state >> 7;
        state ^= state << 17;
        state
    };

    let mut out = Vec::with_capacity(size_bytes);
    while out.len() < size_bytes {
        out.extend_from_slice(&next_word().to_le_bytes());
    }
    // The last 8-byte word may overshoot; trim to the exact requested length.
    out.truncate(size_bytes);
    out
}
|
||||
|
||||
/// Render a byte count as a human-readable string using binary units
/// (B, KiB, MiB, GiB) with one decimal place above the byte range.
fn format_size(bytes: usize) -> String {
    const KIB: usize = 1024;
    const MIB: usize = 1024 * KIB;
    const GIB: usize = 1024 * MIB;

    if bytes >= GIB {
        format!("{:.1} GiB", bytes as f64 / GIB as f64)
    } else if bytes >= MIB {
        format!("{:.1} MiB", bytes as f64 / MIB as f64)
    } else if bytes >= KIB {
        format!("{:.1} KiB", bytes as f64 / KIB as f64)
    } else {
        format!("{} B", bytes)
    }
}
|
||||
|
||||
/// Render a transfer rate as "X.Y MiB/s" for `bytes` moved in `duration`.
/// A zero-length duration yields "∞" instead of dividing by zero.
fn format_throughput(bytes: usize, duration: std::time::Duration) -> String {
    let secs = duration.as_secs_f64();
    if secs == 0.0 {
        "∞".to_string()
    } else {
        format!("{:.1} MiB/s", bytes as f64 / (1024.0 * 1024.0) / secs)
    }
}
|
||||
|
||||
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // POST3_ENDPOINT overrides the default local dev server address.
    let endpoint = std::env::var("POST3_ENDPOINT")
        .unwrap_or_else(|_| "http://localhost:9000".to_string());
    let client = Post3Client::new(&endpoint);

    // Parse sizes from env or use defaults; unparseable entries are skipped.
    let sizes_mb: Vec<usize> = std::env::var("POST3_SIZES")
        .unwrap_or_else(|_| "1,10,50,100,500,1024,2048".to_string())
        .split(',')
        .filter_map(|s| s.trim().parse().ok())
        .collect();

    println!("=== post3 Large File Stress Test ===");
    println!("Endpoint: {}", endpoint);
    println!("Sizes: {:?} MB", sizes_mb);
    println!();

    client.create_bucket("stress-test").await?;

    // NOTE(review): each payload (and its downloaded copy) is held fully in
    // memory, so the largest usable size is bounded by available RAM.
    for size_mb in &sizes_mb {
        let size_bytes = size_mb * 1024 * 1024;
        let key = format!("test-{}mb.bin", size_mb);

        println!("--- {} ---", format_size(size_bytes));

        // Generate data
        print!(" Generating data... ");
        let gen_start = Instant::now();
        let data = generate_data(size_bytes);
        println!("done ({:.1}s)", gen_start.elapsed().as_secs_f64());

        // Upload
        print!(" Uploading... ");
        let upload_start = Instant::now();
        match client.put_object("stress-test", &key, &data).await {
            Ok(()) => {
                let upload_dur = upload_start.elapsed();
                println!(
                    "done ({:.1}s, {})",
                    upload_dur.as_secs_f64(),
                    format_throughput(size_bytes, upload_dur)
                );
            }
            Err(e) => {
                // Larger sizes would also fail, so stop the whole run here.
                println!("FAILED: {}", e);
                println!(" Skipping remaining sizes (hit server limit)");
                break;
            }
        }

        // Head (verify metadata)
        let head = client.head_object("stress-test", &key).await?;
        if let Some(info) = &head {
            println!(
                " Head: size={}, etag={:?}",
                format_size(info.size as usize),
                info.etag
            );
        }

        // Download
        print!(" Downloading... ");
        let download_start = Instant::now();
        match client.get_object("stress-test", &key).await {
            Ok(downloaded) => {
                let download_dur = download_start.elapsed();
                println!(
                    "done ({:.1}s, {})",
                    download_dur.as_secs_f64(),
                    format_throughput(size_bytes, download_dur)
                );

                // Verify integrity against the generated payload.
                print!(" Verifying... ");
                if downloaded.len() != data.len() {
                    println!(
                        "FAILED: size mismatch (expected {}, got {})",
                        data.len(),
                        downloaded.len()
                    );
                } else if downloaded.as_ref() == data.as_slice() {
                    println!("OK (byte-for-byte match)");
                } else {
                    // Find first mismatch for easier server-side debugging.
                    let pos = data
                        .iter()
                        .zip(downloaded.iter())
                        .position(|(a, b)| a != b)
                        .unwrap_or(0);
                    println!("FAILED: mismatch at byte {}", pos);
                }
            }
            Err(e) => {
                println!("FAILED: {}", e);
            }
        }

        // Cleanup this object before the next (larger) round.
        client.delete_object("stress-test", &key).await?;
        println!();
    }

    client.delete_bucket("stress-test").await?;
    println!("=== Done ===");
    Ok(())
}
|
||||
78
crates/post3-sdk/examples/metadata.rs
Normal file
78
crates/post3-sdk/examples/metadata.rs
Normal file
@@ -0,0 +1,78 @@
|
||||
//! Demonstrate custom metadata (x-amz-meta-*) with post3.
|
||||
//!
|
||||
//! Prerequisites: post3-server running on localhost:9000
|
||||
//! mise run up && mise run dev
|
||||
//!
|
||||
//! Run:
|
||||
//! cargo run -p post3-sdk --example metadata
|
||||
|
||||
use post3_sdk::Post3Client;
|
||||
|
||||
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // POST3_ENDPOINT overrides the default local dev server address.
    let endpoint = std::env::var("POST3_ENDPOINT")
        .unwrap_or_else(|_| "http://localhost:9000".to_string());
    let client = Post3Client::new(&endpoint);

    client.create_bucket("meta-bucket").await?;

    // Use the inner aws-sdk-s3 client to set custom metadata; the high-level
    // wrapper's put_object does not expose content-type or x-amz-meta-* headers.
    let inner = client.inner();
    println!("Putting object with custom metadata...");
    inner
        .put_object()
        .bucket("meta-bucket")
        .key("report.pdf")
        .body(Vec::from(&b"fake pdf content"[..]).into())
        .content_type("application/pdf")
        .metadata("author", "alice")
        .metadata("department", "engineering")
        .metadata("version", "2")
        .send()
        .await?;

    // Retrieve metadata via head_object (no body transfer)
    let head = inner
        .head_object()
        .bucket("meta-bucket")
        .key("report.pdf")
        .send()
        .await?;

    println!("Content-Type: {:?}", head.content_type());
    println!("Content-Length: {:?}", head.content_length());
    println!("ETag: {:?}", head.e_tag());
    if let Some(metadata) = head.metadata() {
        println!("Custom metadata:");
        for (k, v) in metadata {
            // Keys are returned without their x-amz-meta- wire prefix.
            println!(" x-amz-meta-{}: {}", k, v);
        }
    }

    // Retrieve the full object; metadata also rides along on GetObject.
    let resp = inner
        .get_object()
        .bucket("meta-bucket")
        .key("report.pdf")
        .send()
        .await?;

    println!("\nGet object response:");
    println!(" Content-Type: {:?}", resp.content_type());
    if let Some(metadata) = resp.metadata() {
        println!(" Metadata:");
        for (k, v) in metadata {
            println!(" x-amz-meta-{}: {}", k, v);
        }
    }

    let body = resp.body.collect().await?.into_bytes();
    println!(" Body: {}", String::from_utf8_lossy(&body));

    // Cleanup
    client.delete_object("meta-bucket", "report.pdf").await?;
    client.delete_bucket("meta-bucket").await?;
    println!("\nDone!");

    Ok(())
}
|
||||
177
crates/post3-sdk/examples/multipart_upload.rs
Normal file
177
crates/post3-sdk/examples/multipart_upload.rs
Normal file
@@ -0,0 +1,177 @@
|
||||
//! Stress test: multipart upload and verify huge files (4–16 GiB).
|
||||
//!
|
||||
//! Uses the SDK's multipart_upload convenience method which splits data into
|
||||
//! parts and uploads them sequentially via CreateMultipartUpload / UploadPart /
|
||||
//! CompleteMultipartUpload.
|
||||
//!
|
||||
//! Prerequisites: post3-server running on localhost:9000
|
||||
//! mise run up && mise run dev
|
||||
//!
|
||||
//! Run:
|
||||
//! cargo run -p post3-sdk --example multipart_upload --release
|
||||
//!
|
||||
//! Or with custom sizes (in MB) and part size:
|
||||
//! POST3_SIZES=4096,8192,16384 POST3_PART_SIZE=64 cargo run -p post3-sdk --example multipart_upload --release
|
||||
|
||||
use post3_sdk::Post3Client;
|
||||
use std::time::Instant;
|
||||
|
||||
/// Build `size_bytes` of deterministic pseudo-random bytes from a fixed-seed
/// xorshift64 stream, 8 bytes per step, trimmed to the exact length.
fn generate_data(size_bytes: usize) -> Vec<u8> {
    // Number of 8-byte words needed to cover the request (ceiling division).
    let n_words = size_bytes / 8 + usize::from(size_bytes % 8 != 0);

    let mut state: u64 = 0xdeadbeef;
    let mut data = Vec::with_capacity(n_words * 8);
    for _ in 0..n_words {
        state ^= state << 13;
        state ^= state >> 7;
        state ^= state << 17;
        data.extend_from_slice(&state.to_le_bytes());
    }
    data.truncate(size_bytes);
    data
}
|
||||
|
||||
/// Render a byte count using binary units (B, KiB, MiB, GiB).
/// Values at or above each unit boundary get one decimal place.
fn format_size(bytes: usize) -> String {
    // Largest unit first; the first threshold that fits wins.
    let units = [
        (1usize << 30, "GiB"),
        (1 << 20, "MiB"),
        (1 << 10, "KiB"),
    ];
    for (scale, suffix) in units {
        if bytes >= scale {
            return format!("{:.1} {}", bytes as f64 / scale as f64, suffix);
        }
    }
    format!("{} B", bytes)
}
|
||||
|
||||
/// Render a transfer rate as "X.Y MiB/s"; a zero duration maps to "∞"
/// rather than dividing by zero.
fn format_throughput(bytes: usize, duration: std::time::Duration) -> String {
    match duration.as_secs_f64() {
        s if s == 0.0 => "∞".to_string(),
        s => {
            let mib = bytes as f64 / (1024.0 * 1024.0);
            format!("{:.1} MiB/s", mib / s)
        }
    }
}
|
||||
|
||||
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // POST3_ENDPOINT overrides the default local dev server address.
    let endpoint = std::env::var("POST3_ENDPOINT")
        .unwrap_or_else(|_| "http://localhost:9000".to_string());
    let client = Post3Client::new(&endpoint);

    // Sizes (in MB) come from the environment or a large default ladder.
    let sizes_mb: Vec<usize> = std::env::var("POST3_SIZES")
        .unwrap_or_else(|_| "100,1024,4096,8192,16384".to_string())
        .split(',')
        .filter_map(|s| s.trim().parse().ok())
        .collect();

    // Part size in MiB (default 64 MiB — good balance of part count vs memory)
    let part_size_mb: usize = std::env::var("POST3_PART_SIZE")
        .unwrap_or_else(|_| "64".to_string())
        .parse()
        .unwrap_or(64);
    let part_size = part_size_mb * 1024 * 1024;

    println!("=== post3 Multipart Upload Stress Test ===");
    println!("Endpoint: {}", endpoint);
    println!("Sizes: {:?} MB", sizes_mb);
    println!("Part size: {} MiB", part_size_mb);
    println!();

    client.create_bucket("mp-stress").await?;

    for size_mb in &sizes_mb {
        let size_bytes = size_mb * 1024 * 1024;
        let key = format!("mp-test-{}mb.bin", size_mb);
        // Ceiling division: how many parts cover size_bytes.
        let num_parts = (size_bytes + part_size - 1) / part_size;

        println!("--- {} ({} parts of {} each) ---",
            format_size(size_bytes),
            num_parts,
            format_size(part_size.min(size_bytes)),
        );

        // Generate data
        print!(" Generating data... ");
        let gen_start = Instant::now();
        let data = generate_data(size_bytes);
        println!("done ({:.1}s)", gen_start.elapsed().as_secs_f64());

        // Multipart upload
        print!(" Uploading (multipart)... ");
        let upload_start = Instant::now();
        match client.multipart_upload("mp-stress", &key, &data, part_size).await {
            Ok(()) => {
                let upload_dur = upload_start.elapsed();
                println!(
                    "done ({:.1}s, {})",
                    upload_dur.as_secs_f64(),
                    format_throughput(size_bytes, upload_dur)
                );
            }
            Err(e) => {
                // Larger sizes would also fail, so stop the whole run here.
                println!("FAILED: {}", e);
                println!(" Skipping remaining sizes");
                break;
            }
        }

        // Head (verify metadata)
        let head = client.head_object("mp-stress", &key).await?;
        if let Some(info) = &head {
            println!(
                " Head: size={}, etag={:?}",
                format_size(info.size as usize),
                info.etag
            );
            // Verify the compound ETag format (md5-N)
            if let Some(ref etag) = info.etag {
                let stripped = etag.trim_matches('"');
                if stripped.contains('-') {
                    // The suffix after '-' is the part count.
                    let parts_str = stripped.split('-').last().unwrap_or("?");
                    println!(" ETag format: compound ({} parts)", parts_str);
                }
            }
        }

        // Download and verify
        print!(" Downloading... ");
        let download_start = Instant::now();
        match client.get_object("mp-stress", &key).await {
            Ok(downloaded) => {
                let download_dur = download_start.elapsed();
                println!(
                    "done ({:.1}s, {})",
                    download_dur.as_secs_f64(),
                    format_throughput(size_bytes, download_dur)
                );

                print!(" Verifying... ");
                if downloaded.len() != data.len() {
                    println!(
                        "FAILED: size mismatch (expected {}, got {})",
                        data.len(),
                        downloaded.len()
                    );
                } else if downloaded.as_ref() == data.as_slice() {
                    println!("OK (byte-for-byte match)");
                } else {
                    // Find first mismatch for easier server-side debugging.
                    let pos = data
                        .iter()
                        .zip(downloaded.iter())
                        .position(|(a, b)| a != b)
                        .unwrap_or(0);
                    println!("FAILED: mismatch at byte {}", pos);
                }
            }
            Err(e) => {
                println!("FAILED: {}", e);
            }
        }

        // Cleanup
        client.delete_object("mp-stress", &key).await?;
        println!();
    }

    client.delete_bucket("mp-stress").await?;
    println!("=== Done ===");
    Ok(())
}
|
||||
408
crates/post3-sdk/src/lib.rs
Normal file
408
crates/post3-sdk/src/lib.rs
Normal file
@@ -0,0 +1,408 @@
|
||||
use aws_credential_types::Credentials;
|
||||
use aws_sdk_s3::types::{CompletedMultipartUpload, CompletedPart};
|
||||
use aws_sdk_s3::Client;
|
||||
use bytes::Bytes;
|
||||
|
||||
pub use aws_sdk_s3;
|
||||
pub use bytes;
|
||||
|
||||
/// Error type for post3-sdk operations.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    /// The named bucket does not exist.
    // NOTE(review): not constructed anywhere in this module yet — confirm a
    // caller maps server responses to this variant, or it is dead.
    #[error("bucket not found: {0}")]
    BucketNotFound(String),

    /// The object at `bucket`/`key` does not exist.
    #[error("object not found: {bucket}/{key}")]
    ObjectNotFound { bucket: String, key: String },

    /// Catch-all for any other S3-level failure, stringified from the SDK error.
    #[error("s3 error: {0}")]
    S3(String),
}

// Blanket conversion so `?` works on any `SdkError` operation output:
// every SDK failure collapses into the stringly `S3` variant.
impl<E: std::fmt::Display> From<aws_sdk_s3::error::SdkError<E>> for Error {
    fn from(err: aws_sdk_s3::error::SdkError<E>) -> Self {
        Error::S3(err.to_string())
    }
}

/// Convenience alias used by every fallible operation in this crate.
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
/// Summary of an object returned by list operations.
#[derive(Debug, Clone)]
pub struct ObjectInfo {
    /// Full object key (including any prefix, e.g. `docs/readme.md`).
    pub key: String,
    /// Object size in bytes; 0 when the server omitted content-length.
    pub size: i64,
    /// ETag as returned by the server, usually quoted (`"abc123"`).
    pub etag: Option<String>,
    /// Last-modified timestamp converted to UTC, when the server provided one.
    pub last_modified: Option<chrono::DateTime<chrono::Utc>>,
}
|
||||
|
||||
/// A client for post3 that wraps `aws-sdk-s3` with ergonomic defaults.
///
/// # Example
///
/// ```no_run
/// # async fn example() -> post3_sdk::Result<()> {
/// let client = post3_sdk::Post3Client::new("http://localhost:9000");
///
/// client.create_bucket("my-bucket").await?;
/// client.put_object("my-bucket", "hello.txt", b"hello world").await?;
///
/// let data = client.get_object("my-bucket", "hello.txt").await?;
/// assert_eq!(data.as_ref(), b"hello world");
/// # Ok(())
/// # }
/// ```
pub struct Post3Client {
    // Underlying aws-sdk-s3 client; exposed read-only via `inner()`.
    inner: Client,
}

impl Post3Client {
    /// Create a client with default configuration (dummy credentials, us-east-1, path-style).
    pub fn new(endpoint_url: impl Into<String>) -> Self {
        Self::builder().endpoint_url(endpoint_url).build()
    }

    /// Access the underlying `aws_sdk_s3::Client` for advanced operations.
    pub fn inner(&self) -> &Client {
        &self.inner
    }

    /// Start building a client with custom configuration.
    pub fn builder() -> Post3ClientBuilder {
        Post3ClientBuilder::default()
    }

    // -- Bucket operations --

    /// Create a bucket with the given name.
    ///
    /// # Errors
    /// Returns [`Error::S3`] if the server rejects the request.
    pub async fn create_bucket(&self, name: &str) -> Result<()> {
        self.inner
            .create_bucket()
            .bucket(name)
            .send()
            .await?;
        Ok(())
    }

    /// Check whether a bucket exists.
    ///
    /// Returns `Ok(false)` only for a service-level "not found"; transport or
    /// other server errors surface as [`Error::S3`].
    pub async fn head_bucket(&self, name: &str) -> Result<bool> {
        match self.inner.head_bucket().bucket(name).send().await {
            Ok(_) => Ok(true),
            Err(err) => {
                if err
                    .as_service_error()
                    .map_or(false, |e| e.is_not_found())
                {
                    Ok(false)
                } else {
                    Err(Error::S3(err.to_string()))
                }
            }
        }
    }

    /// Delete a bucket.
    ///
    /// # Errors
    /// Returns [`Error::S3`] if the server rejects the request (e.g. the
    /// bucket is not empty — confirm against server semantics).
    pub async fn delete_bucket(&self, name: &str) -> Result<()> {
        self.inner
            .delete_bucket()
            .bucket(name)
            .send()
            .await?;
        Ok(())
    }

    /// List all bucket names; buckets without a name in the response are skipped.
    pub async fn list_buckets(&self) -> Result<Vec<String>> {
        let resp = self.inner.list_buckets().send().await?;
        Ok(resp
            .buckets()
            .iter()
            .filter_map(|b| b.name().map(|s| s.to_string()))
            .collect())
    }

    // -- Object operations --

    /// Store `body` at `bucket`/`key` in a single PutObject request.
    ///
    /// The payload is copied into an owned buffer, so sizes are bounded by
    /// memory; use [`Self::multipart_upload`] for very large objects.
    pub async fn put_object(
        &self,
        bucket: &str,
        key: &str,
        body: impl AsRef<[u8]>,
    ) -> Result<()> {
        let body = Bytes::copy_from_slice(body.as_ref());
        self.inner
            .put_object()
            .bucket(bucket)
            .key(key)
            .body(body.into())
            .send()
            .await?;
        Ok(())
    }

    /// Fetch the full contents of `bucket`/`key` into memory.
    ///
    /// # Errors
    /// Returns [`Error::ObjectNotFound`] when the server reports NoSuchKey,
    /// and [`Error::S3`] for any other failure (including body read errors).
    pub async fn get_object(&self, bucket: &str, key: &str) -> Result<Bytes> {
        let resp = self
            .inner
            .get_object()
            .bucket(bucket)
            .key(key)
            .send()
            .await
            .map_err(|e| {
                // Only NoSuchKey maps to the typed not-found error.
                if e.as_service_error()
                    .map_or(false, |se| se.is_no_such_key())
                {
                    Error::ObjectNotFound {
                        bucket: bucket.to_string(),
                        key: key.to_string(),
                    }
                } else {
                    Error::S3(e.to_string())
                }
            })?;

        // Drain the streaming body into a contiguous buffer.
        let data = resp
            .body
            .collect()
            .await
            .map_err(|e| Error::S3(e.to_string()))?;
        Ok(data.into_bytes())
    }

    /// Fetch object metadata without transferring the body.
    ///
    /// Returns `Ok(None)` when the object does not exist; any other failure
    /// surfaces as [`Error::S3`].
    pub async fn head_object(
        &self,
        bucket: &str,
        key: &str,
    ) -> Result<Option<ObjectInfo>> {
        match self
            .inner
            .head_object()
            .bucket(bucket)
            .key(key)
            .send()
            .await
        {
            Ok(resp) => Ok(Some(ObjectInfo {
                key: key.to_string(),
                size: resp.content_length().unwrap_or(0),
                etag: resp.e_tag().map(|s| s.to_string()),
                last_modified: resp
                    .last_modified()
                    .and_then(|t| {
                        // SDK DateTime -> chrono; None if out of chrono's range.
                        chrono::DateTime::from_timestamp(t.secs(), t.subsec_nanos())
                    }),
            })),
            Err(err) => {
                if err
                    .as_service_error()
                    .map_or(false, |e| e.is_not_found())
                {
                    Ok(None)
                } else {
                    Err(Error::S3(err.to_string()))
                }
            }
        }
    }

    /// Delete the object at `bucket`/`key`.
    pub async fn delete_object(&self, bucket: &str, key: &str) -> Result<()> {
        self.inner
            .delete_object()
            .bucket(bucket)
            .key(key)
            .send()
            .await?;
        Ok(())
    }
|
||||
|
||||
/// Upload an object using multipart upload, splitting into parts of the given size.
|
||||
///
|
||||
/// This is useful for large files where multipart upload provides better performance
|
||||
/// through parallelism and resumability.
|
||||
pub async fn multipart_upload(
|
||||
&self,
|
||||
bucket: &str,
|
||||
key: &str,
|
||||
data: impl AsRef<[u8]>,
|
||||
part_size: usize,
|
||||
) -> Result<()> {
|
||||
let data = data.as_ref();
|
||||
|
||||
// Create multipart upload
|
||||
let create_resp = self
|
||||
.inner
|
||||
.create_multipart_upload()
|
||||
.bucket(bucket)
|
||||
.key(key)
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
let upload_id = create_resp
|
||||
.upload_id()
|
||||
.ok_or_else(|| Error::S3("missing upload_id in response".to_string()))?
|
||||
.to_string();
|
||||
|
||||
// Upload parts
|
||||
let mut completed_parts = Vec::new();
|
||||
let mut part_number = 1i32;
|
||||
|
||||
for chunk in data.chunks(part_size) {
|
||||
let body = Bytes::copy_from_slice(chunk);
|
||||
let upload_resp = self
|
||||
.inner
|
||||
.upload_part()
|
||||
.bucket(bucket)
|
||||
.key(key)
|
||||
.upload_id(&upload_id)
|
||||
.part_number(part_number)
|
||||
.body(body.into())
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
// Try to abort on failure
|
||||
Error::S3(e.to_string())
|
||||
})?;
|
||||
|
||||
let etag = upload_resp
|
||||
.e_tag()
|
||||
.ok_or_else(|| Error::S3("missing ETag in upload_part response".to_string()))?
|
||||
.to_string();
|
||||
|
||||
completed_parts.push(
|
||||
CompletedPart::builder()
|
||||
.part_number(part_number)
|
||||
.e_tag(etag)
|
||||
.build(),
|
||||
);
|
||||
|
||||
part_number += 1;
|
||||
}
|
||||
|
||||
// Complete multipart upload
|
||||
let mut builder = CompletedMultipartUpload::builder();
|
||||
for part in completed_parts {
|
||||
builder = builder.parts(part);
|
||||
}
|
||||
|
||||
self.inner
|
||||
.complete_multipart_upload()
|
||||
.bucket(bucket)
|
||||
.key(key)
|
||||
.upload_id(&upload_id)
|
||||
.multipart_upload(builder.build())
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
    /// List objects in a bucket, optionally filtered by key prefix.
    ///
    /// NOTE(review): this issues a single ListObjectsV2 request and does not
    /// follow continuation tokens — confirm truncation behavior for buckets
    /// larger than one page.
    pub async fn list_objects(
        &self,
        bucket: &str,
        prefix: Option<&str>,
    ) -> Result<Vec<ObjectInfo>> {
        let mut req = self
            .inner
            .list_objects_v2()
            .bucket(bucket);

        if let Some(p) = prefix {
            req = req.prefix(p);
        }

        let resp = req.send().await?;
        Ok(resp
            .contents()
            .iter()
            .map(|obj| ObjectInfo {
                key: obj.key().unwrap_or_default().to_string(),
                size: obj.size().unwrap_or(0),
                etag: obj.e_tag().map(|s| s.to_string()),
                last_modified: obj
                    .last_modified()
                    .and_then(|t| {
                        // SDK DateTime -> chrono; None if out of chrono's range.
                        chrono::DateTime::from_timestamp(t.secs(), t.subsec_nanos())
                    }),
            })
            .collect())
    }
}
|
||||
|
||||
/// Builder for `Post3Client` with custom configuration.
pub struct Post3ClientBuilder {
    // Endpoint is the only field without a usable default; `build()` simply
    // skips endpoint configuration when it is `None`.
    endpoint_url: Option<String>,
    access_key: String,
    secret_key: String,
    region: String,
}

impl Default for Post3ClientBuilder {
    // Defaults match what `Post3Client::new` relies on: placeholder "test"
    // credentials and the conventional us-east-1 region.
    fn default() -> Self {
        Self {
            endpoint_url: None,
            access_key: "test".to_string(),
            secret_key: "test".to_string(),
            region: "us-east-1".to_string(),
        }
    }
}
|
||||
|
||||
impl Post3ClientBuilder {
|
||||
pub fn endpoint_url(mut self, url: impl Into<String>) -> Self {
|
||||
self.endpoint_url = Some(url.into());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn credentials(mut self, access_key: impl Into<String>, secret_key: impl Into<String>) -> Self {
|
||||
self.access_key = access_key.into();
|
||||
self.secret_key = secret_key.into();
|
||||
self
|
||||
}
|
||||
|
||||
pub fn region(mut self, region: impl Into<String>) -> Self {
|
||||
self.region = region.into();
|
||||
self
|
||||
}
|
||||
|
||||
pub fn build(self) -> Post3Client {
|
||||
let creds = Credentials::new(
|
||||
&self.access_key,
|
||||
&self.secret_key,
|
||||
None,
|
||||
None,
|
||||
"post3-sdk",
|
||||
);
|
||||
|
||||
let mut config = aws_sdk_s3::Config::builder()
|
||||
.behavior_version_latest()
|
||||
.region(aws_types::region::Region::new(self.region))
|
||||
.credentials_provider(creds)
|
||||
.force_path_style(true);
|
||||
|
||||
if let Some(url) = self.endpoint_url {
|
||||
config = config.endpoint_url(url);
|
||||
}
|
||||
|
||||
Post3Client {
|
||||
inner: Client::from_conf(config.build()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Construction-only smoke tests: no server is contacted and nothing is
    // sent; these just prove the builder plumbing compiles and runs.

    #[test]
    fn test_construct_client() {
        let client = Post3Client::new("http://localhost:9000");
        // Verify we can access the inner client
        let _inner = client.inner();
    }

    #[test]
    fn test_builder_custom_creds() {
        let client = Post3Client::builder()
            .endpoint_url("http://localhost:9000")
            .credentials("my-access-key", "my-secret-key")
            .region("eu-west-1")
            .build();
        let _inner = client.inner();
    }
}
|
||||
Reference in New Issue
Block a user