feat: add post3 S3 proxy for PostgreSQL

Signed-off-by: kjuulh <contact@kjuulh.io>
This commit is contained in:
2026-02-27 11:37:48 +01:00
commit 21bac4a33f
67 changed files with 14403 additions and 0 deletions

View File

@@ -0,0 +1,106 @@
use std::net::SocketAddr;
use aws_credential_types::Credentials;
use aws_sdk_s3::Client;
use post3::PostgresBackend;
use sqlx::PgPool;
use tokio::net::TcpListener;
use tokio_util::sync::CancellationToken;
/// Process-wide guard so the tracing subscriber is installed at most once,
/// no matter how many test servers start in the same test binary.
static TRACING: std::sync::Once = std::sync::Once::new();

/// Install the test tracing subscriber (idempotent across tests).
fn init_tracing() {
    TRACING.call_once(|| {
        // Base filter comes from RUST_LOG; debug directives for the crates
        // under test are always added on top.
        let filter = tracing_subscriber::EnvFilter::from_default_env()
            .add_directive("post3_server=debug".parse().unwrap())
            .add_directive("tower_http=debug".parse().unwrap());
        tracing_subscriber::fmt()
            .with_env_filter(filter)
            .with_test_writer()
            .init();
    });
}
/// Handle to a running post3 server backed by PostgreSQL, together with an
/// S3 client pre-configured to talk to it. Obtain via [`TestServer::start`];
/// call [`TestServer::shutdown`] when done.
pub struct TestServer {
    /// Local socket address the server is listening on.
    pub addr: SocketAddr,
    /// S3 client pointed at `addr` (path-style addressing, dummy credentials).
    pub client: Client,
    // Cancelling this token triggers the server task's graceful shutdown.
    cancel: CancellationToken,
    // Connection pool shared with the backend; closed during shutdown.
    pool: PgPool,
}
impl TestServer {
    /// Boot a post3 server backed by PostgreSQL on an ephemeral port.
    ///
    /// Connects to `DATABASE_URL` (falling back to the local dev database),
    /// runs migrations, wipes all tables for a clean slate, and spawns the
    /// axum server in a background task.
    pub async fn start() -> Self {
        init_tracing();

        let db_url = std::env::var("DATABASE_URL").unwrap_or_else(|_| {
            "postgresql://devuser:devpassword@localhost:5435/post3_dev".into()
        });
        let pool = sqlx::pool::PoolOptions::new()
            .max_connections(5)
            .connect(&db_url)
            .await
            .unwrap();

        // Run migrations (locking disabled so concurrent test binaries
        // don't contend on the migration lock).
        sqlx::migrate!("../post3/migrations/")
            .set_locking(false)
            .run(&pool)
            .await
            .unwrap();

        // Clean slate: wipe every table. Order is preserved from the
        // original (children before parents — presumably required by
        // foreign-key constraints; confirm against the migrations).
        for table in [
            "upload_parts",
            "multipart_upload_metadata",
            "multipart_uploads",
            "blocks",
            "object_metadata",
            "objects",
            "buckets",
        ] {
            sqlx::query(&format!("DELETE FROM {table}"))
                .execute(&pool)
                .await
                .unwrap();
        }

        let state = post3_server::state::State {
            store: PostgresBackend::new(pool.clone()),
        };

        // Port 0 lets the OS hand us a free ephemeral port.
        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = listener.local_addr().unwrap();

        let cancel = CancellationToken::new();
        let shutdown_signal = cancel.clone();
        let router = post3_server::s3::router::build_router(state);
        tokio::spawn(async move {
            axum::serve(listener, router.into_make_service())
                .with_graceful_shutdown(async move { shutdown_signal.cancelled().await })
                .await
                .unwrap();
        });

        // Dummy credentials; path-style addressing so bucket names stay in
        // the URL path instead of the host.
        let credentials = Credentials::new("test", "test", None, None, "test");
        let config = aws_sdk_s3::Config::builder()
            .behavior_version_latest()
            .region(aws_types::region::Region::new("us-east-1"))
            .endpoint_url(format!("http://{addr}"))
            .credentials_provider(credentials)
            .force_path_style(true)
            .build();

        Self {
            addr,
            client: Client::from_conf(config),
            cancel,
            pool,
        }
    }

    /// Signal graceful shutdown and close the connection pool.
    pub async fn shutdown(self) {
        self.cancel.cancel();
        // Give the server task a moment to wind down
        tokio::time::sleep(std::time::Duration::from_millis(50)).await;
        self.pool.close().await;
    }
}

View File

@@ -0,0 +1,390 @@
//! Integration tests using FilesystemBackend (no PostgreSQL required).
use std::net::SocketAddr;
use aws_credential_types::Credentials;
use aws_sdk_s3::types::{CompletedMultipartUpload, CompletedPart};
use aws_sdk_s3::Client;
use post3::FilesystemBackend;
use tokio::net::TcpListener;
use tokio_util::sync::CancellationToken;
/// Handle to a running post3 server backed by the filesystem, together with
/// an S3 client pointed at it.
struct FsTestServer {
    /// S3 client configured for path-style access against the server.
    client: Client,
    // Cancelling this token triggers the server task's graceful shutdown.
    cancel: CancellationToken,
    // Held only to keep the temp directory (the backend's storage root)
    // alive for the server's lifetime; deleted on drop.
    _tmpdir: tempfile::TempDir,
}
impl FsTestServer {
    /// Boot a post3 server backed by a fresh temporary directory and
    /// return a handle with a pre-configured S3 client.
    async fn start() -> Self {
        let tmpdir = tempfile::tempdir().unwrap();
        let state = post3_server::state::State {
            store: FilesystemBackend::new(tmpdir.path()),
        };

        // Port 0 lets the OS pick a free ephemeral port.
        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr: SocketAddr = listener.local_addr().unwrap();

        let cancel = CancellationToken::new();
        let shutdown_signal = cancel.clone();
        let router = post3_server::s3::router::build_router(state);
        tokio::spawn(async move {
            axum::serve(listener, router.into_make_service())
                .with_graceful_shutdown(async move { shutdown_signal.cancelled().await })
                .await
                .unwrap();
        });

        // Dummy credentials; path-style addressing keeps bucket names in
        // the URL path rather than the host.
        let credentials = Credentials::new("test", "test", None, None, "test");
        let config = aws_sdk_s3::Config::builder()
            .behavior_version_latest()
            .region(aws_types::region::Region::new("us-east-1"))
            .endpoint_url(format!("http://{addr}"))
            .credentials_provider(credentials)
            .force_path_style(true)
            .build();

        Self {
            client: Client::from_conf(config),
            cancel,
            _tmpdir: tmpdir,
        }
    }

    /// Signal graceful shutdown and give the server task a moment to exit.
    async fn shutdown(self) {
        self.cancel.cancel();
        tokio::time::sleep(std::time::Duration::from_millis(50)).await;
    }
}
// --- Tests ---
#[tokio::test]
async fn test_fs_bucket_crud() {
    let srv = FsTestServer::start().await;
    let client = &srv.client;

    // Create, then confirm existence via HEAD.
    client.create_bucket().bucket("my-bucket").send().await.unwrap();
    client.head_bucket().bucket("my-bucket").send().await.unwrap();

    // The bucket must appear in the listing.
    let listing = client.list_buckets().send().await.unwrap();
    assert!(listing
        .buckets()
        .iter()
        .filter_map(|b| b.name())
        .any(|n| n == "my-bucket"));

    // Delete, then verify HEAD now fails.
    client.delete_bucket().bucket("my-bucket").send().await.unwrap();
    assert!(client.head_bucket().bucket("my-bucket").send().await.is_err());

    srv.shutdown().await;
}
#[tokio::test]
async fn test_fs_put_get_delete() {
    let srv = FsTestServer::start().await;
    let client = &srv.client;
    client.create_bucket().bucket("test").send().await.unwrap();

    // Put a small text object with an explicit content type.
    client
        .put_object()
        .bucket("test")
        .key("hello.txt")
        .content_type("text/plain")
        .body(aws_sdk_s3::primitives::ByteStream::from_static(b"hello world"))
        .send()
        .await
        .unwrap();

    // Get it back and compare bytes.
    let got = client.get_object().bucket("test").key("hello.txt").send().await.unwrap();
    let bytes = got.body.collect().await.unwrap().into_bytes();
    assert_eq!(bytes.as_ref(), b"hello world");

    // HEAD must report the stored length and content type.
    let head = client.head_object().bucket("test").key("hello.txt").send().await.unwrap();
    assert_eq!(head.content_length(), Some(11));
    assert_eq!(head.content_type(), Some("text/plain"));

    // Delete, then GET should fail.
    client.delete_object().bucket("test").key("hello.txt").send().await.unwrap();
    assert!(client.get_object().bucket("test").key("hello.txt").send().await.is_err());

    // Cleanup
    client.delete_bucket().bucket("test").send().await.unwrap();
    srv.shutdown().await;
}
#[tokio::test]
async fn test_fs_list_objects() {
    let srv = FsTestServer::start().await;
    let client = &srv.client;
    client.create_bucket().bucket("test").send().await.unwrap();

    // Seed five objects with zero-padded keys.
    for i in 0..5 {
        client
            .put_object()
            .bucket("test")
            .key(format!("item-{i:02}"))
            .body(aws_sdk_s3::primitives::ByteStream::from_static(b"data"))
            .send()
            .await
            .unwrap();
    }

    // Unfiltered listing sees all five.
    let all = client.list_objects_v2().bucket("test").send().await.unwrap();
    assert_eq!(all.key_count(), Some(5));

    // A prefix matching exactly one key narrows the listing to it.
    let one = client
        .list_objects_v2()
        .bucket("test")
        .prefix("item-03")
        .send()
        .await
        .unwrap();
    assert_eq!(one.key_count(), Some(1));

    // Cleanup: remove the objects, then the bucket.
    for i in 0..5 {
        client
            .delete_object()
            .bucket("test")
            .key(format!("item-{i:02}"))
            .send()
            .await
            .unwrap();
    }
    client.delete_bucket().bucket("test").send().await.unwrap();
    srv.shutdown().await;
}
#[tokio::test]
async fn test_fs_user_metadata() {
    let srv = FsTestServer::start().await;
    let client = &srv.client;
    client.create_bucket().bucket("test").send().await.unwrap();

    // Attach two user-metadata entries at PUT time.
    client
        .put_object()
        .bucket("test")
        .key("meta.txt")
        .metadata("author", "test-user")
        .metadata("version", "1")
        .body(aws_sdk_s3::primitives::ByteStream::from_static(b"data"))
        .send()
        .await
        .unwrap();

    // HEAD must round-trip both entries unchanged.
    let head = client.head_object().bucket("test").key("meta.txt").send().await.unwrap();
    let meta = head.metadata().unwrap();
    assert_eq!(meta.get("author").unwrap(), "test-user");
    assert_eq!(meta.get("version").unwrap(), "1");

    // Cleanup
    client.delete_object().bucket("test").key("meta.txt").send().await.unwrap();
    client.delete_bucket().bucket("test").send().await.unwrap();
    srv.shutdown().await;
}
#[tokio::test]
async fn test_fs_multipart_upload() {
    let srv = FsTestServer::start().await;
    let client = &srv.client;
    client.create_bucket().bucket("test").send().await.unwrap();

    // Start a multipart upload.
    let created = client
        .create_multipart_upload()
        .bucket("test")
        .key("big.bin")
        .send()
        .await
        .unwrap();
    let upload_id = created.upload_id().unwrap();

    // Non-final parts must be at least 5 MB per the S3 spec; the final
    // part may be smaller.
    let min_part = 5 * 1024 * 1024;
    let first = client
        .upload_part()
        .bucket("test")
        .key("big.bin")
        .upload_id(upload_id)
        .part_number(1)
        .body(aws_sdk_s3::primitives::ByteStream::from(vec![0xAAu8; min_part]))
        .send()
        .await
        .unwrap();
    let second = client
        .upload_part()
        .bucket("test")
        .key("big.bin")
        .upload_id(upload_id)
        .part_number(2)
        .body(aws_sdk_s3::primitives::ByteStream::from(vec![0xBBu8; 1024]))
        .send()
        .await
        .unwrap();

    // Complete the upload, supplying each part's ETag.
    let manifest = CompletedMultipartUpload::builder()
        .parts(
            CompletedPart::builder()
                .part_number(1)
                .e_tag(first.e_tag().unwrap())
                .build(),
        )
        .parts(
            CompletedPart::builder()
                .part_number(2)
                .e_tag(second.e_tag().unwrap())
                .build(),
        )
        .build();
    let done = client
        .complete_multipart_upload()
        .bucket("test")
        .key("big.bin")
        .upload_id(upload_id)
        .multipart_upload(manifest)
        .send()
        .await
        .unwrap();

    // A two-part upload must yield a compound "<hex>-2" ETag.
    let etag = done.e_tag().unwrap();
    assert!(etag.contains("-2"), "Expected compound ETag, got: {etag}");

    // The assembled object must be both parts concatenated in order.
    let got = client.get_object().bucket("test").key("big.bin").send().await.unwrap();
    let body = got.body.collect().await.unwrap().into_bytes();
    assert_eq!(body.len(), min_part + 1024);
    assert!(body[..min_part].iter().all(|b| *b == 0xAA));
    assert!(body[min_part..].iter().all(|b| *b == 0xBB));

    // Cleanup
    client.delete_object().bucket("test").key("big.bin").send().await.unwrap();
    client.delete_bucket().bucket("test").send().await.unwrap();
    srv.shutdown().await;
}
#[tokio::test]
async fn test_fs_abort_multipart() {
    let srv = FsTestServer::start().await;
    let client = &srv.client;
    client.create_bucket().bucket("test").send().await.unwrap();

    let created = client
        .create_multipart_upload()
        .bucket("test")
        .key("aborted.bin")
        .send()
        .await
        .unwrap();
    let upload_id = created.upload_id().unwrap();

    // Upload a single part, then abort the whole upload.
    client
        .upload_part()
        .bucket("test")
        .key("aborted.bin")
        .upload_id(upload_id)
        .part_number(1)
        .body(aws_sdk_s3::primitives::ByteStream::from(vec![0u8; 100]))
        .send()
        .await
        .unwrap();
    client
        .abort_multipart_upload()
        .bucket("test")
        .key("aborted.bin")
        .upload_id(upload_id)
        .send()
        .await
        .unwrap();

    // Aborting must leave no object behind.
    assert!(client
        .get_object()
        .bucket("test")
        .key("aborted.bin")
        .send()
        .await
        .is_err());

    client.delete_bucket().bucket("test").send().await.unwrap();
    srv.shutdown().await;
}

View File

@@ -0,0 +1,871 @@
mod common;
use aws_sdk_s3::primitives::ByteStream;
use aws_sdk_s3::types::{CompletedMultipartUpload, CompletedPart};
use common::TestServer;
#[tokio::test]
async fn test_create_and_list_buckets() {
    let server = TestServer::start().await;
    let c = &server.client;

    c.create_bucket().bucket("test-bucket").send().await.unwrap();

    // The new bucket must show up in the bucket listing.
    let listing = c.list_buckets().send().await.unwrap();
    assert!(listing
        .buckets()
        .iter()
        .filter_map(|b| b.name())
        .any(|n| n == "test-bucket"));

    server.shutdown().await;
}
#[tokio::test]
async fn test_head_bucket() {
    let server = TestServer::start().await;
    let c = &server.client;

    c.create_bucket().bucket("hb-test").send().await.unwrap();

    // HEAD succeeds for an existing bucket…
    c.head_bucket().bucket("hb-test").send().await.unwrap();
    // …and fails for one that was never created.
    assert!(c.head_bucket().bucket("no-such-bucket").send().await.is_err());

    server.shutdown().await;
}
#[tokio::test]
async fn test_delete_bucket() {
    let server = TestServer::start().await;
    let c = &server.client;

    c.create_bucket().bucket("to-delete").send().await.unwrap();
    c.delete_bucket().bucket("to-delete").send().await.unwrap();

    // After deletion, HEAD must fail.
    assert!(c.head_bucket().bucket("to-delete").send().await.is_err());

    server.shutdown().await;
}
#[tokio::test]
async fn test_put_and_get_object() {
    let server = TestServer::start().await;
    let c = &server.client;

    c.create_bucket().bucket("data").send().await.unwrap();

    // Store a small text object with an explicit content type.
    c.put_object()
        .bucket("data")
        .key("greeting.txt")
        .content_type("text/plain")
        .body(ByteStream::from_static(b"hello world"))
        .send()
        .await
        .unwrap();

    // Read it back; body bytes and content type must round-trip.
    let resp = c.get_object().bucket("data").key("greeting.txt").send().await.unwrap();
    let content_type = resp.content_type().map(|s| s.to_string());
    let bytes = resp.body.collect().await.unwrap().into_bytes();
    assert_eq!(bytes.as_ref(), b"hello world");
    assert_eq!(content_type.as_deref(), Some("text/plain"));

    server.shutdown().await;
}
#[tokio::test]
async fn test_put_large_object_chunked() {
    let server = TestServer::start().await;
    let c = &server.client;

    c.create_bucket().bucket("large").send().await.unwrap();

    // 3 MiB object => should be split into 3 blocks at 1 MiB each
    let data = vec![0x42u8; 3 * 1024 * 1024];
    c.put_object()
        .bucket("large")
        .key("big-file.bin")
        .body(ByteStream::from(data.clone()))
        .send()
        .await
        .unwrap();

    // The reassembled object must match byte-for-byte.
    let resp = c.get_object().bucket("large").key("big-file.bin").send().await.unwrap();
    let bytes = resp.body.collect().await.unwrap().into_bytes();
    assert_eq!(bytes.len(), 3 * 1024 * 1024);
    assert_eq!(bytes.as_ref(), data.as_slice());

    server.shutdown().await;
}
#[tokio::test]
async fn test_head_object() {
    let server = TestServer::start().await;
    let c = &server.client;

    c.create_bucket().bucket("meta").send().await.unwrap();
    c.put_object()
        .bucket("meta")
        .key("file.txt")
        .body(ByteStream::from_static(b"test"))
        .send()
        .await
        .unwrap();

    // HEAD must report the stored object's byte length.
    let resp = c.head_object().bucket("meta").key("file.txt").send().await.unwrap();
    assert_eq!(resp.content_length(), Some(4));

    server.shutdown().await;
}
#[tokio::test]
async fn test_delete_object() {
    let server = TestServer::start().await;
    let c = &server.client;

    c.create_bucket().bucket("del").send().await.unwrap();
    c.put_object()
        .bucket("del")
        .key("gone.txt")
        .body(ByteStream::from_static(b"bye"))
        .send()
        .await
        .unwrap();

    c.delete_object().bucket("del").key("gone.txt").send().await.unwrap();

    // After deletion, GET must fail.
    assert!(c.get_object().bucket("del").key("gone.txt").send().await.is_err());

    server.shutdown().await;
}
#[tokio::test]
async fn test_list_objects_v2() {
    let server = TestServer::start().await;
    let c = &server.client;

    c.create_bucket().bucket("list-test").send().await.unwrap();

    // Seed five keys under a shared prefix.
    for i in 0..5 {
        c.put_object()
            .bucket("list-test")
            .key(format!("prefix/file-{i}.txt"))
            .body(ByteStream::from_static(b"x"))
            .send()
            .await
            .unwrap();
    }

    // Listing by that prefix must report all five keys.
    let resp = c
        .list_objects_v2()
        .bucket("list-test")
        .prefix("prefix/")
        .send()
        .await
        .unwrap();
    assert_eq!(resp.key_count(), Some(5));

    server.shutdown().await;
}
#[tokio::test]
async fn test_overwrite_object() {
    let server = TestServer::start().await;
    let c = &server.client;

    c.create_bucket().bucket("ow").send().await.unwrap();

    // Write an initial version, then overwrite with longer content.
    c.put_object()
        .bucket("ow")
        .key("file.txt")
        .body(ByteStream::from_static(b"version1"))
        .send()
        .await
        .unwrap();
    c.put_object()
        .bucket("ow")
        .key("file.txt")
        .body(ByteStream::from_static(b"version2-longer"))
        .send()
        .await
        .unwrap();

    // A subsequent read must observe only the second write.
    let resp = c.get_object().bucket("ow").key("file.txt").send().await.unwrap();
    let bytes = resp.body.collect().await.unwrap().into_bytes();
    assert_eq!(bytes.as_ref(), b"version2-longer");

    server.shutdown().await;
}
#[tokio::test]
async fn test_user_metadata_roundtrip() {
    let server = TestServer::start().await;
    let c = &server.client;

    c.create_bucket().bucket("meta-test").send().await.unwrap();

    // Attach two user-metadata entries at PUT time.
    c.put_object()
        .bucket("meta-test")
        .key("doc.txt")
        .body(ByteStream::from_static(b"with metadata"))
        .metadata("author", "test-user")
        .metadata("version", "42")
        .send()
        .await
        .unwrap();

    // HEAD must round-trip both entries unchanged.
    let resp = c.head_object().bucket("meta-test").key("doc.txt").send().await.unwrap();
    let meta = resp.metadata().unwrap();
    assert_eq!(meta.get("author").map(|s| s.as_str()), Some("test-user"));
    assert_eq!(meta.get("version").map(|s| s.as_str()), Some("42"));

    server.shutdown().await;
}
// --- Multipart upload tests ---
#[tokio::test]
async fn test_multipart_upload_basic() {
    let server = TestServer::start().await;
    let c = &server.client;

    c.create_bucket().bucket("mp-basic").send().await.unwrap();

    // Start the multipart upload.
    let create_resp = c
        .create_multipart_upload()
        .bucket("mp-basic")
        .key("large-file.bin")
        .send()
        .await
        .unwrap();
    let upload_id = create_resp.upload_id().unwrap().to_string();

    // Upload 3 parts; non-last parts must be >= 5 MB per the S3 spec.
    // Each spec is (part_number, fill_byte, size).
    let min_part = 5 * 1024 * 1024;
    let part_specs: [(i32, u8, usize); 3] =
        [(1, 0x11, min_part), (2, 0x22, min_part), (3, 0x33, 1024 * 1024)];
    let mut completed_parts = Vec::new();
    for (number, fill, size) in part_specs {
        let uploaded = c
            .upload_part()
            .bucket("mp-basic")
            .key("large-file.bin")
            .upload_id(&upload_id)
            .part_number(number)
            .body(ByteStream::from(vec![fill; size]))
            .send()
            .await
            .unwrap();
        completed_parts.push(
            CompletedPart::builder()
                .part_number(number)
                .e_tag(uploaded.e_tag().unwrap())
                .build(),
        );
    }

    // Complete the upload with the collected part manifest.
    let manifest = CompletedMultipartUpload::builder()
        .set_parts(Some(completed_parts))
        .build();
    let complete_resp = c
        .complete_multipart_upload()
        .bucket("mp-basic")
        .key("large-file.bin")
        .upload_id(&upload_id)
        .multipart_upload(manifest)
        .send()
        .await
        .unwrap();

    // A three-part upload must yield a compound "<hex>-3" ETag.
    let etag = complete_resp.e_tag().unwrap();
    assert!(etag.contains("-3"), "Expected compound ETag, got: {etag}");

    // The assembled object must be the three parts concatenated in order.
    let get_resp = c
        .get_object()
        .bucket("mp-basic")
        .key("large-file.bin")
        .send()
        .await
        .unwrap();
    let body = get_resp.body.collect().await.unwrap().into_bytes();
    assert_eq!(body.len(), min_part * 2 + 1024 * 1024);
    let mut expected = Vec::with_capacity(body.len());
    for (_, fill, size) in part_specs {
        expected.extend(std::iter::repeat(fill).take(size));
    }
    assert_eq!(body.as_ref(), expected.as_slice());

    server.shutdown().await;
}
#[tokio::test]
async fn test_abort_multipart_upload() {
    let server = TestServer::start().await;
    let c = &server.client;

    c.create_bucket().bucket("mp-abort").send().await.unwrap();

    let create_resp = c
        .create_multipart_upload()
        .bucket("mp-abort")
        .key("aborted.bin")
        .send()
        .await
        .unwrap();
    let upload_id = create_resp.upload_id().unwrap().to_string();

    // Upload one part, then abort the whole upload.
    c.upload_part()
        .bucket("mp-abort")
        .key("aborted.bin")
        .upload_id(&upload_id)
        .part_number(1)
        .body(ByteStream::from(vec![0xAAu8; 1024]))
        .send()
        .await
        .unwrap();
    c.abort_multipart_upload()
        .bucket("mp-abort")
        .key("aborted.bin")
        .upload_id(&upload_id)
        .send()
        .await
        .unwrap();

    // An aborted upload must not materialize an object.
    assert!(c
        .get_object()
        .bucket("mp-abort")
        .key("aborted.bin")
        .send()
        .await
        .is_err());

    server.shutdown().await;
}
#[tokio::test]
async fn test_list_parts() {
    let server = TestServer::start().await;
    let c = &server.client;

    c.create_bucket().bucket("mp-list-parts").send().await.unwrap();

    let create_resp = c
        .create_multipart_upload()
        .bucket("mp-list-parts")
        .key("parts.bin")
        .send()
        .await
        .unwrap();
    let upload_id = create_resp.upload_id().unwrap().to_string();

    // Upload three 100 KiB parts.
    for i in 1..=3 {
        c.upload_part()
            .bucket("mp-list-parts")
            .key("parts.bin")
            .upload_id(&upload_id)
            .part_number(i)
            .body(ByteStream::from(vec![i as u8; 1024 * 100]))
            .send()
            .await
            .unwrap();
    }

    // ListParts must return all three in part-number order with their sizes.
    let list_resp = c
        .list_parts()
        .bucket("mp-list-parts")
        .key("parts.bin")
        .upload_id(&upload_id)
        .send()
        .await
        .unwrap();
    let parts = list_resp.parts();
    assert_eq!(parts.len(), 3);
    for (idx, part) in parts.iter().enumerate() {
        assert_eq!(part.part_number(), Some(idx as i32 + 1));
        assert_eq!(part.size(), Some(1024 * 100));
    }

    // Cleanup: abort the still-in-flight upload.
    c.abort_multipart_upload()
        .bucket("mp-list-parts")
        .key("parts.bin")
        .upload_id(&upload_id)
        .send()
        .await
        .unwrap();

    server.shutdown().await;
}
#[tokio::test]
async fn test_list_multipart_uploads() {
    let server = TestServer::start().await;
    let c = &server.client;

    c.create_bucket().bucket("mp-list-uploads").send().await.unwrap();

    // Start two independent uploads in the same bucket.
    let u1 = c
        .create_multipart_upload()
        .bucket("mp-list-uploads")
        .key("file-a.bin")
        .send()
        .await
        .unwrap();
    let u1_id = u1.upload_id().unwrap().to_string();
    let u2 = c
        .create_multipart_upload()
        .bucket("mp-list-uploads")
        .key("file-b.bin")
        .send()
        .await
        .unwrap();
    let u2_id = u2.upload_id().unwrap().to_string();

    // Both in-flight uploads must be listed with their keys.
    let list_resp = c
        .list_multipart_uploads()
        .bucket("mp-list-uploads")
        .send()
        .await
        .unwrap();
    let uploads = list_resp.uploads();
    assert_eq!(uploads.len(), 2);
    let keys: Vec<&str> = uploads.iter().filter_map(|u| u.key()).collect();
    assert!(keys.contains(&"file-a.bin"));
    assert!(keys.contains(&"file-b.bin"));

    // Cleanup: abort both uploads.
    for (key, id) in [("file-a.bin", &u1_id), ("file-b.bin", &u2_id)] {
        c.abort_multipart_upload()
            .bucket("mp-list-uploads")
            .key(key)
            .upload_id(id)
            .send()
            .await
            .unwrap();
    }

    server.shutdown().await;
}
#[tokio::test]
async fn test_overwrite_part() {
    let server = TestServer::start().await;
    let c = &server.client;

    c.create_bucket().bucket("mp-overwrite").send().await.unwrap();

    let create_resp = c
        .create_multipart_upload()
        .bucket("mp-overwrite")
        .key("ow.bin")
        .send()
        .await
        .unwrap();
    let upload_id = create_resp.upload_id().unwrap().to_string();

    // Upload part 1 with data A…
    c.upload_part()
        .bucket("mp-overwrite")
        .key("ow.bin")
        .upload_id(&upload_id)
        .part_number(1)
        .body(ByteStream::from(vec![0xAAu8; 1024]))
        .send()
        .await
        .unwrap();
    // …then re-upload the same part number with data B.
    let p1 = c
        .upload_part()
        .bucket("mp-overwrite")
        .key("ow.bin")
        .upload_id(&upload_id)
        .part_number(1)
        .body(ByteStream::from(vec![0xBBu8; 1024]))
        .send()
        .await
        .unwrap();

    // Complete using the ETag of the second (latest) upload.
    let manifest = CompletedMultipartUpload::builder()
        .parts(
            CompletedPart::builder()
                .part_number(1)
                .e_tag(p1.e_tag().unwrap())
                .build(),
        )
        .build();
    c.complete_multipart_upload()
        .bucket("mp-overwrite")
        .key("ow.bin")
        .upload_id(&upload_id)
        .multipart_upload(manifest)
        .send()
        .await
        .unwrap();

    // The final object must contain data B, not data A.
    let get_resp = c.get_object().bucket("mp-overwrite").key("ow.bin").send().await.unwrap();
    let body = get_resp.body.collect().await.unwrap().into_bytes();
    assert_eq!(body.as_ref(), vec![0xBBu8; 1024].as_slice());

    server.shutdown().await;
}
#[tokio::test]
async fn test_multipart_with_metadata() {
    let server = TestServer::start().await;
    let c = &server.client;

    c.create_bucket().bucket("mp-meta").send().await.unwrap();

    // Metadata is supplied when the multipart upload is created, not per part.
    let create_resp = c
        .create_multipart_upload()
        .bucket("mp-meta")
        .key("meta-file.bin")
        .metadata("author", "test-user")
        .metadata("version", "7")
        .send()
        .await
        .unwrap();
    let upload_id = create_resp.upload_id().unwrap().to_string();

    // Upload a single small part and complete the upload.
    let p1 = c
        .upload_part()
        .bucket("mp-meta")
        .key("meta-file.bin")
        .upload_id(&upload_id)
        .part_number(1)
        .body(ByteStream::from(vec![0xFFu8; 512]))
        .send()
        .await
        .unwrap();
    let manifest = CompletedMultipartUpload::builder()
        .parts(
            CompletedPart::builder()
                .part_number(1)
                .e_tag(p1.e_tag().unwrap())
                .build(),
        )
        .build();
    c.complete_multipart_upload()
        .bucket("mp-meta")
        .key("meta-file.bin")
        .upload_id(&upload_id)
        .multipart_upload(manifest)
        .send()
        .await
        .unwrap();

    // The completed object must carry the metadata from creation time.
    let head = c
        .head_object()
        .bucket("mp-meta")
        .key("meta-file.bin")
        .send()
        .await
        .unwrap();
    let meta = head.metadata().unwrap();
    assert_eq!(meta.get("author").map(|s| s.as_str()), Some("test-user"));
    assert_eq!(meta.get("version").map(|s| s.as_str()), Some("7"));

    server.shutdown().await;
}