feat: add post3 s3 proxy for postgresql

Signed-off-by: kjuulh <contact@kjuulh.io>
This commit is contained in:
2026-02-27 11:37:48 +01:00
commit 21bac4a33f
67 changed files with 14403 additions and 0 deletions

View File

@@ -0,0 +1,390 @@
//! Integration tests using FilesystemBackend (no PostgreSQL required).
use std::net::SocketAddr;
use aws_credential_types::Credentials;
use aws_sdk_s3::types::{CompletedMultipartUpload, CompletedPart};
use aws_sdk_s3::Client;
use post3::FilesystemBackend;
use tokio::net::TcpListener;
use tokio_util::sync::CancellationToken;
/// Test harness: serves an S3-compatible API from a temporary directory on an
/// ephemeral port and hands out an AWS SDK client configured to talk to it.
struct FsTestServer {
    // S3 client pointed at the in-process server.
    client: Client,
    // Cancelling this token triggers graceful shutdown of the server task.
    cancel: CancellationToken,
    // Handle to the spawned server task; awaited on shutdown so teardown is
    // deterministic and server-task panics are surfaced to the test.
    server: tokio::task::JoinHandle<()>,
    // Held only to keep the backing temp directory alive for the server's lifetime.
    _tmpdir: tempfile::TempDir,
}
impl FsTestServer {
    /// Binds `127.0.0.1:0`, serves the filesystem-backed S3 router on a
    /// background task, and returns a harness with a ready-to-use client.
    async fn start() -> Self {
        let tmpdir = tempfile::tempdir().unwrap();
        let backend = FilesystemBackend::new(tmpdir.path());
        let state = post3_server::state::State { store: backend };
        // Port 0 lets the OS pick a free port so concurrent tests don't collide.
        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr: SocketAddr = listener.local_addr().unwrap();
        let cancel = CancellationToken::new();
        let cancel_clone = cancel.clone();
        let router = post3_server::s3::router::build_router(state);
        let server = tokio::spawn(async move {
            axum::serve(listener, router.into_make_service())
                .with_graceful_shutdown(async move {
                    cancel_clone.cancelled().await;
                })
                .await
                .unwrap();
        });
        // Dummy credentials: required by the SDK builder, not validated here.
        let creds = Credentials::new("test", "test", None, None, "test");
        let config = aws_sdk_s3::Config::builder()
            .behavior_version_latest()
            .region(aws_types::region::Region::new("us-east-1"))
            .endpoint_url(format!("http://{}", addr))
            .credentials_provider(creds)
            // Path-style addressing: bucket in the URL path, not a DNS subdomain.
            .force_path_style(true)
            .build();
        let client = Client::from_conf(config);
        Self {
            client,
            cancel,
            server,
            _tmpdir: tmpdir,
        }
    }
    /// Signals shutdown and waits for the server task to actually finish.
    /// Awaiting the join handle replaces the previous fixed 50 ms sleep:
    /// it is deterministic (no race with slow teardown, no wasted wait)
    /// and propagates any panic from the server task into the test.
    async fn shutdown(self) {
        self.cancel.cancel();
        self.server.await.expect("server task panicked");
    }
}
// --- Tests ---
#[tokio::test]
async fn test_fs_bucket_crud() {
    // Full bucket lifecycle: create, head, list, delete, verify deleted.
    let server = FsTestServer::start().await;
    let s3 = &server.client;

    s3.create_bucket().bucket("my-bucket").send().await.unwrap();
    s3.head_bucket().bucket("my-bucket").send().await.unwrap();

    // The freshly created bucket must appear in the listing.
    let listing = s3.list_buckets().send().await.unwrap();
    let found = listing
        .buckets()
        .iter()
        .filter_map(|bucket| bucket.name())
        .any(|name| name == "my-bucket");
    assert!(found);

    // After deletion, HEAD on the bucket must fail.
    s3.delete_bucket().bucket("my-bucket").send().await.unwrap();
    assert!(s3.head_bucket().bucket("my-bucket").send().await.is_err());

    server.shutdown().await;
}
#[tokio::test]
async fn test_fs_put_get_delete() {
    // Round-trips one small text object: PUT, GET, HEAD, DELETE, verify gone.
    let server = FsTestServer::start().await;
    let s3 = &server.client;
    s3.create_bucket().bucket("test").send().await.unwrap();

    // Store the object with an explicit content type.
    let payload = aws_sdk_s3::primitives::ByteStream::from_static(b"hello world");
    s3.put_object()
        .bucket("test")
        .key("hello.txt")
        .content_type("text/plain")
        .body(payload)
        .send()
        .await
        .unwrap();

    // GET must return exactly the bytes that were stored.
    let fetched = s3
        .get_object()
        .bucket("test")
        .key("hello.txt")
        .send()
        .await
        .unwrap();
    let bytes = fetched.body.collect().await.unwrap().into_bytes();
    assert_eq!(bytes.as_ref(), b"hello world");

    // HEAD must report the stored length and content type.
    let head = s3
        .head_object()
        .bucket("test")
        .key("hello.txt")
        .send()
        .await
        .unwrap();
    assert_eq!(head.content_length(), Some(11));
    assert_eq!(head.content_type(), Some("text/plain"));

    s3.delete_object()
        .bucket("test")
        .key("hello.txt")
        .send()
        .await
        .unwrap();

    // A deleted key must no longer be readable.
    assert!(s3
        .get_object()
        .bucket("test")
        .key("hello.txt")
        .send()
        .await
        .is_err());

    // Cleanup
    s3.delete_bucket().bucket("test").send().await.unwrap();
    server.shutdown().await;
}
#[tokio::test]
async fn test_fs_list_objects() {
    // ListObjectsV2 over five keys: unfiltered listing plus prefix filtering.
    let server = FsTestServer::start().await;
    let s3 = &server.client;
    s3.create_bucket().bucket("test").send().await.unwrap();

    // Precompute the key names once; reused for both upload and cleanup.
    let keys: Vec<String> = (0..5).map(|i| format!("item-{i:02}")).collect();
    for key in &keys {
        s3.put_object()
            .bucket("test")
            .key(key)
            .body(aws_sdk_s3::primitives::ByteStream::from_static(b"data"))
            .send()
            .await
            .unwrap();
    }

    // Without a prefix, every object is returned.
    let all = s3.list_objects_v2().bucket("test").send().await.unwrap();
    assert_eq!(all.key_count(), Some(5));

    // An exact-key prefix narrows the listing to a single entry.
    let filtered = s3
        .list_objects_v2()
        .bucket("test")
        .prefix("item-03")
        .send()
        .await
        .unwrap();
    assert_eq!(filtered.key_count(), Some(1));

    // Cleanup: the bucket must be empty before it can be deleted.
    for key in &keys {
        s3.delete_object()
            .bucket("test")
            .key(key)
            .send()
            .await
            .unwrap();
    }
    s3.delete_bucket().bucket("test").send().await.unwrap();
    server.shutdown().await;
}
#[tokio::test]
async fn test_fs_user_metadata() {
    // User-defined metadata set on PUT must round-trip through HEAD.
    let server = FsTestServer::start().await;
    let s3 = &server.client;
    s3.create_bucket().bucket("test").send().await.unwrap();

    s3.put_object()
        .bucket("test")
        .key("meta.txt")
        .metadata("author", "test-user")
        .metadata("version", "1")
        .body(aws_sdk_s3::primitives::ByteStream::from_static(b"data"))
        .send()
        .await
        .unwrap();

    // HEAD must surface both metadata entries unchanged.
    let head = s3
        .head_object()
        .bucket("test")
        .key("meta.txt")
        .send()
        .await
        .unwrap();
    let metadata = head.metadata().unwrap();
    assert_eq!(metadata.get("author").unwrap(), "test-user");
    assert_eq!(metadata.get("version").unwrap(), "1");

    // Cleanup
    s3.delete_object()
        .bucket("test")
        .key("meta.txt")
        .send()
        .await
        .unwrap();
    s3.delete_bucket().bucket("test").send().await.unwrap();
    server.shutdown().await;
}
#[tokio::test]
async fn test_fs_multipart_upload() {
    // Two-part multipart upload: checks the compound ETag and the
    // reassembled object contents.
    let server = FsTestServer::start().await;
    let s3 = &server.client;
    s3.create_bucket().bucket("test").send().await.unwrap();

    let create = s3
        .create_multipart_upload()
        .bucket("test")
        .key("big.bin")
        .send()
        .await
        .unwrap();
    let upload_id = create.upload_id().unwrap();

    // Per the S3 spec, every part except the last must be at least 5 MB.
    const MIN_PART: usize = 5 * 1024 * 1024;
    let first = s3
        .upload_part()
        .bucket("test")
        .key("big.bin")
        .upload_id(upload_id)
        .part_number(1)
        .body(aws_sdk_s3::primitives::ByteStream::from(vec![0xAAu8; MIN_PART]))
        .send()
        .await
        .unwrap();
    let second = s3
        .upload_part()
        .bucket("test")
        .key("big.bin")
        .upload_id(upload_id)
        .part_number(2)
        .body(aws_sdk_s3::primitives::ByteStream::from(vec![0xBBu8; 1024]))
        .send()
        .await
        .unwrap();

    // Assemble the completion manifest from the (part number, ETag) pairs.
    let completed = [(1, first.e_tag()), (2, second.e_tag())]
        .into_iter()
        .fold(CompletedMultipartUpload::builder(), |builder, (number, etag)| {
            builder.parts(
                CompletedPart::builder()
                    .part_number(number)
                    .e_tag(etag.unwrap())
                    .build(),
            )
        })
        .build();
    let complete_resp = s3
        .complete_multipart_upload()
        .bucket("test")
        .key("big.bin")
        .upload_id(upload_id)
        .multipart_upload(completed)
        .send()
        .await
        .unwrap();

    // Multipart objects get a compound ETag ending in "-<part count>".
    let etag = complete_resp.e_tag().unwrap();
    assert!(etag.contains("-2"), "Expected compound ETag, got: {etag}");

    // The reassembled object must be part1's bytes followed by part2's.
    let fetched = s3
        .get_object()
        .bucket("test")
        .key("big.bin")
        .send()
        .await
        .unwrap();
    let body = fetched.body.collect().await.unwrap().into_bytes();
    assert_eq!(body.len(), MIN_PART + 1024);
    assert!(body[..MIN_PART].iter().all(|b| *b == 0xAA));
    assert!(body[MIN_PART..].iter().all(|b| *b == 0xBB));

    // Cleanup
    s3.delete_object()
        .bucket("test")
        .key("big.bin")
        .send()
        .await
        .unwrap();
    s3.delete_bucket().bucket("test").send().await.unwrap();
    server.shutdown().await;
}
#[tokio::test]
async fn test_fs_abort_multipart() {
    // Aborting a multipart upload must leave no object behind.
    let server = FsTestServer::start().await;
    let s3 = &server.client;
    s3.create_bucket().bucket("test").send().await.unwrap();

    let create = s3
        .create_multipart_upload()
        .bucket("test")
        .key("aborted.bin")
        .send()
        .await
        .unwrap();
    let upload_id = create.upload_id().unwrap();

    // Upload one part so the abort has staged data to discard.
    s3.upload_part()
        .bucket("test")
        .key("aborted.bin")
        .upload_id(upload_id)
        .part_number(1)
        .body(aws_sdk_s3::primitives::ByteStream::from(vec![0u8; 100]))
        .send()
        .await
        .unwrap();

    s3.abort_multipart_upload()
        .bucket("test")
        .key("aborted.bin")
        .upload_id(upload_id)
        .send()
        .await
        .unwrap();

    // The key must not exist after the abort.
    assert!(s3
        .get_object()
        .bucket("test")
        .key("aborted.bin")
        .send()
        .await
        .is_err());

    s3.delete_bucket().bucket("test").send().await.unwrap();
    server.shutdown().await;
}