feat: add compute

Signed-off-by: kjuulh <contact@kjuulh.io>
This commit is contained in:
2026-03-21 00:42:17 +01:00
parent 04e452ecc3
commit 7188b44624
17 changed files with 1307 additions and 3 deletions

View File

@@ -0,0 +1 @@
[ 116ms] [ERROR] Failed to load resource: the server responded with a status of 500 () @ https://client.dev.forage.sh/orgs/rawpotion/compute/rollouts/83ae0cbc-5db6-4344-8042-025816b017d3:0

23
Cargo.lock generated
View File

@@ -147,6 +147,28 @@ dependencies = [
"url", "url",
] ]
[[package]]
name = "async-stream"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
dependencies = [
"async-stream-impl",
"futures-core",
"pin-project-lite",
]
[[package]]
name = "async-stream-impl"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.117",
]
[[package]] [[package]]
name = "async-trait" name = "async-trait"
version = "0.1.89" version = "0.1.89"
@@ -982,6 +1004,7 @@ version = "0.1.0"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"async-nats", "async-nats",
"async-stream",
"async-trait", "async-trait",
"axum", "axum",
"axum-extra", "axum-extra",

View File

@@ -14,6 +14,7 @@ rand.workspace = true
hmac.workspace = true hmac.workspace = true
sha2.workspace = true sha2.workspace = true
tracing.workspace = true tracing.workspace = true
tokio.workspace = true
[dev-dependencies] [dev-dependencies]
tokio = { workspace = true, features = ["macros", "rt"] } tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread", "time"] }

View File

@@ -0,0 +1,440 @@
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex;
use uuid::Uuid;
use super::{
ComputeError, ComputeInstance, ComputeResourceSpec, ComputeScheduler, ResourceKind, Rollout,
RolloutEvent, RolloutResource, RolloutStatus,
};
/// Internal mutable state shared between the scheduler handle and its
/// spawned simulation tasks.
struct MockState {
// Rollouts keyed by rollout ID.
rollouts: HashMap<String, Rollout>,
// Running/pending instances keyed by namespace.
instances: HashMap<String, Vec<ComputeInstance>>,
}
/// In-memory compute scheduler that simulates container lifecycle.
///
/// Stores rollouts and instances in memory. When `apply_resources` is called
/// it spawns a background task that transitions each resource through
/// PENDING → IN_PROGRESS → SUCCEEDED with short delays.
pub struct InMemoryComputeScheduler {
// Arc<Mutex<…>> so spawned simulation tasks can mutate the same state.
state: Arc<Mutex<MockState>>,
}
impl InMemoryComputeScheduler {
    /// Construct a scheduler with no recorded rollouts or instances.
    pub fn new() -> Self {
        let initial = MockState {
            rollouts: HashMap::new(),
            instances: HashMap::new(),
        };
        Self {
            state: Arc::new(Mutex::new(initial)),
        }
    }
}
impl Default for InMemoryComputeScheduler {
/// Equivalent to [`InMemoryComputeScheduler::new`]: an empty scheduler.
fn default() -> Self {
Self::new()
}
}
#[async_trait::async_trait]
impl ComputeScheduler for InMemoryComputeScheduler {
/// Apply a batch of resources: record a new rollout, upsert one instance per
/// container-service resource, and spawn a background task that simulates the
/// rollout progressing PENDING → IN_PROGRESS → SUCCEEDED with short delays.
///
/// Returns the generated rollout ID.
async fn apply_resources(
    &self,
    apply_id: &str,
    namespace: &str,
    resources: Vec<ComputeResourceSpec>,
    labels: HashMap<String, String>,
) -> Result<String, ComputeError> {
    let rollout_id = Uuid::new_v4().to_string();
    let now = chrono::Utc::now();
    // Every resource starts out queued/pending.
    let rollout_resources: Vec<RolloutResource> = resources
        .iter()
        .map(|r| RolloutResource {
            name: r.name.clone(),
            kind: r.kind,
            status: RolloutStatus::Pending,
            message: "queued".into(),
        })
        .collect();
    let rollout = Rollout {
        id: rollout_id.clone(),
        apply_id: apply_id.to_string(),
        namespace: namespace.to_string(),
        resources: rollout_resources,
        status: RolloutStatus::Pending,
        labels: labels.clone(),
        created_at: now,
    };
    // Instance metadata is derived from well-known labels. Use the lazy
    // `unwrap_or_else` form so the fallback String is only allocated when
    // the label is actually absent.
    let region = labels
        .get("region")
        .cloned()
        .unwrap_or_else(|| "eu-west-1".into());
    let project = labels.get("project").cloned().unwrap_or_default();
    let destination = labels.get("destination").cloned().unwrap_or_default();
    let environment = labels.get("environment").cloned().unwrap_or_default();
    // Only container services become long-lived instances.
    let new_instances: Vec<ComputeInstance> = resources
        .iter()
        .filter(|r| r.kind == ResourceKind::ContainerService)
        .map(|r| ComputeInstance {
            id: Uuid::new_v4().to_string(),
            namespace: namespace.to_string(),
            resource_name: r.name.clone(),
            project: project.clone(),
            destination: destination.clone(),
            environment: environment.clone(),
            region: region.clone(),
            image: r.image.clone().unwrap_or_else(|| "unknown".into()),
            replicas: r.replicas,
            cpu: r.cpu.clone().unwrap_or_else(|| "250m".into()),
            memory: r.memory.clone().unwrap_or_else(|| "256Mi".into()),
            status: "pending".into(),
            created_at: now,
        })
        .collect();
    {
        let mut state = self.state.lock().await;
        state.rollouts.insert(rollout_id.clone(), rollout);
        let ns_instances = state.instances.entry(namespace.to_string()).or_default();
        // Upsert: replace existing instances with the same resource_name.
        for new_inst in new_instances {
            match ns_instances
                .iter_mut()
                .find(|i| i.resource_name == new_inst.resource_name)
            {
                Some(existing) => *existing = new_inst,
                None => ns_instances.push(new_inst),
            }
        }
    }
    // Spawn the background simulation that drives this rollout to completion.
    let state = self.state.clone();
    let rid = rollout_id.clone();
    let ns = namespace.to_string();
    let resource_names: Vec<(String, String)> = resources
        .iter()
        .map(|r| (r.name.clone(), r.kind.to_string()))
        .collect();
    tokio::spawn(async move {
        // Transition rollout + its resources to InProgress.
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
        {
            let mut state = state.lock().await;
            if let Some(rollout) = state.rollouts.get_mut(&rid) {
                rollout.status = RolloutStatus::InProgress;
                for r in &mut rollout.resources {
                    r.status = RolloutStatus::InProgress;
                    r.message = "deploying".into();
                }
            }
            // Only flip instances belonging to THIS rollout to "running".
            // Previously every pending instance in the namespace was
            // updated, which could clobber a concurrent rollout's state.
            if let Some(instances) = state.instances.get_mut(&ns) {
                for inst in instances.iter_mut() {
                    let owned = resource_names
                        .iter()
                        .any(|(name, _)| *name == inst.resource_name);
                    if owned && inst.status == "pending" {
                        inst.status = "running".into();
                    }
                }
            }
        }
        // Complete each resource in order, with a short delay per resource.
        for (name, _kind) in &resource_names {
            tokio::time::sleep(std::time::Duration::from_millis(200)).await;
            let mut state = state.lock().await;
            if let Some(rollout) = state.rollouts.get_mut(&rid) {
                if let Some(r) = rollout.resources.iter_mut().find(|r| &r.name == name) {
                    r.status = RolloutStatus::Succeeded;
                    r.message = "ready".into();
                }
            }
        }
        // Finally mark the rollout itself as succeeded.
        {
            let mut state = state.lock().await;
            if let Some(rollout) = state.rollouts.get_mut(&rid) {
                rollout.status = RolloutStatus::Succeeded;
            }
        }
    });
    Ok(rollout_id)
}
/// Stream rollout status events for `rollout_id`.
///
/// Emits one Pending event per resource up front, then polls the stored
/// rollout every 150ms, forwarding each resource's current status until the
/// rollout reaches a terminal state (Succeeded/Failed/RolledBack) or the
/// rollout disappears.
///
/// Returns `ComputeError::NotFound` if the rollout does not exist at call
/// time.
async fn watch_rollout(
    &self,
    rollout_id: &str,
) -> Result<tokio::sync::mpsc::Receiver<RolloutEvent>, ComputeError> {
    let rollout = {
        let state = self.state.lock().await;
        state
            .rollouts
            .get(rollout_id)
            .cloned()
            .ok_or_else(|| ComputeError::NotFound(format!("rollout {rollout_id}")))?
    };
    let (tx, rx) = tokio::sync::mpsc::channel(64);
    let state = self.state.clone();
    let rid = rollout_id.to_string();
    let resource_specs: Vec<(String, String)> = rollout
        .resources
        .iter()
        .map(|r| (r.name.clone(), r.kind.to_string()))
        .collect();
    tokio::spawn(async move {
        // Emit initial pending events.
        for (name, kind) in &resource_specs {
            let _ = tx
                .send(RolloutEvent {
                    resource_name: name.clone(),
                    resource_kind: kind.clone(),
                    status: RolloutStatus::Pending,
                    message: "queued".into(),
                })
                .await;
        }
        // Poll until the rollout is done.
        loop {
            tokio::time::sleep(std::time::Duration::from_millis(150)).await;
            // Snapshot under the lock, then RELEASE it before sending.
            // The previous version held the mutex across `tx.send(..).await`;
            // with a bounded channel (64) and a stalled receiver, the send
            // blocks while holding the lock, which also blocks the apply
            // simulation task and deadlocks the whole mock.
            let snapshot = {
                let state = state.lock().await;
                state
                    .rollouts
                    .get(&rid)
                    .map(|r| (r.status, r.resources.clone()))
            };
            let Some((status, resources)) = snapshot else {
                break;
            };
            for r in &resources {
                let _ = tx
                    .send(RolloutEvent {
                        resource_name: r.name.clone(),
                        resource_kind: r.kind.to_string(),
                        status: r.status,
                        message: r.message.clone(),
                    })
                    .await;
            }
            if matches!(
                status,
                RolloutStatus::Succeeded | RolloutStatus::Failed | RolloutStatus::RolledBack
            ) {
                break;
            }
        }
    });
    Ok(rx)
}
/// Delete all rollouts and instances in `namespace` whose labels match every
/// entry in `labels`. An empty `labels` map matches everything in the
/// namespace.
async fn delete_resources(
    &self,
    namespace: &str,
    labels: HashMap<String, String>,
) -> Result<(), ComputeError> {
    let mut state = self.state.lock().await;
    // A rollout is removed only when it is in the namespace AND carries
    // every requested label with the requested value.
    state.rollouts.retain(|_, r| {
        let matches = r.namespace == namespace
            && labels.iter().all(|(k, v)| r.labels.get(k) == Some(v));
        !matches
    });
    // Instances don't store the full label map, but their provenance fields
    // mirror the well-known labels, so match on those. Previously instances
    // were only removed when `labels` was empty, leaving labelled deletes
    // half-applied: rollouts gone, instances still listed.
    if let Some(instances) = state.instances.get_mut(namespace) {
        if labels.is_empty() {
            instances.clear();
        } else {
            instances.retain(|i| {
                let matches = labels.iter().all(|(k, v)| match k.as_str() {
                    "project" => &i.project == v,
                    "destination" => &i.destination == v,
                    "environment" => &i.environment == v,
                    "region" => &i.region == v,
                    // Label keys that have no instance-field counterpart are
                    // treated as matching — mock-level best effort.
                    _ => true,
                });
                !matches
            });
        }
    }
    Ok(())
}
/// Return all rollouts recorded for `namespace`, newest first.
async fn list_rollouts(&self, namespace: &str) -> Result<Vec<Rollout>, ComputeError> {
    let guard = self.state.lock().await;
    let mut matching = guard
        .rollouts
        .values()
        .filter(|r| r.namespace == namespace)
        .cloned()
        .collect::<Vec<Rollout>>();
    // Newest first: sort descending on creation time.
    matching.sort_by_key(|r| std::cmp::Reverse(r.created_at));
    Ok(matching)
}
/// Fetch a single rollout by ID, or `NotFound` if it was never recorded
/// (or has since been deleted).
async fn get_rollout(&self, rollout_id: &str) -> Result<Rollout, ComputeError> {
    let guard = self.state.lock().await;
    match guard.rollouts.get(rollout_id) {
        Some(rollout) => Ok(rollout.clone()),
        None => Err(ComputeError::NotFound(format!("rollout {rollout_id}"))),
    }
}
/// List all known instances for `namespace`; empty when none were recorded.
async fn list_instances(
    &self,
    namespace: &str,
) -> Result<Vec<ComputeInstance>, ComputeError> {
    let guard = self.state.lock().await;
    let instances = match guard.instances.get(namespace) {
        Some(list) => list.clone(),
        None => Vec::new(),
    };
    Ok(instances)
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Applying resources must record a rollout and create one instance per
    /// container-service resource.
    #[tokio::test(flavor = "multi_thread")]
    async fn apply_creates_rollout_and_instances() {
        let scheduler = InMemoryComputeScheduler::new();
        let resources = vec![ComputeResourceSpec {
            name: "my-api".into(),
            kind: ResourceKind::ContainerService,
            image: Some("registry.forage.sh/org/app:v1".into()),
            replicas: 2,
            cpu: Some("500m".into()),
            memory: Some("512Mi".into()),
        }];
        let mut labels = HashMap::new();
        labels.insert("region".into(), "eu-west-1".into());
        let rollout_id = scheduler
            .apply_resources("test-apply-1", "test-ns", resources, labels)
            .await
            .unwrap();
        assert!(!rollout_id.is_empty());
        // Rollout should exist
        let rollout = scheduler.get_rollout(&rollout_id).await.unwrap();
        assert_eq!(rollout.namespace, "test-ns");
        assert_eq!(rollout.resources.len(), 1);
        // Instance should exist
        let instances = scheduler.list_instances("test-ns").await.unwrap();
        assert_eq!(instances.len(), 1);
        assert_eq!(instances[0].image, "registry.forage.sh/org/app:v1");
        assert_eq!(instances[0].replicas, 2);
    }

    /// The background simulation should drive the rollout to Succeeded.
    ///
    /// Polls with a deadline instead of the previous fixed 1-second sleep:
    /// the test now finishes as soon as the rollout completes and cannot
    /// flake when the simulation runs slightly slow on a loaded machine.
    #[tokio::test(flavor = "multi_thread")]
    async fn rollout_completes_successfully() {
        let scheduler = InMemoryComputeScheduler::new();
        let resources = vec![ComputeResourceSpec {
            name: "svc".into(),
            kind: ResourceKind::ContainerService,
            image: Some("img:latest".into()),
            replicas: 1,
            cpu: None,
            memory: None,
        }];
        let rollout_id = scheduler
            .apply_resources("test-2", "ns", resources, HashMap::new())
            .await
            .unwrap();
        // Poll until the simulated rollout finishes (generous 5s deadline).
        let deadline = std::time::Instant::now() + std::time::Duration::from_secs(5);
        loop {
            let rollout = scheduler.get_rollout(&rollout_id).await.unwrap();
            if rollout.status == RolloutStatus::Succeeded {
                assert_eq!(rollout.resources[0].status, RolloutStatus::Succeeded);
                break;
            }
            assert!(
                std::time::Instant::now() < deadline,
                "rollout did not complete in time"
            );
            tokio::time::sleep(std::time::Duration::from_millis(25)).await;
        }
    }

    /// Watching a rollout should stream at least one Pending and one
    /// Succeeded event for the resource.
    #[tokio::test(flavor = "multi_thread")]
    async fn watch_rollout_streams_events() {
        let scheduler = InMemoryComputeScheduler::new();
        let resources = vec![ComputeResourceSpec {
            name: "app".into(),
            kind: ResourceKind::ContainerService,
            image: Some("img:v1".into()),
            replicas: 1,
            cpu: None,
            memory: None,
        }];
        let rollout_id = scheduler
            .apply_resources("test-3", "ns", resources, HashMap::new())
            .await
            .unwrap();
        let mut rx = scheduler.watch_rollout(&rollout_id).await.unwrap();
        let mut events = Vec::new();
        while let Some(event) = rx.recv().await {
            events.push(event);
            if events.last().map(|e| e.status) == Some(RolloutStatus::Succeeded) {
                break;
            }
        }
        assert!(!events.is_empty());
        // Should have at least pending + succeeded
        assert!(events.iter().any(|e| e.status == RolloutStatus::Pending));
        assert!(events.iter().any(|e| e.status == RolloutStatus::Succeeded));
    }

    /// Deleting by namespace + labels removes the matching rollouts.
    #[tokio::test]
    async fn delete_removes_resources() {
        let scheduler = InMemoryComputeScheduler::new();
        let resources = vec![ComputeResourceSpec {
            name: "app".into(),
            kind: ResourceKind::ContainerService,
            image: Some("img:v1".into()),
            replicas: 1,
            cpu: None,
            memory: None,
        }];
        let mut labels = HashMap::new();
        labels.insert("project".into(), "test".into());
        scheduler
            .apply_resources("del-1", "ns", resources, labels.clone())
            .await
            .unwrap();
        assert_eq!(scheduler.list_rollouts("ns").await.unwrap().len(), 1);
        scheduler.delete_resources("ns", labels).await.unwrap();
        assert_eq!(scheduler.list_rollouts("ns").await.unwrap().len(), 0);
    }

    /// Watching an unknown rollout ID must fail fast with NotFound.
    #[tokio::test]
    async fn watch_nonexistent_rollout_returns_not_found() {
        let scheduler = InMemoryComputeScheduler::new();
        let result = scheduler.watch_rollout("does-not-exist").await;
        assert!(matches!(result, Err(ComputeError::NotFound(_))));
    }
}

View File

@@ -0,0 +1,201 @@
use std::collections::HashMap;
// ---------------------------------------------------------------------------
// Region catalog
// ---------------------------------------------------------------------------
/// A deployable region in the static catalog.
// NOTE(review): deriving `Deserialize` on `&'static str` fields only works
// when the deserializer can hand out 'static borrows, which ordinary JSON
// input cannot — confirm Deserialize is actually needed here, or switch the
// fields to `String`.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct Region {
// Stable machine identifier, e.g. "eu-west-1".
pub id: &'static str,
// Human-readable location name.
pub name: &'static str,
// Combined "id — name" label for UI dropdowns.
pub display_name: &'static str,
// Whether workloads may currently be scheduled here.
pub available: bool,
}
/// Static region catalog; `available: false` entries are shown but not
/// schedulable (see `available_regions`).
pub const REGIONS: &[Region] = &[
Region {
id: "eu-west-1",
name: "Europe (Ireland)",
display_name: "eu-west-1 — Europe (Ireland)",
available: true,
},
Region {
id: "us-east-1",
name: "US East (Virginia)",
display_name: "us-east-1 — US East (Virginia)",
available: true,
},
Region {
id: "ap-southeast-1",
name: "Asia Pacific (Singapore)",
display_name: "ap-southeast-1 — Asia Pacific (Singapore)",
available: false,
},
];
/// Regions currently open for scheduling (those with `available == true`).
pub fn available_regions() -> Vec<&'static Region> {
    let mut open = Vec::new();
    for region in REGIONS {
        if region.available {
            open.push(region);
        }
    }
    open
}
// ---------------------------------------------------------------------------
// Domain types
// ---------------------------------------------------------------------------
/// Normalized description of one resource to schedule, independent of the
/// wire (proto) representation.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct ComputeResourceSpec {
// Resource name, unique within a namespace.
pub name: String,
pub kind: ResourceKind,
// Container image; None for kinds without a container (e.g. Route).
pub image: Option<String>,
// Desired replica count; schedulers default this to 1 when unset upstream.
pub replicas: u32,
// Requested CPU / memory, e.g. "250m" / "256Mi"; None means use defaults.
pub cpu: Option<String>,
pub memory: Option<String>,
}
/// The kind of compute resource being scheduled. Serialized snake_case to
/// match the `Display` impl below.
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ResourceKind {
ContainerService,
Service,
Route,
CronJob,
Job,
}
impl std::fmt::Display for ResourceKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ResourceKind::ContainerService => write!(f, "container_service"),
ResourceKind::Service => write!(f, "service"),
ResourceKind::Route => write!(f, "route"),
ResourceKind::CronJob => write!(f, "cron_job"),
ResourceKind::Job => write!(f, "job"),
}
}
}
/// A tracked deployment of a batch of resources into a namespace.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct Rollout {
// Scheduler-generated rollout ID.
pub id: String,
// Caller-supplied apply identifier this rollout belongs to.
pub apply_id: String,
pub namespace: String,
// Per-resource status entries for this rollout.
pub resources: Vec<RolloutResource>,
// Aggregate status of the whole rollout.
pub status: RolloutStatus,
// Labels supplied at apply time; used for later matching/deletion.
pub labels: HashMap<String, String>,
pub created_at: chrono::DateTime<chrono::Utc>,
}
/// Status of a single resource within a rollout.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct RolloutResource {
pub name: String,
pub kind: ResourceKind,
pub status: RolloutStatus,
// Free-form human-readable progress message (e.g. "queued", "ready").
pub message: String,
}
/// Lifecycle state of a rollout (or of one resource inside it).
/// Succeeded/Failed/RolledBack are terminal.
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum RolloutStatus {
Pending,
InProgress,
Succeeded,
Failed,
RolledBack,
}
impl std::fmt::Display for RolloutStatus {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
RolloutStatus::Pending => write!(f, "pending"),
RolloutStatus::InProgress => write!(f, "in_progress"),
RolloutStatus::Succeeded => write!(f, "succeeded"),
RolloutStatus::Failed => write!(f, "failed"),
RolloutStatus::RolledBack => write!(f, "rolled_back"),
}
}
}
/// One status-change event emitted while watching a rollout.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct RolloutEvent {
pub resource_name: String,
// Stringified ResourceKind (see its Display impl).
pub resource_kind: String,
pub status: RolloutStatus,
pub message: String,
}
/// A running (or pending) compute instance created by a rollout.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct ComputeInstance {
pub id: String,
pub namespace: String,
// Name of the resource spec this instance was created from.
pub resource_name: String,
// Provenance fields mirrored from apply-time labels.
pub project: String,
pub destination: String,
pub environment: String,
pub region: String,
pub image: String,
pub replicas: u32,
// Requested CPU / memory, e.g. "250m" / "256Mi".
pub cpu: String,
pub memory: String,
// Free-form lifecycle string; the mock scheduler uses "pending"/"running".
pub status: String,
pub created_at: chrono::DateTime<chrono::Utc>,
}
// ---------------------------------------------------------------------------
// Error
// ---------------------------------------------------------------------------
/// Errors surfaced by a `ComputeScheduler` implementation. Mapped onto gRPC
/// status codes at the transport boundary.
#[derive(Debug, Clone, thiserror::Error)]
pub enum ComputeError {
#[error("not found: {0}")]
NotFound(String),
#[error("invalid request: {0}")]
InvalidRequest(String),
#[error("resource conflict: {0}")]
Conflict(String),
#[error("scheduler error: {0}")]
Internal(String),
}
// ---------------------------------------------------------------------------
// Scheduler trait
// ---------------------------------------------------------------------------
/// Abstraction over a compute backend (real orchestrator or in-memory mock).
/// Implementations must be shareable across tasks (`Send + Sync`).
#[async_trait::async_trait]
pub trait ComputeScheduler: Send + Sync {
/// Apply a batch of resources. Returns a rollout ID for tracking.
async fn apply_resources(
&self,
apply_id: &str,
namespace: &str,
resources: Vec<ComputeResourceSpec>,
labels: HashMap<String, String>,
) -> Result<String, ComputeError>;
/// Subscribe to rollout status events. The channel closes when the
/// rollout reaches a terminal state (implementation-defined cadence).
async fn watch_rollout(
&self,
rollout_id: &str,
) -> Result<tokio::sync::mpsc::Receiver<RolloutEvent>, ComputeError>;
/// Delete resources by namespace + labels.
async fn delete_resources(
&self,
namespace: &str,
labels: HashMap<String, String>,
) -> Result<(), ComputeError>;
/// List rollouts for a namespace.
async fn list_rollouts(&self, namespace: &str) -> Result<Vec<Rollout>, ComputeError>;
/// Get a specific rollout by ID.
async fn get_rollout(&self, rollout_id: &str) -> Result<Rollout, ComputeError>;
/// List running compute instances for a namespace.
async fn list_instances(&self, namespace: &str) -> Result<Vec<ComputeInstance>, ComputeError>;
}
// ---------------------------------------------------------------------------
// In-memory mock scheduler
// ---------------------------------------------------------------------------
pub mod mock_scheduler;
pub use mock_scheduler::InMemoryComputeScheduler;

View File

@@ -5,3 +5,4 @@ pub mod integrations;
pub mod registry; pub mod registry;
pub mod deployments; pub mod deployments;
pub mod billing; pub mod billing;
pub mod compute;

View File

@@ -6,7 +6,7 @@ edition = "2024"
[dependencies] [dependencies]
forage-core = { path = "../forage-core" } forage-core = { path = "../forage-core" }
forage-db = { path = "../forage-db" } forage-db = { path = "../forage-db" }
forage-grpc = { path = "../forage-grpc" } forage-grpc = { path = "../forage-grpc", features = ["client", "server"] }
anyhow.workspace = true anyhow.workspace = true
chrono.workspace = true chrono.workspace = true
async-trait.workspace = true async-trait.workspace = true
@@ -37,3 +37,4 @@ sha2.workspace = true
notmad.workspace = true notmad.workspace = true
tokio-util.workspace = true tokio-util.workspace = true
async-nats.workspace = true async-nats.workspace = true
async-stream = "0.3"

View File

@@ -0,0 +1,160 @@
use std::pin::Pin;
use std::sync::Arc;
use forage_core::compute::{
ComputeError, ComputeResourceSpec, ComputeScheduler, ResourceKind, RolloutStatus,
};
use forage_grpc::forage_service_server::ForageService;
use forage_grpc::{
ApplyResourcesRequest, ApplyResourcesResponse, DeleteResourcesRequest,
DeleteResourcesResponse, RolloutEvent as ProtoRolloutEvent, WatchRolloutRequest,
};
use tokio_stream::Stream;
use tonic::{Request, Response, Status};
/// Implements the `ForageService` gRPC server trait.
///
/// Thin adapter: validates auth, converts proto to domain types, delegates to
/// the `ComputeScheduler`, and converts results back to proto.
pub struct ForageServiceImpl {
// Backend the adapter delegates every call to.
pub scheduler: Arc<dyn ComputeScheduler>,
}
// Boxed event stream type used as the tonic server-streaming response.
type WatchStream =
Pin<Box<dyn Stream<Item = Result<ProtoRolloutEvent, Status>> + Send + 'static>>;
#[tonic::async_trait]
impl ForageService for ForageServiceImpl {
/// Apply a batch of resources via gRPC.
///
/// Validates the request (non-empty namespace, at least one resource),
/// converts each proto resource into a domain `ComputeResourceSpec`, and
/// delegates to the scheduler. Returns the scheduler's rollout ID.
async fn apply_resources(
    &self,
    request: Request<ApplyResourcesRequest>,
) -> Result<Response<ApplyResourcesResponse>, Status> {
    let req = request.into_inner();
    if req.namespace.is_empty() {
        return Err(Status::invalid_argument("namespace is required"));
    }
    if req.resources.is_empty() {
        return Err(Status::invalid_argument("at least one resource is required"));
    }
    let resources: Vec<ComputeResourceSpec> = req
        .resources
        .iter()
        .map(|r| {
            let (kind, image, replicas, cpu, memory) = match &r.spec {
                Some(forage_grpc::forage_resource::Spec::ContainerService(cs)) => {
                    let container = cs.container.as_ref();
                    let scaling = cs.scaling.as_ref();
                    // Resolve container -> resources -> requests once instead
                    // of walking the same Option chain twice for cpu/memory.
                    let requests = container
                        .and_then(|c| c.resources.as_ref())
                        .and_then(|res| res.requests.as_ref());
                    (
                        ResourceKind::ContainerService,
                        container.map(|c| c.image.clone()),
                        scaling.map(|s| s.replicas).unwrap_or(1),
                        requests.map(|req| req.cpu.clone()),
                        requests.map(|req| req.memory.clone()),
                    )
                }
                Some(forage_grpc::forage_resource::Spec::Service(_)) => {
                    (ResourceKind::Service, None, 1, None, None)
                }
                Some(forage_grpc::forage_resource::Spec::Route(_)) => {
                    (ResourceKind::Route, None, 1, None, None)
                }
                Some(forage_grpc::forage_resource::Spec::CronJob(cj)) => {
                    let image = cj.container.as_ref().map(|c| c.image.clone());
                    (ResourceKind::CronJob, image, 1, None, None)
                }
                Some(forage_grpc::forage_resource::Spec::Job(j)) => {
                    let image = j.container.as_ref().map(|c| c.image.clone());
                    (ResourceKind::Job, image, 1, None, None)
                }
                // NOTE(review): a resource with no spec silently defaults to
                // an imageless container service — consider rejecting with
                // invalid_argument instead. Kept as-is to preserve behavior.
                None => (ResourceKind::ContainerService, None, 1, None, None),
            };
            ComputeResourceSpec {
                name: r.name.clone(),
                kind,
                image,
                replicas,
                cpu,
                memory,
            }
        })
        .collect();
    let rollout_id = self
        .scheduler
        .apply_resources(&req.apply_id, &req.namespace, resources, req.labels)
        .await
        .map_err(compute_err_to_status)?;
    Ok(Response::new(ApplyResourcesResponse { rollout_id }))
}
type WatchRolloutStream = WatchStream;
/// Stream rollout events for one rollout as protobuf messages.
///
/// Bridges the scheduler's mpsc receiver into a tonic response stream; the
/// stream ends when the scheduler drops its sender side.
async fn watch_rollout(
&self,
request: Request<WatchRolloutRequest>,
) -> Result<Response<Self::WatchRolloutStream>, Status> {
let rollout_id = request.into_inner().rollout_id;
let mut rx = self
.scheduler
.watch_rollout(&rollout_id)
.await
.map_err(compute_err_to_status)?;
// Lazily convert each domain event to proto as the client polls.
let stream = async_stream::stream! {
while let Some(event) = rx.recv().await {
yield Ok(ProtoRolloutEvent {
resource_name: event.resource_name,
resource_kind: event.resource_kind,
status: domain_status_to_proto(event.status) as i32,
message: event.message,
});
}
};
Ok(Response::new(Box::pin(stream) as Self::WatchRolloutStream))
}
async fn delete_resources(
&self,
request: Request<DeleteResourcesRequest>,
) -> Result<Response<DeleteResourcesResponse>, Status> {
let req = request.into_inner();
self.scheduler
.delete_resources(&req.namespace, req.labels)
.await
.map_err(compute_err_to_status)?;
Ok(Response::new(DeleteResourcesResponse {}))
}
}
fn compute_err_to_status(e: ComputeError) -> Status {
match e {
ComputeError::NotFound(msg) => Status::not_found(msg),
ComputeError::InvalidRequest(msg) => Status::invalid_argument(msg),
ComputeError::Conflict(msg) => Status::already_exists(msg),
ComputeError::Internal(msg) => Status::internal(msg),
}
}
fn domain_status_to_proto(s: RolloutStatus) -> forage_grpc::RolloutStatus {
match s {
RolloutStatus::Pending => forage_grpc::RolloutStatus::Pending,
RolloutStatus::InProgress => forage_grpc::RolloutStatus::InProgress,
RolloutStatus::Succeeded => forage_grpc::RolloutStatus::Succeeded,
RolloutStatus::Failed => forage_grpc::RolloutStatus::Failed,
RolloutStatus::RolledBack => forage_grpc::RolloutStatus::RolledBack,
}
}

View File

@@ -1,9 +1,11 @@
mod auth; mod auth;
mod compute_grpc;
mod forest_client; mod forest_client;
mod notification_consumer; mod notification_consumer;
mod notification_ingester; mod notification_ingester;
mod notification_worker; mod notification_worker;
mod routes; mod routes;
mod serve_grpc;
mod serve_http; mod serve_http;
mod session_reaper; mod session_reaper;
mod state; mod state;
@@ -252,6 +254,20 @@ async fn main() -> anyhow::Result<()> {
} }
} }
// Compute scheduler (mock for now — simulates container lifecycle)
let compute_scheduler = Arc::new(forage_core::compute::InMemoryComputeScheduler::new());
state = state.with_compute_scheduler(compute_scheduler.clone());
let grpc_port: u16 = std::env::var("GRPC_PORT")
.ok()
.and_then(|p| p.parse().ok())
.unwrap_or(4050);
let grpc_addr = SocketAddr::from(([0, 0, 0, 0], grpc_port));
mad.add(serve_grpc::ServeGrpc {
addr: grpc_addr,
scheduler: compute_scheduler,
});
// HTTP server component // HTTP server component
mad.add(serve_http::ServeHttp { addr, state }); mad.add(serve_http::ServeHttp { addr, state });

View File

@@ -142,6 +142,12 @@ pub fn router() -> Router<AppState> {
get(timeline_api), get(timeline_api),
) )
.route("/api/orgs/{org}/timeline", get(org_timeline_api)) .route("/api/orgs/{org}/timeline", get(org_timeline_api))
.route("/orgs/{org}/compute", get(compute_page))
.route(
"/orgs/{org}/compute/rollouts/{rollout_id}",
get(rollout_detail_page),
)
.route("/api/compute/regions", get(regions_api))
} }
fn orgs_context(orgs: &[CachedOrg]) -> Vec<minijinja::Value> { fn orgs_context(orgs: &[CachedOrg]) -> Vec<minijinja::Value> {
@@ -4989,3 +4995,202 @@ async fn get_plan_output_api(
})) }))
.into_response()) .into_response())
} }
// ---------------------------------------------------------------------------
// Compute
// ---------------------------------------------------------------------------
/// GET /orgs/{org}/compute — render the compute overview page: current
/// instances plus the 20 most recent rollouts for the org's namespace.
///
/// Requires org membership. If no compute scheduler is configured the page
/// renders with empty lists rather than erroring.
async fn compute_page(
State(state): State<AppState>,
session: Session,
Path(org): Path<String>,
) -> Result<Response, Response> {
let orgs = &session.user.orgs;
let _cached_org = require_org_membership(&state, orgs, &org)?;
// The org name doubles as the scheduler namespace. Listing errors are
// swallowed (unwrap_or_default) so a flaky scheduler degrades to an
// empty page instead of a 500.
let (instances, rollouts) = if let Some(ref scheduler) = state.compute_scheduler {
let namespace = &org;
let instances = scheduler
.list_instances(namespace)
.await
.unwrap_or_default();
let rollouts = scheduler
.list_rollouts(namespace)
.await
.unwrap_or_default();
(instances, rollouts)
} else {
(vec![], vec![])
};
let instances_ctx: Vec<minijinja::Value> = instances
.iter()
.map(|i| {
context! {
id => i.id,
resource_name => i.resource_name,
project => i.project,
destination => i.destination,
environment => i.environment,
image => i.image,
region => i.region,
replicas => i.replicas,
cpu => i.cpu,
memory => i.memory,
status => i.status,
}
})
.collect();
// Only the 20 newest rollouts are shown (list_rollouts returns newest
// first in the mock implementation).
let rollouts_ctx: Vec<minijinja::Value> = rollouts
.iter()
.take(20)
.map(|r| {
let resources: Vec<minijinja::Value> = r
.resources
.iter()
.map(|res| {
context! {
name => res.name,
kind => res.kind.to_string(),
status => res.status.to_string(),
message => res.message,
}
})
.collect();
context! {
id => r.id,
apply_id => r.apply_id,
namespace => r.namespace,
status => r.status.to_string(),
resources => resources,
}
})
.collect();
// Project list is best-effort: warn_default logs and falls back.
let projects = warn_default(
"compute: list projects",
state
.platform_client
.list_projects(&session.access_token, &org)
.await,
);
let html = state
.templates
.render(
"pages/compute.html.jinja",
context! {
title => format!("Compute - {} - Forage", org),
description => "Managed compute instances",
user => context! { username => session.user.username },
csrf_token => &session.csrf_token,
orgs => orgs_context(orgs),
current_org => &org,
active_tab => "compute",
projects => projects,
instances => instances_ctx,
rollouts => rollouts_ctx,
org_name => &org,
},
)
.map_err(|e| internal_error(&state, "compute render", &e))?;
Ok(Html(html).into_response())
}
/// GET /orgs/{org}/compute/rollouts/{rollout_id} — render the detail page
/// for one rollout: per-resource statuses and the apply-time labels.
///
/// Requires org membership; returns a 404 page when compute is disabled or
/// the rollout ID is unknown.
// NOTE(review): the rollout is looked up by ID only — it is not checked to
// belong to `org`'s namespace, so a member of one org could view another
// org's rollout by guessing its UUID. Confirm whether namespace should be
// validated here.
async fn rollout_detail_page(
State(state): State<AppState>,
session: Session,
Path((org, rollout_id)): Path<(String, String)>,
) -> Result<Response, Response> {
let orgs = &session.user.orgs;
let _cached_org = require_org_membership(&state, orgs, &org)?;
let scheduler = state.compute_scheduler.as_ref().ok_or_else(|| {
error_page(
&state,
StatusCode::NOT_FOUND,
"Not available",
"Compute is not enabled.",
)
})?;
// Any scheduler error (including genuine NotFound) renders as a 404 page.
let rollout = scheduler.get_rollout(&rollout_id).await.map_err(|_| {
error_page(
&state,
StatusCode::NOT_FOUND,
"Not found",
"Rollout not found.",
)
})?;
let resources_ctx: Vec<minijinja::Value> = rollout
.resources
.iter()
.map(|r| {
context! {
name => r.name,
kind => r.kind.to_string(),
status => r.status.to_string(),
message => r.message,
}
})
.collect();
let labels_ctx: Vec<minijinja::Value> = rollout.labels.iter().map(|(k, v)| context! { key => k, value => v }).collect();
let rollout_ctx = context! {
id => rollout.id,
apply_id => rollout.apply_id,
namespace => rollout.namespace,
status => rollout.status.to_string(),
resources => resources_ctx,
labels => labels_ctx,
};
// Project list is best-effort: warn_default logs and falls back.
let projects = warn_default(
"rollout detail: list projects",
state
.platform_client
.list_projects(&session.access_token, &org)
.await,
);
let html = state
.templates
.render(
"pages/rollout_detail.html.jinja",
context! {
title => format!("Rollout {} - Forage", rollout.apply_id),
description => "Rollout details",
user => context! { username => session.user.username },
csrf_token => &session.csrf_token,
orgs => orgs_context(orgs),
current_org => &org,
active_tab => "compute",
projects => projects,
rollout => rollout_ctx,
org_name => &org,
},
)
.map_err(|e| internal_error(&state, "rollout detail render", &e))?;
Ok(Html(html).into_response())
}
/// GET /api/compute/regions — return the static region catalog as JSON.
///
/// `Region` derives `serde::Serialize` with exactly the fields the previous
/// hand-built `serde_json::json!` objects exposed (id, name, display_name,
/// available), so serializing the slice directly yields identical JSON
/// without the per-request Vec<Value> construction.
async fn regions_api() -> impl IntoResponse {
    Json(forage_core::compute::REGIONS)
}

View File

@@ -0,0 +1,39 @@
use std::net::SocketAddr;
use std::sync::Arc;
use anyhow::Context;
use forage_core::compute::ComputeScheduler;
use forage_grpc::forage_service_server::ForageServiceServer;
use notmad::{Component, ComponentInfo, MadError};
use tokio_util::sync::CancellationToken;
use crate::compute_grpc::ForageServiceImpl;
/// `notmad` component that serves the Forage gRPC API.
pub struct ServeGrpc {
// Socket address the tonic server binds to.
pub addr: SocketAddr,
// Scheduler backing the ForageService implementation.
pub scheduler: Arc<dyn ComputeScheduler>,
}
impl Component for ServeGrpc {
fn info(&self) -> ComponentInfo {
"forage/grpc".into()
}
/// Run the tonic server until `cancellation_token` fires, then shut down
/// gracefully via `serve_with_shutdown`.
async fn run(&self, cancellation_token: CancellationToken) -> Result<(), MadError> {
let svc = ForageServiceImpl {
scheduler: self.scheduler.clone(),
};
tracing::info!("gRPC server listening on {}", self.addr);
tonic::transport::Server::builder()
.add_service(ForageServiceServer::new(svc))
.serve_with_shutdown(self.addr, async move {
cancellation_token.cancelled().await;
})
.await
.context("failed to run gRPC server")?;
Ok(())
}
}

View File

@@ -3,6 +3,7 @@ use std::sync::Arc;
use crate::forest_client::GrpcForestClient; use crate::forest_client::GrpcForestClient;
use crate::templates::TemplateEngine; use crate::templates::TemplateEngine;
use forage_core::auth::ForestAuth; use forage_core::auth::ForestAuth;
use forage_core::compute::ComputeScheduler;
use forage_core::integrations::IntegrationStore; use forage_core::integrations::IntegrationStore;
use forage_core::platform::ForestPlatform; use forage_core::platform::ForestPlatform;
use forage_core::session::SessionStore; use forage_core::session::SessionStore;
@@ -24,6 +25,7 @@ pub struct AppState {
pub grpc_client: Option<Arc<GrpcForestClient>>, pub grpc_client: Option<Arc<GrpcForestClient>>,
pub integration_store: Option<Arc<dyn IntegrationStore>>, pub integration_store: Option<Arc<dyn IntegrationStore>>,
pub slack_config: Option<SlackConfig>, pub slack_config: Option<SlackConfig>,
pub compute_scheduler: Option<Arc<dyn ComputeScheduler>>,
} }
impl AppState { impl AppState {
@@ -41,6 +43,7 @@ impl AppState {
grpc_client: None, grpc_client: None,
integration_store: None, integration_store: None,
slack_config: None, slack_config: None,
compute_scheduler: None,
} }
} }
@@ -58,4 +61,9 @@ impl AppState {
self.slack_config = Some(config); self.slack_config = Some(config);
self self
} }
pub fn with_compute_scheduler(mut self, scheduler: Arc<dyn ComputeScheduler>) -> Self {
self.compute_scheduler = Some(scheduler);
self
}
} }

View File

@@ -28,6 +28,8 @@
/* Remap Tailwind's color variables so all existing utilities adapt automatically. */ /* Remap Tailwind's color variables so all existing utilities adapt automatically. */
@media (prefers-color-scheme: dark) { @media (prefers-color-scheme: dark) {
:root, :host { :root, :host {
color-scheme: dark;
/* Neutrals — invert the gray scale */ /* Neutrals — invert the gray scale */
--color-white: oklch(14.5% 0.015 260); --color-white: oklch(14.5% 0.015 260);
--color-black: oklch(98% 0.002 248); --color-black: oklch(98% 0.002 248);
@@ -91,4 +93,17 @@
/* Amber */ /* Amber */
--color-amber-400: oklch(80% 0.17 84); --color-amber-400: oklch(80% 0.17 84);
} }
/* Form elements — inherit the dark palette */
input, textarea, select {
background-color: var(--color-gray-50);
color: var(--color-gray-900);
border-color: var(--color-gray-200);
}
input::placeholder, textarea::placeholder {
color: var(--color-gray-400);
}
input:focus, textarea:focus, select:focus {
border-color: var(--color-green-300);
}
} }

File diff suppressed because one or more lines are too long

View File

@@ -99,6 +99,7 @@
<a href="/orgs/{{ current_org }}/projects" class="whitespace-nowrap px-3 py-2 text-sm text-gray-500 hover:text-gray-900 border-b-2 border-transparent hover:border-gray-300{% if active_tab is defined and active_tab == 'projects' %} text-gray-900 border-gray-900{% endif %}">Projects</a> <a href="/orgs/{{ current_org }}/projects" class="whitespace-nowrap px-3 py-2 text-sm text-gray-500 hover:text-gray-900 border-b-2 border-transparent hover:border-gray-300{% if active_tab is defined and active_tab == 'projects' %} text-gray-900 border-gray-900{% endif %}">Projects</a>
<a href="/orgs/{{ current_org }}/settings/members" class="whitespace-nowrap px-3 py-2 text-sm text-gray-500 hover:text-gray-900 border-b-2 border-transparent hover:border-gray-300{% if active_tab is defined and active_tab == 'members' %} text-gray-900 border-gray-900{% endif %}">Members</a> <a href="/orgs/{{ current_org }}/settings/members" class="whitespace-nowrap px-3 py-2 text-sm text-gray-500 hover:text-gray-900 border-b-2 border-transparent hover:border-gray-300{% if active_tab is defined and active_tab == 'members' %} text-gray-900 border-gray-900{% endif %}">Members</a>
<a href="/orgs/{{ current_org }}/destinations" class="whitespace-nowrap px-3 py-2 text-sm text-gray-500 hover:text-gray-900 border-b-2 border-transparent hover:border-gray-300{% if active_tab is defined and active_tab == 'destinations' %} text-gray-900 border-gray-900{% endif %}">Destinations</a> <a href="/orgs/{{ current_org }}/destinations" class="whitespace-nowrap px-3 py-2 text-sm text-gray-500 hover:text-gray-900 border-b-2 border-transparent hover:border-gray-300{% if active_tab is defined and active_tab == 'destinations' %} text-gray-900 border-gray-900{% endif %}">Destinations</a>
<a href="/orgs/{{ current_org }}/compute" class="whitespace-nowrap px-3 py-2 text-sm text-gray-500 hover:text-gray-900 border-b-2 border-transparent hover:border-gray-300{% if active_tab is defined and active_tab == 'compute' %} text-gray-900 border-gray-900{% endif %}">Compute</a>
<a href="/orgs/{{ current_org }}/settings/integrations" class="whitespace-nowrap px-3 py-2 text-sm text-gray-500 hover:text-gray-900 border-b-2 border-transparent hover:border-gray-300{% if active_tab is defined and active_tab == 'integrations' %} text-gray-900 border-gray-900{% endif %}">Integrations</a> <a href="/orgs/{{ current_org }}/settings/integrations" class="whitespace-nowrap px-3 py-2 text-sm text-gray-500 hover:text-gray-900 border-b-2 border-transparent hover:border-gray-300{% if active_tab is defined and active_tab == 'integrations' %} text-gray-900 border-gray-900{% endif %}">Integrations</a>
<a href="/orgs/{{ current_org }}/usage" class="whitespace-nowrap px-3 py-2 text-sm text-gray-500 hover:text-gray-900 border-b-2 border-transparent hover:border-gray-300{% if active_tab is defined and active_tab == 'usage' %} text-gray-900 border-gray-900{% endif %}">Usage</a> <a href="/orgs/{{ current_org }}/usage" class="whitespace-nowrap px-3 py-2 text-sm text-gray-500 hover:text-gray-900 border-b-2 border-transparent hover:border-gray-300{% if active_tab is defined and active_tab == 'usage' %} text-gray-900 border-gray-900{% endif %}">Usage</a>
<a href="/settings/tokens" class="whitespace-nowrap px-3 py-2 text-sm text-gray-500 hover:text-gray-900 border-b-2 border-transparent hover:border-gray-300{% if active_tab is defined and active_tab == 'tokens' %} text-gray-900 border-gray-900{% endif %}">Tokens</a> <a href="/settings/tokens" class="whitespace-nowrap px-3 py-2 text-sm text-gray-500 hover:text-gray-900 border-b-2 border-transparent hover:border-gray-300{% if active_tab is defined and active_tab == 'tokens' %} text-gray-900 border-gray-900{% endif %}">Tokens</a>

View File

@@ -0,0 +1,103 @@
{# Compute overview page for an organization.
   Expected context variables (as used below):
   - org_name:  org slug used to build links.
   - instances: list of objects with fields project, destination, environment,
                resource_name, image, region, replicas, cpu, memory, status.
   - rollouts:  list of objects with fields id, apply_id, status, resources.
   Renders a table of running compute instances (or an empty state) followed by
   a list of recent rollouts linking to their detail pages. #}
{% extends "base.html.jinja" %}
{% block content %}
<section class="max-w-4xl mx-auto px-4 py-12">
<div class="flex items-center justify-between mb-8">
<h1 class="text-2xl font-bold">Compute</h1>
</div>
<p class="text-sm text-gray-500 mb-6">Managed container instances deployed through forage/containers destinations. Pay-as-you-go compute — no cluster setup required.</p>
{# Instance table — rendered only when at least one instance exists. #}
{% if instances | length > 0 %}
<div class="border border-gray-200 rounded-lg overflow-hidden">
<table class="w-full text-sm">
<thead class="bg-gray-50 text-left text-gray-500">
<tr>
<th class="px-5 py-3 font-medium">Project / Destination</th>
<th class="px-5 py-3 font-medium">Image</th>
<th class="px-5 py-3 font-medium">Region</th>
<th class="px-5 py-3 font-medium">Replicas</th>
<th class="px-5 py-3 font-medium">Resources</th>
<th class="px-5 py-3 font-medium">Status</th>
</tr>
</thead>
<tbody class="divide-y divide-gray-100">
{% for inst in instances %}
<tr class="hover:bg-gray-50">
<td class="px-5 py-3">
{# Instances tied to a project link to the project page; otherwise the raw
   resource name is shown. The environment badge is optional. #}
{% if inst.project %}
<a href="/orgs/{{ org_name }}/projects/{{ inst.project }}" class="font-medium text-gray-900 hover:text-blue-600">{{ inst.project }}</a>
<span class="text-gray-400">/</span>
<span class="text-sm font-mono text-gray-600">{{ inst.destination }}</span>
{% if inst.environment %}
<span class="ml-1 text-xs px-1.5 py-0.5 rounded-full bg-gray-100 text-gray-500">{{ inst.environment }}</span>
{% endif %}
{% else %}
<span class="font-medium font-mono text-gray-900">{{ inst.resource_name }}</span>
{% endif %}
</td>
<td class="px-5 py-3 text-gray-600 font-mono text-xs">{{ inst.image }}</td>
<td class="px-5 py-3">
<span class="inline-flex items-center gap-1 text-xs font-mono">
<span class="w-1.5 h-1.5 rounded-full bg-green-400"></span>
{{ inst.region }}
</span>
</td>
<td class="px-5 py-3 text-gray-600 font-mono">{{ inst.replicas }}</td>
<td class="px-5 py-3 text-xs text-gray-500 font-mono">{{ inst.cpu }} / {{ inst.memory }}</td>
<td class="px-5 py-3">
{# Status badge: "running" and "pending" get colored pills; any other
   status string falls through and is printed verbatim. #}
{% if inst.status == "running" %}
<span class="inline-flex items-center gap-1 text-xs font-medium text-green-700 bg-green-50 px-2 py-0.5 rounded-full">
<span class="w-1.5 h-1.5 rounded-full bg-green-500"></span>
Running
</span>
{% elif inst.status == "pending" %}
<span class="inline-flex items-center gap-1 text-xs font-medium text-yellow-700 bg-yellow-50 px-2 py-0.5 rounded-full">
<span class="w-1.5 h-1.5 rounded-full bg-yellow-500 animate-pulse"></span>
Pending
</span>
{% else %}
<span class="text-xs text-gray-500">{{ inst.status }}</span>
{% endif %}
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% else %}
{# Empty state with a link to create a forage/containers destination. #}
<div class="border border-dashed border-gray-300 rounded-lg p-8 text-center">
<svg class="mx-auto mb-3 w-8 h-8 text-gray-300" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="1.5" d="M5 12h14M5 12a2 2 0 01-2-2V6a2 2 0 012-2h14a2 2 0 012 2v4a2 2 0 01-2 2M5 12a2 2 0 00-2 2v4a2 2 0 002 2h14a2 2 0 002-2v-4a2 2 0 00-2-2m-2-4h.01M17 16h.01"/></svg>
<p class="text-sm text-gray-500 mb-2">No compute instances running</p>
<p class="text-xs text-gray-400">Create a <a href="/orgs/{{ org_name }}/destinations" class="text-blue-600 hover:underline">forage/containers destination</a> and deploy to get started.</p>
</div>
{% endif %}
{# Recent rollouts — each row links to the rollout detail page by rollout.id.
   Status dot colors: succeeded=green, in_progress=blue (pulsing), failed=red,
   anything else=gray. #}
{% if rollouts | length > 0 %}
<h2 class="text-lg font-semibold mt-10 mb-4">Recent Rollouts</h2>
<div class="border border-gray-200 rounded-lg overflow-hidden divide-y divide-gray-100">
{% for rollout in rollouts %}
<a href="/orgs/{{ org_name }}/compute/rollouts/{{ rollout.id }}" class="px-5 py-3 flex items-center justify-between hover:bg-gray-50 block">
<div class="flex items-center gap-3 min-w-0">
{% if rollout.status == "succeeded" %}
<span class="w-2 h-2 rounded-full bg-green-500 shrink-0"></span>
{% elif rollout.status == "in_progress" %}
<span class="w-2 h-2 rounded-full bg-blue-500 animate-pulse shrink-0"></span>
{% elif rollout.status == "failed" %}
<span class="w-2 h-2 rounded-full bg-red-500 shrink-0"></span>
{% else %}
<span class="w-2 h-2 rounded-full bg-gray-400 shrink-0"></span>
{% endif %}
<span class="text-sm font-medium font-mono text-gray-900 truncate">{{ rollout.apply_id }}</span>
<span class="text-xs text-gray-400">{{ rollout.resources | length }} resource{% if rollout.resources | length != 1 %}s{% endif %}</span>
</div>
<div class="flex items-center gap-2">
<span class="text-xs text-gray-400">{{ rollout.status }}</span>
<svg class="w-4 h-4 text-gray-300" fill="none" stroke="currentColor" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 5l7 7-7 7"/></svg>
</div>
</a>
{% endfor %}
</div>
{% endif %}
</section>
{% endblock %}

View File

@@ -0,0 +1,89 @@
{# Rollout detail page.
   Expected context variables (as used below):
   - org_name: org slug used for breadcrumb links.
   - rollout:  object with fields apply_id, namespace, status,
               resources (each with name, kind, status, message),
               labels (each with key, value).
   Renders a breadcrumb, an overall status badge, apply metadata, a
   per-resource status table, and an optional labels section. #}
{% extends "base.html.jinja" %}
{% block content %}
<section class="max-w-4xl mx-auto px-4 py-12">
<div class="flex items-center gap-2 text-sm text-gray-500 mb-6">
<a href="/orgs/{{ org_name }}/compute" class="hover:text-gray-700">Compute</a>
<span>/</span>
<span class="text-gray-900 font-mono">{{ rollout.apply_id }}</span>
</div>
<div class="flex items-center justify-between mb-8">
<div class="flex items-center gap-3">
<h1 class="text-2xl font-bold">Rollout</h1>
{# Overall status badge; unknown status strings are printed verbatim in gray. #}
{% if rollout.status == "succeeded" %}
<span class="inline-flex items-center gap-1 text-xs font-medium text-green-700 bg-green-50 px-2 py-0.5 rounded-full">Succeeded</span>
{% elif rollout.status == "in_progress" %}
<span class="inline-flex items-center gap-1 text-xs font-medium text-blue-700 bg-blue-50 px-2 py-0.5 rounded-full">In Progress</span>
{% elif rollout.status == "failed" %}
<span class="inline-flex items-center gap-1 text-xs font-medium text-red-700 bg-red-50 px-2 py-0.5 rounded-full">Failed</span>
{% else %}
<span class="inline-flex items-center gap-1 text-xs font-medium text-gray-600 bg-gray-100 px-2 py-0.5 rounded-full">{{ rollout.status }}</span>
{% endif %}
</div>
</div>
{# Apply metadata grid. #}
<div class="grid grid-cols-2 gap-4 mb-8 text-sm">
<div>
<dt class="text-gray-500">Apply ID</dt>
<dd class="font-mono text-xs mt-0.5">{{ rollout.apply_id }}</dd>
</div>
<div>
<dt class="text-gray-500">Namespace</dt>
<dd class="font-mono text-xs mt-0.5">{{ rollout.namespace }}</dd>
</div>
</div>
{# Per-resource status table: one row per resource in this rollout. #}
<h2 class="text-lg font-semibold mb-4">Resources ({{ rollout.resources | length }})</h2>
<div class="border border-gray-200 rounded-lg overflow-hidden">
<table class="w-full text-sm">
<thead class="bg-gray-50 text-left text-gray-500">
<tr>
<th class="px-5 py-3 font-medium">Name</th>
<th class="px-5 py-3 font-medium">Kind</th>
<th class="px-5 py-3 font-medium">Status</th>
<th class="px-5 py-3 font-medium">Message</th>
</tr>
</thead>
<tbody class="divide-y divide-gray-100">
{% for r in rollout.resources %}
<tr>
<td class="px-5 py-3 font-medium font-mono text-gray-900">{{ r.name }}</td>
<td class="px-5 py-3 text-xs font-mono text-gray-500">{{ r.kind }}</td>
<td class="px-5 py-3">
{# Same status-to-color mapping as the overall badge above. #}
{% if r.status == "succeeded" %}
<span class="inline-flex items-center gap-1 text-xs font-medium text-green-700">
<span class="w-1.5 h-1.5 rounded-full bg-green-500"></span>
Succeeded
</span>
{% elif r.status == "in_progress" %}
<span class="inline-flex items-center gap-1 text-xs font-medium text-blue-700">
<span class="w-1.5 h-1.5 rounded-full bg-blue-500 animate-pulse"></span>
In Progress
</span>
{% elif r.status == "failed" %}
<span class="inline-flex items-center gap-1 text-xs font-medium text-red-700">
<span class="w-1.5 h-1.5 rounded-full bg-red-500"></span>
Failed
</span>
{% else %}
<span class="text-xs text-gray-500">{{ r.status }}</span>
{% endif %}
</td>
<td class="px-5 py-3 text-xs text-gray-500">{{ r.message }}</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{# Labels section — rendered only when the rollout carries labels. #}
{% if rollout.labels | length > 0 %}
<h2 class="text-lg font-semibold mt-8 mb-4">Labels</h2>
<div class="flex flex-wrap gap-2">
{% for label in rollout.labels %}
<span class="text-xs font-mono px-2 py-1 rounded bg-gray-100 text-gray-600">{{ label.key }}={{ label.value }}</span>
{% endfor %}
</div>
{% endif %}
</section>
{% endblock %}