feat: add many things

Signed-off-by: kjuulh <contact@kjuulh.io>
This commit is contained in:
2026-03-08 23:00:03 +01:00
parent 45353089c2
commit 5a5f9a3003
104 changed files with 23417 additions and 2027 deletions

View File

@@ -20,6 +20,14 @@ pub struct User {
pub emails: Vec<UserEmail>,
}
/// Public user profile (no emails).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UserProfile {
    pub user_id: String,
    pub username: String,
    // RFC 3339 timestamp string; None when the backend supplied no creation time.
    pub created_at: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UserEmail {
pub email: String,
@@ -91,6 +99,12 @@ pub trait ForestAuth: Send + Sync {
async fn get_user(&self, access_token: &str) -> Result<User, AuthError>;
async fn get_user_by_username(
&self,
access_token: &str,
username: &str,
) -> Result<UserProfile, AuthError>;
async fn list_tokens(
&self,
access_token: &str,

View File

@@ -69,6 +69,8 @@ pub struct ArtifactDestination {
pub type_name: Option<String>,
#[serde(default)]
pub type_version: Option<u64>,
#[serde(default)]
pub status: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -79,6 +81,16 @@ pub struct OrgMember {
pub joined_at: Option<String>,
}
/// A deployment environment belonging to an organisation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Environment {
    pub id: String,
    pub organisation: String,
    pub name: String,
    // None when the backend sent an empty description string.
    pub description: Option<String>,
    // NOTE(review): presumably controls display/promotion ordering — confirm server-side.
    pub sort_order: i32,
    pub created_at: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Destination {
pub name: String,
@@ -97,6 +109,201 @@ pub struct DestinationType {
pub version: u64,
}
/// Deployment status of one destination, as reported by
/// `get_destination_states`. Timestamps are backend-supplied strings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DestinationState {
    pub destination_id: String,
    pub destination_name: String,
    pub environment: String,
    // Release / artifact currently tied to this destination, if any.
    pub release_id: Option<String>,
    pub artifact_id: Option<String>,
    pub status: Option<String>,
    pub error_message: Option<String>,
    pub queued_at: Option<String>,
    pub completed_at: Option<String>,
    pub queue_position: Option<i32>,
    // `default` keeps deserialization compatible with payloads that predate this field.
    #[serde(default)]
    pub started_at: Option<String>,
}
/// Runtime status of a single pipeline stage.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PipelineRunStageState {
    pub stage_id: String,
    // Ids of the stages this stage depends on.
    pub depends_on: Vec<String>,
    pub stage_type: String, // "deploy" or "wait"
    pub status: String,     // "PENDING", "RUNNING", "SUCCEEDED", "FAILED", "CANCELLED"
    pub environment: Option<String>,
    pub duration_seconds: Option<i64>,
    pub queued_at: Option<String>,
    pub started_at: Option<String>,
    pub completed_at: Option<String>,
    pub error_message: Option<String>,
    // For wait stages: when the wait elapses, if reported.
    pub wait_until: Option<String>,
    // `default` keeps deserialization compatible with older payloads.
    #[serde(default)]
    pub release_ids: Vec<String>,
}
/// Combined response from get_destination_states: destinations only.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct DeploymentStates {
    pub destinations: Vec<DestinationState>,
}
/// Full state of a release intent: pipeline stages + individual release steps.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReleaseIntentState {
    pub release_intent_id: String,
    pub artifact_id: String,
    pub project: String,
    pub created_at: String,
    pub stages: Vec<PipelineRunStageState>,
    pub steps: Vec<ReleaseStepState>,
}
/// Status of an individual release step (deploy work item).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReleaseStepState {
    pub release_id: String,
    // None when the step is not tied to a pipeline stage.
    pub stage_id: Option<String>,
    pub destination_name: String,
    pub environment: String,
    pub status: String,
    pub queued_at: Option<String>,
    pub assigned_at: Option<String>,
    pub started_at: Option<String>,
    pub completed_at: Option<String>,
    pub error_message: Option<String>,
}
// ── Triggers (auto-release triggers) ────────────────────────────────
/// Configuration of an auto-release trigger. All `*_pattern` fields are
/// optional match filters; `None` means the attribute is not filtered on.
/// NOTE(review): pattern syntax (glob vs regex) is server-defined — confirm.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Trigger {
    pub id: String,
    pub name: String,
    pub enabled: bool,
    pub branch_pattern: Option<String>,
    pub title_pattern: Option<String>,
    pub author_pattern: Option<String>,
    pub commit_message_pattern: Option<String>,
    pub source_type_pattern: Option<String>,
    // Where a matching artifact is released to.
    pub target_environments: Vec<String>,
    pub target_destinations: Vec<String>,
    pub force_release: bool,
    pub use_pipeline: bool,
    pub created_at: String,
    pub updated_at: String,
}
/// Input for creating a trigger; the server assigns `id` and timestamps.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateTriggerInput {
    pub name: String,
    pub branch_pattern: Option<String>,
    pub title_pattern: Option<String>,
    pub author_pattern: Option<String>,
    pub commit_message_pattern: Option<String>,
    pub source_type_pattern: Option<String>,
    pub target_environments: Vec<String>,
    pub target_destinations: Vec<String>,
    pub force_release: bool,
    pub use_pipeline: bool,
}
/// Input for updating an existing trigger. `Option` fields are forwarded
/// as-is to the server; the merge semantics of `None` live server-side —
/// NOTE(review): confirm whether empty Vec fields mean "clear" or "keep".
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateTriggerInput {
    pub enabled: Option<bool>,
    pub branch_pattern: Option<String>,
    pub title_pattern: Option<String>,
    pub author_pattern: Option<String>,
    pub commit_message_pattern: Option<String>,
    pub source_type_pattern: Option<String>,
    pub target_environments: Vec<String>,
    pub target_destinations: Vec<String>,
    pub force_release: Option<bool>,
    pub use_pipeline: Option<bool>,
}
// ── Policies (deployment gating) ────────────────────────────────────
/// A deployment-gating policy.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Policy {
    pub id: String,
    pub name: String,
    pub enabled: bool,
    // String tag describing which `PolicyConfig` variant `config` holds.
    pub policy_type: String,
    pub config: PolicyConfig,
    pub created_at: String,
    pub updated_at: String,
}
/// Variant-specific policy configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PolicyConfig {
    /// Gate promotion to `target_environment` on time spent in
    /// `source_environment` — NOTE(review): exact soak semantics are
    /// server-defined; confirm.
    SoakTime {
        source_environment: String,
        target_environment: String,
        duration_seconds: i64,
    },
    /// Restrict releases to `target_environment` to branches matching
    /// `branch_pattern`.
    BranchRestriction {
        target_environment: String,
        branch_pattern: String,
    },
}
/// Input for creating a policy; the policy type is implied by `config`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreatePolicyInput {
    pub name: String,
    pub config: PolicyConfig,
}
/// Partial update for an existing policy; `None` fields are forwarded as-is.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdatePolicyInput {
    pub enabled: Option<bool>,
    pub config: Option<PolicyConfig>,
}
/// Result of evaluating one policy against a proposed release.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PolicyEvaluation {
    pub policy_name: String,
    pub policy_type: String,
    pub passed: bool,
    // Human-readable explanation of the pass/fail outcome.
    pub reason: String,
}
/// One stage of a release pipeline definition.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PipelineStage {
    pub id: String,
    // Ids of the stages this stage depends on.
    pub depends_on: Vec<String>,
    pub config: PipelineStageConfig,
}
/// What a pipeline stage does: deploy to an environment, or wait.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PipelineStageConfig {
    Deploy { environment: String },
    Wait { duration_seconds: i64 },
}
/// A release pipeline: a set of stages related by `depends_on` edges.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReleasePipeline {
    pub id: String,
    pub name: String,
    pub enabled: bool,
    pub stages: Vec<PipelineStage>,
    pub created_at: String,
    pub updated_at: String,
}
/// Input for creating a release pipeline; the server assigns `id` and timestamps.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateReleasePipelineInput {
    pub name: String,
    pub stages: Vec<PipelineStage>,
}
/// Partial update for a release pipeline; `None` fields are forwarded as-is —
/// NOTE(review): merge semantics are server-side; confirm.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpdateReleasePipelineInput {
    pub enabled: Option<bool>,
    pub stages: Option<Vec<PipelineStage>>,
}
#[derive(Debug, Clone, thiserror::Error)]
pub enum PlatformError {
#[error("not authenticated")]
@@ -175,11 +382,170 @@ pub trait ForestPlatform: Send + Sync {
slug: &str,
) -> Result<Artifact, PlatformError>;
async fn list_environments(
&self,
access_token: &str,
organisation: &str,
) -> Result<Vec<Environment>, PlatformError>;
async fn list_destinations(
&self,
access_token: &str,
organisation: &str,
) -> Result<Vec<Destination>, PlatformError>;
async fn create_environment(
&self,
access_token: &str,
organisation: &str,
name: &str,
description: Option<&str>,
sort_order: i32,
) -> Result<Environment, PlatformError>;
async fn create_destination(
&self,
access_token: &str,
organisation: &str,
name: &str,
environment: &str,
metadata: &std::collections::HashMap<String, String>,
dest_type: Option<&DestinationType>,
) -> Result<(), PlatformError>;
async fn update_destination(
&self,
access_token: &str,
name: &str,
metadata: &std::collections::HashMap<String, String>,
) -> Result<(), PlatformError>;
async fn get_destination_states(
&self,
access_token: &str,
organisation: &str,
project: Option<&str>,
) -> Result<DeploymentStates, PlatformError>;
async fn get_release_intent_states(
&self,
access_token: &str,
organisation: &str,
project: Option<&str>,
include_completed: bool,
) -> Result<Vec<ReleaseIntentState>, PlatformError>;
async fn release_artifact(
&self,
access_token: &str,
artifact_id: &str,
destinations: &[String],
environments: &[String],
use_pipeline: bool,
) -> Result<(), PlatformError>;
async fn list_triggers(
&self,
access_token: &str,
organisation: &str,
project: &str,
) -> Result<Vec<Trigger>, PlatformError>;
async fn create_trigger(
&self,
access_token: &str,
organisation: &str,
project: &str,
input: &CreateTriggerInput,
) -> Result<Trigger, PlatformError>;
async fn update_trigger(
&self,
access_token: &str,
organisation: &str,
project: &str,
name: &str,
input: &UpdateTriggerInput,
) -> Result<Trigger, PlatformError>;
async fn delete_trigger(
&self,
access_token: &str,
organisation: &str,
project: &str,
name: &str,
) -> Result<(), PlatformError>;
async fn list_policies(
&self,
access_token: &str,
organisation: &str,
project: &str,
) -> Result<Vec<Policy>, PlatformError>;
async fn create_policy(
&self,
access_token: &str,
organisation: &str,
project: &str,
input: &CreatePolicyInput,
) -> Result<Policy, PlatformError>;
async fn update_policy(
&self,
access_token: &str,
organisation: &str,
project: &str,
name: &str,
input: &UpdatePolicyInput,
) -> Result<Policy, PlatformError>;
async fn delete_policy(
&self,
access_token: &str,
organisation: &str,
project: &str,
name: &str,
) -> Result<(), PlatformError>;
async fn list_release_pipelines(
&self,
access_token: &str,
organisation: &str,
project: &str,
) -> Result<Vec<ReleasePipeline>, PlatformError>;
async fn create_release_pipeline(
&self,
access_token: &str,
organisation: &str,
project: &str,
input: &CreateReleasePipelineInput,
) -> Result<ReleasePipeline, PlatformError>;
async fn update_release_pipeline(
&self,
access_token: &str,
organisation: &str,
project: &str,
name: &str,
input: &UpdateReleasePipelineInput,
) -> Result<ReleasePipeline, PlatformError>;
async fn delete_release_pipeline(
&self,
access_token: &str,
organisation: &str,
project: &str,
name: &str,
) -> Result<(), PlatformError>;
/// Get the spec (forest.cue) content for an artifact. Returns empty string if no spec was uploaded.
async fn get_artifact_spec(
&self,
access_token: &str,
artifact_id: &str,
) -> Result<String, PlatformError>;
}
#[cfg(test)]

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -24,3 +24,10 @@ tracing.workspace = true
tracing-subscriber.workspace = true
time.workspace = true
uuid.workspace = true
urlencoding = "2.1.3"
opentelemetry.workspace = true
opentelemetry_sdk.workspace = true
opentelemetry-otlp.workspace = true
tracing-opentelemetry.workspace = true
futures-util = "0.3"
tokio-stream = "0.1"

View File

@@ -1,10 +1,19 @@
use forage_core::auth::{
AuthError, AuthTokens, CreatedToken, ForestAuth, PersonalAccessToken, User, UserEmail,
UserProfile,
};
use forage_core::platform::{
Artifact, ArtifactContext, ArtifactDestination, ArtifactSource, Destination, ForestPlatform,
Organisation, OrgMember, PlatformError,
Artifact, ArtifactContext, ArtifactDestination, ArtifactRef, ArtifactSource, CreatePolicyInput,
CreateReleasePipelineInput, CreateTriggerInput, Destination, DestinationType, Environment,
ForestPlatform, Organisation, OrgMember, PipelineStage, PipelineStageConfig, PlatformError,
Policy, PolicyConfig, ReleasePipeline, Trigger, UpdatePolicyInput,
UpdateReleasePipelineInput, UpdateTriggerInput,
};
use forage_grpc::policy_service_client::PolicyServiceClient;
use forage_grpc::release_pipeline_service_client::ReleasePipelineServiceClient;
use forage_grpc::trigger_service_client::TriggerServiceClient;
use forage_grpc::destination_service_client::DestinationServiceClient;
use forage_grpc::environment_service_client::EnvironmentServiceClient;
use forage_grpc::organisation_service_client::OrganisationServiceClient;
use forage_grpc::release_service_client::ReleaseServiceClient;
use forage_grpc::users_service_client::UsersServiceClient;
@@ -42,10 +51,42 @@ impl GrpcForestClient {
OrganisationServiceClient::new(self.channel.clone())
}
fn release_client(&self) -> ReleaseServiceClient<Channel> {
pub(crate) fn artifact_client(
&self,
) -> forage_grpc::artifact_service_client::ArtifactServiceClient<Channel> {
forage_grpc::artifact_service_client::ArtifactServiceClient::new(self.channel.clone())
}
pub(crate) fn release_client(&self) -> ReleaseServiceClient<Channel> {
ReleaseServiceClient::new(self.channel.clone())
}
// Per-service client factories. Constructing a client per call is cheap:
// tonic `Channel`s are designed to be cloned (clones share the underlying
// connection).
fn env_client(&self) -> EnvironmentServiceClient<Channel> {
    EnvironmentServiceClient::new(self.channel.clone())
}
fn dest_client(&self) -> DestinationServiceClient<Channel> {
    DestinationServiceClient::new(self.channel.clone())
}
fn trigger_client(&self) -> TriggerServiceClient<Channel> {
    TriggerServiceClient::new(self.channel.clone())
}
fn policy_client(&self) -> PolicyServiceClient<Channel> {
    PolicyServiceClient::new(self.channel.clone())
}
fn pipeline_client(&self) -> ReleasePipelineServiceClient<Channel> {
    ReleasePipelineServiceClient::new(self.channel.clone())
}
// Public so callers outside this module can talk to the event stream directly.
pub fn event_client(
    &self,
) -> forage_grpc::event_service_client::EventServiceClient<Channel> {
    forage_grpc::event_service_client::EventServiceClient::new(self.channel.clone())
}
fn authed_request<T>(access_token: &str, msg: T) -> Result<Request<T>, AuthError> {
bearer_request(access_token, msg).map_err(AuthError::Other)
}
@@ -202,6 +243,41 @@ impl ForestAuth for GrpcForestClient {
Ok(convert_user(user))
}
/// Look up a user's public profile by username.
///
/// Sends a `GetUserRequest` using the `Username` identifier variant and maps
/// the proto timestamp into an RFC 3339 string.
///
/// # Errors
/// Returns `AuthError` when the request cannot be built, the RPC fails, or
/// the response carries no user.
async fn get_user_by_username(
    &self,
    access_token: &str,
    username: &str,
) -> Result<UserProfile, AuthError> {
    let req = Self::authed_request(
        access_token,
        forage_grpc::GetUserRequest {
            identifier: Some(forage_grpc::get_user_request::Identifier::Username(
                username.into(),
            )),
        },
    )?;
    let resp = self
        .client()
        .get_user(req)
        .await
        .map_err(map_status)?
        .into_inner();
    let user = resp
        .user
        .ok_or(AuthError::Other("no user in response".into()))?;
    Ok(UserProfile {
        user_id: user.user_id,
        username: user.username,
        // An out-of-range proto timestamp yields None rather than Some("") —
        // consistent with the empty-string-to-None convention used by the
        // other conversions in this file.
        created_at: user.created_at.and_then(|ts| {
            chrono::DateTime::from_timestamp(ts.seconds, ts.nanos as u32)
                .map(|dt| dt.to_rfc3339())
        }),
    })
}
async fn list_tokens(
&self,
access_token: &str,
@@ -396,8 +472,13 @@ fn convert_artifact(a: forage_grpc::Artifact) -> Artifact {
source_type: s.source_type.filter(|v| !v.is_empty()),
run_url: s.run_url.filter(|v| !v.is_empty()),
});
// Artifact proto does not carry git ref directly; git info comes from AnnotateRelease.
// We leave git_ref as None for now.
let git_ref = a.r#ref.map(|r| ArtifactRef {
commit_sha: r.commit_sha,
branch: r.branch.filter(|v| !v.is_empty()),
commit_message: r.commit_message.filter(|v| !v.is_empty()),
version: r.version.filter(|v| !v.is_empty()),
repo_url: r.repo_url.filter(|v| !v.is_empty()),
});
let destinations = a
.destinations
.into_iter()
@@ -419,6 +500,11 @@ fn convert_artifact(a: forage_grpc::Artifact) -> Artifact {
} else {
Some(d.type_version)
},
status: if d.status.is_empty() {
None
} else {
Some(d.status)
},
})
.collect();
Artifact {
@@ -435,12 +521,202 @@ fn convert_artifact(a: forage_grpc::Artifact) -> Artifact {
pr: ctx.pr.filter(|v| !v.is_empty()),
},
source,
git_ref: None,
git_ref,
destinations,
created_at: a.created_at,
}
}
/// Map a gRPC pipeline stage onto the domain `PipelineStage`.
///
/// A stage whose `config` oneof is unset falls back to a `Deploy` stage with
/// an empty environment rather than failing the conversion.
fn convert_pipeline_stage(stage: forage_grpc::PipelineStage) -> PipelineStage {
    use forage_grpc::pipeline_stage::Config as ProtoConfig;

    let forage_grpc::PipelineStage { id, depends_on, config } = stage;
    let config = match config {
        Some(ProtoConfig::Deploy(deploy)) => PipelineStageConfig::Deploy {
            environment: deploy.environment,
        },
        Some(ProtoConfig::Wait(wait)) => PipelineStageConfig::Wait {
            duration_seconds: wait.duration_seconds,
        },
        // Unset oneof: degrade to an empty deploy stage.
        None => PipelineStageConfig::Deploy {
            environment: String::new(),
        },
    };
    PipelineStage { id, depends_on, config }
}
/// Convert a `PipelineStageState` proto message (from GetReleaseIntentStates)
/// to the domain type. Same enum mapping as `convert_pipeline_run_stage`.
fn convert_pipeline_stage_state(
    s: forage_grpc::PipelineStageState,
) -> forage_core::platform::PipelineRunStageState {
    // Unrecognised proto enum values degrade to "unknown" rather than
    // failing the whole conversion.
    let stage_type = match forage_grpc::PipelineRunStageType::try_from(s.stage_type) {
        Ok(forage_grpc::PipelineRunStageType::Deploy) => "deploy",
        Ok(forage_grpc::PipelineRunStageType::Wait) => "wait",
        _ => "unknown",
    };
    // Note the Active -> "RUNNING" rename; unrecognised values default to
    // "PENDING".
    let status = match forage_grpc::PipelineRunStageStatus::try_from(s.status) {
        Ok(forage_grpc::PipelineRunStageStatus::Pending) => "PENDING",
        Ok(forage_grpc::PipelineRunStageStatus::Active) => "RUNNING",
        Ok(forage_grpc::PipelineRunStageStatus::Succeeded) => "SUCCEEDED",
        Ok(forage_grpc::PipelineRunStageStatus::Failed) => "FAILED",
        Ok(forage_grpc::PipelineRunStageStatus::Cancelled) => "CANCELLED",
        _ => "PENDING",
    };
    // All remaining fields are copied verbatim.
    forage_core::platform::PipelineRunStageState {
        stage_id: s.stage_id,
        depends_on: s.depends_on,
        stage_type: stage_type.into(),
        status: status.into(),
        environment: s.environment,
        duration_seconds: s.duration_seconds,
        queued_at: s.queued_at,
        started_at: s.started_at,
        completed_at: s.completed_at,
        error_message: s.error_message,
        wait_until: s.wait_until,
        release_ids: s.release_ids,
    }
}
/// Field-for-field conversion of a release step from its proto form to the
/// domain type; every field is copied verbatim.
fn convert_release_step_state(
    s: forage_grpc::ReleaseStepState,
) -> forage_core::platform::ReleaseStepState {
    forage_core::platform::ReleaseStepState {
        release_id: s.release_id,
        stage_id: s.stage_id,
        destination_name: s.destination_name,
        environment: s.environment,
        status: s.status,
        queued_at: s.queued_at,
        assigned_at: s.assigned_at,
        started_at: s.started_at,
        completed_at: s.completed_at,
        error_message: s.error_message,
    }
}
/// Translate domain pipeline stages into their gRPC wire representation,
/// cloning the string/vec payloads (the caller keeps ownership of `stages`).
fn convert_stages_to_grpc(stages: &[PipelineStage]) -> Vec<forage_grpc::PipelineStage> {
    let mut converted = Vec::with_capacity(stages.len());
    for stage in stages {
        // Map the domain stage config onto the proto oneof.
        let config = match &stage.config {
            PipelineStageConfig::Deploy { environment } => {
                forage_grpc::pipeline_stage::Config::Deploy(forage_grpc::DeployStageConfig {
                    environment: environment.clone(),
                })
            }
            PipelineStageConfig::Wait { duration_seconds } => {
                forage_grpc::pipeline_stage::Config::Wait(forage_grpc::WaitStageConfig {
                    duration_seconds: *duration_seconds,
                })
            }
        };
        converted.push(forage_grpc::PipelineStage {
            id: stage.id.clone(),
            depends_on: stage.depends_on.clone(),
            config: Some(config),
        });
    }
    converted
}
/// Convert a gRPC release pipeline into the domain type, converting each
/// stage via `convert_pipeline_stage`.
fn convert_release_pipeline(p: forage_grpc::ReleasePipeline) -> ReleasePipeline {
    ReleasePipeline {
        id: p.id,
        name: p.name,
        enabled: p.enabled,
        stages: p.stages.into_iter().map(convert_pipeline_stage).collect(),
        created_at: p.created_at,
        updated_at: p.updated_at,
    }
}
/// Field-for-field conversion of a gRPC trigger into the domain `Trigger`;
/// all fields are copied verbatim.
fn convert_trigger(t: forage_grpc::Trigger) -> Trigger {
    Trigger {
        id: t.id,
        name: t.name,
        enabled: t.enabled,
        branch_pattern: t.branch_pattern,
        title_pattern: t.title_pattern,
        author_pattern: t.author_pattern,
        commit_message_pattern: t.commit_message_pattern,
        source_type_pattern: t.source_type_pattern,
        target_environments: t.target_environments,
        target_destinations: t.target_destinations,
        force_release: t.force_release,
        use_pipeline: t.use_pipeline,
        created_at: t.created_at,
        updated_at: t.updated_at,
    }
}
/// Convert a gRPC policy into the domain `Policy`.
///
/// Unrecognised `policy_type` enum values map to `"unknown"`, and a missing
/// `config` oneof degrades to a zeroed `SoakTime` config rather than failing
/// the conversion.
fn convert_policy(p: forage_grpc::Policy) -> Policy {
    let policy_type_str = match forage_grpc::PolicyType::try_from(p.policy_type) {
        Ok(forage_grpc::PolicyType::SoakTime) => "soak_time",
        Ok(forage_grpc::PolicyType::BranchRestriction) => "branch_restriction",
        _ => "unknown",
    };
    let config = match p.config {
        Some(forage_grpc::policy::Config::SoakTime(c)) => PolicyConfig::SoakTime {
            source_environment: c.source_environment,
            target_environment: c.target_environment,
            duration_seconds: c.duration_seconds,
        },
        Some(forage_grpc::policy::Config::BranchRestriction(c)) => {
            PolicyConfig::BranchRestriction {
                target_environment: c.target_environment,
                branch_pattern: c.branch_pattern,
            }
        }
        // Missing oneof: fall back to an empty soak-time config.
        None => PolicyConfig::SoakTime {
            source_environment: String::new(),
            target_environment: String::new(),
            duration_seconds: 0,
        },
    };
    Policy {
        id: p.id,
        name: p.name,
        enabled: p.enabled,
        policy_type: policy_type_str.into(),
        config,
        created_at: p.created_at,
        updated_at: p.updated_at,
    }
}
/// Split a domain `PolicyConfig` into the `(policy_type, config)` pair
/// expected by `CreatePolicyRequest`. Always yields `Some(config)`.
fn policy_config_to_grpc(
    config: &PolicyConfig,
) -> (i32, Option<forage_grpc::create_policy_request::Config>) {
    use forage_grpc::create_policy_request::Config as WireConfig;

    match config {
        PolicyConfig::SoakTime {
            source_environment,
            target_environment,
            duration_seconds,
        } => {
            let wire = forage_grpc::SoakTimeConfig {
                source_environment: source_environment.clone(),
                target_environment: target_environment.clone(),
                duration_seconds: *duration_seconds,
            };
            (
                forage_grpc::PolicyType::SoakTime as i32,
                Some(WireConfig::SoakTime(wire)),
            )
        }
        PolicyConfig::BranchRestriction {
            target_environment,
            branch_pattern,
        } => {
            let wire = forage_grpc::BranchRestrictionConfig {
                target_environment: target_environment.clone(),
                branch_pattern: branch_pattern.clone(),
            };
            (
                forage_grpc::PolicyType::BranchRestriction as i32,
                Some(WireConfig::BranchRestriction(wire)),
            )
        }
    }
}
fn convert_member(m: forage_grpc::OrganisationMember) -> OrgMember {
OrgMember {
user_id: m.user_id,
@@ -688,13 +964,661 @@ impl ForestPlatform for GrpcForestClient {
Ok(convert_artifact(artifact))
}
/// List all environments of `organisation`.
///
/// Empty `description` strings from the wire are normalised to `None`,
/// matching the convention used by the other conversions in this file.
async fn list_environments(
    &self,
    access_token: &str,
    organisation: &str,
) -> Result<Vec<Environment>, PlatformError> {
    let req = platform_authed_request(
        access_token,
        forage_grpc::ListEnvironmentsRequest {
            organisation: organisation.into(),
        },
    )?;
    let resp = self
        .env_client()
        .list_environments(req)
        .await
        .map_err(map_platform_status)?
        .into_inner();
    Ok(resp
        .environments
        .into_iter()
        .map(|e| Environment {
            id: e.id,
            organisation: e.organisation,
            name: e.name,
            // Wire sends "" for a missing description; surface as None.
            description: e.description.filter(|v| !v.is_empty()),
            sort_order: e.sort_order,
            created_at: e.created_at,
        })
        .collect())
}
async fn list_destinations(
&self,
_access_token: &str,
_organisation: &str,
access_token: &str,
organisation: &str,
) -> Result<Vec<Destination>, PlatformError> {
// DestinationService client not yet generated; return empty for now
Ok(vec![])
let req = platform_authed_request(
access_token,
forage_grpc::GetDestinationsRequest {
organisation: organisation.into(),
},
)?;
let resp = self
.dest_client()
.get_destinations(req)
.await
.map_err(map_platform_status)?
.into_inner();
Ok(resp
.destinations
.into_iter()
.map(|d| Destination {
name: d.name,
environment: d.environment,
organisation: d.organisation,
metadata: d.metadata,
dest_type: d.r#type.map(|t| DestinationType {
organisation: t.organisation,
name: t.name,
version: t.version,
}),
})
.collect())
}
async fn create_environment(
&self,
access_token: &str,
organisation: &str,
name: &str,
description: Option<&str>,
sort_order: i32,
) -> Result<Environment, PlatformError> {
let req = platform_authed_request(
access_token,
forage_grpc::CreateEnvironmentRequest {
organisation: organisation.into(),
name: name.into(),
description: description.map(|s| s.to_string()),
sort_order,
},
)?;
let resp = self
.env_client()
.create_environment(req)
.await
.map_err(map_platform_status)?
.into_inner();
let e = resp
.environment
.ok_or(PlatformError::Other("no environment in response".into()))?;
Ok(Environment {
id: e.id,
organisation: e.organisation,
name: e.name,
description: e.description.filter(|v| !v.is_empty()),
sort_order: e.sort_order,
created_at: e.created_at,
})
}
async fn create_destination(
&self,
access_token: &str,
organisation: &str,
name: &str,
environment: &str,
metadata: &std::collections::HashMap<String, String>,
dest_type: Option<&forage_core::platform::DestinationType>,
) -> Result<(), PlatformError> {
let req = platform_authed_request(
access_token,
forage_grpc::CreateDestinationRequest {
organisation: organisation.into(),
name: name.into(),
environment: environment.into(),
metadata: metadata.clone(),
r#type: dest_type.map(|t| forage_grpc::DestinationType {
organisation: t.organisation.clone(),
name: t.name.clone(),
version: t.version,
}),
},
)?;
self.dest_client()
.create_destination(req)
.await
.map_err(map_platform_status)?;
Ok(())
}
async fn update_destination(
&self,
access_token: &str,
name: &str,
metadata: &std::collections::HashMap<String, String>,
) -> Result<(), PlatformError> {
let req = platform_authed_request(
access_token,
forage_grpc::UpdateDestinationRequest {
name: name.into(),
metadata: metadata.clone(),
},
)?;
self.dest_client()
.update_destination(req)
.await
.map_err(map_platform_status)?;
Ok(())
}
async fn get_destination_states(
&self,
access_token: &str,
organisation: &str,
project: Option<&str>,
) -> Result<forage_core::platform::DeploymentStates, PlatformError> {
let req = bearer_request(
access_token,
forage_grpc::GetDestinationStatesRequest {
organisation: organisation.into(),
project: project.map(|p| p.into()),
},
)
.map_err(|e| PlatformError::Other(e.to_string()))?;
let resp = self
.release_client()
.get_destination_states(req)
.await
.map_err(map_platform_status)?;
let inner = resp.into_inner();
let destinations = inner
.destinations
.into_iter()
.map(|d| forage_core::platform::DestinationState {
destination_id: d.destination_id,
destination_name: d.destination_name,
environment: d.environment,
release_id: d.release_id,
artifact_id: d.artifact_id,
status: d.status,
error_message: d.error_message,
queued_at: d.queued_at,
completed_at: d.completed_at,
queue_position: d.queue_position,
started_at: d.started_at,
})
.collect();
Ok(forage_core::platform::DeploymentStates {
destinations,
})
}
async fn get_release_intent_states(
&self,
access_token: &str,
organisation: &str,
project: Option<&str>,
include_completed: bool,
) -> Result<Vec<forage_core::platform::ReleaseIntentState>, PlatformError> {
let req = bearer_request(
access_token,
forage_grpc::GetReleaseIntentStatesRequest {
organisation: organisation.into(),
project: project.map(|p| p.into()),
include_completed,
},
)
.map_err(|e| PlatformError::Other(e.to_string()))?;
let resp = self
.release_client()
.get_release_intent_states(req)
.await
.map_err(map_platform_status)?;
Ok(resp
.into_inner()
.release_intents
.into_iter()
.map(|ri| forage_core::platform::ReleaseIntentState {
release_intent_id: ri.release_intent_id,
artifact_id: ri.artifact_id,
project: ri.project,
created_at: ri.created_at,
stages: ri
.stages
.into_iter()
.map(convert_pipeline_stage_state)
.collect(),
steps: ri
.steps
.into_iter()
.map(convert_release_step_state)
.collect(),
})
.collect())
}
/// Request a release of `artifact_id` to the given destinations and/or
/// environments.
///
/// `force` is always sent as `false` here — NOTE(review): expose it as a
/// parameter if callers ever need forced releases.
async fn release_artifact(
    &self,
    access_token: &str,
    artifact_id: &str,
    destinations: &[String],
    environments: &[String],
    use_pipeline: bool,
) -> Result<(), PlatformError> {
    let req = bearer_request(
        access_token,
        forage_grpc::ReleaseRequest {
            artifact_id: artifact_id.into(),
            destinations: destinations.to_vec(),
            environments: environments.to_vec(),
            force: false,
            use_pipeline,
        },
    )
    .map_err(|e| PlatformError::Other(e.to_string()))?;
    self.release_client()
        .release(req)
        .await
        .map_err(map_platform_status)?;
    Ok(())
}
async fn list_triggers(
&self,
access_token: &str,
organisation: &str,
project: &str,
) -> Result<Vec<Trigger>, PlatformError> {
let req = platform_authed_request(
access_token,
forage_grpc::ListTriggersRequest {
project: Some(forage_grpc::Project {
organisation: organisation.into(),
project: project.into(),
}),
},
)?;
let resp = self
.trigger_client()
.list_triggers(req)
.await
.map_err(map_platform_status)?
.into_inner();
Ok(resp.triggers.into_iter().map(convert_trigger).collect())
}
async fn create_trigger(
&self,
access_token: &str,
organisation: &str,
project: &str,
input: &CreateTriggerInput,
) -> Result<Trigger, PlatformError> {
let req = platform_authed_request(
access_token,
forage_grpc::CreateTriggerRequest {
project: Some(forage_grpc::Project {
organisation: organisation.into(),
project: project.into(),
}),
name: input.name.clone(),
branch_pattern: input.branch_pattern.clone(),
title_pattern: input.title_pattern.clone(),
author_pattern: input.author_pattern.clone(),
commit_message_pattern: input.commit_message_pattern.clone(),
source_type_pattern: input.source_type_pattern.clone(),
target_environments: input.target_environments.clone(),
target_destinations: input.target_destinations.clone(),
force_release: input.force_release,
use_pipeline: input.use_pipeline,
},
)?;
let resp = self
.trigger_client()
.create_trigger(req)
.await
.map_err(map_platform_status)?
.into_inner();
let trigger = resp
.trigger
.ok_or(PlatformError::Other("no trigger in response".into()))?;
Ok(convert_trigger(trigger))
}
async fn update_trigger(
&self,
access_token: &str,
organisation: &str,
project: &str,
name: &str,
input: &UpdateTriggerInput,
) -> Result<Trigger, PlatformError> {
let req = platform_authed_request(
access_token,
forage_grpc::UpdateTriggerRequest {
project: Some(forage_grpc::Project {
organisation: organisation.into(),
project: project.into(),
}),
name: name.into(),
enabled: input.enabled,
branch_pattern: input.branch_pattern.clone(),
title_pattern: input.title_pattern.clone(),
author_pattern: input.author_pattern.clone(),
commit_message_pattern: input.commit_message_pattern.clone(),
source_type_pattern: input.source_type_pattern.clone(),
target_environments: input.target_environments.clone(),
target_destinations: input.target_destinations.clone(),
force_release: input.force_release,
use_pipeline: input.use_pipeline,
},
)?;
let resp = self
.trigger_client()
.update_trigger(req)
.await
.map_err(map_platform_status)?
.into_inner();
let trigger = resp
.trigger
.ok_or(PlatformError::Other("no trigger in response".into()))?;
Ok(convert_trigger(trigger))
}
async fn delete_trigger(
&self,
access_token: &str,
organisation: &str,
project: &str,
name: &str,
) -> Result<(), PlatformError> {
let req = platform_authed_request(
access_token,
forage_grpc::DeleteTriggerRequest {
project: Some(forage_grpc::Project {
organisation: organisation.into(),
project: project.into(),
}),
name: name.into(),
},
)?;
self.trigger_client()
.delete_trigger(req)
.await
.map_err(map_platform_status)?;
Ok(())
}
async fn list_policies(
&self,
access_token: &str,
organisation: &str,
project: &str,
) -> Result<Vec<Policy>, PlatformError> {
let req = platform_authed_request(
access_token,
forage_grpc::ListPoliciesRequest {
project: Some(forage_grpc::Project {
organisation: organisation.into(),
project: project.into(),
}),
},
)?;
let resp = self
.policy_client()
.list_policies(req)
.await
.map_err(map_platform_status)?
.into_inner();
Ok(resp.policies.into_iter().map(convert_policy).collect())
}
async fn create_policy(
&self,
access_token: &str,
organisation: &str,
project: &str,
input: &CreatePolicyInput,
) -> Result<Policy, PlatformError> {
let (policy_type, config) = policy_config_to_grpc(&input.config);
let req = platform_authed_request(
access_token,
forage_grpc::CreatePolicyRequest {
project: Some(forage_grpc::Project {
organisation: organisation.into(),
project: project.into(),
}),
name: input.name.clone(),
policy_type,
config,
},
)?;
let resp = self
.policy_client()
.create_policy(req)
.await
.map_err(map_platform_status)?
.into_inner();
let policy = resp
.policy
.ok_or(PlatformError::Other("no policy in response".into()))?;
Ok(convert_policy(policy))
}
/// Update an existing policy's enabled flag and/or configuration.
async fn update_policy(
    &self,
    access_token: &str,
    organisation: &str,
    project: &str,
    name: &str,
    input: &UpdatePolicyInput,
) -> Result<Policy, PlatformError> {
    // Reuse the create-request conversion, then re-wrap into the
    // update-request oneof (the two protos use distinct Config types).
    let config = input.config.as_ref().map(|c| {
        let (_, grpc_config) = policy_config_to_grpc(c);
        match grpc_config {
            Some(forage_grpc::create_policy_request::Config::SoakTime(s)) => {
                forage_grpc::update_policy_request::Config::SoakTime(s)
            }
            Some(forage_grpc::create_policy_request::Config::BranchRestriction(b)) => {
                forage_grpc::update_policy_request::Config::BranchRestriction(b)
            }
            // Defensive only: policy_config_to_grpc always returns Some,
            // so this arm is unreachable in practice.
            None => forage_grpc::update_policy_request::Config::SoakTime(
                forage_grpc::SoakTimeConfig::default(),
            ),
        }
    });
    let req = platform_authed_request(
        access_token,
        forage_grpc::UpdatePolicyRequest {
            project: Some(forage_grpc::Project {
                organisation: organisation.into(),
                project: project.into(),
            }),
            name: name.into(),
            enabled: input.enabled,
            config,
        },
    )?;
    let resp = self
        .policy_client()
        .update_policy(req)
        .await
        .map_err(map_platform_status)?
        .into_inner();
    let policy = resp
        .policy
        .ok_or(PlatformError::Other("no policy in response".into()))?;
    Ok(convert_policy(policy))
}
/// Delete a named policy from a project.
async fn delete_policy(
    &self,
    access_token: &str,
    organisation: &str,
    project: &str,
    name: &str,
) -> Result<(), PlatformError> {
    let request = platform_authed_request(
        access_token,
        forage_grpc::DeletePolicyRequest {
            project: Some(forage_grpc::Project {
                organisation: organisation.into(),
                project: project.into(),
            }),
            name: name.into(),
        },
    )?;
    // Only success/failure matters; the response payload is discarded.
    self.policy_client()
        .delete_policy(request)
        .await
        .map(|_| ())
        .map_err(map_platform_status)
}
/// List all release pipelines configured for a project.
async fn list_release_pipelines(
    &self,
    access_token: &str,
    organisation: &str,
    project: &str,
) -> Result<Vec<ReleasePipeline>, PlatformError> {
    let request = platform_authed_request(
        access_token,
        forage_grpc::ListReleasePipelinesRequest {
            project: Some(forage_grpc::Project {
                organisation: organisation.into(),
                project: project.into(),
            }),
        },
    )?;
    let pipelines = self
        .pipeline_client()
        .list_release_pipelines(request)
        .await
        .map_err(map_platform_status)?
        .into_inner()
        .pipelines;
    // Convert each gRPC pipeline into the core-layer representation.
    Ok(pipelines.into_iter().map(convert_release_pipeline).collect())
}
async fn create_release_pipeline(
&self,
access_token: &str,
organisation: &str,
project: &str,
input: &CreateReleasePipelineInput,
) -> Result<ReleasePipeline, PlatformError> {
let req = platform_authed_request(
access_token,
forage_grpc::CreateReleasePipelineRequest {
project: Some(forage_grpc::Project {
organisation: organisation.into(),
project: project.into(),
}),
name: input.name.clone(),
stages: convert_stages_to_grpc(&input.stages),
},
)?;
let resp = self
.pipeline_client()
.create_release_pipeline(req)
.await
.map_err(map_platform_status)?
.into_inner();
let pipeline = resp
.pipeline
.ok_or(PlatformError::Other("no pipeline in response".into()))?;
Ok(convert_release_pipeline(pipeline))
}
async fn update_release_pipeline(
&self,
access_token: &str,
organisation: &str,
project: &str,
name: &str,
input: &UpdateReleasePipelineInput,
) -> Result<ReleasePipeline, PlatformError> {
let req = platform_authed_request(
access_token,
forage_grpc::UpdateReleasePipelineRequest {
project: Some(forage_grpc::Project {
organisation: organisation.into(),
project: project.into(),
}),
name: name.into(),
enabled: input.enabled,
stages: input.stages.as_ref().map(|s| convert_stages_to_grpc(s)).unwrap_or_default(),
update_stages: input.stages.is_some(),
},
)?;
let resp = self
.pipeline_client()
.update_release_pipeline(req)
.await
.map_err(map_platform_status)?
.into_inner();
let pipeline = resp
.pipeline
.ok_or(PlatformError::Other("no pipeline in response".into()))?;
Ok(convert_release_pipeline(pipeline))
}
/// Delete a named release pipeline from a project.
async fn delete_release_pipeline(
    &self,
    access_token: &str,
    organisation: &str,
    project: &str,
    name: &str,
) -> Result<(), PlatformError> {
    let request = platform_authed_request(
        access_token,
        forage_grpc::DeleteReleasePipelineRequest {
            project: Some(forage_grpc::Project {
                organisation: organisation.into(),
                project: project.into(),
            }),
            name: name.into(),
        },
    )?;
    // Only success/failure matters; the response payload is discarded.
    self.pipeline_client()
        .delete_release_pipeline(request)
        .await
        .map(|_| ())
        .map_err(map_platform_status)
}
/// Fetch the raw spec content for an artifact by its id.
async fn get_artifact_spec(
    &self,
    access_token: &str,
    artifact_id: &str,
) -> Result<String, PlatformError> {
    let request = platform_authed_request(
        access_token,
        forage_grpc::GetArtifactSpecRequest {
            artifact_id: artifact_id.into(),
        },
    )?;
    let response = self
        .artifact_client()
        .get_artifact_spec(request)
        .await
        .map_err(map_platform_status)?
        .into_inner();
    Ok(response.content)
}
}

View File

@@ -8,29 +8,94 @@ use std::net::SocketAddr;
use std::sync::Arc;
use axum::Router;
use axum::extract::State;
use axum::http::StatusCode;
use axum::response::{Html, IntoResponse, Response};
use forage_core::session::{FileSessionStore, SessionStore};
use forage_db::PgSessionStore;
use minijinja::context;
use tower_http::services::ServeDir;
use tower_http::trace::TraceLayer;
use opentelemetry::trace::TracerProvider as _;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use crate::forest_client::GrpcForestClient;
use crate::state::AppState;
use crate::templates::TemplateEngine;
/// Initialise tracing/telemetry for the process.
///
/// Always installs an env-filtered fmt layer; when `OTEL_EXPORTER_OTLP_ENDPOINT`
/// is set, additionally exports trace spans over OTLP (tonic transport).
fn init_telemetry() {
    let env_filter =
        EnvFilter::try_from_default_env().unwrap_or_else(|_| "info,h2=warn,tonic=info".into());
    let fmt_layer = tracing_subscriber::fmt::layer();
    if std::env::var("OTEL_EXPORTER_OTLP_ENDPOINT").is_ok() {
        // OTLP exporter configured — send spans to the collector.
        // NOTE(review): only a span exporter is built here; logs are not
        // exported over OTLP despite the fmt layer's local output.
        let tracer = opentelemetry_otlp::SpanExporter::builder()
            .with_tonic()
            .build()
            .expect("failed to create OTLP span exporter");
        let tracer_provider = opentelemetry_sdk::trace::SdkTracerProvider::builder()
            .with_batch_exporter(tracer)
            .with_resource(
                opentelemetry_sdk::Resource::builder()
                    .with_service_name(
                        // Service name defaults to "forage-server" when unset.
                        std::env::var("OTEL_SERVICE_NAME")
                            .unwrap_or_else(|_| "forage-server".into()),
                    )
                    .build(),
            )
            .build();
        let otel_layer = tracing_opentelemetry::layer()
            .with_tracer(tracer_provider.tracer("forage-server"));
        tracing_subscriber::registry()
            .with(env_filter)
            .with(fmt_layer)
            .with(otel_layer)
            .init();
        tracing::info!("OpenTelemetry enabled — exporting to OTLP endpoint");
    } else {
        // No OTLP endpoint configured: plain local fmt logging only.
        tracing_subscriber::registry()
            .with(env_filter)
            .with(fmt_layer)
            .init();
    }
}
/// Global fallback handler: renders the themed 404 page, degrading to a
/// bare 404 status if template rendering itself fails.
async fn fallback_404(State(state): State<AppState>) -> Response {
    let rendered = state.templates.render(
        "pages/error.html.jinja",
        context! {
            title => "Not Found - Forage",
            description => "The page you're looking for doesn't exist.",
            status => 404u16,
            heading => "Page not found",
            message => "The page you're looking for doesn't exist.",
        },
    );
    if let Ok(body) = rendered {
        (StatusCode::NOT_FOUND, Html(body)).into_response()
    } else {
        StatusCode::NOT_FOUND.into_response()
    }
}
/// Assemble the application router: page/API routes, static assets under
/// `/static`, a themed 404 fallback, and HTTP tracing, sharing `state`.
pub fn build_router(state: AppState) -> Router {
    Router::new()
        .merge(routes::router())
        // Serve files from the ./static directory.
        .nest_service("/static", ServeDir::new("static"))
        .fallback(fallback_404)
        // Layered after routes so every request gets a trace span.
        .layer(TraceLayer::new_for_http())
        .with_state(state)
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
tracing_subscriber::fmt()
.with_env_filter(EnvFilter::try_from_default_env().unwrap_or_else(|_| "info".into()))
.init();
init_telemetry();
let forest_endpoint =
std::env::var("FOREST_SERVER_URL").unwrap_or_else(|_| "http://localhost:4040".into());
@@ -81,7 +146,8 @@ async fn main() -> anyhow::Result<()> {
};
let forest_client = Arc::new(forest_client);
let state = AppState::new(template_engine, forest_client.clone(), forest_client, sessions);
let state = AppState::new(template_engine, forest_client.clone(), forest_client.clone(), sessions)
.with_grpc_client(forest_client);
let app = build_router(state);
let port: u16 = std::env::var("PORT")

View File

@@ -7,7 +7,7 @@ use chrono::Utc;
use minijinja::context;
use serde::Deserialize;
use super::error_page;
use super::{error_page, internal_error};
use crate::auth::{self, MaybeSession, Session};
use crate::state::AppState;
use forage_core::auth::{validate_email, validate_password, validate_username, UserEmail};
@@ -390,8 +390,7 @@ async fn tokens_page(
},
)
.map_err(|e| {
tracing::error!("template error: {e:#}");
error_page(&state, StatusCode::INTERNAL_SERVER_ERROR, "Something went wrong", "Please try again.")
internal_error(&state, "template error", &e)
})?;
Ok(Html(html).into_response())
@@ -422,8 +421,7 @@ async fn create_token_submit(
.create_token(&session.access_token, &session.user.user_id, &form.name)
.await
.map_err(|e| {
tracing::error!("failed to create token: {e}");
error_page(&state, StatusCode::INTERNAL_SERVER_ERROR, "Something went wrong", "Please try again.")
internal_error(&state, "failed to create token", &e)
})?;
let tokens = state
@@ -455,8 +453,7 @@ async fn create_token_submit(
},
)
.map_err(|e| {
tracing::error!("template error: {e:#}");
error_page(&state, StatusCode::INTERNAL_SERVER_ERROR, "Something went wrong", "Please try again.")
internal_error(&state, "template error", &e)
})?;
Ok(Html(html).into_response())
@@ -477,8 +474,7 @@ async fn delete_token_submit(
.delete_token(&session.access_token, &token_id)
.await
.map_err(|e| {
tracing::error!("failed to delete token: {e}");
error_page(&state, StatusCode::INTERNAL_SERVER_ERROR, "Something went wrong", "Please try again.")
internal_error(&state, "failed to delete token", &e)
})?;
Ok(Redirect::to("/settings/tokens").into_response())
@@ -522,13 +518,7 @@ fn render_account(
},
)
.map_err(|e| {
tracing::error!("template error: {e:#}");
error_page(
state,
StatusCode::INTERNAL_SERVER_ERROR,
"Something went wrong",
"Please try again.",
)
internal_error(state, "template error", &e)
})?;
Ok(Html(html).into_response())

View File

@@ -0,0 +1,312 @@
use axum::extract::{Path, State};
use axum::response::sse::{Event, KeepAlive, Sse};
use axum::response::{IntoResponse, Response};
use axum::routing::get;
use axum::Router;
use forage_core::platform::validate_slug;
use futures_util::StreamExt;
use std::convert::Infallible;
use tokio_stream::wrappers::ReceiverStream;
use crate::auth::Session;
use crate::forest_client::GrpcForestClient;
use crate::state::AppState;
use super::error_page;
/// Routes for server-sent event streaming: live project events and
/// per-release deployment logs.
pub fn router() -> Router<AppState> {
    Router::new()
        .route(
            "/orgs/{org}/projects/{project}/events",
            get(project_events_sse),
        )
        .route(
            "/api/orgs/{org}/projects/{project}/releases/{slug}/logs",
            get(release_logs_sse),
        )
}
/// SSE endpoint streaming project-scoped platform events to the browser.
///
/// Validates org membership and the project slug, then bridges a gRPC
/// `SubscribeEvents` stream into Server-Sent Events via a bounded channel.
async fn project_events_sse(
    State(state): State<AppState>,
    session: Session,
    Path((org, project)): Path<(String, String)>,
) -> Result<Response, Response> {
    // Validate access
    let orgs = &session.user.orgs;
    if !orgs.iter().any(|o| o.name == org) {
        return Err(error_page(
            &state,
            axum::http::StatusCode::FORBIDDEN,
            "Access denied",
            "You are not a member of this organisation.",
        ));
    }
    if !validate_slug(&project) {
        return Err(error_page(
            &state,
            axum::http::StatusCode::BAD_REQUEST,
            "Invalid request",
            "Invalid project name.",
        ));
    }
    // Raw gRPC access is optional on AppState; without it we cannot stream.
    let grpc_client = state.grpc_client.as_ref().ok_or_else(|| {
        error_page(
            &state,
            axum::http::StatusCode::SERVICE_UNAVAILABLE,
            "Service unavailable",
            "Event streaming is not available.",
        )
    })?;
    let access_token = session.access_token.clone();
    let mut event_client = grpc_client.event_client();
    // Empty filters + since_sequence 0: subscribe to all events going forward.
    let mut req = tonic::Request::new(forage_grpc::SubscribeEventsRequest {
        organisation: org.clone(),
        project: project.clone(),
        resource_types: vec![],
        actions: vec![],
        since_sequence: 0,
    });
    let bearer: tonic::metadata::MetadataValue<_> = format!("Bearer {access_token}")
        .parse()
        .map_err(|_| {
            error_page(
                &state,
                axum::http::StatusCode::INTERNAL_SERVER_ERROR,
                "Internal error",
                "Failed to create auth header.",
            )
        })?;
    req.metadata_mut().insert("authorization", bearer);
    let grpc_stream = event_client.subscribe(req).await.map_err(|e| {
        tracing::error!("event subscribe failed: {e}");
        error_page(
            &state,
            axum::http::StatusCode::BAD_GATEWAY,
            "Connection failed",
            "Could not connect to event stream.",
        )
    })?;
    let mut grpc_stream = grpc_stream.into_inner();
    // Bridge gRPC stream -> SSE via a channel
    let (tx, rx) = tokio::sync::mpsc::channel::<Result<Event, Infallible>>(32);
    tokio::spawn(async move {
        while let Some(result) = grpc_stream.next().await {
            match result {
                Ok(event) => {
                    let data = serde_json::json!({
                        "sequence": event.sequence,
                        "event_id": event.event_id,
                        "timestamp": event.timestamp,
                        "organisation": event.organisation,
                        "project": event.project,
                        "resource_type": event.resource_type,
                        "action": event.action,
                        "resource_id": event.resource_id,
                        "metadata": event.metadata,
                    });
                    // SSE event name = resource type; id = sequence so clients
                    // could resume via Last-Event-ID on reconnect.
                    let sse_event = Event::default()
                        .event(&event.resource_type)
                        .data(data.to_string())
                        .id(event.sequence.to_string());
                    if tx.send(Ok(sse_event)).await.is_err() {
                        break; // Client disconnected
                    }
                }
                Err(e) => {
                    // Upstream error ends the SSE stream; the client reconnects.
                    tracing::warn!("event stream error: {e}");
                    break;
                }
            }
        }
    });
    let stream = ReceiverStream::new(rx);
    let sse = Sse::new(stream).keep_alive(KeepAlive::default());
    Ok(sse.into_response())
}
// ─── Release logs SSE ────────────────────────────────────────────────
/// SSE endpoint streaming deployment logs for a release, looked up by slug.
///
/// Resolves the artifact, collects the ids of its release intents, then fans
/// one `WaitRelease` gRPC stream per intent into a single SSE response.
async fn release_logs_sse(
    State(state): State<AppState>,
    session: Session,
    Path((org, project, slug)): Path<(String, String, String)>,
) -> Result<Response, Response> {
    let orgs = &session.user.orgs;
    if !orgs.iter().any(|o| o.name == org) {
        return Err(error_page(
            &state,
            axum::http::StatusCode::FORBIDDEN,
            "Access denied",
            "You are not a member of this organisation.",
        ));
    }
    if !validate_slug(&project) {
        return Err(error_page(
            &state,
            axum::http::StatusCode::BAD_REQUEST,
            "Invalid request",
            "Invalid project name.",
        ));
    }
    let grpc_client = state.grpc_client.as_ref().ok_or_else(|| {
        error_page(
            &state,
            axum::http::StatusCode::SERVICE_UNAVAILABLE,
            "Service unavailable",
            "Log streaming is not available.",
        )
    })?;
    let access_token = session.access_token.clone();
    // Fetch the artifact to get its artifact_id.
    let artifact = state
        .platform_client
        .get_artifact_by_slug(&access_token, &slug)
        .await
        .map_err(|e| {
            tracing::error!("release_logs_sse get_artifact_by_slug: {e}");
            error_page(
                &state,
                axum::http::StatusCode::NOT_FOUND,
                "Not found",
                "Release not found.",
            )
        })?;
    // Fetch release intent states to find intent IDs for this artifact.
    // Lookup failure degrades to "no logs" rather than an error page.
    let release_intents = state
        .platform_client
        .get_release_intent_states(&access_token, &org, Some(&project), true)
        .await
        .unwrap_or_default();
    let intent_ids: Vec<String> = release_intents
        .into_iter()
        .filter(|ri| ri.artifact_id == artifact.artifact_id)
        .map(|ri| ri.release_intent_id)
        .collect();
    if intent_ids.is_empty() {
        // No release intents — return an SSE stream that sends a "done" event and closes.
        let (tx, rx) = tokio::sync::mpsc::channel::<Result<Event, Infallible>>(1);
        tokio::spawn(async move {
            let _ = tx
                .send(Ok(Event::default()
                    .event("done")
                    .data(r#"{"message":"no logs"}"#)))
                .await;
        });
        let stream = ReceiverStream::new(rx);
        return Ok(Sse::new(stream).keep_alive(KeepAlive::default()).into_response());
    }
    let (tx, rx) = tokio::sync::mpsc::channel::<Result<Event, Infallible>>(128);
    // Spawn a WaitRelease stream for each release intent.
    for intent_id in intent_ids {
        let grpc = grpc_client.clone();
        let token = access_token.clone();
        let tx = tx.clone();
        tokio::spawn(async move {
            if let Err(e) = stream_release_logs(&grpc, &token, &intent_id, &tx).await {
                tracing::warn!("release log stream for {intent_id}: {e}");
            }
        });
    }
    // Drop our copy of tx so the stream ends when all spawned tasks finish.
    drop(tx);
    let stream = ReceiverStream::new(rx);
    let sse = Sse::new(stream).keep_alive(KeepAlive::default());
    Ok(sse.into_response())
}
/// Pipe a single `WaitRelease` gRPC stream into the shared SSE channel.
///
/// Emits "log", "status" and "stage" events as they arrive, then a final
/// "done" event carrying the release intent id. Returns `Ok(())` early if
/// the SSE client disconnects (the channel receiver is dropped).
async fn stream_release_logs(
    grpc: &GrpcForestClient,
    access_token: &str,
    release_intent_id: &str,
    tx: &tokio::sync::mpsc::Sender<Result<Event, Infallible>>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let mut client = grpc.release_client();
    let mut req = tonic::Request::new(forage_grpc::WaitReleaseRequest {
        release_intent_id: release_intent_id.to_string(),
    });
    let bearer: tonic::metadata::MetadataValue<_> =
        format!("Bearer {access_token}").parse()?;
    req.metadata_mut().insert("authorization", bearer);
    let resp = client.wait_release(req).await?;
    let mut stream = resp.into_inner();
    while let Some(result) = stream.next().await {
        match result {
            Ok(event) => {
                let sse_event = match event.event {
                    Some(forage_grpc::wait_release_event::Event::LogLine(log)) => {
                        // Map the numeric channel to a name; unknown values
                        // (including 0/unspecified) fall back to stdout.
                        let channel = match log.channel {
                            1 => "stdout",
                            2 => "stderr",
                            _ => "stdout",
                        };
                        let data = serde_json::json!({
                            "destination": log.destination,
                            "line": log.line,
                            "timestamp": log.timestamp,
                            "channel": channel,
                        });
                        Some(Event::default().event("log").data(data.to_string()))
                    }
                    Some(forage_grpc::wait_release_event::Event::StatusUpdate(su)) => {
                        let data = serde_json::json!({
                            "destination": su.destination,
                            "status": su.status,
                        });
                        Some(Event::default().event("status").data(data.to_string()))
                    }
                    Some(forage_grpc::wait_release_event::Event::StageUpdate(su)) => {
                        let data = serde_json::json!({
                            "stage_id": su.stage_id,
                            "stage_type": su.stage_type,
                            "status": su.status,
                        });
                        Some(Event::default().event("stage").data(data.to_string()))
                    }
                    // Unset oneof: nothing to forward.
                    None => None,
                };
                if let Some(sse_event) = sse_event {
                    if tx.send(Ok(sse_event)).await.is_err() {
                        return Ok(()); // Client disconnected
                    }
                }
            }
            Err(e) => {
                // Upstream error: stop streaming but still emit "done" below.
                tracing::warn!("wait_release stream error: {e}");
                break;
            }
        }
    }
    // Signal that this intent's stream is done.
    let _ = tx
        .send(Ok(Event::default()
            .event("done")
            .data(format!(
                r#"{{"release_intent_id":"{}"}}"#,
                release_intent_id
            ))))
        .await;
    Ok(())
}

View File

@@ -1,4 +1,5 @@
mod auth;
mod events;
mod pages;
mod platform;
@@ -14,10 +15,22 @@ pub fn router() -> Router<AppState> {
.merge(pages::router())
.merge(auth::router())
.merge(platform::router())
.merge(events::router())
}
/// Render an error page with the given status code, heading, and message.
/// Thin wrapper over `error_page_detail` with no collapsible detail section.
fn error_page(state: &AppState, status: StatusCode, heading: &str, message: &str) -> Response {
    error_page_detail(state, status, heading, message, None)
}
/// Render an error page with optional error detail (shown in a collapsible section).
fn error_page_detail(
state: &AppState,
status: StatusCode,
heading: &str,
message: &str,
detail: Option<&str>,
) -> Response {
let html = state.templates.render(
"pages/error.html.jinja",
context! {
@@ -26,6 +39,7 @@ fn error_page(state: &AppState, status: StatusCode, heading: &str, message: &str
status => status.as_u16(),
heading => heading,
message => message,
detail => detail,
},
);
match html {
@@ -33,3 +47,28 @@ fn error_page(state: &AppState, status: StatusCode, heading: &str, message: &str
Err(_) => status.into_response(),
}
}
/// Log an error and render a 500 page with the error detail.
fn internal_error(state: &AppState, context: &str, err: &dyn std::fmt::Display) -> Response {
let detail = format!("{err:#}");
tracing::error!("{context}: {detail}");
error_page_detail(
state,
StatusCode::INTERNAL_SERVER_ERROR,
"Something went wrong",
"An internal error occurred. Please try again.",
Some(&detail),
)
}
/// Log a warning for a failed call and return the default value.
/// Use for supplementary data where graceful degradation is acceptable.
fn warn_default<T: Default>(context: &str, result: Result<T, impl std::fmt::Display>) -> T {
    result.unwrap_or_else(|e| {
        tracing::warn!("{context}: {e:#}");
        T::default()
    })
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,5 +1,6 @@
use std::sync::Arc;
use crate::forest_client::GrpcForestClient;
use crate::templates::TemplateEngine;
use forage_core::auth::ForestAuth;
use forage_core::platform::ForestPlatform;
@@ -11,6 +12,7 @@ pub struct AppState {
pub forest_client: Arc<dyn ForestAuth>,
pub platform_client: Arc<dyn ForestPlatform>,
pub sessions: Arc<dyn SessionStore>,
pub grpc_client: Option<Arc<GrpcForestClient>>,
}
impl AppState {
@@ -25,6 +27,12 @@ impl AppState {
forest_client,
platform_client,
sessions,
grpc_client: None,
}
}
pub fn with_grpc_client(mut self, client: Arc<GrpcForestClient>) -> Self {
self.grpc_client = Some(client);
self
}
}

View File

@@ -40,6 +40,44 @@ fn timeago(value: &str) -> String {
}
}
/// Format a future ISO 8601 / RFC 3339 timestamp as a relative countdown.
///
/// Returns the input unchanged when it cannot be parsed; past timestamps
/// render as "now".
fn timeuntil(value: &str) -> String {
    // Accept both full RFC 3339 strings and naive timestamps that are merely
    // missing a zone suffix (retry with a trailing "Z").
    let parsed = chrono::DateTime::parse_from_rfc3339(value)
        .or_else(|_| chrono::DateTime::parse_from_rfc3339(&format!("{value}Z")));
    let Ok(dt) = parsed else {
        return value.to_string();
    };
    let remaining = dt.signed_duration_since(chrono::Utc::now());
    let secs = remaining.num_seconds();
    if secs <= 0 {
        return "now".into();
    }
    if secs < 60 {
        return format!("in {secs}s");
    }
    if remaining.num_minutes() < 60 {
        let (m, s) = (remaining.num_minutes(), secs % 60);
        return if s > 0 {
            format!("in {m}m {s}s")
        } else {
            format!("in {m}m")
        };
    }
    if remaining.num_hours() < 24 {
        let (h, m) = (remaining.num_hours(), remaining.num_minutes() % 60);
        return if m > 0 {
            format!("in {h}h {m}m")
        } else {
            format!("in {h}h")
        };
    }
    format!("in {}d", remaining.num_days())
}
/// Format an ISO 8601 / RFC 3339 timestamp as a full human-readable datetime.
fn datetime(value: &str) -> String {
let Ok(dt) = chrono::DateTime::parse_from_rfc3339(value)
@@ -73,7 +111,11 @@ impl TemplateEngine {
let mut env = Environment::new();
env.set_loader(minijinja::path_loader(path));
env.add_filter("timeago", |v: String| -> String { timeago(&v) });
env.add_filter("timeuntil", |v: String| -> String { timeuntil(&v) });
env.add_filter("datetime", |v: String| -> String { datetime(&v) });
env.add_filter("urlencode", |v: String| -> String {
urlencoding::encode(&v).into_owned()
});
Ok(Self { env })
}

View File

@@ -4,7 +4,9 @@ use axum::Router;
use chrono::Utc;
use forage_core::auth::*;
use forage_core::platform::{
Artifact, ArtifactContext, Destination, ForestPlatform, Organisation, OrgMember, PlatformError,
Artifact, ArtifactContext, CreatePolicyInput, CreateReleasePipelineInput, CreateTriggerInput,
Destination, Environment, ForestPlatform, Organisation, OrgMember, PlatformError, Policy,
ReleasePipeline, Trigger, UpdatePolicyInput, UpdateReleasePipelineInput, UpdateTriggerInput,
};
use forage_core::session::{
CachedOrg, CachedUser, InMemorySessionStore, SessionData, SessionStore,
@@ -41,7 +43,16 @@ pub(crate) struct MockPlatformBehavior {
pub remove_member_result: Option<Result<(), PlatformError>>,
pub update_member_role_result: Option<Result<OrgMember, PlatformError>>,
pub get_artifact_by_slug_result: Option<Result<Artifact, PlatformError>>,
pub list_environments_result: Option<Result<Vec<Environment>, PlatformError>>,
pub list_destinations_result: Option<Result<Vec<Destination>, PlatformError>>,
pub list_triggers_result: Option<Result<Vec<Trigger>, PlatformError>>,
pub create_trigger_result: Option<Result<Trigger, PlatformError>>,
pub update_trigger_result: Option<Result<Trigger, PlatformError>>,
pub delete_trigger_result: Option<Result<(), PlatformError>>,
pub list_release_pipelines_result: Option<Result<Vec<ReleasePipeline>, PlatformError>>,
pub create_release_pipeline_result: Option<Result<ReleasePipeline, PlatformError>>,
pub update_release_pipeline_result: Option<Result<ReleasePipeline, PlatformError>>,
pub delete_release_pipeline_result: Option<Result<(), PlatformError>>,
}
pub(crate) fn ok_tokens() -> AuthTokens {
@@ -214,6 +225,18 @@ impl ForestAuth for MockForestClient {
}))
}
/// Mock: returns a fixed profile echoing back the requested username.
async fn get_user_by_username(
    &self,
    _access_token: &str,
    username: &str,
) -> Result<UserProfile, AuthError> {
    Ok(UserProfile {
        user_id: "user-123".into(),
        username: username.into(),
        created_at: Some("2025-01-15T10:00:00Z".into()),
    })
}
async fn remove_email(
&self,
_access_token: &str,
@@ -386,6 +409,15 @@ impl ForestPlatform for MockPlatformClient {
}))
}
async fn list_environments(
&self,
_access_token: &str,
_organisation: &str,
) -> Result<Vec<Environment>, PlatformError> {
let b = self.behavior.lock().unwrap();
b.list_environments_result.clone().unwrap_or(Ok(vec![]))
}
async fn list_destinations(
&self,
_access_token: &str,
@@ -394,6 +426,255 @@ impl ForestPlatform for MockPlatformClient {
let b = self.behavior.lock().unwrap();
b.list_destinations_result.clone().unwrap_or(Ok(vec![]))
}
/// Mock: fabricates an `Environment` from the inputs (not behavior-driven).
async fn create_environment(
    &self,
    _access_token: &str,
    organisation: &str,
    name: &str,
    description: Option<&str>,
    sort_order: i32,
) -> Result<Environment, PlatformError> {
    Ok(Environment {
        id: format!("env-{name}"),
        organisation: organisation.into(),
        name: name.into(),
        description: description.map(|s| s.to_string()),
        sort_order,
        created_at: "2026-03-08T00:00:00Z".into(),
    })
}
/// Mock: accepts any destination creation and reports success.
async fn create_destination(
    &self,
    _access_token: &str,
    _organisation: &str,
    _name: &str,
    _environment: &str,
    _metadata: &std::collections::HashMap<String, String>,
    _dest_type: Option<&forage_core::platform::DestinationType>,
) -> Result<(), PlatformError> {
    Ok(())
}
/// Mock: accepts any destination update and reports success.
async fn update_destination(
    &self,
    _access_token: &str,
    _name: &str,
    _metadata: &std::collections::HashMap<String, String>,
) -> Result<(), PlatformError> {
    Ok(())
}
/// Mock: always reports an empty set of deployment states.
async fn get_destination_states(
    &self,
    _access_token: &str,
    _organisation: &str,
    _project: Option<&str>,
) -> Result<forage_core::platform::DeploymentStates, PlatformError> {
    Ok(forage_core::platform::DeploymentStates {
        destinations: vec![],
    })
}
/// Mock: always reports no release intents.
async fn get_release_intent_states(
    &self,
    _access_token: &str,
    _organisation: &str,
    _project: Option<&str>,
    _include_completed: bool,
) -> Result<Vec<forage_core::platform::ReleaseIntentState>, PlatformError> {
    Ok(vec![])
}
/// Mock: accepts any release request and reports success.
async fn release_artifact(
    &self,
    _access_token: &str,
    _artifact_id: &str,
    _destinations: &[String],
    _environments: &[String],
    _use_pipeline: bool,
) -> Result<(), PlatformError> {
    Ok(())
}
/// Mock: returns the configured trigger list, defaulting to empty.
async fn list_triggers(
    &self,
    _access_token: &str,
    _organisation: &str,
    _project: &str,
) -> Result<Vec<Trigger>, PlatformError> {
    let b = self.behavior.lock().unwrap();
    b.list_triggers_result.clone().unwrap_or(Ok(vec![]))
}
/// Mock: returns the configured result, or a trigger echoing the input
/// with fixed id/timestamps.
async fn create_trigger(
    &self,
    _access_token: &str,
    _organisation: &str,
    _project: &str,
    input: &CreateTriggerInput,
) -> Result<Trigger, PlatformError> {
    let b = self.behavior.lock().unwrap();
    b.create_trigger_result.clone().unwrap_or(Ok(Trigger {
        id: "trigger-1".into(),
        name: input.name.clone(),
        enabled: true,
        branch_pattern: input.branch_pattern.clone(),
        title_pattern: input.title_pattern.clone(),
        author_pattern: input.author_pattern.clone(),
        commit_message_pattern: input.commit_message_pattern.clone(),
        source_type_pattern: input.source_type_pattern.clone(),
        target_environments: input.target_environments.clone(),
        target_destinations: input.target_destinations.clone(),
        force_release: input.force_release,
        use_pipeline: input.use_pipeline,
        created_at: "2026-03-08T00:00:00Z".into(),
        updated_at: "2026-03-08T00:00:00Z".into(),
    }))
}
/// Mock: returns the configured result, or a trigger built from the update
/// input with unset optional fields falling back to defaults.
async fn update_trigger(
    &self,
    _access_token: &str,
    _organisation: &str,
    _project: &str,
    name: &str,
    input: &UpdateTriggerInput,
) -> Result<Trigger, PlatformError> {
    let b = self.behavior.lock().unwrap();
    b.update_trigger_result.clone().unwrap_or(Ok(Trigger {
        id: "trigger-1".into(),
        name: name.into(),
        enabled: input.enabled.unwrap_or(true),
        branch_pattern: input.branch_pattern.clone(),
        title_pattern: input.title_pattern.clone(),
        author_pattern: input.author_pattern.clone(),
        commit_message_pattern: input.commit_message_pattern.clone(),
        source_type_pattern: input.source_type_pattern.clone(),
        target_environments: input.target_environments.clone(),
        target_destinations: input.target_destinations.clone(),
        force_release: input.force_release.unwrap_or(false),
        use_pipeline: input.use_pipeline.unwrap_or(false),
        created_at: "2026-03-08T00:00:00Z".into(),
        updated_at: "2026-03-08T00:00:00Z".into(),
    }))
}
/// Mock: returns the configured result, defaulting to success.
async fn delete_trigger(
    &self,
    _access_token: &str,
    _organisation: &str,
    _project: &str,
    _name: &str,
) -> Result<(), PlatformError> {
    let b = self.behavior.lock().unwrap();
    b.delete_trigger_result.clone().unwrap_or(Ok(()))
}
/// Mock: policies are not behavior-configurable; always returns empty.
async fn list_policies(
    &self,
    _access_token: &str,
    _organisation: &str,
    _project: &str,
) -> Result<Vec<Policy>, PlatformError> {
    Ok(vec![])
}
/// Mock: policy creation is not exercised by these tests.
async fn create_policy(
    &self,
    _access_token: &str,
    _organisation: &str,
    _project: &str,
    _input: &CreatePolicyInput,
) -> Result<Policy, PlatformError> {
    Err(PlatformError::Other("not implemented in mock".into()))
}
/// Mock: policy updates are not exercised by these tests.
async fn update_policy(
    &self,
    _access_token: &str,
    _organisation: &str,
    _project: &str,
    _name: &str,
    _input: &UpdatePolicyInput,
) -> Result<Policy, PlatformError> {
    Err(PlatformError::Other("not implemented in mock".into()))
}
/// Mock: accepts any policy deletion and reports success.
async fn delete_policy(
    &self,
    _access_token: &str,
    _organisation: &str,
    _project: &str,
    _name: &str,
) -> Result<(), PlatformError> {
    Ok(())
}
/// Mock: returns the configured pipeline list, defaulting to empty.
async fn list_release_pipelines(
    &self,
    _access_token: &str,
    _organisation: &str,
    _project: &str,
) -> Result<Vec<ReleasePipeline>, PlatformError> {
    let b = self.behavior.lock().unwrap();
    b.list_release_pipelines_result
        .clone()
        .unwrap_or(Ok(vec![]))
}
/// Mock: returns the configured result, or a pipeline echoing the input
/// with fixed id/timestamps.
async fn create_release_pipeline(
    &self,
    _access_token: &str,
    _organisation: &str,
    _project: &str,
    input: &CreateReleasePipelineInput,
) -> Result<ReleasePipeline, PlatformError> {
    let b = self.behavior.lock().unwrap();
    b.create_release_pipeline_result
        .clone()
        .unwrap_or(Ok(ReleasePipeline {
            id: "pipeline-1".into(),
            name: input.name.clone(),
            enabled: true,
            stages: input.stages.clone(),
            created_at: "2026-03-08T00:00:00Z".into(),
            updated_at: "2026-03-08T00:00:00Z".into(),
        }))
}
/// Mock: returns the configured result, or a pipeline built from the update
/// input with unset optional fields falling back to defaults.
async fn update_release_pipeline(
    &self,
    _access_token: &str,
    _organisation: &str,
    _project: &str,
    name: &str,
    input: &UpdateReleasePipelineInput,
) -> Result<ReleasePipeline, PlatformError> {
    let b = self.behavior.lock().unwrap();
    b.update_release_pipeline_result
        .clone()
        .unwrap_or(Ok(ReleasePipeline {
            id: "pipeline-1".into(),
            name: name.into(),
            enabled: input.enabled.unwrap_or(true),
            stages: input.stages.clone().unwrap_or_default(),
            created_at: "2026-03-08T00:00:00Z".into(),
            updated_at: "2026-03-08T00:00:00Z".into(),
        }))
}
/// Mock: returns the configured result, defaulting to success.
async fn delete_release_pipeline(
    &self,
    _access_token: &str,
    _organisation: &str,
    _project: &str,
    _name: &str,
) -> Result<(), PlatformError> {
    let b = self.behavior.lock().unwrap();
    b.delete_release_pipeline_result.clone().unwrap_or(Ok(()))
}
}
pub(crate) fn make_templates() -> TemplateEngine {

View File

@@ -582,7 +582,7 @@ async fn projects_list_non_member_returns_403() {
}
#[tokio::test]
async fn projects_list_platform_unavailable_degrades_gracefully() {
async fn projects_list_platform_unavailable_returns_500() {
let platform = MockPlatformClient::with_behavior(MockPlatformBehavior {
list_projects_result: Some(Err(PlatformError::Unavailable(
"connection refused".into(),
@@ -603,12 +603,13 @@ async fn projects_list_platform_unavailable_degrades_gracefully() {
)
.await
.unwrap();
assert_eq!(response.status(), StatusCode::OK);
assert_eq!(response.status(), StatusCode::INTERNAL_SERVER_ERROR);
let body = axum::body::to_bytes(response.into_body(), usize::MAX)
.await
.unwrap();
let html = String::from_utf8(body.to_vec()).unwrap();
assert!(html.contains("No projects yet"));
assert!(html.contains("Something went wrong"));
assert!(html.contains("connection refused"));
}
// ─── Project detail ─────────────────────────────────────────────────
@@ -634,9 +635,10 @@ async fn project_detail_returns_200_with_artifacts() {
.await
.unwrap();
let html = String::from_utf8(body.to_vec()).unwrap();
assert!(html.contains("my-api"));
assert!(html.contains("Deploy v1.0"));
assert!(html.contains("my-api-abc123"));
// The timeline is now rendered by a Svelte web component
assert!(html.contains("release-timeline"));
assert!(html.contains("org=\"testorg\""));
assert!(html.contains("project=\"my-api\""));
}
#[tokio::test]
@@ -664,7 +666,9 @@ async fn project_detail_empty_artifacts_shows_empty_state() {
.await
.unwrap();
let html = String::from_utf8(body.to_vec()).unwrap();
assert!(html.contains("No releases yet"));
// Empty state is now rendered client-side by the Svelte component
assert!(html.contains("release-timeline"));
assert!(html.contains("project=\"my-api\""));
}
#[tokio::test]
@@ -698,6 +702,7 @@ async fn project_detail_shows_enriched_artifact_data() {
type_organisation: None,
type_name: None,
type_version: None,
status: None,
}],
created_at: "2026-03-07T12:00:00Z".into(),
}])),
@@ -722,10 +727,79 @@ async fn project_detail_shows_enriched_artifact_data() {
.await
.unwrap();
let html = String::from_utf8(body.to_vec()).unwrap();
assert!(html.contains("v2.0.0"));
assert!(html.contains("main"));
assert!(html.contains("abc1234"));
assert!(html.contains("production"));
// Enriched data is now rendered client-side by the Svelte component
assert!(html.contains("release-timeline"));
assert!(html.contains("project=\"my-api\""));
}
/// The project timeline API returns JSON with `timeline` and `lanes` arrays,
/// and the mock data yields at least one timeline item.
#[tokio::test]
async fn timeline_api_returns_json_with_artifacts() {
    let (state, sessions) = test_state();
    let cookie = create_test_session(&sessions).await;
    let app = build_router(state);
    let response = app
        .oneshot(
            Request::builder()
                .uri("/api/orgs/testorg/projects/my-api/timeline")
                .header("cookie", &cookie)
                .body(Body::empty())
                .unwrap(),
        )
        .await
        .unwrap();
    assert_eq!(response.status(), StatusCode::OK);
    let body = axum::body::to_bytes(response.into_body(), usize::MAX)
        .await
        .unwrap();
    let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
    assert!(json["timeline"].is_array());
    assert!(json["lanes"].is_array());
    // Should have at least one timeline item from the mock data
    assert!(!json["timeline"].as_array().unwrap().is_empty());
}
/// The org-wide timeline API returns JSON with `timeline` and `lanes` arrays.
#[tokio::test]
async fn org_timeline_api_returns_json() {
    let (state, sessions) = test_state();
    let cookie = create_test_session(&sessions).await;
    let app = build_router(state);
    let response = app
        .oneshot(
            Request::builder()
                .uri("/api/orgs/testorg/timeline")
                .header("cookie", &cookie)
                .body(Body::empty())
                .unwrap(),
        )
        .await
        .unwrap();
    assert_eq!(response.status(), StatusCode::OK);
    let body = axum::body::to_bytes(response.into_body(), usize::MAX)
        .await
        .unwrap();
    let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
    assert!(json["timeline"].is_array());
    assert!(json["lanes"].is_array());
}
/// Unauthenticated requests to the timeline API are redirected to login.
#[tokio::test]
async fn timeline_api_requires_auth() {
    let (state, _sessions) = test_state();
    let app = build_router(state);

    // No session cookie on the request.
    let request = Request::builder()
        .uri("/api/orgs/testorg/projects/my-api/timeline")
        .body(Body::empty())
        .unwrap();
    let response = app.oneshot(request).await.unwrap();

    // Without a session the handler redirects to login with 303 See Other.
    assert_eq!(response.status(), StatusCode::SEE_OTHER);
}
// ─── Artifact detail ────────────────────────────────────────────────
@@ -787,6 +861,7 @@ async fn artifact_detail_shows_enriched_data() {
type_organisation: None,
type_name: None,
type_version: None,
status: None,
},
ArtifactDestination {
name: "staging".into(),
@@ -794,6 +869,7 @@ async fn artifact_detail_shows_enriched_data() {
type_organisation: None,
type_name: None,
type_version: None,
status: None,
},
],
created_at: "2026-03-07T12:00:00Z".into(),
@@ -1081,7 +1157,7 @@ async fn destinations_page_shows_empty_state() {
.await
.unwrap();
let html = String::from_utf8(body.to_vec()).unwrap();
assert!(html.contains("No destinations yet"));
assert!(html.contains("No environments yet"));
}
// ─── Releases ────────────────────────────────────────────────────────
@@ -1169,5 +1245,288 @@ async fn releases_page_shows_empty_state() {
.await
.unwrap();
let html = String::from_utf8(body.to_vec()).unwrap();
assert!(html.contains("No releases yet"));
// Empty state is now rendered client-side by the Svelte component
assert!(html.contains("release-timeline"));
assert!(html.contains("org=\"testorg\""));
}
// ─── User profile ──────────────────────────────────────────────────
/// The public user profile page renders the username and join date.
#[tokio::test]
async fn user_profile_shows_username() {
    let (state, sessions) = test_state();
    let cookie = create_test_session(&sessions).await;
    let app = build_router(state);

    let request = Request::builder()
        .uri("/users/testuser")
        .header("cookie", &cookie)
        .body(Body::empty())
        .unwrap();
    let response = app.oneshot(request).await.unwrap();
    assert_eq!(response.status(), StatusCode::OK);

    let raw = axum::body::to_bytes(response.into_body(), usize::MAX)
        .await
        .unwrap();
    let page = String::from_utf8(raw.to_vec()).unwrap();
    assert!(page.contains("testuser"));
    assert!(page.contains("Member since"));
}
// ─── Triggers (auto-release) ────────────────────────────────────────
/// The triggers page renders successfully for an authenticated user.
#[tokio::test]
async fn triggers_page_returns_200() {
    let (state, sessions) = test_state();
    let cookie = create_test_session(&sessions).await;
    let app = build_router(state);

    let request = Request::builder()
        .uri("/orgs/testorg/projects/my-api/triggers")
        .header("cookie", &cookie)
        .body(Body::empty())
        .unwrap();
    let response = app.oneshot(request).await.unwrap();
    assert_eq!(response.status(), StatusCode::OK);

    let raw = axum::body::to_bytes(response.into_body(), usize::MAX)
        .await
        .unwrap();
    let page = String::from_utf8(raw.to_vec()).unwrap();
    assert!(page.contains("Triggers"));
}
/// The triggers page renders each configured trigger's name and its
/// target environments.
#[tokio::test]
async fn triggers_page_shows_existing_triggers() {
    use forage_core::platform::Trigger;
    // Mock platform client that reports one enabled trigger matching
    // the "main" branch and targeting the "staging" environment.
    let platform = MockPlatformClient::with_behavior(MockPlatformBehavior {
        list_triggers_result: Some(Ok(vec![Trigger {
            id: "t1".into(),
            name: "deploy-main".into(),
            enabled: true,
            branch_pattern: Some("main".into()),
            title_pattern: None,
            author_pattern: None,
            commit_message_pattern: None,
            source_type_pattern: None,
            target_environments: vec!["staging".into()],
            target_destinations: vec![],
            force_release: false,
            use_pipeline: false,
            created_at: "2026-03-08T00:00:00Z".into(),
            updated_at: "2026-03-08T00:00:00Z".into(),
        }])),
        ..Default::default()
    });
    let (state, sessions) = test_state_with(MockForestClient::new(), platform);
    let cookie = create_test_session(&sessions).await;
    let app = build_router(state);
    let response = app
        .oneshot(
            Request::builder()
                .uri("/orgs/testorg/projects/my-api/triggers")
                .header("cookie", &cookie)
                .body(Body::empty())
                .unwrap(),
        )
        .await
        .unwrap();
    assert_eq!(response.status(), StatusCode::OK);
    let body = axum::body::to_bytes(response.into_body(), usize::MAX)
        .await
        .unwrap();
    let html = String::from_utf8(body.to_vec()).unwrap();
    // Both the trigger name and its target environment should appear.
    assert!(html.contains("deploy-main"));
    assert!(html.contains("staging"));
}
/// Non-admin members cannot create triggers; the handler responds 403.
#[tokio::test]
async fn create_trigger_requires_admin() {
    let (state, sessions) = test_state();
    // Session for a plain member (not an org admin).
    let cookie = create_test_session_member(&sessions).await;
    let app = build_router(state);

    let request = Request::builder()
        .method("POST")
        .uri("/orgs/testorg/projects/my-api/triggers")
        .header("cookie", &cookie)
        .header("content-type", "application/x-www-form-urlencoded")
        .body(Body::from("csrf_token=test-csrf&name=test-trigger"))
        .unwrap();
    let response = app.oneshot(request).await.unwrap();

    assert_eq!(response.status(), StatusCode::FORBIDDEN);
}
/// Trigger creation with an invalid CSRF token is rejected with 403.
#[tokio::test]
async fn create_trigger_requires_csrf() {
    let (state, sessions) = test_state();
    let cookie = create_test_session(&sessions).await;
    let app = build_router(state);

    let request = Request::builder()
        .method("POST")
        .uri("/orgs/testorg/projects/my-api/triggers")
        .header("cookie", &cookie)
        .header("content-type", "application/x-www-form-urlencoded")
        .body(Body::from("csrf_token=wrong-token&name=test-trigger"))
        .unwrap();
    let response = app.oneshot(request).await.unwrap();

    assert_eq!(response.status(), StatusCode::FORBIDDEN);
}
/// A valid trigger creation (admin session, correct CSRF token) redirects
/// back to the triggers page.
#[tokio::test]
async fn create_trigger_success_redirects() {
    let (state, sessions) = test_state();
    let cookie = create_test_session(&sessions).await;
    let app = build_router(state);

    let form = "csrf_token=test-csrf&name=deploy-main&branch_pattern=main&target_environments=staging";
    let request = Request::builder()
        .method("POST")
        .uri("/orgs/testorg/projects/my-api/triggers")
        .header("cookie", &cookie)
        .header("content-type", "application/x-www-form-urlencoded")
        .body(Body::from(form))
        .unwrap();
    let response = app.oneshot(request).await.unwrap();

    assert_eq!(response.status(), StatusCode::SEE_OTHER);
    assert_eq!(
        response.headers().get("location").unwrap(),
        "/orgs/testorg/projects/my-api/triggers"
    );
}
/// Non-admin members cannot toggle a trigger; the handler responds 403.
#[tokio::test]
async fn toggle_trigger_requires_admin() {
    let (state, sessions) = test_state();
    // Session for a plain member (not an org admin).
    let cookie = create_test_session_member(&sessions).await;
    let app = build_router(state);

    let request = Request::builder()
        .method("POST")
        .uri("/orgs/testorg/projects/my-api/triggers/deploy-main/toggle")
        .header("cookie", &cookie)
        .header("content-type", "application/x-www-form-urlencoded")
        .body(Body::from("csrf_token=test-csrf"))
        .unwrap();
    let response = app.oneshot(request).await.unwrap();

    assert_eq!(response.status(), StatusCode::FORBIDDEN);
}
/// Deleting a trigger with a valid admin session and CSRF token redirects
/// back to the triggers page.
#[tokio::test]
async fn delete_trigger_success_redirects() {
    let (state, sessions) = test_state();
    let cookie = create_test_session(&sessions).await;
    let app = build_router(state);

    let request = Request::builder()
        .method("POST")
        .uri("/orgs/testorg/projects/my-api/triggers/deploy-main/delete")
        .header("cookie", &cookie)
        .header("content-type", "application/x-www-form-urlencoded")
        .body(Body::from("csrf_token=test-csrf"))
        .unwrap();
    let response = app.oneshot(request).await.unwrap();

    assert_eq!(response.status(), StatusCode::SEE_OTHER);
    assert_eq!(
        response.headers().get("location").unwrap(),
        "/orgs/testorg/projects/my-api/triggers"
    );
}
// ─── Deployment Policies ────────────────────────────────────────────
/// The deployment-policies page renders successfully for an authenticated user.
#[tokio::test]
async fn policies_page_returns_200() {
    let (state, sessions) = test_state();
    let cookie = create_test_session(&sessions).await;
    let app = build_router(state);

    let request = Request::builder()
        .uri("/orgs/testorg/projects/my-api/policies")
        .header("cookie", &cookie)
        .body(Body::empty())
        .unwrap();
    let response = app.oneshot(request).await.unwrap();
    assert_eq!(response.status(), StatusCode::OK);

    let raw = axum::body::to_bytes(response.into_body(), usize::MAX)
        .await
        .unwrap();
    let page = String::from_utf8(raw.to_vec()).unwrap();
    assert!(page.contains("Deployment Policies"));
}
/// Non-admin members cannot create deployment policies; the handler
/// responds 403.
#[tokio::test]
async fn create_policy_requires_admin() {
    let (state, sessions) = test_state();
    // Session for a plain member (not an org admin).
    let cookie = create_test_session_member(&sessions).await;
    let app = build_router(state);

    let request = Request::builder()
        .method("POST")
        .uri("/orgs/testorg/projects/my-api/policies")
        .header("cookie", &cookie)
        .header("content-type", "application/x-www-form-urlencoded")
        .body(Body::from("csrf_token=test-csrf&name=test-policy&policy_type=soak_time"))
        .unwrap();
    let response = app.oneshot(request).await.unwrap();

    assert_eq!(response.status(), StatusCode::FORBIDDEN);
}
/// Policy creation with an invalid CSRF token is rejected with 403.
#[tokio::test]
async fn create_policy_requires_csrf() {
    let (state, sessions) = test_state();
    let cookie = create_test_session(&sessions).await;
    let app = build_router(state);

    let request = Request::builder()
        .method("POST")
        .uri("/orgs/testorg/projects/my-api/policies")
        .header("cookie", &cookie)
        .header("content-type", "application/x-www-form-urlencoded")
        .body(Body::from("csrf_token=wrong-token&name=test-policy&policy_type=soak_time"))
        .unwrap();
    let response = app.oneshot(request).await.unwrap();

    assert_eq!(response.status(), StatusCode::FORBIDDEN);
}