@@ -13,3 +13,4 @@ tracing.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
async-trait.workspace = true
|
||||
moka.workspace = true
|
||||
|
||||
426
crates/forage-db/src/integrations.rs
Normal file
426
crates/forage-db/src/integrations.rs
Normal file
@@ -0,0 +1,426 @@
|
||||
use forage_core::integrations::{
|
||||
CreateIntegrationInput, DeliveryStatus, Integration, IntegrationConfig, IntegrationError,
|
||||
IntegrationStore, IntegrationType, NotificationDelivery, NotificationRule, NOTIFICATION_TYPES,
|
||||
};
|
||||
use sqlx::PgPool;
|
||||
use uuid::Uuid;
|
||||
|
||||
/// PostgreSQL-backed integration store.
pub struct PgIntegrationStore {
    // Shared connection pool; all queries in this store go through it.
    pool: PgPool,
    /// AES-256 key for encrypting/decrypting integration configs.
    /// In production this comes from INTEGRATION_ENCRYPTION_KEY env var.
    /// For simplicity, we use a basic XOR-based obfuscation for now
    /// and will upgrade to proper AES when the `aes-gcm` crate is added.
    encryption_key: Vec<u8>,
}
|
||||
|
||||
impl PgIntegrationStore {
|
||||
pub fn new(pool: PgPool, encryption_key: Vec<u8>) -> Self {
|
||||
Self {
|
||||
pool,
|
||||
encryption_key,
|
||||
}
|
||||
}
|
||||
|
||||
fn encrypt_config(&self, config: &IntegrationConfig) -> Result<Vec<u8>, IntegrationError> {
|
||||
let json = serde_json::to_vec(config)
|
||||
.map_err(|e| IntegrationError::Encryption(e.to_string()))?;
|
||||
Ok(xor_bytes(&json, &self.encryption_key))
|
||||
}
|
||||
|
||||
fn decrypt_config(&self, encrypted: &[u8]) -> Result<IntegrationConfig, IntegrationError> {
|
||||
let json = xor_bytes(encrypted, &self.encryption_key);
|
||||
serde_json::from_slice(&json)
|
||||
.map_err(|e| IntegrationError::Encryption(format!("decrypt failed: {e}")))
|
||||
}
|
||||
|
||||
fn row_to_integration(&self, row: IntegrationRow) -> Result<Integration, IntegrationError> {
|
||||
let config = self.decrypt_config(&row.config_encrypted)?;
|
||||
let integration_type = IntegrationType::parse(&row.integration_type)
|
||||
.ok_or_else(|| IntegrationError::Store(format!("unknown type: {}", row.integration_type)))?;
|
||||
Ok(Integration {
|
||||
id: row.id.to_string(),
|
||||
organisation: row.organisation,
|
||||
integration_type,
|
||||
name: row.name,
|
||||
config,
|
||||
enabled: row.enabled,
|
||||
created_by: row.created_by,
|
||||
created_at: row.created_at.to_rfc3339(),
|
||||
updated_at: row.updated_at.to_rfc3339(),
|
||||
api_token: None,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Simple XOR obfuscation. This is NOT production-grade encryption.
/// TODO: Replace with AES-256-GCM when aes-gcm dependency is added.
///
/// The key is repeated cyclically across `data`, so the operation is its
/// own inverse; an empty key returns the input bytes unchanged.
fn xor_bytes(data: &[u8], key: &[u8]) -> Vec<u8> {
    if key.is_empty() {
        return data.to_vec();
    }
    data.iter()
        .zip(key.iter().cycle())
        .map(|(byte, k)| byte ^ k)
        .collect()
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl IntegrationStore for PgIntegrationStore {
|
||||
async fn list_integrations(
|
||||
&self,
|
||||
organisation: &str,
|
||||
) -> Result<Vec<Integration>, IntegrationError> {
|
||||
let rows: Vec<IntegrationRow> = sqlx::query_as(
|
||||
"SELECT id, organisation, integration_type, name, config_encrypted, enabled, created_by, created_at, updated_at
|
||||
FROM integrations WHERE organisation = $1 ORDER BY created_at",
|
||||
)
|
||||
.bind(organisation)
|
||||
.fetch_all(&self.pool)
|
||||
.await
|
||||
.map_err(|e| IntegrationError::Store(e.to_string()))?;
|
||||
|
||||
rows.into_iter().map(|r| self.row_to_integration(r)).collect()
|
||||
}
|
||||
|
||||
async fn get_integration(
|
||||
&self,
|
||||
organisation: &str,
|
||||
id: &str,
|
||||
) -> Result<Integration, IntegrationError> {
|
||||
let uuid: Uuid = id
|
||||
.parse()
|
||||
.map_err(|_| IntegrationError::NotFound(id.to_string()))?;
|
||||
|
||||
let row: IntegrationRow = sqlx::query_as(
|
||||
"SELECT id, organisation, integration_type, name, config_encrypted, enabled, created_by, created_at, updated_at
|
||||
FROM integrations WHERE id = $1 AND organisation = $2",
|
||||
)
|
||||
.bind(uuid)
|
||||
.bind(organisation)
|
||||
.fetch_optional(&self.pool)
|
||||
.await
|
||||
.map_err(|e| IntegrationError::Store(e.to_string()))?
|
||||
.ok_or_else(|| IntegrationError::NotFound(id.to_string()))?;
|
||||
|
||||
self.row_to_integration(row)
|
||||
}
|
||||
|
||||
async fn create_integration(
|
||||
&self,
|
||||
input: &CreateIntegrationInput,
|
||||
) -> Result<Integration, IntegrationError> {
|
||||
use forage_core::integrations::{generate_api_token, hash_api_token};
|
||||
|
||||
let id = Uuid::new_v4();
|
||||
let encrypted = self.encrypt_config(&input.config)?;
|
||||
let now = chrono::Utc::now();
|
||||
let raw_token = generate_api_token();
|
||||
let token_hash = hash_api_token(&raw_token);
|
||||
|
||||
// Insert integration with token hash
|
||||
sqlx::query(
|
||||
"INSERT INTO integrations (id, organisation, integration_type, name, config_encrypted, enabled, created_by, created_at, updated_at, api_token_hash)
|
||||
VALUES ($1, $2, $3, $4, $5, true, $6, $7, $7, $8)",
|
||||
)
|
||||
.bind(id)
|
||||
.bind(&input.organisation)
|
||||
.bind(input.integration_type.as_str())
|
||||
.bind(&input.name)
|
||||
.bind(&encrypted)
|
||||
.bind(&input.created_by)
|
||||
.bind(now)
|
||||
.bind(&token_hash)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
if e.to_string().contains("duplicate key") || e.to_string().contains("unique") {
|
||||
IntegrationError::Duplicate(format!(
|
||||
"Integration '{}' already exists in org '{}'",
|
||||
input.name, input.organisation
|
||||
))
|
||||
} else {
|
||||
IntegrationError::Store(e.to_string())
|
||||
}
|
||||
})?;
|
||||
|
||||
// Create default notification rules (all enabled)
|
||||
for nt in NOTIFICATION_TYPES {
|
||||
sqlx::query(
|
||||
"INSERT INTO notification_rules (id, integration_id, notification_type, enabled)
|
||||
VALUES ($1, $2, $3, true)",
|
||||
)
|
||||
.bind(Uuid::new_v4())
|
||||
.bind(id)
|
||||
.bind(*nt)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| IntegrationError::Store(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(Integration {
|
||||
id: id.to_string(),
|
||||
organisation: input.organisation.clone(),
|
||||
integration_type: input.integration_type,
|
||||
name: input.name.clone(),
|
||||
config: input.config.clone(),
|
||||
enabled: true,
|
||||
created_by: input.created_by.clone(),
|
||||
created_at: now.to_rfc3339(),
|
||||
updated_at: now.to_rfc3339(),
|
||||
api_token: Some(raw_token),
|
||||
})
|
||||
}
|
||||
|
||||
async fn set_integration_enabled(
|
||||
&self,
|
||||
organisation: &str,
|
||||
id: &str,
|
||||
enabled: bool,
|
||||
) -> Result<(), IntegrationError> {
|
||||
let uuid: Uuid = id
|
||||
.parse()
|
||||
.map_err(|_| IntegrationError::NotFound(id.to_string()))?;
|
||||
|
||||
let result = sqlx::query(
|
||||
"UPDATE integrations SET enabled = $1, updated_at = now() WHERE id = $2 AND organisation = $3",
|
||||
)
|
||||
.bind(enabled)
|
||||
.bind(uuid)
|
||||
.bind(organisation)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| IntegrationError::Store(e.to_string()))?;
|
||||
|
||||
if result.rows_affected() == 0 {
|
||||
return Err(IntegrationError::NotFound(id.to_string()));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete_integration(
|
||||
&self,
|
||||
organisation: &str,
|
||||
id: &str,
|
||||
) -> Result<(), IntegrationError> {
|
||||
let uuid: Uuid = id
|
||||
.parse()
|
||||
.map_err(|_| IntegrationError::NotFound(id.to_string()))?;
|
||||
|
||||
let result = sqlx::query("DELETE FROM integrations WHERE id = $1 AND organisation = $2")
|
||||
.bind(uuid)
|
||||
.bind(organisation)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| IntegrationError::Store(e.to_string()))?;
|
||||
|
||||
if result.rows_affected() == 0 {
|
||||
return Err(IntegrationError::NotFound(id.to_string()));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn list_rules(
|
||||
&self,
|
||||
integration_id: &str,
|
||||
) -> Result<Vec<NotificationRule>, IntegrationError> {
|
||||
let uuid: Uuid = integration_id
|
||||
.parse()
|
||||
.map_err(|_| IntegrationError::NotFound(integration_id.to_string()))?;
|
||||
|
||||
let rows: Vec<RuleRow> = sqlx::query_as(
|
||||
"SELECT id, integration_id, notification_type, enabled
|
||||
FROM notification_rules WHERE integration_id = $1 ORDER BY notification_type",
|
||||
)
|
||||
.bind(uuid)
|
||||
.fetch_all(&self.pool)
|
||||
.await
|
||||
.map_err(|e| IntegrationError::Store(e.to_string()))?;
|
||||
|
||||
Ok(rows
|
||||
.into_iter()
|
||||
.map(|r| NotificationRule {
|
||||
id: r.id.to_string(),
|
||||
integration_id: r.integration_id.to_string(),
|
||||
notification_type: r.notification_type,
|
||||
enabled: r.enabled,
|
||||
})
|
||||
.collect())
|
||||
}
|
||||
|
||||
async fn set_rule_enabled(
|
||||
&self,
|
||||
integration_id: &str,
|
||||
notification_type: &str,
|
||||
enabled: bool,
|
||||
) -> Result<(), IntegrationError> {
|
||||
let uuid: Uuid = integration_id
|
||||
.parse()
|
||||
.map_err(|_| IntegrationError::NotFound(integration_id.to_string()))?;
|
||||
|
||||
let result = sqlx::query(
|
||||
"UPDATE notification_rules SET enabled = $1
|
||||
WHERE integration_id = $2 AND notification_type = $3",
|
||||
)
|
||||
.bind(enabled)
|
||||
.bind(uuid)
|
||||
.bind(notification_type)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| IntegrationError::Store(e.to_string()))?;
|
||||
|
||||
if result.rows_affected() == 0 {
|
||||
// Rule doesn't exist yet — create it
|
||||
sqlx::query(
|
||||
"INSERT INTO notification_rules (id, integration_id, notification_type, enabled)
|
||||
VALUES ($1, $2, $3, $4)",
|
||||
)
|
||||
.bind(Uuid::new_v4())
|
||||
.bind(uuid)
|
||||
.bind(notification_type)
|
||||
.bind(enabled)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| IntegrationError::Store(e.to_string()))?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn record_delivery(
|
||||
&self,
|
||||
integration_id: &str,
|
||||
notification_id: &str,
|
||||
status: DeliveryStatus,
|
||||
error_message: Option<&str>,
|
||||
) -> Result<(), IntegrationError> {
|
||||
let uuid: Uuid = integration_id
|
||||
.parse()
|
||||
.map_err(|_| IntegrationError::NotFound(integration_id.to_string()))?;
|
||||
|
||||
sqlx::query(
|
||||
"INSERT INTO notification_deliveries (id, integration_id, notification_id, status, error_message, attempted_at)
|
||||
VALUES ($1, $2, $3, $4, $5, now())",
|
||||
)
|
||||
.bind(Uuid::new_v4())
|
||||
.bind(uuid)
|
||||
.bind(notification_id)
|
||||
.bind(status.as_str())
|
||||
.bind(error_message)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| IntegrationError::Store(e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn list_deliveries(
|
||||
&self,
|
||||
integration_id: &str,
|
||||
limit: usize,
|
||||
) -> Result<Vec<NotificationDelivery>, IntegrationError> {
|
||||
let uuid: Uuid = integration_id
|
||||
.parse()
|
||||
.map_err(|_| IntegrationError::NotFound(integration_id.to_string()))?;
|
||||
|
||||
let rows: Vec<DeliveryRow> = sqlx::query_as(
|
||||
"SELECT id, integration_id, notification_id, status, error_message, attempted_at
|
||||
FROM notification_deliveries
|
||||
WHERE integration_id = $1
|
||||
ORDER BY attempted_at DESC
|
||||
LIMIT $2",
|
||||
)
|
||||
.bind(uuid)
|
||||
.bind(limit as i64)
|
||||
.fetch_all(&self.pool)
|
||||
.await
|
||||
.map_err(|e| IntegrationError::Store(e.to_string()))?;
|
||||
|
||||
Ok(rows
|
||||
.into_iter()
|
||||
.map(|r| {
|
||||
let status = DeliveryStatus::parse(&r.status).unwrap_or(DeliveryStatus::Pending);
|
||||
NotificationDelivery {
|
||||
id: r.id.to_string(),
|
||||
integration_id: r.integration_id.to_string(),
|
||||
notification_id: r.notification_id,
|
||||
status,
|
||||
error_message: r.error_message,
|
||||
attempted_at: r.attempted_at.to_rfc3339(),
|
||||
}
|
||||
})
|
||||
.collect())
|
||||
}
|
||||
|
||||
async fn list_matching_integrations(
|
||||
&self,
|
||||
organisation: &str,
|
||||
notification_type: &str,
|
||||
) -> Result<Vec<Integration>, IntegrationError> {
|
||||
let rows: Vec<IntegrationRow> = sqlx::query_as(
|
||||
"SELECT i.id, i.organisation, i.integration_type, i.name, i.config_encrypted, i.enabled, i.created_by, i.created_at, i.updated_at
|
||||
FROM integrations i
|
||||
JOIN notification_rules nr ON nr.integration_id = i.id
|
||||
WHERE i.organisation = $1
|
||||
AND i.enabled = true
|
||||
AND nr.notification_type = $2
|
||||
AND nr.enabled = true
|
||||
ORDER BY i.created_at",
|
||||
)
|
||||
.bind(organisation)
|
||||
.bind(notification_type)
|
||||
.fetch_all(&self.pool)
|
||||
.await
|
||||
.map_err(|e| IntegrationError::Store(e.to_string()))?;
|
||||
|
||||
rows.into_iter().map(|r| self.row_to_integration(r)).collect()
|
||||
}
|
||||
|
||||
async fn get_integration_by_token_hash(
|
||||
&self,
|
||||
token_hash: &str,
|
||||
) -> Result<Integration, IntegrationError> {
|
||||
let row: IntegrationRow = sqlx::query_as(
|
||||
"SELECT id, organisation, integration_type, name, config_encrypted, enabled, created_by, created_at, updated_at
|
||||
FROM integrations WHERE api_token_hash = $1 AND enabled = true",
|
||||
)
|
||||
.bind(token_hash)
|
||||
.fetch_optional(&self.pool)
|
||||
.await
|
||||
.map_err(|e| IntegrationError::Store(e.to_string()))?
|
||||
.ok_or_else(|| IntegrationError::NotFound("invalid token".to_string()))?;
|
||||
|
||||
self.row_to_integration(row)
|
||||
}
|
||||
}
|
||||
|
||||
/// Raw `integrations` row as stored: config still encrypted, type still the
/// raw text column. Decoded into `Integration` by `row_to_integration`.
#[derive(sqlx::FromRow)]
struct IntegrationRow {
    id: Uuid,
    organisation: String,
    // Raw type string; parsed via IntegrationType::parse.
    integration_type: String,
    name: String,
    // Obfuscated JSON blob (see encrypt_config / decrypt_config).
    config_encrypted: Vec<u8>,
    enabled: bool,
    created_by: String,
    created_at: chrono::DateTime<chrono::Utc>,
    updated_at: chrono::DateTime<chrono::Utc>,
}
|
||||
|
||||
/// Raw `notification_rules` row: one on/off switch per
/// (integration, notification_type) pair.
#[derive(sqlx::FromRow)]
struct RuleRow {
    id: Uuid,
    integration_id: Uuid,
    notification_type: String,
    enabled: bool,
}
|
||||
|
||||
/// Raw `notification_deliveries` row: an audit record of one delivery
/// attempt against an integration.
#[derive(sqlx::FromRow)]
struct DeliveryRow {
    id: Uuid,
    integration_id: Uuid,
    notification_id: String,
    // Raw status text; parsed into DeliveryStatus (falling back to
    // Pending) in list_deliveries.
    status: String,
    error_message: Option<String>,
    attempted_at: chrono::DateTime<chrono::Utc>,
}
|
||||
@@ -1,5 +1,7 @@
|
||||
mod integrations;
|
||||
mod sessions;
|
||||
|
||||
pub use integrations::PgIntegrationStore;
|
||||
pub use sessions::PgSessionStore;
|
||||
pub use sqlx::PgPool;
|
||||
|
||||
|
||||
@@ -0,0 +1,37 @@
|
||||
-- One row per configured third-party integration, scoped to an organisation.
CREATE TABLE IF NOT EXISTS integrations (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    organisation TEXT NOT NULL,
    integration_type TEXT NOT NULL,
    name TEXT NOT NULL,
    -- Config blob is encrypted/obfuscated at the application layer.
    config_encrypted BYTEA NOT NULL,
    enabled BOOLEAN NOT NULL DEFAULT true,
    created_by TEXT NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    UNIQUE(organisation, name)
);

-- IF NOT EXISTS on the indexes keeps the migration idempotent, matching
-- the CREATE TABLE statements above (previously a re-run would fail here).
CREATE INDEX IF NOT EXISTS idx_integrations_org ON integrations(organisation);
CREATE INDEX IF NOT EXISTS idx_integrations_org_enabled ON integrations(organisation, enabled);

-- Per-integration on/off switch for each notification type.
CREATE TABLE IF NOT EXISTS notification_rules (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    integration_id UUID NOT NULL REFERENCES integrations(id) ON DELETE CASCADE,
    notification_type TEXT NOT NULL,
    enabled BOOLEAN NOT NULL DEFAULT true,
    -- Also backs the application's rule upsert.
    UNIQUE(integration_id, notification_type)
);

CREATE INDEX IF NOT EXISTS idx_notification_rules_integration ON notification_rules(integration_id);

-- Audit trail of delivery attempts; rows cascade away with the integration.
CREATE TABLE IF NOT EXISTS notification_deliveries (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    integration_id UUID NOT NULL REFERENCES integrations(id) ON DELETE CASCADE,
    notification_id TEXT NOT NULL,
    status TEXT NOT NULL,
    error_message TEXT,
    attempted_at TIMESTAMPTZ NOT NULL DEFAULT now()
);

CREATE INDEX IF NOT EXISTS idx_deliveries_integration ON notification_deliveries(integration_id, attempted_at DESC);
CREATE INDEX IF NOT EXISTS idx_deliveries_status ON notification_deliveries(status, attempted_at DESC);
|
||||
@@ -0,0 +1 @@
|
||||
-- Cache the user's organisation memberships on the session row (JSONB
-- array), written alongside user_emails by the session store.
ALTER TABLE sessions ADD COLUMN user_orgs JSONB;
|
||||
@@ -0,0 +1,2 @@
|
||||
-- Inbound API token per integration, stored as a hash only; the raw token
-- is returned once at creation time and never persisted.
ALTER TABLE integrations ADD COLUMN api_token_hash TEXT;
-- Partial unique index: token hashes must be unique, but many rows may
-- have NULL (no token issued).
CREATE UNIQUE INDEX idx_integrations_api_token ON integrations(api_token_hash) WHERE api_token_hash IS NOT NULL;
|
||||
@@ -1,16 +1,26 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use forage_core::auth::UserEmail;
|
||||
use forage_core::session::{CachedUser, SessionData, SessionError, SessionId, SessionStore};
|
||||
use forage_core::session::{CachedOrg, CachedUser, SessionData, SessionError, SessionId, SessionStore};
|
||||
use moka::future::Cache;
|
||||
use sqlx::PgPool;
|
||||
|
||||
/// PostgreSQL-backed session store for horizontal scaling.
|
||||
/// PostgreSQL-backed session store with a Moka write-through cache.
|
||||
/// Reads check the cache first, falling back to Postgres on miss.
|
||||
/// Writes update both cache and Postgres atomically.
|
||||
pub struct PgSessionStore {
    // Durable backing store; Postgres is the source of truth for sessions.
    pool: PgPool,
    // In-process cache keyed by the session-id string; populated on
    // create/update and backfilled on read misses.
    cache: Cache<String, SessionData>,
}
|
||||
|
||||
impl PgSessionStore {
|
||||
pub fn new(pool: PgPool) -> Self {
|
||||
Self { pool }
|
||||
let cache = Cache::builder()
|
||||
.max_capacity(10_000)
|
||||
.time_to_idle(Duration::from_secs(30 * 60)) // evict after 30min idle
|
||||
.build();
|
||||
Self { pool, cache }
|
||||
}
|
||||
|
||||
/// Remove sessions inactive for longer than `max_inactive_days`.
|
||||
@@ -21,6 +31,10 @@ impl PgSessionStore {
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| SessionError::Store(e.to_string()))?;
|
||||
|
||||
// Moka handles its own TTL eviction, but force a sync for reaped sessions
|
||||
self.cache.run_pending_tasks().await;
|
||||
|
||||
Ok(result.rows_affected())
|
||||
}
|
||||
}
|
||||
@@ -29,21 +43,11 @@ impl PgSessionStore {
|
||||
impl SessionStore for PgSessionStore {
|
||||
async fn create(&self, data: SessionData) -> Result<SessionId, SessionError> {
|
||||
let id = SessionId::generate();
|
||||
let (user_id, username, emails_json) = match &data.user {
|
||||
Some(u) => (
|
||||
Some(u.user_id.clone()),
|
||||
Some(u.username.clone()),
|
||||
Some(
|
||||
serde_json::to_value(&u.emails)
|
||||
.map_err(|e| SessionError::Store(e.to_string()))?,
|
||||
),
|
||||
),
|
||||
None => (None, None, None),
|
||||
};
|
||||
let (user_id, username, emails_json, orgs_json) = extract_user_fields(&data)?;
|
||||
|
||||
sqlx::query(
|
||||
"INSERT INTO sessions (session_id, access_token, refresh_token, access_expires_at, user_id, username, user_emails, csrf_token, created_at, last_seen_at)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)",
|
||||
"INSERT INTO sessions (session_id, access_token, refresh_token, access_expires_at, user_id, username, user_emails, user_orgs, csrf_token, created_at, last_seen_at)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)",
|
||||
)
|
||||
.bind(id.as_str())
|
||||
.bind(&data.access_token)
|
||||
@@ -52,6 +56,7 @@ impl SessionStore for PgSessionStore {
|
||||
.bind(&user_id)
|
||||
.bind(&username)
|
||||
.bind(&emails_json)
|
||||
.bind(&orgs_json)
|
||||
.bind(&data.csrf_token)
|
||||
.bind(data.created_at)
|
||||
.bind(data.last_seen_at)
|
||||
@@ -59,12 +64,21 @@ impl SessionStore for PgSessionStore {
|
||||
.await
|
||||
.map_err(|e| SessionError::Store(e.to_string()))?;
|
||||
|
||||
// Populate cache
|
||||
self.cache.insert(id.as_str().to_string(), data).await;
|
||||
|
||||
Ok(id)
|
||||
}
|
||||
|
||||
async fn get(&self, id: &SessionId) -> Result<Option<SessionData>, SessionError> {
|
||||
// Check cache first
|
||||
if let Some(data) = self.cache.get(id.as_str()).await {
|
||||
return Ok(Some(data));
|
||||
}
|
||||
|
||||
// Cache miss — fall back to Postgres
|
||||
let row: Option<SessionRow> = sqlx::query_as(
|
||||
"SELECT access_token, refresh_token, access_expires_at, user_id, username, user_emails, csrf_token, created_at, last_seen_at
|
||||
"SELECT access_token, refresh_token, access_expires_at, user_id, username, user_emails, user_orgs, csrf_token, created_at, last_seen_at
|
||||
FROM sessions WHERE session_id = $1",
|
||||
)
|
||||
.bind(id.as_str())
|
||||
@@ -72,25 +86,22 @@ impl SessionStore for PgSessionStore {
|
||||
.await
|
||||
.map_err(|e| SessionError::Store(e.to_string()))?;
|
||||
|
||||
Ok(row.map(|r| r.into_session_data()))
|
||||
if let Some(row) = row {
|
||||
let data = row.into_session_data();
|
||||
// Backfill cache
|
||||
self.cache.insert(id.as_str().to_string(), data.clone()).await;
|
||||
Ok(Some(data))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
async fn update(&self, id: &SessionId, data: SessionData) -> Result<(), SessionError> {
|
||||
let (user_id, username, emails_json) = match &data.user {
|
||||
Some(u) => (
|
||||
Some(u.user_id.clone()),
|
||||
Some(u.username.clone()),
|
||||
Some(
|
||||
serde_json::to_value(&u.emails)
|
||||
.map_err(|e| SessionError::Store(e.to_string()))?,
|
||||
),
|
||||
),
|
||||
None => (None, None, None),
|
||||
};
|
||||
let (user_id, username, emails_json, orgs_json) = extract_user_fields(&data)?;
|
||||
|
||||
sqlx::query(
|
||||
"UPDATE sessions SET access_token = $1, refresh_token = $2, access_expires_at = $3, user_id = $4, username = $5, user_emails = $6, csrf_token = $7, last_seen_at = $8
|
||||
WHERE session_id = $9",
|
||||
"UPDATE sessions SET access_token = $1, refresh_token = $2, access_expires_at = $3, user_id = $4, username = $5, user_emails = $6, user_orgs = $7, csrf_token = $8, last_seen_at = $9
|
||||
WHERE session_id = $10",
|
||||
)
|
||||
.bind(&data.access_token)
|
||||
.bind(&data.refresh_token)
|
||||
@@ -98,6 +109,7 @@ impl SessionStore for PgSessionStore {
|
||||
.bind(&user_id)
|
||||
.bind(&username)
|
||||
.bind(&emails_json)
|
||||
.bind(&orgs_json)
|
||||
.bind(&data.csrf_token)
|
||||
.bind(data.last_seen_at)
|
||||
.bind(id.as_str())
|
||||
@@ -105,6 +117,9 @@ impl SessionStore for PgSessionStore {
|
||||
.await
|
||||
.map_err(|e| SessionError::Store(e.to_string()))?;
|
||||
|
||||
// Update cache
|
||||
self.cache.insert(id.as_str().to_string(), data).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -115,10 +130,42 @@ impl SessionStore for PgSessionStore {
|
||||
.await
|
||||
.map_err(|e| SessionError::Store(e.to_string()))?;
|
||||
|
||||
// Evict from cache
|
||||
self.cache.invalidate(id.as_str()).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Extract user fields for SQL binding, shared by create and update.
|
||||
fn extract_user_fields(
|
||||
data: &SessionData,
|
||||
) -> Result<
|
||||
(
|
||||
Option<String>,
|
||||
Option<String>,
|
||||
Option<serde_json::Value>,
|
||||
Option<serde_json::Value>,
|
||||
),
|
||||
SessionError,
|
||||
> {
|
||||
match &data.user {
|
||||
Some(u) => Ok((
|
||||
Some(u.user_id.clone()),
|
||||
Some(u.username.clone()),
|
||||
Some(
|
||||
serde_json::to_value(&u.emails)
|
||||
.map_err(|e| SessionError::Store(e.to_string()))?,
|
||||
),
|
||||
Some(
|
||||
serde_json::to_value(&u.orgs)
|
||||
.map_err(|e| SessionError::Store(e.to_string()))?,
|
||||
),
|
||||
)),
|
||||
None => Ok((None, None, None, None)),
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(sqlx::FromRow)]
|
||||
struct SessionRow {
|
||||
access_token: String,
|
||||
@@ -127,6 +174,7 @@ struct SessionRow {
|
||||
user_id: Option<String>,
|
||||
username: Option<String>,
|
||||
user_emails: Option<serde_json::Value>,
|
||||
user_orgs: Option<serde_json::Value>,
|
||||
csrf_token: String,
|
||||
created_at: DateTime<Utc>,
|
||||
last_seen_at: DateTime<Utc>,
|
||||
@@ -140,11 +188,15 @@ impl SessionRow {
|
||||
.user_emails
|
||||
.and_then(|v| serde_json::from_value(v).ok())
|
||||
.unwrap_or_default();
|
||||
let orgs: Vec<CachedOrg> = self
|
||||
.user_orgs
|
||||
.and_then(|v| serde_json::from_value(v).ok())
|
||||
.unwrap_or_default();
|
||||
Some(CachedUser {
|
||||
user_id,
|
||||
username,
|
||||
emails,
|
||||
orgs: vec![],
|
||||
orgs,
|
||||
})
|
||||
}
|
||||
_ => None,
|
||||
|
||||
Reference in New Issue
Block a user