From 8af2a8d31c1468a57deb5a7b1e0b50608d447d28 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 21:58:24 +1000
Subject: [PATCH 001/172] Enable foreign_keys pragma on SQLite native pool
---
src/db/mod.rs | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/src/db/mod.rs b/src/db/mod.rs
index dca5d31..b06072b 100644
--- a/src/db/mod.rs
+++ b/src/db/mod.rs
@@ -327,6 +327,7 @@ impl DbPool {
sqlx::sqlite::SqliteConnectOptions::new()
.filename(&cfg.path)
.create_if_missing(cfg.create_if_missing)
+ .foreign_keys(true)
.journal_mode(if cfg.wal_mode {
sqlx::sqlite::SqliteJournalMode::Wal
} else {
@@ -336,6 +337,13 @@ impl DbPool {
)
.await?;
+ let fk_check: i64 = sqlx::query_scalar("PRAGMA foreign_keys")
+ .fetch_one(&pool)
+ .await?;
+ if fk_check != 1 {
+ return Err(DbError::NotConfigured);
+ }
+
let repos = CachedRepos {
organizations: Arc::new(sqlite::SqliteOrganizationRepo::new(pool.clone())),
projects: Arc::new(sqlite::SqliteProjectRepo::new(pool.clone())),
From f81bb8c543d153777cfff41011614c134f9f4ef4 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:00:17 +1000
Subject: [PATCH 002/172] Wire Postgres pool config (timeouts and ssl mode)
---
src/db/mod.rs | 36 ++++++++++++++++++++++++------------
1 file changed, 24 insertions(+), 12 deletions(-)
diff --git a/src/db/mod.rs b/src/db/mod.rs
index b06072b..d7fd087 100644
--- a/src/db/mod.rs
+++ b/src/db/mod.rs
@@ -393,21 +393,33 @@ impl DbPool {
}
#[cfg(feature = "database-postgres")]
DatabaseConfig::Postgres(cfg) => {
- let write_pool = sqlx::postgres::PgPoolOptions::new()
- .min_connections(cfg.min_connections)
- .max_connections(cfg.max_connections)
- .connect(&cfg.url)
- .await?;
+ let ssl_mode = match cfg.ssl_mode {
+ crate::config::PostgresSslMode::Disable => sqlx::postgres::PgSslMode::Disable,
+ crate::config::PostgresSslMode::Prefer => sqlx::postgres::PgSslMode::Prefer,
+ crate::config::PostgresSslMode::Require => sqlx::postgres::PgSslMode::Require,
+ crate::config::PostgresSslMode::VerifyCa => sqlx::postgres::PgSslMode::VerifyCa,
+ crate::config::PostgresSslMode::VerifyFull => {
+ sqlx::postgres::PgSslMode::VerifyFull
+ }
+ };
+ let connect_opts = |url: &str| -> Result<sqlx::postgres::PgConnectOptions, DbError> {
+ let opts: sqlx::postgres::PgConnectOptions = url.parse().map_err(|e| {
+ DbError::Validation(format!("Invalid Postgres URL: {e}"))
+ })?;
+ Ok(opts.ssl_mode(ssl_mode))
+ };
+ let pool_opts = || {
+ sqlx::postgres::PgPoolOptions::new()
+ .min_connections(cfg.min_connections)
+ .max_connections(cfg.max_connections)
+ .acquire_timeout(std::time::Duration::from_secs(cfg.connect_timeout_secs))
+ .idle_timeout(std::time::Duration::from_secs(cfg.idle_timeout_secs))
+ };
+ let write_pool = pool_opts().connect_with(connect_opts(&cfg.url)?).await?;
let read_pool = if let Some(read_url) = &cfg.read_url {
tracing::info!("Configuring read replica pool");
- Some(
- sqlx::postgres::PgPoolOptions::new()
- .min_connections(cfg.min_connections)
- .max_connections(cfg.max_connections)
- .connect(read_url)
- .await?,
- )
+ Some(pool_opts().connect_with(connect_opts(read_url)?).await?)
} else {
None
};
From 84ee9867ccce68fae0dc4bee13a81d2e62a5d170 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:00:49 +1000
Subject: [PATCH 003/172] Attach ConnectInfo to axum service for client IP
extraction
---
src/cli/server.rs | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/src/cli/server.rs b/src/cli/server.rs
index 8e570d2..33ed357 100644
--- a/src/cli/server.rs
+++ b/src/cli/server.rs
@@ -400,11 +400,17 @@ pub(crate) async fn run_server(explicit_config_path: Option<&str>, no_browser: b
#[cfg(not(feature = "wizard"))]
let _ = no_browser;
- // Graceful shutdown: wait for SIGINT/SIGTERM, then wait for all background tasks
- axum::serve(listener, app)
- .with_graceful_shutdown(shutdown_signal(task_tracker, usage_buffer_handle))
- .await
- .unwrap();
+ // Graceful shutdown: wait for SIGINT/SIGTERM, then wait for all background tasks.
+ // `into_make_service_with_connect_info` is required so middleware can read the
+ // connecting peer address via `ConnectInfo` for IP-based rate limits,
+ // API-key IP allowlists, and audit logging.
+ axum::serve(
+ listener,
+ app.into_make_service_with_connect_info::<std::net::SocketAddr>(),
+ )
+ .with_graceful_shutdown(shutdown_signal(task_tracker, usage_buffer_handle))
+ .await
+ .unwrap();
}
async fn shutdown_signal(
From 9883bb306d2dd2e808f88cbaeb07f1a184aad5cf Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:04:31 +1000
Subject: [PATCH 004/172] Use safe prefix strip helper for Anthropic stream IDs
---
src/providers/anthropic/convert.rs | 10 +++++--
src/providers/anthropic/stream.rs | 43 +++++++++++++++---------------
2 files changed, 30 insertions(+), 23 deletions(-)
diff --git a/src/providers/anthropic/convert.rs b/src/providers/anthropic/convert.rs
index 3650f99..d2309fc 100644
--- a/src/providers/anthropic/convert.rs
+++ b/src/providers/anthropic/convert.rs
@@ -954,7 +954,10 @@ pub fn convert_anthropic_to_responses_response(
type_: ResponsesReasoningType::Reasoning,
id: format!(
"rs_{}",
- &anthropic.id[4..].chars().take(24).collect::<String>()
+ crate::providers::anthropic::stream::strip_anthropic_prefix(
+ &anthropic.id,
+ "msg_"
+ )
),
content: None, // Anthropic doesn't provide structured reasoning content
summary: vec![], // Would need to generate summary
@@ -996,7 +999,10 @@ pub fn convert_anthropic_to_responses_response(
ResponsesOutputItem::Message(OutputMessage {
id: format!(
"msg_{}",
- &anthropic.id[4..].chars().take(24).collect::<String>()
+ crate::providers::anthropic::stream::strip_anthropic_prefix(
+ &anthropic.id,
+ "msg_"
+ )
),
type_: MessageType::Message,
role: "assistant".to_string(),
diff --git a/src/providers/anthropic/stream.rs b/src/providers/anthropic/stream.rs
index 533bd6a..2da21a5 100644
--- a/src/providers/anthropic/stream.rs
+++ b/src/providers/anthropic/stream.rs
@@ -14,6 +14,18 @@ use serde::{Deserialize, Serialize};
use crate::config::StreamingBufferConfig;
+/// Strip a known Anthropic ID prefix (`msg_`, `toolu_`, …) and return up to 24
+/// chars of the remainder. Falls back to the whole id if the prefix isn't
+/// present, which protects against panics on short ids or multibyte
+/// boundaries inside the prefix.
+pub(crate) fn strip_anthropic_prefix(id: &str, prefix: &str) -> String {
+ id.strip_prefix(prefix)
+ .unwrap_or(id)
+ .chars()
+ .take(24)
+ .collect()
+}
+
// ============================================================================
// Anthropic Streaming Event Types
// ============================================================================
@@ -820,10 +832,8 @@ impl AnthropicToResponsesStream {
match event {
AnthropicStreamEvent::MessageStart { message } => {
self.state.response_id = message.id.clone();
- self.state.message_id = format!(
- "msg_{}",
- &message.id[4..].chars().take(24).collect::<String>()
- );
+ self.state.message_id =
+ format!("msg_{}", strip_anthropic_prefix(&message.id, "msg_"));
self.state.model = message.model;
if let Some(usage) = message.usage {
self.state.input_tokens = usage.input_tokens;
@@ -904,7 +914,7 @@ impl AnthropicToResponsesStream {
"output_index": output_index,
"item": {
"type": "function_call",
- "id": format!("fc_{}", &id[6..].chars().take(24).collect::()),
+ "id": format!("fc_{}", strip_anthropic_prefix(&id, "toolu_")),
"call_id": id,
"name": name,
"arguments": "",
@@ -927,7 +937,7 @@ impl AnthropicToResponsesStream {
"output_index": 0,
"item": {
"type": "reasoning",
- "id": format!("rs_{}", &self.state.response_id[4..].chars().take(24).collect::()),
+ "id": format!("rs_{}", strip_anthropic_prefix(&self.state.response_id, "msg_")),
"summary": []
}
}),
@@ -977,7 +987,7 @@ impl AnthropicToResponsesStream {
// Emit function call arguments delta
let fc_id =
- format!("fc_{}", &tool_id[6..].chars().take(24).collect::());
+ format!("fc_{}", strip_anthropic_prefix(&tool_id, "toolu_"));
self.emit_event(
"response.function_call_arguments.delta",
serde_json::json!({
@@ -996,10 +1006,7 @@ impl AnthropicToResponsesStream {
// Emit reasoning summary delta
let reasoning_id = format!(
"rs_{}",
- &self.state.response_id[4..]
- .chars()
- .take(24)
- .collect::<String>()
+ strip_anthropic_prefix(&self.state.response_id, "msg_")
);
self.emit_event(
"response.reasoning_summary_text.delta",
@@ -1036,10 +1043,7 @@ impl AnthropicToResponsesStream {
if self.state.emitted_reasoning_added {
let reasoning_id = format!(
"rs_{}",
- &self.state.response_id[4..]
- .chars()
- .take(24)
- .collect::<String>()
+ strip_anthropic_prefix(&self.state.response_id, "msg_")
);
// Emit reasoning summary done
@@ -1142,7 +1146,7 @@ impl AnthropicToResponsesStream {
for (i, tool_id, tool_name, arguments) in tool_calls {
let output_index = self.tool_output_index(i);
let fc_id =
- format!("fc_{}", &tool_id[6..].chars().take(24).collect::());
+ format!("fc_{}", strip_anthropic_prefix(tool_id.as_str(), "toolu_"));
self.emit_event(
"response.function_call_arguments.done",
@@ -1176,10 +1180,7 @@ impl AnthropicToResponsesStream {
if self.state.emitted_reasoning_added {
let reasoning_id = format!(
"rs_{}",
- &self.state.response_id[4..]
- .chars()
- .take(24)
- .collect::<String>()
+ strip_anthropic_prefix(&self.state.response_id, "msg_")
);
let mut reasoning_item = serde_json::json!({
"type": "reasoning",
@@ -1215,7 +1216,7 @@ impl AnthropicToResponsesStream {
// Tool calls come last
for (_, tool_id, tool_name, arguments) in &self.state.tool_calls {
let fc_id =
- format!("fc_{}", &tool_id[6..].chars().take(24).collect::());
+ format!("fc_{}", strip_anthropic_prefix(tool_id.as_str(), "toolu_"));
output.push(serde_json::json!({
"type": "function_call",
"id": fc_id,
From c58d4406de5ff6f880fe76408a9deb28e832b235 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:04:52 +1000
Subject: [PATCH 005/172] Use /health/live for liveness and /health/ready for
readiness
---
Dockerfile | 2 +-
helm/hadrian/values.yaml | 9 +++++++--
2 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/Dockerfile b/Dockerfile
index 46361a6..1bd24b5 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -158,6 +158,6 @@ EXPOSE 8080
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
- CMD curl -f http://localhost:8080/health || exit 1
+ CMD curl -f http://localhost:8080/health/live || exit 1
CMD ["/app/hadrian", "--config", "/app/config/hadrian.toml"]
diff --git a/helm/hadrian/values.yaml b/helm/hadrian/values.yaml
index 21c2671..40e614e 100644
--- a/helm/hadrian/values.yaml
+++ b/helm/hadrian/values.yaml
@@ -269,9 +269,12 @@ resources:
memory: 256Mi
# -- Liveness probe configuration
+# `/health/live` is a cheap "process is up" check. The full `/health` aggregates
+# downstream subsystems (DB, cache, providers) and would cause every pod to
+# restart on any transient downstream blip — never use it for liveness.
livenessProbe:
httpGet:
- path: /health
+ path: /health/live
port: http
initialDelaySeconds: 10
periodSeconds: 30
@@ -279,9 +282,11 @@ livenessProbe:
failureThreshold: 3
# -- Readiness probe configuration
+# `/health/ready` checks DB connectivity, which is the right gate for accepting
+# traffic.
readinessProbe:
httpGet:
- path: /health
+ path: /health/ready
port: http
initialDelaySeconds: 5
periodSeconds: 10
From 1b81caea5e7c79256056d4fe7ee7450a85708851 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:05:40 +1000
Subject: [PATCH 006/172] Tighten OAuth callback loopback check and strip
duplicate code param
---
src/routes/admin/oauth.rs | 31 ++++++++++++++++++++++++++++---
1 file changed, 28 insertions(+), 3 deletions(-)
diff --git a/src/routes/admin/oauth.rs b/src/routes/admin/oauth.rs
index 58348bb..ab7f866 100644
--- a/src/routes/admin/oauth.rs
+++ b/src/routes/admin/oauth.rs
@@ -51,7 +51,17 @@ fn validate_callback_url(callback_url: &str, pkce: &OAuthPkceConfig) -> Result<(), AdminError> {
+ let is_loopback = match url.host() {
+ Some(url::Host::Domain(d)) => d.eq_ignore_ascii_case("localhost"),
+ Some(url::Host::Ipv4(ip)) => ip.is_loopback(),
+ Some(url::Host::Ipv6(ip)) => {
+ ip.is_loopback() || ip.to_ipv4_mapped().map(|v4| v4.is_loopback()).unwrap_or(false)
+ }
+ None => false,
+ };
if scheme != "https" && !(scheme == "http" && is_loopback) {
return Err(AdminError::Validation(
"callback_url must use https (http is allowed only for loopback hosts)".to_string(),
@@ -68,11 +78,26 @@ fn validate_callback_url(callback_url: &str, pkce: &OAuthPkceConfig) -> Result<(), AdminError> {
 pub fn build_callback_redirect(callback_url: &str, code: &str) -> Result<String, AdminError> {
let mut redirect = url::Url::parse(callback_url)
.map_err(|_| AdminError::Validation("callback_url must be a valid URL".to_string()))?;
- redirect.query_pairs_mut().append_pair("code", code);
+ let preserved: Vec<(String, String)> = redirect
+ .query_pairs()
+ .filter(|(k, _)| k != "code")
+ .map(|(k, v)| (k.into_owned(), v.into_owned()))
+ .collect();
+ {
+ let mut pairs = redirect.query_pairs_mut();
+ pairs.clear();
+ for (k, v) in &preserved {
+ pairs.append_pair(k, v);
+ }
+ pairs.append_pair("code", code);
+ }
Ok(redirect.to_string())
}
From 932f17f6af54438dd388531368c8f22bce64450d Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:06:01 +1000
Subject: [PATCH 007/172] Pin OpenAPI info.version to CARGO_PKG_VERSION
---
src/openapi.rs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/openapi.rs b/src/openapi.rs
index e4ea858..841e78f 100644
--- a/src/openapi.rs
+++ b/src/openapi.rs
@@ -14,7 +14,7 @@ use crate::{
#[openapi(
info(
title = "Hadrian Gateway API",
- version = "0.1.0",
+ version = env!("CARGO_PKG_VERSION"),
description = r#"**Hadrian Gateway** is an AI Gateway providing a unified OpenAI-compatible API for routing requests to multiple LLM providers.
## Overview
From afc03c3e0f64799b49393484128d9537f10adb2a Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:06:26 +1000
Subject: [PATCH 008/172] Stop swallowing cargo audit failures in CI scripts
---
scripts/ci-backend.sh | 7 +++++--
scripts/ci.sh | 2 +-
2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/scripts/ci-backend.sh b/scripts/ci-backend.sh
index 9441b7a..a608984 100755
--- a/scripts/ci-backend.sh
+++ b/scripts/ci-backend.sh
@@ -86,10 +86,13 @@ else
FAILED=1
fi
-# Security audit (non-blocking)
+# Security audit
step "Security audit"
if command -v cargo-audit &> /dev/null; then
- cargo audit || echo -e "${YELLOW}!${NC} Audit warnings (non-blocking)"
+ if ! cargo audit; then
+ echo -e "${RED}✗${NC} Security audit failed"
+ FAILED=1
+ fi
else
echo " cargo-audit not installed, skipping"
fi
diff --git a/scripts/ci.sh b/scripts/ci.sh
index 210b539..bb35150 100755
--- a/scripts/ci.sh
+++ b/scripts/ci.sh
@@ -127,7 +127,7 @@ if [ "$RUN_BACKEND" = true ]; then
run_check "Tests (unit + integration)" cargo test -- --include-ignored
- run_check "Security audit" cargo audit || true # Don't fail on audit warnings
+ run_check "Security audit" cargo audit
fi
# Frontend checks
From 550e583d5a5473b9bbf6d310bb0ce9c8f23bfcb9 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:12:09 +1000
Subject: [PATCH 009/172] Reject empty JWT and proxy audience values at config
load
---
src/auth/jwt.rs | 38 ++++++++++++++++++++++++++++----------
src/config/auth.rs | 38 ++++++++++++++++++++++++++++++++++++++
2 files changed, 66 insertions(+), 10 deletions(-)
diff --git a/src/auth/jwt.rs b/src/auth/jwt.rs
index 519b1bf..e2556c7 100644
--- a/src/auth/jwt.rs
+++ b/src/auth/jwt.rs
@@ -115,11 +115,7 @@ impl JwtValidator {
/// Create a new JWT validator.
#[allow(dead_code)] // Auth infrastructure
pub fn new(config: JwtAuthConfig) -> Result<Self, AuthError> {
- if config.allowed_algorithms.is_empty() {
- return Err(AuthError::Internal(
- "JWT allowed_algorithms must not be empty".into(),
- ));
- }
+ Self::check_config(&config)?;
Ok(Self {
config,
http_client: reqwest::Client::new(),
@@ -132,11 +128,7 @@ impl JwtValidator {
config: JwtAuthConfig,
http_client: reqwest::Client,
) -> Result<Self, AuthError> {
- if config.allowed_algorithms.is_empty() {
- return Err(AuthError::Internal(
- "JWT allowed_algorithms must not be empty".into(),
- ));
- }
+ Self::check_config(&config)?;
Ok(Self {
config,
http_client,
@@ -144,6 +136,32 @@ impl JwtValidator {
})
}
+ fn check_config(config: &JwtAuthConfig) -> Result<(), AuthError> {
+ if config.allowed_algorithms.is_empty() {
+ return Err(AuthError::Internal(
+ "JWT allowed_algorithms must not be empty".into(),
+ ));
+ }
+ // `jsonwebtoken::Validation::set_audience(&[""])` accepts a token whose
+ // `aud` claim equals the empty string, silently disabling the audience
+ // check. Reject empty entries here so the validator always enforces a
+ // real expected audience.
+ let entries = config.audience.to_vec();
+ if entries.is_empty() {
+ return Err(AuthError::Internal(
+ "JWT audience must not be empty".into(),
+ ));
+ }
+ for entry in entries {
+ if entry.trim().is_empty() {
+ return Err(AuthError::Internal(
+ "JWT audience entries must not be empty".into(),
+ ));
+ }
+ }
+ Ok(())
+ }
+
/// Validate a JWT and return the claims.
pub async fn validate(&self, token: &str) -> Result {
// Decode header to get the key ID and algorithm
diff --git a/src/config/auth.rs b/src/config/auth.rs
index 963284d..0a69cb1 100644
--- a/src/config/auth.rs
+++ b/src/config/auth.rs
@@ -586,10 +586,48 @@ impl IapConfig {
"IAP identity header cannot be empty".into(),
));
}
+ if let Some(jwt) = &self.jwt_assertion {
+ jwt.validate()?;
+ }
+ Ok(())
+ }
+}
+
+impl ProxyAuthJwtConfig {
+ fn validate(&self) -> Result<(), ConfigError> {
+ validate_jwt_audience("auth.iap.jwt_assertion", &self.audience)?;
+ if self.issuer.is_empty() {
+ return Err(ConfigError::Validation(
+ "auth.iap.jwt_assertion.issuer cannot be empty".into(),
+ ));
+ }
Ok(())
}
}
+/// Reject empty audience values. `jsonwebtoken` accepts an empty string as a
+/// valid audience match, so an empty entry would silently disable the audience
+/// check.
+fn validate_jwt_audience(
+ field: &str,
+ audience: &OneOrMany<String>,
+) -> Result<(), ConfigError> {
+ let entries = audience.to_vec();
+ if entries.is_empty() {
+ return Err(ConfigError::Validation(format!(
+ "{field}.audience must not be empty"
+ )));
+ }
+ for entry in &entries {
+ if entry.trim().is_empty() {
+ return Err(ConfigError::Validation(format!(
+ "{field}.audience entries must not be empty"
+ )));
+ }
+ }
+ Ok(())
+}
+
/// API key authentication configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))]
From 947fc5a791d3655cb8d0fc645bc0593bba7f9b40 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:15:01 +1000
Subject: [PATCH 010/172] Disallow space character in model string validation
---
src/routing/mod.rs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/routing/mod.rs b/src/routing/mod.rs
index baf7f22..77bbf6b 100644
--- a/src/routing/mod.rs
+++ b/src/routing/mod.rs
@@ -101,7 +101,7 @@ fn validate_model_string(model: &str) -> Result<(), RoutingError> {
}
if !model
.chars()
- .all(|c| c.is_alphanumeric() || "-._/:@ ".contains(c))
+ .all(|c| c.is_alphanumeric() || "-._/:@".contains(c))
{
return Err(RoutingError::InvalidModelFormat(
"Model string contains invalid characters".to_string(),
From cba1be4661e06a900bf3f246f5e5f16ee676ee13 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:15:29 +1000
Subject: [PATCH 011/172] Return first routing error rather than last on
fallback failure
---
src/routing/mod.rs | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/src/routing/mod.rs b/src/routing/mod.rs
index 77bbf6b..15684e0 100644
--- a/src/routing/mod.rs
+++ b/src/routing/mod.rs
@@ -345,28 +345,30 @@ pub fn route_models_extended<'a>(
models: Option<&'a [String]>,
providers: &'a ProvidersConfig,
) -> Result, RoutingError> {
- let mut last_error = None;
+ // Surface the *first* error if every candidate fails. The primary model's
+ // failure is the most actionable for the caller — fallback errors are a
+ // secondary signal.
+ let mut first_error: Option<RoutingError> = None;
- // First, try the primary model
if let Some(m) = model {
match route_model_extended(Some(m), providers) {
Ok(routed) => return Ok(routed),
- Err(e) => last_error = Some(e),
- }
+ Err(e) => first_error.get_or_insert(e),
+ };
}
- // Then try fallback models
if let Some(model_list) = models {
for m in model_list {
match route_model_extended(Some(m.as_str()), providers) {
Ok(routed) => return Ok(routed),
- Err(e) => last_error = Some(e),
+ Err(e) => {
+ first_error.get_or_insert(e);
+ }
}
}
}
- // Return the last error, or NoModel if no models were tried
- Err(last_error.unwrap_or(RoutingError::NoModel))
+ Err(first_error.unwrap_or(RoutingError::NoModel))
}
#[cfg(test)]
From e3b394a30c74c6f9a225dd5cabe76a7f143e7847 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:17:26 +1000
Subject: [PATCH 012/172] Match HADRIAN_TEST_DEBUG on value not env presence
---
src/tests/provider_e2e.rs | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/src/tests/provider_e2e.rs b/src/tests/provider_e2e.rs
index 11fc1c8..6fdd058 100644
--- a/src/tests/provider_e2e.rs
+++ b/src/tests/provider_e2e.rs
@@ -483,8 +483,16 @@ pub static OLLAMA_SPEC: ProviderTestSpec = ProviderTestSpec {
// =============================================================================
/// Check if debug output is enabled via HADRIAN_TEST_DEBUG env var.
+/// Only `1`/`true` (case-insensitive) count — `HADRIAN_TEST_DEBUG=0` should
+/// not turn debug on.
fn is_debug_enabled() -> bool {
- std::env::var("HADRIAN_TEST_DEBUG").is_ok()
+ matches!(
+ std::env::var("HADRIAN_TEST_DEBUG")
+ .ok()
+ .as_deref()
+ .map(|v| v.trim().to_ascii_lowercase()),
+ Some(ref s) if s == "1" || s == "true"
+ )
}
/// Save a debug response to the debug output directory.
From e2e13b121c87583f8523e1263310f9ee80ced3bd Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:18:51 +1000
Subject: [PATCH 013/172] Validate SAML metadata URL against SSRF in parse
endpoint
---
src/routes/admin/org_sso_configs.rs | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/src/routes/admin/org_sso_configs.rs b/src/routes/admin/org_sso_configs.rs
index 3a0f3cc..3bd55ba 100644
--- a/src/routes/admin/org_sso_configs.rs
+++ b/src/routes/admin/org_sso_configs.rs
@@ -759,6 +759,12 @@ pub async fn parse_saml_metadata(
crate::validation::require_https(&input.metadata_url)
.map_err(|e| AdminError::Validation(format!("SAML metadata URL must use HTTPS: {e}")))?;
+ // Block private/loopback/cloud-metadata addresses with DNS rebinding
+ // protection — the same gate that `SamlAuthenticator::get_metadata` uses.
+ crate::validation::validate_base_url(&input.metadata_url, false).map_err(|e| {
+ AdminError::Validation(format!("SAML metadata URL is not permitted: {e}"))
+ })?;
+
// Fetch and parse the metadata
let client = reqwest::Client::new();
tracing::debug!(url = %input.metadata_url, "Fetching SAML IdP metadata");
From 20a676f7bd0aa75b40f3ede89d9418c3b317385a Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:20:27 +1000
Subject: [PATCH 014/172] Validate image URL against SSRF before fetching
---
src/providers/image.rs | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/src/providers/image.rs b/src/providers/image.rs
index 0480f3a..765d231 100644
--- a/src/providers/image.rs
+++ b/src/providers/image.rs
@@ -58,6 +58,8 @@ pub enum ImageError {
TooLarge { size: usize, limit: usize },
#[error("Unsupported content type: {0}")]
UnsupportedContentType(String),
+ #[error("Image URL is not permitted: {0}")]
+ BlockedUrl(String),
#[error("Failed to fetch image: {0}")]
FetchError(String),
#[error("Image URL timeout after {0:?}")]
@@ -177,6 +179,13 @@ pub async fn fetch_image_url(
)));
}
+ // SSRF guard: reject loopback/private/cloud-metadata/RFC1918 addresses and
+ // resolve hostnames so DNS rebinding can't redirect us to a blocked range
+ // between this check and the actual HTTP request below. We deliberately do
+ // not enable `allow_loopback` — image URLs from chat content are untrusted.
+ crate::validation::validate_base_url(url, false)
+ .map_err(|e| ImageError::BlockedUrl(e.to_string()))?;
+
// Build request with timeout
let response = client
.get(url)
From 6c07a760cb7aec699842921d36b1c1d65c0bb4b3 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:21:22 +1000
Subject: [PATCH 015/172] Strip reserved underscore-prefixed roles from bearer
and proxy auth
---
src/middleware/layers/admin.rs | 44 +++++++++++++++++++++++-----------
1 file changed, 30 insertions(+), 14 deletions(-)
diff --git a/src/middleware/layers/admin.rs b/src/middleware/layers/admin.rs
index b3f7463..e97315b 100644
--- a/src/middleware/layers/admin.rs
+++ b/src/middleware/layers/admin.rs
@@ -181,6 +181,16 @@ pub const BOOTSTRAP_ROLE: &str = "_system_bootstrap";
/// Roles starting with `_` are reserved for internal use and cannot be assigned by IdPs.
pub const EMERGENCY_ADMIN_ROLE: &str = "_emergency_admin";
+/// Drop any role with the reserved `_` prefix from a list. IdPs and proxy
+/// headers must never be able to claim these roles, since the gateway grants
+/// extra trust to them (bootstrap / emergency break-glass).
+pub(crate) fn strip_reserved_roles(roles: Vec<String>) -> Vec<String> {
+ roles
+ .into_iter()
+ .filter(|r| !r.starts_with('_'))
+ .collect()
+}
+
/// Try to authenticate via bootstrap API key.
///
/// Bootstrap authentication is only valid when:
@@ -860,8 +870,9 @@ async fn try_bearer_token_auth(
(None, Vec::new(), Vec::new(), Vec::new())
};
- // Extract roles from token
- let roles = claims.roles.clone().unwrap_or_default();
+ // Extract roles from token, stripping any `_`-prefixed reserved roles
+ // (bootstrap/emergency) — IdPs must never be able to claim these.
+ let roles = strip_reserved_roles(claims.roles.clone().unwrap_or_default());
tracing::debug!(
sub = %claims.sub,
@@ -1144,18 +1155,23 @@ async fn try_proxy_auth_auth(
None
};
- // Extract roles from groups header if configured
- let roles = config
- .groups_header
- .as_ref()
- .and_then(|h| headers.get(h))
- .and_then(|v| v.to_str().ok())
- .map(|v| {
- // Try JSON array first, then comma-separated
- serde_json::from_str::<Vec<String>>(v)
- .unwrap_or_else(|_| v.split(',').map(|s| s.trim().to_string()).collect())
- })
- .unwrap_or_default();
+ // Extract roles from groups header if configured. Strip any `_`-prefixed
+ // reserved roles — proxy headers can be spoofed if `trusted_proxies` is
+ // misconfigured, so even with that gate we never want to honour a claim
+ // for `_emergency_admin`/`_system_bootstrap`.
+ let roles = strip_reserved_roles(
+ config
+ .groups_header
+ .as_ref()
+ .and_then(|h| headers.get(h))
+ .and_then(|v| v.to_str().ok())
+ .map(|v| {
+ // Try JSON array first, then comma-separated
+ serde_json::from_str::<Vec<String>>(v)
+ .unwrap_or_else(|_| v.split(',').map(|s| s.trim().to_string()).collect())
+ })
+ .unwrap_or_default(),
+ );
// For proxy auth, the groups header contains both roles and raw groups
// Store them in both fields for backwards compatibility and debugging
From 9c34a1b693631ae9a7f58eba70b106a5c5706150 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:21:56 +1000
Subject: [PATCH 016/172] Hide cross-user session existence in delete endpoint
---
src/routes/admin/me_sessions.rs | 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/src/routes/admin/me_sessions.rs b/src/routes/admin/me_sessions.rs
index ea88b46..1968244 100644
--- a/src/routes/admin/me_sessions.rs
+++ b/src/routes/admin/me_sessions.rs
@@ -127,15 +127,22 @@ pub async fn delete_one(
let session_store = get_session_store(&state)?;
- // Verify session belongs to the current user
+ // Verify session belongs to the current user. Both "session does not exist"
+ // and "session belongs to a different user" return 200 with
+ // `sessions_revoked: 0` so an attacker can't probe arbitrary session IDs to
+ // confirm they exist. The mismatch is logged at warn for forensics.
let session_existed = match session_store.get_session(session_id).await {
Ok(Some(session)) => {
if session.external_id != *external_id {
- return Err(AdminError::BadRequest(
- "Session does not belong to current user".to_string(),
- ));
+ tracing::warn!(
+ session_id = %session_id,
+ actor_external_id = %external_id,
+ "Attempt to revoke a session that belongs to a different user"
+ );
+ false
+ } else {
+ true
}
- true
}
Ok(None) => false,
Err(e) => {
From b63092fe9d0706f461946ad49799999e1c21e905 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:23:24 +1000
Subject: [PATCH 017/172] Send Vertex API key via header instead of URL query
---
src/providers/vertex/mod.rs | 34 ++++++++++++++--------------------
1 file changed, 14 insertions(+), 20 deletions(-)
diff --git a/src/providers/vertex/mod.rs b/src/providers/vertex/mod.rs
index 1f70653..5c06dba 100644
--- a/src/providers/vertex/mod.rs
+++ b/src/providers/vertex/mod.rs
@@ -168,27 +168,15 @@ impl VertexProvider {
}
}
- /// Build the full URL for a model endpoint.
+ /// Build the full URL for a model endpoint. The API key (when present) is
+ /// passed as the `x-goog-api-key` header in [`build_request`], not in the
+ /// query string — query parameters end up in HTTP access logs and tracing
+ /// span attributes.
fn model_url(&self, model: &str, endpoint: &str, stream: bool) -> String {
let base = self.base_url();
let mut url = format!("{}/{}:{}", base, model, endpoint);
-
- match &self.auth_mode {
- AuthMode::ApiKey(api_key) => {
- // Add API key as query parameter
- if stream {
- url.push_str("?alt=sse&key=");
- } else {
- url.push_str("?key=");
- }
- url.push_str(api_key);
- }
- AuthMode::OAuth { .. } => {
- // OAuth uses header auth, just add SSE param if streaming
- if stream {
- url.push_str("?alt=sse");
- }
- }
+ if stream {
+ url.push_str("?alt=sse");
}
url
}
@@ -316,8 +304,14 @@ impl VertexProvider {
.header("Content-Type", "application/json")
.timeout(self.timeout);
- if let Some(token) = token {
- req = req.header("Authorization", format!("Bearer {}", token));
+ match (&self.auth_mode, token) {
+ (AuthMode::ApiKey(api_key), _) => {
+ req = req.header("x-goog-api-key", api_key.as_str());
+ }
+ (AuthMode::OAuth { .. }, Some(token)) => {
+ req = req.header("Authorization", format!("Bearer {}", token));
+ }
+ (AuthMode::OAuth { .. }, None) => {}
}
req
From 47a3d0acf39d288699fecfe7c5852232a175f8c3 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:26:36 +1000
Subject: [PATCH 018/172] Use VecDeque for SSE stream output buffers to avoid
O(n) shifts
---
src/providers/anthropic/stream.rs | 46 +++++++++++++++++++++----------
src/providers/bedrock/stream.rs | 46 +++++++++++++++++++++----------
src/providers/vertex/mod.rs | 3 +-
src/providers/vertex/stream.rs | 46 +++++++++++++++++++++----------
4 files changed, 98 insertions(+), 43 deletions(-)
diff --git a/src/providers/anthropic/stream.rs b/src/providers/anthropic/stream.rs
index 2da21a5..3d2e512 100644
--- a/src/providers/anthropic/stream.rs
+++ b/src/providers/anthropic/stream.rs
@@ -246,7 +246,7 @@ pub struct AnthropicToOpenAIStream {
inner: S,
state: StreamState,
/// Output buffer for generated SSE chunks
- output_buffer: Vec<Bytes>,
+ output_buffer: std::collections::VecDeque<Bytes>,
/// Maximum input buffer size in bytes
max_input_buffer_bytes: usize,
/// Maximum output buffer chunks
@@ -258,7 +258,7 @@ impl AnthropicToOpenAIStream {
Self {
inner,
state: StreamState::default(),
- output_buffer: Vec::new(),
+ output_buffer: std::collections::VecDeque::new(),
max_input_buffer_bytes: streaming_buffer.max_input_buffer_bytes,
max_output_buffer_chunks: streaming_buffer.max_output_buffer_chunks,
}
@@ -541,7 +541,7 @@ impl AnthropicToOpenAIStream {
self.emit_chunk(&chunk);
// Emit [DONE]
- self.output_buffer.push(Bytes::from("data: [DONE]\n\n"));
+ self.output_buffer.push_back(Bytes::from("data: [DONE]\n\n"));
}
AnthropicStreamEvent::Ping => {
@@ -562,7 +562,7 @@ impl AnthropicToOpenAIStream {
fn emit_chunk(&mut self, chunk: &OpenAIStreamChunk) {
if let Ok(json) = serde_json::to_string(chunk) {
let sse = format!("data: {}\n\n", json);
- self.output_buffer.push(Bytes::from(sse));
+ self.output_buffer.push_back(Bytes::from(sse));
}
}
@@ -632,7 +632,10 @@ where
// First, return any buffered output
if !self.output_buffer.is_empty() {
- return Poll::Ready(Some(Ok(self.output_buffer.remove(0))));
+ return Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))));
}
// Poll the inner stream
@@ -652,7 +655,10 @@ where
// Return first buffered output if any
if !self.output_buffer.is_empty() {
- Poll::Ready(Some(Ok(self.output_buffer.remove(0))))
+ Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))))
} else {
// No output yet, need to poll again
cx.waker().wake_by_ref();
@@ -663,7 +669,10 @@ where
Poll::Ready(None) => {
// Stream ended - flush any remaining buffer
if !self.output_buffer.is_empty() {
- Poll::Ready(Some(Ok(self.output_buffer.remove(0))))
+ Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))))
} else {
Poll::Ready(None)
}
@@ -724,7 +733,7 @@ pub struct AnthropicToResponsesStream {
inner: S,
state: ResponsesStreamState,
/// Output buffer for generated SSE chunks
- output_buffer: Vec<Bytes>,
+ output_buffer: std::collections::VecDeque<Bytes>,
/// Maximum input buffer size in bytes
max_input_buffer_bytes: usize,
/// Maximum output buffer chunks
@@ -743,7 +752,7 @@ impl AnthropicToResponsesStream {
echo_fields,
..ResponsesStreamState::default()
},
- output_buffer: Vec::new(),
+ output_buffer: std::collections::VecDeque::new(),
max_input_buffer_bytes: streaming_buffer.max_input_buffer_bytes,
max_output_buffer_chunks: streaming_buffer.max_output_buffer_chunks,
}
@@ -1256,7 +1265,7 @@ impl AnthropicToResponsesStream {
);
// Emit [DONE] to signal end of stream (OpenAI Responses API convention)
- self.output_buffer.push(Bytes::from("data: [DONE]\n\n"));
+ self.output_buffer.push_back(Bytes::from("data: [DONE]\n\n"));
}
AnthropicStreamEvent::Ping => {
@@ -1302,7 +1311,7 @@ impl AnthropicToResponsesStream {
}
if let Ok(json) = serde_json::to_string(&serde_json::Value::Object(event_obj)) {
let sse = format!("data: {}\n\n", json);
- self.output_buffer.push(Bytes::from(sse));
+ self.output_buffer.push_back(Bytes::from(sse));
}
}
@@ -1369,7 +1378,10 @@ where
// First, return any buffered output
if !self.output_buffer.is_empty() {
- return Poll::Ready(Some(Ok(self.output_buffer.remove(0))));
+ return Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))));
}
// Poll the inner stream
@@ -1389,7 +1401,10 @@ where
// Return buffered output or wake for more
if !self.output_buffer.is_empty() {
- Poll::Ready(Some(Ok(self.output_buffer.remove(0))))
+ Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))))
} else {
cx.waker().wake_by_ref();
Poll::Pending
@@ -1399,7 +1414,10 @@ where
Poll::Ready(None) => {
// Stream ended - flush any remaining buffer
if !self.output_buffer.is_empty() {
- Poll::Ready(Some(Ok(self.output_buffer.remove(0))))
+ Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))))
} else {
Poll::Ready(None)
}
diff --git a/src/providers/bedrock/stream.rs b/src/providers/bedrock/stream.rs
index 989ffc4..575fe9c 100644
--- a/src/providers/bedrock/stream.rs
+++ b/src/providers/bedrock/stream.rs
@@ -48,7 +48,7 @@ pub(super) struct BedrockToOpenAIStream {
pub inner: S,
pub state: StreamState,
/// Output buffer for generated SSE chunks
- pub output_buffer: Vec<Bytes>,
+ pub output_buffer: std::collections::VecDeque<Bytes>,
/// Maximum input buffer size in bytes
pub max_input_buffer_bytes: usize,
/// Maximum output buffer chunks
@@ -66,7 +66,7 @@ impl BedrockToOpenAIStream {
buffer: bytes::BytesMut::new(),
..StreamState::default()
},
- output_buffer: Vec::new(),
+ output_buffer: std::collections::VecDeque::new(),
max_input_buffer_bytes: streaming_buffer.max_input_buffer_bytes,
max_output_buffer_chunks: streaming_buffer.max_output_buffer_chunks,
}
@@ -362,7 +362,7 @@ impl BedrockToOpenAIStream {
self.emit_chunk(&usage_chunk);
// Emit [DONE]
- self.output_buffer.push(Bytes::from("data: [DONE]\n\n"));
+ self.output_buffer.push_back(Bytes::from("data: [DONE]\n\n"));
}
}
_ => {
@@ -374,7 +374,7 @@ impl BedrockToOpenAIStream {
pub fn emit_chunk(&mut self, chunk: &OpenAIStreamChunk) {
if let Ok(json) = serde_json::to_string(chunk) {
let sse = format!("data: {}\n\n", json);
- self.output_buffer.push(Bytes::from(sse));
+ self.output_buffer.push_back(Bytes::from(sse));
}
}
@@ -446,7 +446,10 @@ where
// First, return any buffered output
if !self.output_buffer.is_empty() {
- return Poll::Ready(Some(Ok(self.output_buffer.remove(0))));
+ return Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))));
}
// Poll the inner stream
@@ -466,7 +469,10 @@ where
// Return first buffered output if any
if !self.output_buffer.is_empty() {
- Poll::Ready(Some(Ok(self.output_buffer.remove(0))))
+ Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))))
} else {
// No output yet, need to poll again
cx.waker().wake_by_ref();
@@ -477,7 +483,10 @@ where
Poll::Ready(None) => {
// Stream ended, return any remaining buffered output
if !self.output_buffer.is_empty() {
- Poll::Ready(Some(Ok(self.output_buffer.remove(0))))
+ Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))))
} else {
Poll::Ready(None)
}
@@ -540,7 +549,7 @@ pub struct BedrockToResponsesStream {
pub inner: S,
pub state: ResponsesStreamState,
/// Output buffer for generated SSE chunks
- pub output_buffer: Vec<Bytes>,
+ pub output_buffer: std::collections::VecDeque<Bytes>,
/// Maximum input buffer size in bytes
pub max_input_buffer_bytes: usize,
/// Maximum output buffer chunks
@@ -574,7 +583,7 @@ impl BedrockToResponsesStream {
echo_fields,
..ResponsesStreamState::default()
},
- output_buffer: Vec::new(),
+ output_buffer: std::collections::VecDeque::new(),
max_input_buffer_bytes: streaming_buffer.max_input_buffer_bytes,
max_output_buffer_chunks: streaming_buffer.max_output_buffer_chunks,
}
@@ -1099,7 +1108,7 @@ impl BedrockToResponsesStream {
);
// Emit [DONE] to signal end of stream
- self.output_buffer.push(Bytes::from("data: [DONE]\n\n"));
+ self.output_buffer.push_back(Bytes::from("data: [DONE]\n\n"));
}
}
_ => {
@@ -1129,7 +1138,7 @@ impl BedrockToResponsesStream {
}
if let Ok(json) = serde_json::to_string(&serde_json::Value::Object(event_obj)) {
let sse = format!("data: {}\n\n", json);
- self.output_buffer.push(Bytes::from(sse));
+ self.output_buffer.push_back(Bytes::from(sse));
}
}
@@ -1200,7 +1209,10 @@ where
// First, return any buffered output
if !self.output_buffer.is_empty() {
- return Poll::Ready(Some(Ok(self.output_buffer.remove(0))));
+ return Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))));
}
// Poll the inner stream
@@ -1220,7 +1232,10 @@ where
// Return first buffered output if any
if !self.output_buffer.is_empty() {
- Poll::Ready(Some(Ok(self.output_buffer.remove(0))))
+ Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))))
} else {
// No output yet, need to poll again
cx.waker().wake_by_ref();
@@ -1231,7 +1246,10 @@ where
Poll::Ready(None) => {
// Stream ended, return any remaining buffered output
if !self.output_buffer.is_empty() {
- Poll::Ready(Some(Ok(self.output_buffer.remove(0))))
+ Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))))
} else {
Poll::Ready(None)
}
diff --git a/src/providers/vertex/mod.rs b/src/providers/vertex/mod.rs
index 5c06dba..1283647 100644
--- a/src/providers/vertex/mod.rs
+++ b/src/providers/vertex/mod.rs
@@ -874,7 +874,8 @@ mod streaming_tests {
transformer.handle_response(response);
// Should emit [DONE] at the end
- let last_chunk = std::str::from_utf8(transformer.output_buffer.last().unwrap()).unwrap();
+ let last_chunk =
+ std::str::from_utf8(transformer.output_buffer.back().unwrap()).unwrap();
assert_eq!(last_chunk, "data: [DONE]\n\n");
// Should have usage in second-to-last chunk
diff --git a/src/providers/vertex/stream.rs b/src/providers/vertex/stream.rs
index 4acd7ee..cf735e3 100644
--- a/src/providers/vertex/stream.rs
+++ b/src/providers/vertex/stream.rs
@@ -121,7 +121,7 @@ pub struct VertexToOpenAIStream {
pub inner: S,
pub state: StreamState,
/// Output buffer for generated SSE chunks
- pub output_buffer: Vec<Bytes>,
+ pub output_buffer: std::collections::VecDeque<Bytes>,
/// Maximum input buffer size in bytes
pub max_input_buffer_bytes: usize,
/// Maximum output buffer chunks
@@ -136,7 +136,7 @@ impl VertexToOpenAIStream {
model,
..StreamState::default()
},
- output_buffer: Vec::new(),
+ output_buffer: std::collections::VecDeque::new(),
max_input_buffer_bytes: streaming_buffer.max_input_buffer_bytes,
max_output_buffer_chunks: streaming_buffer.max_output_buffer_chunks,
}
@@ -353,7 +353,7 @@ impl VertexToOpenAIStream {
self.emit_chunk(&usage_chunk);
// Emit [DONE]
- self.output_buffer.push(Bytes::from("data: [DONE]\n\n"));
+ self.output_buffer.push_back(Bytes::from("data: [DONE]\n\n"));
}
}
}
@@ -361,7 +361,7 @@ impl VertexToOpenAIStream {
fn emit_chunk(&mut self, chunk: &OpenAIStreamChunk) {
if let Ok(json) = serde_json::to_string(chunk) {
let sse = format!("data: {}\n\n", json);
- self.output_buffer.push(Bytes::from(sse));
+ self.output_buffer.push_back(Bytes::from(sse));
}
}
@@ -431,7 +431,10 @@ where
// First, return any buffered output
if !self.output_buffer.is_empty() {
- return Poll::Ready(Some(Ok(self.output_buffer.remove(0))));
+ return Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))));
}
// Poll the inner stream
@@ -451,7 +454,10 @@ where
// Return first buffered output if any
if !self.output_buffer.is_empty() {
- Poll::Ready(Some(Ok(self.output_buffer.remove(0))))
+ Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))))
} else {
// No output yet, need to poll again
cx.waker().wake_by_ref();
@@ -462,7 +468,10 @@ where
Poll::Ready(None) => {
// Stream ended - flush any remaining buffer
if !self.output_buffer.is_empty() {
- Poll::Ready(Some(Ok(self.output_buffer.remove(0))))
+ Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))))
} else {
Poll::Ready(None)
}
@@ -517,7 +526,7 @@ pub struct VertexToResponsesStream {
inner: S,
state: ResponsesStreamState,
/// Output buffer for generated SSE chunks
- output_buffer: Vec<Bytes>,
+ output_buffer: std::collections::VecDeque<Bytes>,
/// Maximum input buffer size in bytes
max_input_buffer_bytes: usize,
/// Maximum output buffer chunks
@@ -541,7 +550,7 @@ impl VertexToResponsesStream {
echo_fields,
..ResponsesStreamState::default()
},
- output_buffer: Vec::new(),
+ output_buffer: std::collections::VecDeque::new(),
max_input_buffer_bytes: streaming_buffer.max_input_buffer_bytes,
max_output_buffer_chunks: streaming_buffer.max_output_buffer_chunks,
}
@@ -583,7 +592,7 @@ impl VertexToResponsesStream {
// Pass through [DONE] marker
if json_str == "[DONE]" {
- self.output_buffer.push(Bytes::from("data: [DONE]\n\n"));
+ self.output_buffer.push_back(Bytes::from("data: [DONE]\n\n"));
return;
}
@@ -1021,7 +1030,7 @@ impl VertexToResponsesStream {
}
if let Ok(json) = serde_json::to_string(&serde_json::Value::Object(event_obj)) {
let sse = format!("data: {}\n\n", json);
- self.output_buffer.push(Bytes::from(sse));
+ self.output_buffer.push_back(Bytes::from(sse));
}
}
@@ -1088,7 +1097,10 @@ where
// First, return any buffered output
if !self.output_buffer.is_empty() {
- return Poll::Ready(Some(Ok(self.output_buffer.remove(0))));
+ return Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))));
}
// Poll the inner stream
@@ -1108,7 +1120,10 @@ where
// Return buffered output or wake for more
if !self.output_buffer.is_empty() {
- Poll::Ready(Some(Ok(self.output_buffer.remove(0))))
+ Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))))
} else {
cx.waker().wake_by_ref();
Poll::Pending
@@ -1118,7 +1133,10 @@ where
Poll::Ready(None) => {
// Stream ended - flush any remaining buffer
if !self.output_buffer.is_empty() {
- Poll::Ready(Some(Ok(self.output_buffer.remove(0))))
+ Poll::Ready(Some(Ok(self
+ .output_buffer
+ .pop_front()
+ .expect("non-empty checked above"))))
} else {
Poll::Ready(None)
}
From 28c8664c1b73a36ac0373ea4c15d53dc9f73d0ee Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:27:18 +1000
Subject: [PATCH 019/172] Use parking_lot RwLock in CircuitBreakerRegistry to
drop poison panic
---
src/providers/registry.rs | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/src/providers/registry.rs b/src/providers/registry.rs
index ee14016..0e1ae4c 100644
--- a/src/providers/registry.rs
+++ b/src/providers/registry.rs
@@ -4,11 +4,9 @@
//! and protect against unhealthy providers. This module provides a
//! registry that stores circuit breakers keyed by provider name.
-use std::{
- collections::HashMap,
- sync::{Arc, RwLock},
-};
+use std::{collections::HashMap, sync::Arc};
+use parking_lot::RwLock;
use serde::Serialize;
use super::circuit_breaker::{CircuitBreaker, CircuitState};
@@ -74,7 +72,7 @@ impl CircuitBreakerRegistry {
/// Register a circuit breaker for a provider.
pub fn register(&self, provider_name: &str, breaker: CircuitBreaker) {
- let mut breakers = self.breakers.write().expect("RwLock poisoned");
+ let mut breakers = self.breakers.write();
breakers.insert(provider_name.to_string(), Arc::new(breaker));
}
@@ -93,14 +91,14 @@ impl CircuitBreakerRegistry {
// Try read lock first
{
- let breakers = self.breakers.read().expect("RwLock poisoned");
+ let breakers = self.breakers.read();
if let Some(breaker) = breakers.get(provider_name) {
return Some(breaker.clone());
}
}
// Need to create - upgrade to write lock
- let mut breakers = self.breakers.write().expect("RwLock poisoned");
+ let mut breakers = self.breakers.write();
// Double-check after acquiring write lock
if let Some(breaker) = breakers.get(provider_name) {
return Some(breaker.clone());
@@ -121,13 +119,13 @@ impl CircuitBreakerRegistry {
/// Get a circuit breaker by name if it exists.
pub fn get(&self, provider_name: &str) -> Option<Arc<CircuitBreaker>> {
- let breakers = self.breakers.read().expect("RwLock poisoned");
+ let breakers = self.breakers.read();
breakers.get(provider_name).cloned()
}
/// Get the status of all circuit breakers.
pub fn status(&self) -> Vec<CircuitBreakerStatus> {
- let breakers = self.breakers.read().expect("RwLock poisoned");
+ let breakers = self.breakers.read();
breakers
.iter()
.map(
@@ -142,7 +140,7 @@ impl CircuitBreakerRegistry {
/// Get the status of a specific circuit breaker.
pub fn status_for(&self, provider_name: &str) -> Option<CircuitBreakerStatus> {
- let breakers = self.breakers.read().expect("RwLock poisoned");
+ let breakers = self.breakers.read();
breakers
.get(provider_name)
.map(|breaker: &Arc<CircuitBreaker>| CircuitBreakerStatus {
From ab947cc4be13f480ad40d07d2fba77e73cf112ab Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:27:58 +1000
Subject: [PATCH 020/172] Add noopener to OpenRouter OAuth iframe escape
window.open
---
ui/src/components/WasmSetup/openrouter-oauth.ts | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/ui/src/components/WasmSetup/openrouter-oauth.ts b/ui/src/components/WasmSetup/openrouter-oauth.ts
index e235870..9566252 100644
--- a/ui/src/components/WasmSetup/openrouter-oauth.ts
+++ b/ui/src/components/WasmSetup/openrouter-oauth.ts
@@ -43,7 +43,11 @@ export function isInIframe(): boolean {
*/
export async function startOpenRouterOAuth() {
if (isInIframe()) {
- window.open(window.location.origin + window.location.pathname, "_blank");
+ window.open(
+ window.location.origin + window.location.pathname,
+ "_blank",
+ "noopener,noreferrer",
+ );
return;
}
From 9f6305ab4a5d126e25d5ed51ee047e9bfecf67fc Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:28:23 +1000
Subject: [PATCH 021/172] Reject protocol-relative return_to values on login
redirect
---
ui/src/pages/LoginPage.tsx | 17 +++++++++++++----
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/ui/src/pages/LoginPage.tsx b/ui/src/pages/LoginPage.tsx
index 863764d..ee317cf 100644
--- a/ui/src/pages/LoginPage.tsx
+++ b/ui/src/pages/LoginPage.tsx
@@ -63,11 +63,20 @@ export default function LoginPage() {
// a full URL (path + search, e.g. /oauth/authorize?callback_url=...) survive
// the round-trip through login. Falls back to the in-app `state.from` set by
// RequireAuth.
+ //
+ // `startsWith("/")` alone is not enough: `//evil.com/...` and `/\evil.com`
+ // are treated as same-origin by `Navigate`/`startsWith` but resolve to a
+ // cross-origin URL in the browser. Reject anything whose second character
+ // makes it protocol-relative or backslash-prefixed.
+ const isSafeReturnTo = (value: string | null): value is string =>
+ !!value &&
+ value.startsWith("/") &&
+ !value.startsWith("//") &&
+ !value.startsWith("/\\");
const returnToParam = new URLSearchParams(location.search).get("return_to");
- const from =
- returnToParam && returnToParam.startsWith("/")
- ? returnToParam
- : location.state?.from?.pathname || "/";
+ const from = isSafeReturnTo(returnToParam)
+ ? returnToParam
+ : location.state?.from?.pathname || "/";
if (configLoading || authLoading) {
return (
From 3ba6e8dcb7ce47a8af471b033cc20aac434c3899 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:28:40 +1000
Subject: [PATCH 022/172] Redact auth token from AccountPage data export
---
ui/src/pages/AccountPage.tsx | 21 ++++++++++++++++++---
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/ui/src/pages/AccountPage.tsx b/ui/src/pages/AccountPage.tsx
index 7e0141b..c823868 100644
--- a/ui/src/pages/AccountPage.tsx
+++ b/ui/src/pages/AccountPage.tsx
@@ -22,17 +22,32 @@ import { exportAllIndexedDBData, deleteIndexedDBDatabase } from "@/hooks/useInde
// localStorage keys used by the app
const LOCAL_STORAGE_KEYS = ["hadrian-auth", "hadrian-mcp-servers", "hadrian-preferences"] as const;
-/** Export all localStorage data for Hadrian keys */
+/** Sanitize a stored auth blob so the export doesn't ship the bearer token. */
+function sanitizeForExport(key: string, value: unknown): unknown {
+ if (!value || typeof value !== "object" || Array.isArray(value)) {
+ return value;
+ }
+ if (key === "hadrian-auth") {
+ const { token: _token, ...rest } = value as Record<string, unknown>;
+ return { ...rest, token: "[redacted]" };
+ }
+ return value;
+}
+
+/** Export all localStorage data for Hadrian keys.
+ * Auth tokens are redacted: a user emailing this export "for support"
+ * shouldn't be shipping their gateway credential. */
function exportLocalStorageData(): Record<string, unknown> {
const result: Record<string, unknown> = {};
for (const key of LOCAL_STORAGE_KEYS) {
try {
const value = localStorage.getItem(key);
if (value) {
- result[key] = JSON.parse(value);
+ result[key] = sanitizeForExport(key, JSON.parse(value));
}
} catch {
- // If parsing fails, store as raw string
+ // If parsing fails, store as raw string (auth blob always parses, so
+ // raw strings reaching here aren't credentials we know about)
const value = localStorage.getItem(key);
if (value) {
result[key] = value;
From c3f47e062bf9686a32c3ea15121d8625894a797a Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:29:13 +1000
Subject: [PATCH 023/172] Clamp ListQuery limit to a hard maximum of 1000
---
src/routes/admin/organizations.rs | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/src/routes/admin/organizations.rs b/src/routes/admin/organizations.rs
index 7ea7fed..a2229bc 100644
--- a/src/routes/admin/organizations.rs
+++ b/src/routes/admin/organizations.rs
@@ -37,11 +37,19 @@ pub struct ListQuery {
pub include_deleted: Option,
}
+/// Hard upper bound on `limit` for any admin list endpoint. A client passing
+/// `limit=999999999` would otherwise scan an entire table and DoS the gateway.
+pub const MAX_LIST_LIMIT: i64 = 1000;
+
+fn clamp_limit(limit: Option<i64>) -> Option<i64> {
+ limit.map(|n| n.clamp(1, MAX_LIST_LIMIT))
+}
+
/// Simple conversion that requires using try_into_with_cursor() for cursor validation.
impl From for ListParams {
fn from(q: ListQuery) -> Self {
ListParams {
- limit: q.limit,
+ limit: clamp_limit(q.limit),
cursor: None,
direction: CursorDirection::Forward,
sort_order: Default::default(),
@@ -73,7 +81,7 @@ impl ListQuery {
};
Ok(ListParams {
- limit: self.limit,
+ limit: clamp_limit(self.limit),
cursor,
direction,
sort_order: Default::default(),
From fbf0c476e1b84afd57ea4779208db7fd33f3b3ad Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:34:25 +1000
Subject: [PATCH 024/172] Sanitize CSV export cells to defang formula injection
---
src/routes/admin/csv_export.rs | 83 ++++++++++++++++++++--------------
1 file changed, 48 insertions(+), 35 deletions(-)
diff --git a/src/routes/admin/csv_export.rs b/src/routes/admin/csv_export.rs
index 963ed4b..e6ac221 100644
--- a/src/routes/admin/csv_export.rs
+++ b/src/routes/admin/csv_export.rs
@@ -15,6 +15,19 @@ use crate::models::{
UserAccessInventoryEntry, UserAccessSummaryResponse,
};
+/// Defang any cell whose first character would be interpreted as a formula by
+/// Excel/Sheets/Numbers (`= + - @ \t \r`). The auditor-friendly format means
+/// a malicious user-controlled email or org name should never become a live
+/// formula or `HYPERLINK()` exfiltration vector.
+fn sanitize_csv_cell(value: String) -> String {
+ match value.chars().next() {
+ Some('=') | Some('+') | Some('-') | Some('@') | Some('\t') | Some('\r') => {
+ format!("'{}", value)
+ }
+ _ => value,
+ }
+}
+
/// Error type for CSV export operations
#[derive(Debug)]
pub struct CsvExportError(String);
@@ -98,9 +111,9 @@ pub fn export_access_inventory_csv(
for org in &user.organizations {
let mut row = base_row.clone();
row.org_id = org.org_id.to_string();
- row.org_slug = org.org_slug.clone();
- row.org_name = org.org_name.clone();
- row.org_role = org.role.clone();
+ row.org_slug = sanitize_csv_cell(org.org_slug.clone());
+ row.org_name = sanitize_csv_cell(org.org_name.clone());
+ row.org_role = sanitize_csv_cell(org.role.clone());
row.org_granted_at = org.granted_at.to_rfc3339();
wtr.serialize(&row)
.map_err(|e| CsvExportError(e.to_string()))?;
@@ -110,10 +123,10 @@ pub fn export_access_inventory_csv(
for project in &user.projects {
let mut row = base_row.clone();
row.project_id = project.project_id.to_string();
- row.project_slug = project.project_slug.clone();
- row.project_name = project.project_name.clone();
+ row.project_slug = sanitize_csv_cell(project.project_slug.clone());
+ row.project_name = sanitize_csv_cell(project.project_name.clone());
row.project_org_id = project.org_id.to_string();
- row.project_role = project.role.clone();
+ row.project_role = sanitize_csv_cell(project.role.clone());
row.project_granted_at = project.granted_at.to_rfc3339();
wtr.serialize(&row)
.map_err(|e| CsvExportError(e.to_string()))?;
@@ -127,9 +140,9 @@ pub fn export_access_inventory_csv(
fn create_base_inventory_row(user: &UserAccessInventoryEntry) -> AccessInventoryRow {
AccessInventoryRow {
user_id: user.user_id.to_string(),
- external_id: user.external_id.clone(),
- email: user.email.clone().unwrap_or_default(),
- name: user.name.clone().unwrap_or_default(),
+ external_id: sanitize_csv_cell(user.external_id.clone()),
+ email: sanitize_csv_cell(user.email.clone().unwrap_or_default()),
+ name: sanitize_csv_cell(user.name.clone().unwrap_or_default()),
created_at: user.created_at.to_rfc3339(),
org_id: String::new(),
org_slug: String::new(),
@@ -181,10 +194,10 @@ pub fn export_org_access_report_csv(
for member in &response.members {
let base_row = OrgAccessReportRow {
user_id: member.user_id.to_string(),
- external_id: member.external_id.clone(),
- email: member.email.clone().unwrap_or_default(),
- name: member.name.clone().unwrap_or_default(),
- org_role: member.role.clone(),
+ external_id: sanitize_csv_cell(member.external_id.clone()),
+ email: sanitize_csv_cell(member.email.clone().unwrap_or_default()),
+ name: sanitize_csv_cell(member.name.clone().unwrap_or_default()),
+ org_role: sanitize_csv_cell(member.role.clone()),
org_granted_at: member.granted_at.to_rfc3339(),
project_id: String::new(),
project_slug: String::new(),
@@ -207,9 +220,9 @@ pub fn export_org_access_report_csv(
for project in &member.project_access {
let mut row = base_row.clone();
row.project_id = project.project_id.to_string();
- row.project_slug = project.project_slug.clone();
- row.project_name = project.project_name.clone();
- row.project_role = project.role.clone();
+ row.project_slug = sanitize_csv_cell(project.project_slug.clone());
+ row.project_name = sanitize_csv_cell(project.project_name.clone());
+ row.project_role = sanitize_csv_cell(project.role.clone());
row.project_granted_at = project.granted_at.to_rfc3339();
wtr.serialize(&row)
.map_err(|e| CsvExportError(e.to_string()))?;
@@ -247,9 +260,9 @@ pub fn export_user_access_summary_csv(
let base = |resource_type: &str| UserAccessSummaryRow {
user_id: response.user_id.to_string(),
- external_id: response.external_id.clone(),
- email: response.email.clone().unwrap_or_default(),
- name: response.name.clone().unwrap_or_default(),
+ external_id: sanitize_csv_cell(response.external_id.clone()),
+ email: sanitize_csv_cell(response.email.clone().unwrap_or_default()),
+ name: sanitize_csv_cell(response.name.clone().unwrap_or_default()),
created_at: response.created_at.to_rfc3339(),
resource_type: resource_type.to_string(),
resource_id: String::new(),
@@ -269,9 +282,9 @@ pub fn export_user_access_summary_csv(
for org in &response.organizations {
let mut row = base("organization");
row.resource_id = org.org_id.to_string();
- row.resource_slug = org.org_slug.clone();
- row.resource_name = org.org_name.clone();
- row.role = org.role.clone();
+ row.resource_slug = sanitize_csv_cell(org.org_slug.clone());
+ row.resource_name = sanitize_csv_cell(org.org_name.clone());
+ row.role = sanitize_csv_cell(org.role.clone());
row.granted_at = org.granted_at.to_rfc3339();
row.last_activity_at = org
.last_activity_at
@@ -285,9 +298,9 @@ pub fn export_user_access_summary_csv(
for project in &response.projects {
let mut row = base("project");
row.resource_id = project.project_id.to_string();
- row.resource_slug = project.project_slug.clone();
- row.resource_name = project.project_name.clone();
- row.role = project.role.clone();
+ row.resource_slug = sanitize_csv_cell(project.project_slug.clone());
+ row.resource_name = sanitize_csv_cell(project.project_name.clone());
+ row.role = sanitize_csv_cell(project.role.clone());
row.granted_at = project.granted_at.to_rfc3339();
row.last_activity_at = project
.last_activity_at
@@ -301,7 +314,7 @@ pub fn export_user_access_summary_csv(
for api_key in &response.api_keys {
let mut row = base("api_key");
row.resource_id = api_key.key_id.to_string();
- row.resource_name = api_key.name.clone();
+ row.resource_name = sanitize_csv_cell(api_key.name.clone());
row.is_active = api_key.is_active.to_string();
row.granted_at = api_key.created_at.to_rfc3339();
row.last_used_at = api_key
@@ -356,9 +369,9 @@ pub fn export_stale_access_csv(response: &StaleAccessResponse) -> Result
let row = StaleAccessRow {
category: "stale_user".to_string(),
user_id: user.user_id.to_string(),
- external_id: user.external_id.clone(),
- email: user.email.clone().unwrap_or_default(),
- name: user.name.clone().unwrap_or_default(),
+ external_id: sanitize_csv_cell(user.external_id.clone()),
+ email: sanitize_csv_cell(user.email.clone().unwrap_or_default()),
+ name: sanitize_csv_cell(user.name.clone().unwrap_or_default()),
created_at: user.created_at.to_rfc3339(),
last_activity_at: user
.last_activity_at
@@ -384,9 +397,9 @@ pub fn export_stale_access_csv(response: &StaleAccessResponse) -> Result
let row = StaleAccessRow {
category: "never_active_user".to_string(),
user_id: user.user_id.to_string(),
- external_id: user.external_id.clone(),
- email: user.email.clone().unwrap_or_default(),
- name: user.name.clone().unwrap_or_default(),
+ external_id: sanitize_csv_cell(user.external_id.clone()),
+ email: sanitize_csv_cell(user.email.clone().unwrap_or_default()),
+ name: sanitize_csv_cell(user.name.clone().unwrap_or_default()),
created_at: user.created_at.to_rfc3339(),
last_activity_at: String::new(),
days_inactive: user.days_since_creation,
@@ -419,9 +432,9 @@ pub fn export_stale_access_csv(response: &StaleAccessResponse) -> Result
project_count: 0,
active_api_keys: 0,
key_id: key.key_id.to_string(),
- key_name: key.name.clone(),
- key_prefix: key.key_prefix.clone(),
- owner_type: key.owner_type.clone(),
+ key_name: sanitize_csv_cell(key.name.clone()),
+ key_prefix: sanitize_csv_cell(key.key_prefix.clone()),
+ owner_type: sanitize_csv_cell(key.owner_type.clone()),
owner_id: key.owner_id.to_string(),
never_used: key.never_used.to_string(),
};
From 0708ea46acba5099cd87426dfc11e0c31f6b8ae1 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:35:07 +1000
Subject: [PATCH 025/172] Route citation links through parent URL handler for
safety modal
---
.../components/CitationList/CitationList.tsx | 19 ++++++++++++++-----
1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/ui/src/components/CitationList/CitationList.tsx b/ui/src/components/CitationList/CitationList.tsx
index 46c56f6..c603ab7 100644
--- a/ui/src/components/CitationList/CitationList.tsx
+++ b/ui/src/components/CitationList/CitationList.tsx
@@ -162,14 +162,23 @@ const CitationItem = memo(function CitationItem({
)}
{citation.type === "url" && (
- {
+ // Route through the parent's URL handler so the same trusted-
+ // domain confirmation modal that markdown links use applies
+ // here. Citations are model-supplied — a citation that
+ // displays "Wikipedia" can link to attacker.example.
+ if (onUrlClick) {
+ onUrlClick(citation.url);
+ } else {
+ window.open(citation.url, "_blank", "noopener,noreferrer");
+ }
+ }}
className="text-xs text-primary hover:underline mt-1 inline-flex items-center gap-1"
>
Open source
-
+
)}
)}
From 2a2edceb730ecad0d78574a6117d5baa46421825 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:36:01 +1000
Subject: [PATCH 026/172] Default audit log list to last 7 days when no range
given
---
src/routes/admin/audit_logs.rs | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/src/routes/admin/audit_logs.rs b/src/routes/admin/audit_logs.rs
index 3d8cc50..34e5de2 100644
--- a/src/routes/admin/audit_logs.rs
+++ b/src/routes/admin/audit_logs.rs
@@ -61,6 +61,14 @@ pub async fn list(
)));
}
+ // Cap unbounded scans: when no time range is supplied, default to the last
+ // 7 days. The audit log is append-only and grows fast; an unfiltered list
+ // hits the entire table with `ORDER BY ts DESC` which can DoS the gateway.
+ let mut query = query;
+ if query.from.is_none() && query.to.is_none() {
+ query.from = Some(chrono::Utc::now() - chrono::Duration::days(7));
+ }
+
let result = services.audit_logs.list(query).await?;
let pagination = PaginationMeta::with_cursors(
From 9cf736baf6dcd6bce1ec658cba8c0fdd0a8c0627 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:36:32 +1000
Subject: [PATCH 027/172] Validate DLQ table_name as identifier before
interpolating
---
src/dlq/mod.rs | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/src/dlq/mod.rs b/src/dlq/mod.rs
index 5bbd80f..ba35c26 100644
--- a/src/dlq/mod.rs
+++ b/src/dlq/mod.rs
@@ -74,6 +74,25 @@ pub async fn create_dlq(
ttl_secs,
..
} => {
+ // The table name is interpolated as raw SQL throughout
+ // `dlq::database`, so we validate it against an identifier shape
+ // here rather than trusting it. Mistyped/templated config values
+ // would otherwise become an injection surface.
+ let valid_ident = !table_name.is_empty()
+ && table_name.len() <= 63
+ && table_name
+ .chars()
+ .next()
+ .map(|c| c.is_ascii_alphabetic() || c == '_')
+ .unwrap_or(false)
+ && table_name
+ .chars()
+ .all(|c| c.is_ascii_alphanumeric() || c == '_');
+ if !valid_ident {
+ return Err(DlqError::Internal(format!(
+ "Invalid DLQ table_name '{table_name}': must match [A-Za-z_][A-Za-z0-9_]{{0,62}}"
+ )));
+ }
let db = db.ok_or_else(|| {
DlqError::Internal(
"Database DLQ configured but no database connection available".to_string(),
From 0d175c35a1ac4129308f6bbc48a355e79d0b8ad6 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:37:09 +1000
Subject: [PATCH 028/172] Mark selected conversation with aria-current for
screen readers
---
ui/src/components/ConversationList/ConversationList.tsx | 1 +
1 file changed, 1 insertion(+)
diff --git a/ui/src/components/ConversationList/ConversationList.tsx b/ui/src/components/ConversationList/ConversationList.tsx
index 7abb522..fbe46cd 100644
--- a/ui/src/components/ConversationList/ConversationList.tsx
+++ b/ui/src/components/ConversationList/ConversationList.tsx
@@ -151,6 +151,7 @@ const ConversationItem = memo(
type="button"
className="flex min-w-0 flex-1 items-center gap-2 text-left"
onClick={() => onSelect(conv.id)}
+ aria-current={isSelected ? "page" : undefined}
>
Date: Sat, 25 Apr 2026 22:37:31 +1000
Subject: [PATCH 029/172] Log SSE event parse errors instead of silently
swallowing
---
ui/src/pages/chat/useChat.ts | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/ui/src/pages/chat/useChat.ts b/ui/src/pages/chat/useChat.ts
index b96b911..2b7331f 100644
--- a/ui/src/pages/chat/useChat.ts
+++ b/ui/src/pages/chat/useChat.ts
@@ -1211,8 +1211,12 @@ export function useChat({
}
}
}
- } catch {
- // Ignore parse errors for partial JSON
+ } catch (err) {
+ // Per-line `data:` payloads should always be complete JSON
+ // (we already split on `\n` and the last partial line stays
+ // in `buffer`). Surface the error at debug so producer/spec
+ // drift doesn't silently drop tool calls or citations.
+ console.debug("Failed to parse SSE event payload", { data, err });
}
}
}
From 4cdb3b8396b2787842a0d0233506204d692da7ac Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:38:07 +1000
Subject: [PATCH 030/172] Wrap clipboard writes in try/catch to surface
failures
---
ui/src/components/ChatMessage/ChatMessage.tsx | 10 +++++++---
.../MultiModelResponse/MultiModelResponse.tsx | 10 +++++++---
.../components/ResponseActions/ResponseActions.tsx | 13 ++++++++++---
3 files changed, 24 insertions(+), 9 deletions(-)
diff --git a/ui/src/components/ChatMessage/ChatMessage.tsx b/ui/src/components/ChatMessage/ChatMessage.tsx
index b022080..8603569 100644
--- a/ui/src/components/ChatMessage/ChatMessage.tsx
+++ b/ui/src/components/ChatMessage/ChatMessage.tsx
@@ -141,9 +141,13 @@ function ChatMessageComponent({
);
const handleCopy = async () => {
- await navigator.clipboard.writeText(message.content);
- setCopied(true);
- setTimeout(() => setCopied(false), 2000);
+ try {
+ await navigator.clipboard.writeText(message.content);
+ setCopied(true);
+ setTimeout(() => setCopied(false), 2000);
+ } catch (err) {
+ console.debug("Clipboard write failed", err);
+ }
};
// Quote selection state
diff --git a/ui/src/components/MultiModelResponse/MultiModelResponse.tsx b/ui/src/components/MultiModelResponse/MultiModelResponse.tsx
index d99e11a..77d7d0a 100644
--- a/ui/src/components/MultiModelResponse/MultiModelResponse.tsx
+++ b/ui/src/components/MultiModelResponse/MultiModelResponse.tsx
@@ -417,9 +417,13 @@ function CollapsedActionsMenu({
const [copied, setCopied] = useState(false);
const handleCopy = async () => {
- await navigator.clipboard.writeText(content);
- setCopied(true);
- setTimeout(() => setCopied(false), 2000);
+ try {
+ await navigator.clipboard.writeText(content);
+ setCopied(true);
+ setTimeout(() => setCopied(false), 2000);
+ } catch (err) {
+ console.debug("Clipboard write failed", err);
+ }
};
const isSpeaking = speakingState === "playing";
diff --git a/ui/src/components/ResponseActions/ResponseActions.tsx b/ui/src/components/ResponseActions/ResponseActions.tsx
index 06e2586..4101e88 100644
--- a/ui/src/components/ResponseActions/ResponseActions.tsx
+++ b/ui/src/components/ResponseActions/ResponseActions.tsx
@@ -102,9 +102,16 @@ export function ResponseActions({
const [copied, setCopied] = useState(false);
const handleCopy = async () => {
- await navigator.clipboard.writeText(content);
- setCopied(true);
- setTimeout(() => setCopied(false), 2000);
+ // `clipboard.writeText` rejects on permission denial, lack of focus, or
+ // non-secure context. Without try/catch the rejection becomes an
+ // unhandled promise rejection and `setCopied(true)` silently never runs.
+ try {
+ await navigator.clipboard.writeText(content);
+ setCopied(true);
+ setTimeout(() => setCopied(false), 2000);
+ } catch (err) {
+ console.debug("Clipboard write failed", err);
+ }
};
// Primary actions - always visible
From 92bc583c370fa3e389433bd829e9204146b8ea18 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:38:52 +1000
Subject: [PATCH 031/172] Debounce and memoise conversation list filter to
avoid O(N*M) hitches
---
.../ConversationList/ConversationList.tsx | 23 ++++++++++++-------
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/ui/src/components/ConversationList/ConversationList.tsx b/ui/src/components/ConversationList/ConversationList.tsx
index fbe46cd..b4b4c39 100644
--- a/ui/src/components/ConversationList/ConversationList.tsx
+++ b/ui/src/components/ConversationList/ConversationList.tsx
@@ -9,9 +9,10 @@ import {
Trash2,
X,
} from "lucide-react";
-import { memo, useCallback, useState } from "react";
+import { memo, useCallback, useMemo, useState } from "react";
import { Button } from "@/components/Button/Button";
+import { useDebouncedValue } from "@/hooks/useDebouncedValue";
import {
Dropdown,
DropdownContent,
@@ -244,13 +245,19 @@ export function ConversationList({
const [editingId, setEditingId] = useState(null);
const [editTitle, setEditTitle] = useState("");
- const filteredConversations = searchQuery
- ? conversations.filter(
- (c) =>
- c.title.toLowerCase().includes(searchQuery.toLowerCase()) ||
- c.messages.some((m) => m.content.toLowerCase().includes(searchQuery.toLowerCase()))
- )
- : conversations;
+ // Debounce + memoise the filter. Without this every keystroke walks every
+ // message body lowercased — O(N×M) on each character. With many long
+ // conversations this is a measurable hitch.
+ const debouncedQuery = useDebouncedValue(searchQuery, 150);
+ const filteredConversations = useMemo(() => {
+ if (!debouncedQuery) return conversations;
+ const needle = debouncedQuery.toLowerCase();
+ return conversations.filter(
+ (c) =>
+ c.title.toLowerCase().includes(needle) ||
+ c.messages.some((m) => m.content.toLowerCase().includes(needle))
+ );
+ }, [conversations, debouncedQuery]);
const groups = groupConversations(filteredConversations);
From 786ac9e817dc2d74d69f3b3e64a640f1c9fb5010 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:39:12 +1000
Subject: [PATCH 032/172] Stop forcing inflated virtualizer height in
ChatMessageList
---
ui/src/components/ChatMessageList/ChatMessageList.tsx | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/ui/src/components/ChatMessageList/ChatMessageList.tsx b/ui/src/components/ChatMessageList/ChatMessageList.tsx
index c877799..176e761 100644
--- a/ui/src/components/ChatMessageList/ChatMessageList.tsx
+++ b/ui/src/components/ChatMessageList/ChatMessageList.tsx
@@ -351,9 +351,12 @@ export function ChatMessageList({
) : (
{virtualizer.getVirtualItems().map((virtualItem) => {
const group = messageGroups[virtualItem.index];
From 9bdb3db588a9b9111e5f9ec215dc94f01326a2b7 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:39:47 +1000
Subject: [PATCH 033/172] Broadcast post-update conversation snapshot to other
tabs
---
.../ConversationsProvider/ConversationsProvider.tsx | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/ui/src/components/ConversationsProvider/ConversationsProvider.tsx b/ui/src/components/ConversationsProvider/ConversationsProvider.tsx
index 159b8a0..93764a8 100644
--- a/ui/src/components/ConversationsProvider/ConversationsProvider.tsx
+++ b/ui/src/components/ConversationsProvider/ConversationsProvider.tsx
@@ -382,8 +382,12 @@ export function ConversationsProvider({ children }: ConversationsProviderProps)
}
}
- // Apply all updates atomically via React state
+ // Apply all updates atomically via React state, and broadcast the
+ // *post-update* snapshot so other tabs see the new remoteId/syncedAt.
+ // Reading the closed-over `storedConversations` here would broadcast
+ // the pre-update state, leaving other tabs out of sync.
if (updates.length > 0) {
+ let merged: StoredConversation[] = storedConversationsRef.current;
setStoredConversations((prev) => {
const updated = [...prev];
for (const update of updates) {
@@ -396,13 +400,13 @@ export function ConversationsProvider({ children }: ConversationsProviderProps)
};
}
}
+ merged = updated;
return updated;
});
- // Broadcast to other tabs
broadcastChannelRef.current?.postMessage({
type: "sync",
- conversations: storedConversations,
+ conversations: merged,
} satisfies SyncMessage);
}
} finally {
From 88de4db2c0aac5a1a110225583584cfe41a5ae37 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:40:14 +1000
Subject: [PATCH 034/172] Compare feedback historyMode and modeMetadata in memo
equality
---
.../MultiModelResponse/MultiModelResponse.tsx | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/ui/src/components/MultiModelResponse/MultiModelResponse.tsx b/ui/src/components/MultiModelResponse/MultiModelResponse.tsx
index 77d7d0a..2ec3907 100644
--- a/ui/src/components/MultiModelResponse/MultiModelResponse.tsx
+++ b/ui/src/components/MultiModelResponse/MultiModelResponse.tsx
@@ -1535,6 +1535,8 @@ function areMultiModelResponsePropsEqual(
if (prev.groupId !== next.groupId) return false;
if (prev.selectedBest !== next.selectedBest) return false;
if (prev.timestamp.getTime() !== next.timestamp.getTime()) return false;
+ if (prev.historyMode !== next.historyMode) return false;
+ if (prev.forceStacked !== next.forceStacked) return false;
// Check callback identity - parent MUST use useCallback for stable refs
if (prev.onSelectBest !== next.onSelectBest) return false;
@@ -1576,6 +1578,14 @@ function areMultiModelResponsePropsEqual(
if (prevR.error !== nextR.error) return false;
if (prevR.usage?.totalTokens !== nextR.usage?.totalTokens) return false;
if (prevR.usage?.reasoningTokens !== nextR.usage?.reasoningTokens) return false;
+ // Feedback flips (rating, "select as best") — these change badges in the
+ // header; without a check the user has to scroll/click to see the new
+ // state.
+ if (prevR.feedback?.rating !== nextR.feedback?.rating) return false;
+ if (prevR.feedback?.selectedAsBest !== nextR.feedback?.selectedAsBest) return false;
+ // Mode metadata (e.g., router model swap on regenerate) drives the
+ // routing badge.
+ if (prevR.modeMetadata !== nextR.modeMetadata) return false;
// Check citations (compare length as a quick check)
if ((prevR.citations?.length ?? 0) !== (nextR.citations?.length ?? 0)) return false;
// Check artifacts (compare length as a quick check)
From e2b5593b3464899ebfd3ad4aba04a399baaee69f Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:47:22 +1000
Subject: [PATCH 035/172] Apply nightly rustfmt to review-fixes changes
---
src/auth/jwt.rs | 4 +---
src/config/auth.rs | 5 +----
src/db/mod.rs | 13 +++++++------
src/middleware/layers/admin.rs | 5 +----
src/providers/anthropic/convert.rs | 5 +----
src/providers/anthropic/stream.rs | 9 +++++----
src/providers/bedrock/stream.rs | 6 ++++--
src/providers/vertex/mod.rs | 3 +--
src/providers/vertex/stream.rs | 6 ++++--
src/routes/admin/oauth.rs | 6 +++++-
src/routes/admin/org_sso_configs.rs | 5 ++---
11 files changed, 32 insertions(+), 35 deletions(-)
diff --git a/src/auth/jwt.rs b/src/auth/jwt.rs
index e2556c7..5176cc8 100644
--- a/src/auth/jwt.rs
+++ b/src/auth/jwt.rs
@@ -148,9 +148,7 @@ impl JwtValidator {
// real expected audience.
let entries = config.audience.to_vec();
if entries.is_empty() {
- return Err(AuthError::Internal(
- "JWT audience must not be empty".into(),
- ));
+ return Err(AuthError::Internal("JWT audience must not be empty".into()));
}
for entry in entries {
if entry.trim().is_empty() {
diff --git a/src/config/auth.rs b/src/config/auth.rs
index 0a69cb1..db87cd4 100644
--- a/src/config/auth.rs
+++ b/src/config/auth.rs
@@ -608,10 +608,7 @@ impl ProxyAuthJwtConfig {
/// Reject empty audience values. `jsonwebtoken` accepts an empty string as a
/// valid audience match, so an empty entry would silently disable the audience
/// check.
-fn validate_jwt_audience(
- field: &str,
- audience: &OneOrMany,
-) -> Result<(), ConfigError> {
+fn validate_jwt_audience(field: &str, audience: &OneOrMany) -> Result<(), ConfigError> {
let entries = audience.to_vec();
if entries.is_empty() {
return Err(ConfigError::Validation(format!(
diff --git a/src/db/mod.rs b/src/db/mod.rs
index d7fd087..b13bb01 100644
--- a/src/db/mod.rs
+++ b/src/db/mod.rs
@@ -402,12 +402,13 @@ impl DbPool {
sqlx::postgres::PgSslMode::VerifyFull
}
};
- let connect_opts = |url: &str| -> Result {
- let opts: sqlx::postgres::PgConnectOptions = url.parse().map_err(|e| {
- DbError::Validation(format!("Invalid Postgres URL: {e}"))
- })?;
- Ok(opts.ssl_mode(ssl_mode))
- };
+ let connect_opts =
+ |url: &str| -> Result {
+ let opts: sqlx::postgres::PgConnectOptions = url.parse().map_err(|e| {
+ DbError::Validation(format!("Invalid Postgres URL: {e}"))
+ })?;
+ Ok(opts.ssl_mode(ssl_mode))
+ };
let pool_opts = || {
sqlx::postgres::PgPoolOptions::new()
.min_connections(cfg.min_connections)
diff --git a/src/middleware/layers/admin.rs b/src/middleware/layers/admin.rs
index e97315b..76129f7 100644
--- a/src/middleware/layers/admin.rs
+++ b/src/middleware/layers/admin.rs
@@ -185,10 +185,7 @@ pub const EMERGENCY_ADMIN_ROLE: &str = "_emergency_admin";
/// headers must never be able to claim these roles, since the gateway grants
/// extra trust to them (bootstrap / emergency break-glass).
pub(crate) fn strip_reserved_roles(roles: Vec) -> Vec {
- roles
- .into_iter()
- .filter(|r| !r.starts_with('_'))
- .collect()
+ roles.into_iter().filter(|r| !r.starts_with('_')).collect()
}
/// Try to authenticate via bootstrap API key.
diff --git a/src/providers/anthropic/convert.rs b/src/providers/anthropic/convert.rs
index d2309fc..0766b2f 100644
--- a/src/providers/anthropic/convert.rs
+++ b/src/providers/anthropic/convert.rs
@@ -954,10 +954,7 @@ pub fn convert_anthropic_to_responses_response(
type_: ResponsesReasoningType::Reasoning,
id: format!(
"rs_{}",
- crate::providers::anthropic::stream::strip_anthropic_prefix(
- &anthropic.id,
- "msg_"
- )
+ crate::providers::anthropic::stream::strip_anthropic_prefix(&anthropic.id, "msg_")
),
content: None, // Anthropic doesn't provide structured reasoning content
summary: vec![], // Would need to generate summary
diff --git a/src/providers/anthropic/stream.rs b/src/providers/anthropic/stream.rs
index 3d2e512..497069a 100644
--- a/src/providers/anthropic/stream.rs
+++ b/src/providers/anthropic/stream.rs
@@ -541,7 +541,8 @@ impl AnthropicToOpenAIStream {
self.emit_chunk(&chunk);
// Emit [DONE]
- self.output_buffer.push_back(Bytes::from("data: [DONE]\n\n"));
+ self.output_buffer
+ .push_back(Bytes::from("data: [DONE]\n\n"));
}
AnthropicStreamEvent::Ping => {
@@ -995,8 +996,7 @@ impl AnthropicToResponsesStream {
let output_index = self.tool_output_index(tool_index);
// Emit function call arguments delta
- let fc_id =
- format!("fc_{}", strip_anthropic_prefix(&tool_id, "toolu_"));
+ let fc_id = format!("fc_{}", strip_anthropic_prefix(&tool_id, "toolu_"));
self.emit_event(
"response.function_call_arguments.delta",
serde_json::json!({
@@ -1265,7 +1265,8 @@ impl AnthropicToResponsesStream {
);
// Emit [DONE] to signal end of stream (OpenAI Responses API convention)
- self.output_buffer.push_back(Bytes::from("data: [DONE]\n\n"));
+ self.output_buffer
+ .push_back(Bytes::from("data: [DONE]\n\n"));
}
AnthropicStreamEvent::Ping => {
diff --git a/src/providers/bedrock/stream.rs b/src/providers/bedrock/stream.rs
index 575fe9c..c58c0bd 100644
--- a/src/providers/bedrock/stream.rs
+++ b/src/providers/bedrock/stream.rs
@@ -362,7 +362,8 @@ impl BedrockToOpenAIStream {
self.emit_chunk(&usage_chunk);
// Emit [DONE]
- self.output_buffer.push_back(Bytes::from("data: [DONE]\n\n"));
+ self.output_buffer
+ .push_back(Bytes::from("data: [DONE]\n\n"));
}
}
_ => {
@@ -1108,7 +1109,8 @@ impl BedrockToResponsesStream {
);
// Emit [DONE] to signal end of stream
- self.output_buffer.push_back(Bytes::from("data: [DONE]\n\n"));
+ self.output_buffer
+ .push_back(Bytes::from("data: [DONE]\n\n"));
}
}
_ => {
diff --git a/src/providers/vertex/mod.rs b/src/providers/vertex/mod.rs
index 1283647..9def374 100644
--- a/src/providers/vertex/mod.rs
+++ b/src/providers/vertex/mod.rs
@@ -874,8 +874,7 @@ mod streaming_tests {
transformer.handle_response(response);
// Should emit [DONE] at the end
- let last_chunk =
- std::str::from_utf8(transformer.output_buffer.back().unwrap()).unwrap();
+ let last_chunk = std::str::from_utf8(transformer.output_buffer.back().unwrap()).unwrap();
assert_eq!(last_chunk, "data: [DONE]\n\n");
// Should have usage in second-to-last chunk
diff --git a/src/providers/vertex/stream.rs b/src/providers/vertex/stream.rs
index cf735e3..1837263 100644
--- a/src/providers/vertex/stream.rs
+++ b/src/providers/vertex/stream.rs
@@ -353,7 +353,8 @@ impl VertexToOpenAIStream {
self.emit_chunk(&usage_chunk);
// Emit [DONE]
- self.output_buffer.push_back(Bytes::from("data: [DONE]\n\n"));
+ self.output_buffer
+ .push_back(Bytes::from("data: [DONE]\n\n"));
}
}
}
@@ -592,7 +593,8 @@ impl VertexToResponsesStream {
// Pass through [DONE] marker
if json_str == "[DONE]" {
- self.output_buffer.push_back(Bytes::from("data: [DONE]\n\n"));
+ self.output_buffer
+ .push_back(Bytes::from("data: [DONE]\n\n"));
return;
}
diff --git a/src/routes/admin/oauth.rs b/src/routes/admin/oauth.rs
index ab7f866..14f0abb 100644
--- a/src/routes/admin/oauth.rs
+++ b/src/routes/admin/oauth.rs
@@ -58,7 +58,11 @@ fn validate_callback_url(callback_url: &str, pkce: &OAuthPkceConfig) -> Result d.eq_ignore_ascii_case("localhost"),
Some(url::Host::Ipv4(ip)) => ip.is_loopback(),
Some(url::Host::Ipv6(ip)) => {
- ip.is_loopback() || ip.to_ipv4_mapped().map(|v4| v4.is_loopback()).unwrap_or(false)
+ ip.is_loopback()
+ || ip
+ .to_ipv4_mapped()
+ .map(|v4| v4.is_loopback())
+ .unwrap_or(false)
}
None => false,
};
diff --git a/src/routes/admin/org_sso_configs.rs b/src/routes/admin/org_sso_configs.rs
index 3bd55ba..031c303 100644
--- a/src/routes/admin/org_sso_configs.rs
+++ b/src/routes/admin/org_sso_configs.rs
@@ -761,9 +761,8 @@ pub async fn parse_saml_metadata(
// Block private/loopback/cloud-metadata addresses with DNS rebinding
// protection — the same gate that `SamlAuthenticator::get_metadata` uses.
- crate::validation::validate_base_url(&input.metadata_url, false).map_err(|e| {
- AdminError::Validation(format!("SAML metadata URL is not permitted: {e}"))
- })?;
+ crate::validation::validate_base_url(&input.metadata_url, false)
+ .map_err(|e| AdminError::Validation(format!("SAML metadata URL is not permitted: {e}")))?;
// Fetch and parse the metadata
let client = reqwest::Client::new();
From f2edc67a0cc2143c920936476ec5372439f457f8 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 22:59:41 +1000
Subject: [PATCH 036/172] Reject session cookie secure=false with SameSite=None
---
src/config/auth.rs | 40 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 40 insertions(+)
diff --git a/src/config/auth.rs b/src/config/auth.rs
index db87cd4..97367e4 100644
--- a/src/config/auth.rs
+++ b/src/config/auth.rs
@@ -1203,6 +1203,13 @@ impl SessionConfig {
"Session duration cannot be zero".into(),
));
}
+ // Browsers require the Secure attribute when SameSite=None; otherwise
+ // the cookie is silently rejected in cross-site contexts.
+ if matches!(self.same_site, SameSite::None) && !self.secure {
+ return Err(ConfigError::Validation(
+ "Session cookie with same_site = \"none\" requires secure = true".into(),
+ ));
+ }
Ok(())
}
}
@@ -1767,6 +1774,39 @@ mod tests {
);
}
+ #[cfg(feature = "sso")]
+ #[test]
+ fn test_session_config_rejects_insecure_samesite_none() {
+ let config = SessionConfig {
+ cookie_name: "__gw_session".to_string(),
+ duration_secs: 86400,
+ secure: false,
+ same_site: SameSite::None,
+ secret: None,
+ enhanced: EnhancedSessionConfig::default(),
+ };
+ let err = config.validate().expect_err("must reject insecure None");
+ let msg = format!("{}", err);
+ assert!(
+ msg.contains("same_site") && msg.contains("secure"),
+ "error must mention same_site/secure: {msg}"
+ );
+ }
+
+ #[cfg(feature = "sso")]
+ #[test]
+ fn test_session_config_allows_insecure_lax() {
+ let config = SessionConfig {
+ cookie_name: "__gw_session".to_string(),
+ duration_secs: 86400,
+ secure: false,
+ same_site: SameSite::Lax,
+ secret: None,
+ enhanced: EnhancedSessionConfig::default(),
+ };
+ config.validate().expect("Lax + insecure must validate");
+ }
+
#[cfg(feature = "sso")]
#[test]
fn test_session_config_debug_no_secret() {
From 899b3fe829bd75f1c329ad98ce3445166867928f Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:02:17 +1000
Subject: [PATCH 037/172] Preserve SSE event terminator when injecting cost
---
src/streaming/mod.rs | 33 +++++++++++++++++++++++++--------
1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/src/streaming/mod.rs b/src/streaming/mod.rs
index 2af4fb2..6764645 100644
--- a/src/streaming/mod.rs
+++ b/src/streaming/mod.rs
@@ -276,7 +276,11 @@ fn inject_cost_into_sse_chunk(chunk: &[u8], cost_dollars: f64) -> Bytes {
};
let mut output = String::with_capacity(chunk_str.len() + 32);
- for line in chunk_str.split('\n') {
+ for raw in chunk_str.split_inclusive('\n') {
+ let (line, terminator) = match raw.strip_suffix('\n') {
+ Some(without) => (without, "\n"),
+ None => (raw, ""),
+ };
if let Some(json_str) = line.strip_prefix("data: ") {
if let Ok(mut json) = serde_json::from_str::(json_str) {
// Try root-level usage (Chat Completions format)
@@ -308,13 +312,7 @@ fn inject_cost_into_sse_chunk(chunk: &[u8], cost_dollars: f64) -> Bytes {
} else {
output.push_str(line);
}
- output.push('\n');
- }
-
- // The split('\n') + push('\n') loop adds one extra trailing newline;
- // remove it to match original chunk ending
- if !chunk_str.ends_with('\n') {
- output.pop();
+ output.push_str(terminator);
}
Bytes::from(output)
@@ -1024,6 +1022,25 @@ mod tests {
}
}
+ #[test]
+ fn test_inject_cost_preserves_double_newline_terminator() {
+ let chunk = b"data: {\"usage\":{\"prompt_tokens\":1,\"completion_tokens\":2}}\n\n";
+ let injected = inject_cost_into_sse_chunk(chunk, 0.0042);
+ let s = std::str::from_utf8(&injected).unwrap();
+ assert!(s.ends_with("\n\n"), "must preserve SSE event terminator");
+ assert!(!s.ends_with("\n\n\n"), "must not add extra newline");
+ assert!(s.contains("\"cost\":0.0042"));
+ }
+
+ #[test]
+ fn test_inject_cost_no_trailing_newline() {
+ let chunk = b"data: {\"usage\":{\"prompt_tokens\":1,\"completion_tokens\":2}}";
+ let injected = inject_cost_into_sse_chunk(chunk, 0.0042);
+ let s = std::str::from_utf8(&injected).unwrap();
+ assert!(!s.ends_with('\n'), "must preserve absent terminator");
+ assert!(s.contains("\"cost\":0.0042"));
+ }
+
#[test]
fn test_parse_sse_done() {
let chunk = b"data: [DONE]\n\n";
From d3af79f4a512cecbf1f1a814c134bd8793da8a7b Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:04:00 +1000
Subject: [PATCH 038/172] Estimate SSE delta tokens by char count, not byte len
---
src/streaming/mod.rs | 23 +++++++++++++++++++++--
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/src/streaming/mod.rs b/src/streaming/mod.rs
index 6764645..726ed3c 100644
--- a/src/streaming/mod.rs
+++ b/src/streaming/mod.rs
@@ -229,8 +229,10 @@ impl SseParser {
.and_then(|delta| delta.get("content"))
.and_then(|c| c.as_str())
{
- // Rough approximation: 1 token ≈ 4 characters
- let estimated_tokens = (content.len() as i64 + 3) / 4;
+ // Rough approximation: 1 token ≈ 4 characters.
+ // Use chars() instead of len() so multibyte content
+ // (CJK, emoji) isn't over-counted as a token-per-byte.
+ let estimated_tokens = (content.chars().count() as i64 + 3) / 4;
return Some(SseChunk::Delta {
tokens: estimated_tokens,
});
@@ -1022,6 +1024,23 @@ mod tests {
}
}
+ #[test]
+ fn test_parse_sse_delta_multibyte_content() {
+ // Three CJK chars plus one emoji = 4 chars, 13 bytes. A len()-based
+ // estimate would give 4 tokens; chars().count()/4 estimates 1.
+ let chunk = r#"data: {"choices":[{"delta":{"content":"日本語😀"}}]}"#;
+ let result = SseParser::parse_chunk(chunk.as_bytes());
+ match result {
+ Some(SseChunk::Delta { tokens }) => {
+ assert_eq!(
+ tokens, 1,
+ "4 chars should estimate to 1 token, got {tokens}"
+ );
+ }
+ _ => panic!("Expected Delta chunk"),
+ }
+ }
+
#[test]
fn test_inject_cost_preserves_double_newline_terminator() {
let chunk = b"data: {\"usage\":{\"prompt_tokens\":1,\"completion_tokens\":2}}\n\n";
From 4850102c288463da3e19ed9aa114ea614f84b33e Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:04:27 +1000
Subject: [PATCH 039/172] Pin React Query mutations retry to 0
---
ui/src/App.tsx | 3 +++
1 file changed, 3 insertions(+)
diff --git a/ui/src/App.tsx b/ui/src/App.tsx
index 0eb36c7..5bcfa98 100644
--- a/ui/src/App.tsx
+++ b/ui/src/App.tsx
@@ -18,6 +18,9 @@ const queryClient = new QueryClient({
staleTime: 1000 * 60, // 1 minute
retry: 1,
},
+ mutations: {
+ retry: 0,
+ },
},
});
From 140c5c17aaab4b21df0bb6f40c199754601147e6 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:05:07 +1000
Subject: [PATCH 040/172] Use form's isSubmitting on LoginPage to prevent
double-submit
---
ui/src/pages/LoginPage.tsx | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/ui/src/pages/LoginPage.tsx b/ui/src/pages/LoginPage.tsx
index ee317cf..e930f01 100644
--- a/ui/src/pages/LoginPage.tsx
+++ b/ui/src/pages/LoginPage.tsx
@@ -32,7 +32,6 @@ export default function LoginPage() {
const discoverSso = useDiscoverSso();
const [error, setError] = useState(null);
- const [isSubmitting, setIsSubmitting] = useState(false);
const [discoveredOrg, setDiscoveredOrg] = useState(null);
const [discoveryEmail, setDiscoveryEmail] = useState("");
@@ -98,17 +97,15 @@ export default function LoginPage() {
const onApiKeySubmit = async (data: LoginForm) => {
setError(null);
- setIsSubmitting(true);
-
try {
await login("api_key", { apiKey: data.apiKey });
} catch (err) {
setError(err instanceof Error ? err.message : "Authentication failed");
- } finally {
- setIsSubmitting(false);
}
};
+ const isSubmitting = apiKeyForm.formState.isSubmitting;
+
const handleOidcLogin = (orgId?: string) => {
login("oidc", orgId ? { orgId } : undefined);
};
From aeb5aa85c8f1a8c3a925ddf9c0189a2e9a53ed80 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:09:52 +1000
Subject: [PATCH 041/172] Make OIDC/SAML auth_state TTL configurable via
SessionConfig
---
src/auth/oidc.rs | 5 +++--
src/auth/saml.rs | 5 +++--
src/config/auth.rs | 22 ++++++++++++++++++++++
3 files changed, 28 insertions(+), 4 deletions(-)
diff --git a/src/auth/oidc.rs b/src/auth/oidc.rs
index 6e7217d..079b9e4 100644
--- a/src/auth/oidc.rs
+++ b/src/auth/oidc.rs
@@ -347,9 +347,10 @@ impl OidcAuthenticator {
.map_err(|e| AuthError::Internal(format!("Failed to retrieve auth state: {}", e)))?
.ok_or(AuthError::InvalidToken)?;
- // Check if state is too old (10 minute limit)
+ // Reject states older than the configured TTL.
+ let ttl = chrono::Duration::seconds(self.config.session.auth_state_ttl_secs as i64);
let age = Utc::now() - auth_state.created_at;
- if age > chrono::Duration::minutes(10) {
+ if age > ttl {
return Err(AuthError::ExpiredToken);
}
diff --git a/src/auth/saml.rs b/src/auth/saml.rs
index d15c95a..6627562 100644
--- a/src/auth/saml.rs
+++ b/src/auth/saml.rs
@@ -329,9 +329,10 @@ impl SamlAuthenticator {
.map_err(|e| AuthError::Internal(format!("Failed to retrieve auth state: {}", e)))?
.ok_or(AuthError::InvalidToken)?;
- // Check if state is too old (10 minute limit)
+ // Reject states older than the configured TTL.
+ let ttl = chrono::Duration::seconds(self.config.session.auth_state_ttl_secs as i64);
let age = Utc::now() - auth_state.created_at;
- if age > chrono::Duration::minutes(10) {
+ if age > ttl {
return Err(AuthError::ExpiredToken);
}
diff --git a/src/config/auth.rs b/src/config/auth.rs
index 97367e4..9d13d1c 100644
--- a/src/config/auth.rs
+++ b/src/config/auth.rs
@@ -1097,6 +1097,12 @@ pub struct SessionConfig {
#[serde(default = "default_session_duration")]
pub duration_secs: u64,
+ /// How long an in-flight authorization request (PKCE state, SAML
+ /// `relay_state`) remains valid, in seconds. Once exceeded, the user must
+ /// restart the login. Defaults to 10 minutes.
+ #[serde(default = "default_auth_state_ttl")]
+ pub auth_state_ttl_secs: u64,
+
/// Secure cookie (HTTPS only).
#[serde(default = "default_true")]
pub secure: bool,
@@ -1167,6 +1173,7 @@ impl std::fmt::Debug for SessionConfig {
f.debug_struct("SessionConfig")
.field("cookie_name", &self.cookie_name)
.field("duration_secs", &self.duration_secs)
+ .field("auth_state_ttl_secs", &self.auth_state_ttl_secs)
.field("secure", &self.secure)
.field("same_site", &self.same_site)
.field("secret", &self.secret.as_ref().map(|_| "****"))
@@ -1181,6 +1188,7 @@ impl Default for SessionConfig {
Self {
cookie_name: default_session_cookie(),
duration_secs: default_session_duration(),
+ auth_state_ttl_secs: default_auth_state_ttl(),
secure: true,
same_site: SameSite::default(),
secret: None,
@@ -1203,6 +1211,11 @@ impl SessionConfig {
"Session duration cannot be zero".into(),
));
}
+ if self.auth_state_ttl_secs == 0 {
+ return Err(ConfigError::Validation(
+ "Session auth_state_ttl_secs cannot be zero".into(),
+ ));
+ }
// Browsers require the Secure attribute when SameSite=None; otherwise
// the cookie is silently rejected in cross-site contexts.
if matches!(self.same_site, SameSite::None) && !self.secure {
@@ -1224,6 +1237,11 @@ fn default_session_duration() -> u64 {
86400 * 7 // 7 days
}
+#[cfg(feature = "sso")]
+fn default_auth_state_ttl() -> u64 {
+ 600 // 10 minutes
+}
+
#[cfg(feature = "sso")]
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))]
@@ -1752,6 +1770,7 @@ mod tests {
let config = SessionConfig {
cookie_name: "__gw_session".to_string(),
duration_secs: 86400,
+ auth_state_ttl_secs: 600,
secure: true,
same_site: SameSite::Lax,
secret: Some("my-super-secret-session-key".to_string()),
@@ -1780,6 +1799,7 @@ mod tests {
let config = SessionConfig {
cookie_name: "__gw_session".to_string(),
duration_secs: 86400,
+ auth_state_ttl_secs: 600,
secure: false,
same_site: SameSite::None,
secret: None,
@@ -1799,6 +1819,7 @@ mod tests {
let config = SessionConfig {
cookie_name: "__gw_session".to_string(),
duration_secs: 86400,
+ auth_state_ttl_secs: 600,
secure: false,
same_site: SameSite::Lax,
secret: None,
@@ -1814,6 +1835,7 @@ mod tests {
let config = SessionConfig {
cookie_name: "__gw_session".to_string(),
duration_secs: 86400,
+ auth_state_ttl_secs: 600,
secure: true,
same_site: SameSite::Lax,
secret: None,
From 2dcd8084efc6c38ce79476af045df5f7d992a226 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:11:40 +1000
Subject: [PATCH 042/172] Only strip Content-Length when cost injection
rewrites body
---
src/providers/mod.rs | 24 +++++++++++++++++++-----
1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/src/providers/mod.rs b/src/providers/mod.rs
index ce4c639..e9db46b 100644
--- a/src/providers/mod.rs
+++ b/src/providers/mod.rs
@@ -797,16 +797,24 @@ pub async fn inject_cost_into_response(params: CostInjectionParams<'_>) -> Respo
.map(|(_, s)| s)
.unwrap_or(crate::pricing::CostPricingSource::None);
- // Inject cost (in dollars) into the usage object in the response body
+ // Inject cost (in dollars) into the usage object in the response body.
+ // Only re-serialize when we actually mutate the JSON; otherwise we'd
+ // change the body length (whitespace, key order) and have to strip
+ // Content-Length unnecessarily.
+ let mut body_modified = false;
if let Some(cost) = cost_microcents {
let cost_dollars = crate::pricing::microcents_to_dollars(cost);
if let Some(usage_obj) = json.get_mut("usage").and_then(|u| u.as_object_mut()) {
usage_obj.insert("cost".to_string(), serde_json::Value::from(cost_dollars));
+ body_modified = true;
}
}
- // Re-serialize the (possibly modified) JSON
- let body_bytes = serde_json::to_vec(&json).unwrap_or_else(|_| bytes.to_vec());
+ let body_bytes = if body_modified {
+ serde_json::to_vec(&json).unwrap_or_else(|_| bytes.to_vec())
+ } else {
+ bytes.to_vec()
+ };
(
Some(input),
@@ -817,6 +825,7 @@ pub async fn inject_cost_into_response(params: CostInjectionParams<'_>) -> Respo
finish_reason,
body_bytes,
pricing_source,
+ body_modified,
)
}
Err(_) => (
@@ -828,6 +837,7 @@ pub async fn inject_cost_into_response(params: CostInjectionParams<'_>) -> Respo
None,
bytes.to_vec(),
crate::pricing::CostPricingSource::None,
+ false,
),
};
@@ -840,6 +850,7 @@ pub async fn inject_cost_into_response(params: CostInjectionParams<'_>) -> Respo
finish_reason,
body_bytes,
pricing_source,
+ body_modified,
) = extracted;
// Rebuild response with headers
@@ -880,8 +891,11 @@ pub async fn inject_cost_into_response(params: CostInjectionParams<'_>) -> Respo
new_parts.headers.insert("X-Pricing-Source", value);
}
- // Remove Content-Length since body size may have changed after cost injection
- new_parts.headers.remove(CONTENT_LENGTH);
+ // Only strip Content-Length when we re-serialized the body. If the body is
+ // passed through untouched, the upstream length is still authoritative.
+ if body_modified {
+ new_parts.headers.remove(CONTENT_LENGTH);
+ }
Response::from_parts(new_parts, Body::from(body_bytes))
}
From 443c85a9948625157f24cf69984c6ba89b61670a Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:12:12 +1000
Subject: [PATCH 043/172] Surface unknown OAuth owner kind instead of coercing
to user
---
ui/src/pages/OAuthAuthorizePage.tsx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ui/src/pages/OAuthAuthorizePage.tsx b/ui/src/pages/OAuthAuthorizePage.tsx
index eaf6fea..2322622 100644
--- a/ui/src/pages/OAuthAuthorizePage.tsx
+++ b/ui/src/pages/OAuthAuthorizePage.tsx
@@ -125,7 +125,7 @@ function ownerKeyToApiKeyOwner(key: string, userId: string): ApiKeyOwner {
case "project":
return { type: "project", project_id: id };
default:
- return { type: "user", user_id: userId };
+ throw new Error(`Unsupported owner kind: ${kind}`);
}
}
From 7212f401335e1063ff9416fffd8db6e504762313 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:12:55 +1000
Subject: [PATCH 044/172] Cancel useAutoScroll rAF on unmount to avoid stale
scheduled callback
---
ui/src/hooks/useAutoScroll.ts | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/ui/src/hooks/useAutoScroll.ts b/ui/src/hooks/useAutoScroll.ts
index 5f8f996..4d35eb5 100644
--- a/ui/src/hooks/useAutoScroll.ts
+++ b/ui/src/hooks/useAutoScroll.ts
@@ -140,7 +140,7 @@ export function useAutoScroll(options: UseAutoScrollOptions = {}): UseAutoScroll
};
// Use requestAnimationFrame to ensure layout is complete
- requestAnimationFrame(checkInitialPosition);
+ const rafId = requestAnimationFrame(checkInitialPosition);
// Also check when container resizes (content loaded)
// Skip during streaming - content height changes constantly during streaming,
@@ -153,7 +153,10 @@ export function useAutoScroll(options: UseAutoScrollOptions = {}): UseAutoScroll
});
resizeObserver.observe(container);
- return () => resizeObserver.disconnect();
+ return () => {
+ cancelAnimationFrame(rafId);
+ resizeObserver.disconnect();
+ };
}, [checkIfAtBottom]);
return {
From 128ccf39ccec341da965184aeb78b362bbb28f7e Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:13:31 +1000
Subject: [PATCH 045/172] Roll back optimistic pin reorder on sync failure
---
.../ConversationsProvider.tsx | 20 ++++++++++++++++++-
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/ui/src/components/ConversationsProvider/ConversationsProvider.tsx b/ui/src/components/ConversationsProvider/ConversationsProvider.tsx
index 93764a8..eae2f28 100644
--- a/ui/src/components/ConversationsProvider/ConversationsProvider.tsx
+++ b/ui/src/components/ConversationsProvider/ConversationsProvider.tsx
@@ -714,6 +714,11 @@ export function ConversationsProvider({ children }: ConversationsProviderProps)
const reorderPinned = useCallback(
(orderedIds: string[]) => {
+ // Snapshot current pin orders so we can roll back if any sync fails.
+ const previousOrders = new Map(
+ storedConversations.map((c) => [c.id, c.pinOrder] as const)
+ );
+
// Update local state with new pin orders
setStoredConversations((prev) => {
const updated = prev.map((c) => {
@@ -731,7 +736,20 @@ export function ConversationsProvider({ children }: ConversationsProviderProps)
orderedIds.forEach((id, index) => {
const conv = storedConversations.find((c) => c.id === id);
if (conv?.remoteId) {
- pinMutation.mutate({ remoteId: conv.remoteId, pinOrder: index });
+ pinMutation.mutate(
+ { remoteId: conv.remoteId, pinOrder: index },
+ {
+ onError: () => {
+ setStoredConversations((prev) =>
+ prev.map((c) =>
+ previousOrders.has(c.id)
+ ? { ...c, pinOrder: previousOrders.get(c.id) }
+ : c
+ )
+ );
+ },
+ }
+ );
}
});
}
From 55fc3e5babb3c2e1f401b9c8ae96a0d143201a6e Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:14:50 +1000
Subject: [PATCH 046/172] Cache shiki HTML so theme toggles reuse prior
highlights
---
.../HighlightedCode/HighlightedCode.tsx | 37 ++++++++++++++++++-
1 file changed, 36 insertions(+), 1 deletion(-)
diff --git a/ui/src/components/HighlightedCode/HighlightedCode.tsx b/ui/src/components/HighlightedCode/HighlightedCode.tsx
index a930837..7fa2f79 100644
--- a/ui/src/components/HighlightedCode/HighlightedCode.tsx
+++ b/ui/src/components/HighlightedCode/HighlightedCode.tsx
@@ -31,6 +31,33 @@ function getHighlighter(): Promise<Highlighter> {
return highlighterPromise;
}
+// Bounded LRU-ish cache so toggling themes back and forth on the same blocks
+// doesn't trigger a re-highlight every time. Keyed on (theme, lang, code).
+const HIGHLIGHT_CACHE_LIMIT = 256;
+const highlightCache = new Map<string, string>();
+
+function cacheKey(theme: string, lang: string, code: string): string {
+ return `${theme}|${lang}|${code}`;
+}
+
+function readHighlightCache(key: string): string | undefined {
+ const cached = highlightCache.get(key);
+ if (cached !== undefined) {
+ // Move to most-recent slot
+ highlightCache.delete(key);
+ highlightCache.set(key, cached);
+ }
+ return cached;
+}
+
+function writeHighlightCache(key: string, value: string): void {
+ if (highlightCache.size >= HIGHLIGHT_CACHE_LIMIT) {
+ const oldest = highlightCache.keys().next().value;
+ if (oldest !== undefined) highlightCache.delete(oldest);
+ }
+ highlightCache.set(key, value);
+}
+
export interface HighlightedCodeProps {
code: string;
language?: string;
@@ -64,10 +91,17 @@ function HighlightedCodeComponent({
useEffect(() => {
let cancelled = false;
+ const lang = (language?.toLowerCase() ?? "text") || "text";
+ const key = cacheKey(theme, lang, code);
+ const cached = readHighlightCache(key);
+ if (cached !== undefined) {
+ setHtml(cached);
+ return;
+ }
+
getHighlighter().then((highlighter) => {
if (cancelled) return;
- const lang = language?.toLowerCase() ?? "text";
const loadedLangs = highlighter.getLoadedLanguages();
// Use plain text for unknown languages
@@ -77,6 +111,7 @@ function HighlightedCodeComponent({
lang: effectiveLang,
theme,
});
+ writeHighlightCache(key, result);
setHtml(result);
});
From 4e82ec073fb59832ba4eab140a0b1cf851c4fc09 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:15:16 +1000
Subject: [PATCH 047/172] Replace 50-char prefix sync hash with djb2 over full
content
---
.../ConversationsProvider.tsx | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/ui/src/components/ConversationsProvider/ConversationsProvider.tsx b/ui/src/components/ConversationsProvider/ConversationsProvider.tsx
index eae2f28..3ab8851 100644
--- a/ui/src/components/ConversationsProvider/ConversationsProvider.tsx
+++ b/ui/src/components/ConversationsProvider/ConversationsProvider.tsx
@@ -121,15 +121,25 @@ function localToApiMessage(m: StoredConversation["messages"][0]): Message {
};
}
+// djb2 string hash. Plenty for content-change detection: collisions are
+// vanishingly rare in practice and we don't need cryptographic guarantees.
+function hashContent(s: string): string {
+ let h = 5381;
+ for (let i = 0; i < s.length; i++) {
+ h = (((h << 5) + h) ^ s.charCodeAt(i)) | 0;
+ }
+ return (h >>> 0).toString(36);
+}
+
// Compute a sync hash that includes actual content changes
function computeSyncHash(conversations: StoredConversation[]): string {
return JSON.stringify(
conversations.map((c) => ({
id: c.id,
title: c.title,
- // Include message content hash for detecting content changes
+ // Hash full content so edits past character 50 still invalidate the hash.
msgHash: c.messages
- .map((m) => `${m.role}:${m.content.length}:${m.content.slice(0, 50)}`)
+ .map((m) => `${m.role}:${m.content.length}:${hashContent(m.content)}`)
.join("|"),
models: c.models.join(","),
updatedAt: c.updatedAt,
From c5c45db0cc15471775650e84a6b75ff0b15ce031 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:15:46 +1000
Subject: [PATCH 048/172] Accept pasted images in ChatInput textarea
---
ui/src/components/ChatInput/ChatInput.tsx | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/ui/src/components/ChatInput/ChatInput.tsx b/ui/src/components/ChatInput/ChatInput.tsx
index 76c3e9a..da9f1ae 100644
--- a/ui/src/components/ChatInput/ChatInput.tsx
+++ b/ui/src/components/ChatInput/ChatInput.tsx
@@ -437,6 +437,18 @@ export function ChatInput({
[handleFileSelect]
);
+ const handlePaste = useCallback(
+ (event: React.ClipboardEvent) => {
+ const pastedFiles = event.clipboardData?.files;
+ if (pastedFiles && pastedFiles.length > 0) {
+ // Prevent the textarea from inserting an image filename or data URL.
+ event.preventDefault();
+ handleFileSelect(pastedFiles);
+ }
+ },
+ [handleFileSelect]
+ );
+
const handleDragOver = useCallback((event: React.DragEvent) => {
event.preventDefault();
setIsDragging(true);
@@ -549,6 +561,7 @@ export function ChatInput({
updateSlashState(target.value, target.selectionStart ?? 0);
}}
onKeyDown={handleKeyDown}
+ onPaste={handlePaste}
placeholder={placeholder}
className="min-h-[56px] w-full resize-none border-0 bg-transparent px-4 pt-3 pb-1 text-base focus-visible:ring-0 focus-visible:ring-offset-0"
autoResize
From d5c95924bdd164c71a509fbf477c545e87313c6c Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:16:14 +1000
Subject: [PATCH 049/172] Broadcast useLocalStorage writes to same-tab hook
instances
---
ui/src/hooks/useLocalStorage.ts | 45 ++++++++++++++++++++++++++-------
1 file changed, 36 insertions(+), 9 deletions(-)
diff --git a/ui/src/hooks/useLocalStorage.ts b/ui/src/hooks/useLocalStorage.ts
index 9216965..16abe7a 100644
--- a/ui/src/hooks/useLocalStorage.ts
+++ b/ui/src/hooks/useLocalStorage.ts
@@ -1,5 +1,15 @@
import { useState, useEffect, useCallback } from "react";
+// `storage` events only fire in *other* tabs. To keep multiple hook instances
+// of the same key inside the same tab in sync, mirror writes onto a custom
+// event we dispatch ourselves.
+const SAME_TAB_EVENT = "hadrian:local-storage";
+
+interface SameTabPayload {
+ key: string;
+ newValue: string | null;
+}
+
export function useLocalStorage<T>(
key: string,
initialValue: T
@@ -21,7 +31,13 @@ export function useLocalStorage(
setStoredValue((prev) => {
const valueToStore = value instanceof Function ? value(prev) : value;
if (typeof window !== "undefined") {
- window.localStorage.setItem(key, JSON.stringify(valueToStore));
+ const serialized = JSON.stringify(valueToStore);
+ window.localStorage.setItem(key, serialized);
+ window.dispatchEvent(
+ new CustomEvent(SAME_TAB_EVENT, {
+ detail: { key, newValue: serialized },
+ })
+ );
}
return valueToStore;
});
@@ -30,18 +46,29 @@ export function useLocalStorage(
);
useEffect(() => {
- const handleStorageChange = (e: StorageEvent) => {
- if (e.key === key && e.newValue) {
- try {
- setStoredValue(JSON.parse(e.newValue) as T);
- } catch {
- // Ignore parse errors
- }
+ const apply = (newValue: string | null) => {
+ if (newValue === null) return;
+ try {
+ setStoredValue(JSON.parse(newValue) as T);
+ } catch {
+ // Ignore parse errors
}
};
+ const handleStorageChange = (e: StorageEvent) => {
+ if (e.key === key) apply(e.newValue);
+ };
+ const handleSameTabChange = (e: Event) => {
+ const detail = (e as CustomEvent).detail;
+ if (detail?.key === key) apply(detail.newValue);
+ };
+
window.addEventListener("storage", handleStorageChange);
- return () => window.removeEventListener("storage", handleStorageChange);
+ window.addEventListener(SAME_TAB_EVENT, handleSameTabChange);
+ return () => {
+ window.removeEventListener("storage", handleStorageChange);
+ window.removeEventListener(SAME_TAB_EVENT, handleSameTabChange);
+ };
}, [key]);
return [storedValue, setValue];
From 0a7423106fb222222d31b0da337d3d3d4b4f5cc8 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:18:36 +1000
Subject: [PATCH 050/172] SSRF-validate per-org OIDC redirect_uri on create and
update
---
src/routes/admin/org_sso_configs.rs | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/src/routes/admin/org_sso_configs.rs b/src/routes/admin/org_sso_configs.rs
index 031c303..0cfe64b 100644
--- a/src/routes/admin/org_sso_configs.rs
+++ b/src/routes/admin/org_sso_configs.rs
@@ -334,6 +334,10 @@ pub async fn create(
crate::validation::validate_base_url_opts(discovery_url, url_opts)
.map_err(|e| AdminError::Validation(format!("Invalid discovery URL: {e}")))?;
}
+ if let Some(ref redirect_uri) = input.redirect_uri {
+ crate::validation::validate_base_url_opts(redirect_uri, url_opts)
+ .map_err(|e| AdminError::Validation(format!("Invalid redirect URI: {e}")))?;
+ }
}
// Create the SSO config
@@ -550,6 +554,10 @@ pub async fn update(
crate::validation::validate_base_url_opts(discovery_url, url_opts)
.map_err(|e| AdminError::Validation(format!("Invalid discovery URL: {e}")))?;
}
+ if let Some(Some(ref redirect_uri)) = input.redirect_uri {
+ crate::validation::validate_base_url_opts(redirect_uri, url_opts)
+ .map_err(|e| AdminError::Validation(format!("Invalid redirect URI: {e}")))?;
+ }
}
// Update the SSO config
From 33991a9ad2a188c6b89512e1c2db0429a90299b6 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:20:01 +1000
Subject: [PATCH 051/172] Abort in-flight title generation when
ConversationsProvider unmounts
---
.../ConversationsProvider/ConversationsProvider.tsx | 9 ++++++++-
ui/src/utils/generateTitle.ts | 4 +++-
2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/ui/src/components/ConversationsProvider/ConversationsProvider.tsx b/ui/src/components/ConversationsProvider/ConversationsProvider.tsx
index 3ab8851..d550eb1 100644
--- a/ui/src/components/ConversationsProvider/ConversationsProvider.tsx
+++ b/ui/src/components/ConversationsProvider/ConversationsProvider.tsx
@@ -527,6 +527,13 @@ export function ConversationsProvider({ children }: ConversationsProviderProps)
// Track conversations that are pending LLM title generation to avoid duplicate calls
const pendingTitleGenRef = useRef<Set<string>>(new Set());
+ // AbortController used to cancel any in-flight title generations on unmount.
+ const titleGenAbortRef = useRef(new AbortController());
+ useEffect(() => {
+ return () => {
+ titleGenAbortRef.current.abort();
+ };
+ }, []);
const updateConversation = useCallback(
(id: string, messages: ChatMessage[], models?: string[]) => {
@@ -564,7 +571,7 @@ export function ConversationsProvider({ children }: ConversationsProviderProps)
const titleModel = preferences.titleGenerationModel;
if (needsLLMTitle && firstUserMessage && titleModel) {
pendingTitleGenRef.current.add(id);
- generateTitleWithLLM(firstUserMessage, titleModel)
+ generateTitleWithLLM(firstUserMessage, titleModel, titleGenAbortRef.current.signal)
.then((result) => {
// Only update if the title is different and better
setConversations((prev) =>
diff --git a/ui/src/utils/generateTitle.ts b/ui/src/utils/generateTitle.ts
index 1453035..ff49dd3 100644
--- a/ui/src/utils/generateTitle.ts
+++ b/ui/src/utils/generateTitle.ts
@@ -27,7 +27,8 @@ export function generateSimpleTitle(userMessage: string): string {
*/
export async function generateTitleWithLLM(
userMessage: string,
- model: string
+ model: string,
+ signal?: AbortSignal
): Promise {
try {
const response = await apiV1ChatCompletions({
@@ -46,6 +47,7 @@ export async function generateTitleWithLLM(
},
],
},
+ signal,
throwOnError: true,
});
From 5de4df678d384f0fbc6eea238e2a5676bcdd8c8b Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:25:32 +1000
Subject: [PATCH 052/172] Scope API-key revoke/rotate authz by owner
org/team/project
---
src/routes/admin/api_keys.rs | 123 ++++++++++++++++++++++++++++-------
1 file changed, 101 insertions(+), 22 deletions(-)
diff --git a/src/routes/admin/api_keys.rs b/src/routes/admin/api_keys.rs
index af51c7e..63086b3 100644
--- a/src/routes/admin/api_keys.rs
+++ b/src/routes/admin/api_keys.rs
@@ -189,6 +189,88 @@ pub(super) async fn check_owner_create_authz(
Ok(())
}
+/// Run the owner-scoped RBAC check that gates modification of an existing key
+/// (revoke, rotate, etc). Mirrors `check_owner_create_authz` but for an
+/// already-known key with a concrete id, so authorisation is scoped to the
+/// owner's org/team/project rather than checking only the bare resource id.
+pub(super) async fn check_owner_modify_authz(
+ services: &crate::services::Services,
+ authz: &crate::middleware::AuthzContext,
+ action: &str,
+ key_id: uuid::Uuid,
+ owner: &crate::models::ApiKeyOwner,
+) -> Result<(), AdminError> {
+ let resource_id = key_id.to_string();
+ match owner {
+ crate::models::ApiKeyOwner::Organization { org_id } => {
+ authz.require(
+ "api_key",
+ action,
+ Some(&resource_id),
+ Some(&org_id.to_string()),
+ None,
+ None,
+ )?;
+ }
+ crate::models::ApiKeyOwner::Team { team_id } => {
+ let team = services
+ .teams
+ .get_by_id(*team_id)
+ .await?
+ .ok_or_else(|| AdminError::NotFound(format!("Team '{}' not found", team_id)))?;
+ authz.require(
+ "api_key",
+ action,
+ Some(&resource_id),
+ Some(&team.org_id.to_string()),
+ Some(&team_id.to_string()),
+ None,
+ )?;
+ }
+ crate::models::ApiKeyOwner::Project { project_id } => {
+ let project = services
+ .projects
+ .get_by_id(*project_id)
+ .await?
+ .ok_or_else(|| {
+ AdminError::NotFound(format!("Project '{}' not found", project_id))
+ })?;
+ authz.require(
+ "api_key",
+ action,
+ Some(&resource_id),
+ Some(&project.org_id.to_string()),
+ None,
+ Some(&project_id.to_string()),
+ )?;
+ }
+ crate::models::ApiKeyOwner::User { .. } => {
+ authz.require("api_key", action, Some(&resource_id), None, None, None)?;
+ }
+ crate::models::ApiKeyOwner::ServiceAccount { service_account_id } => {
+ let sa = services
+ .service_accounts
+ .get_by_id(*service_account_id)
+ .await?
+ .ok_or_else(|| {
+ AdminError::NotFound(format!(
+ "Service account '{}' not found",
+ service_account_id
+ ))
+ })?;
+ authz.require(
+ "api_key",
+ action,
+ Some(&resource_id),
+ Some(&sa.org_id.to_string()),
+ None,
+ None,
+ )?;
+ }
+ }
+ Ok(())
+}
+
/// Enforce the per-scope `max_api_keys_per_*` limits before creating a key.
pub(crate) async fn check_owner_create_limits(
services: &crate::services::Services,
@@ -800,20 +882,19 @@ pub async fn revoke(
Extension(client_info): Extension,
Path(key_id): Path,
) -> Result, AdminError> {
- authz.require(
- "api_key",
- "delete",
- Some(&key_id.to_string()),
- None,
- None,
- None,
- )?;
-
let services = get_services(&state)?;
let actor = AuditActor::from(&admin_auth);
- // Get API key info for audit log before revoking
- let key_info = services.api_keys.get_by_id(key_id).await?;
+ // Fetch the key first so authz can scope the check by owner. Without
+ // this, the key id alone is insufficient — RBAC needs the org/team/
+ // project to distinguish org-admins of different tenants.
+ let key_info = services
+ .api_keys
+ .get_by_id(key_id)
+ .await?
+ .ok_or_else(|| AdminError::NotFound(format!("API key '{}' not found", key_id)))?;
+ check_owner_modify_authz(services, &authz, "delete", key_id, &key_info.owner).await?;
+ let key_info = Some(key_info);
services.api_keys.revoke(key_id).await?;
@@ -956,18 +1037,17 @@ pub async fn rotate(
Path(key_id): Path,
Json(request): Json,
) -> Result<(StatusCode, Json), AdminError> {
- authz.require(
- "api_key",
- "update",
- Some(&key_id.to_string()),
- None,
- None,
- None,
- )?;
-
let services = get_services(&state)?;
let actor = AuditActor::from(&admin_auth);
+ // Fetch first so authz can scope by owner; see `revoke` for rationale.
+ let old_key_for_authz = services
+ .api_keys
+ .get_by_id(key_id)
+ .await?
+ .ok_or_else(|| AdminError::NotFound(format!("API key '{}' not found", key_id)))?;
+ check_owner_modify_authz(services, &authz, "update", key_id, &old_key_for_authz.owner).await?;
+
// Validate grace period
let grace_period_seconds = request
.grace_period_seconds
@@ -989,8 +1069,7 @@ pub async fn rotate(
// Get the key generation prefix from config
let prefix = state.config.auth.api_key_config().generation_prefix();
- // Get old key info for audit log before rotating
- let old_key = services.api_keys.get_by_id(key_id).await?;
+ let old_key = Some(old_key_for_authz);
// Perform the rotation
let created = services
From 3136d84c0eb298e0be028234973d7ca812ee1307 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:26:00 +1000
Subject: [PATCH 053/172] Gate admin-UI bypass on explicit
VITE_FORCE_ADMIN_ACCESS env flag
---
ui/src/auth/types.ts | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/ui/src/auth/types.ts b/ui/src/auth/types.ts
index e183948..353dd89 100644
--- a/ui/src/auth/types.ts
+++ b/ui/src/auth/types.ts
@@ -12,10 +12,15 @@ export interface User {
/** Admin roles that grant access to the admin UI */
export const ADMIN_ROLES = ["super_admin", "org_admin", "team_admin"] as const;
-/** Check if a user has any admin role */
+/** Check if a user has any admin role.
+ *
+ * The earlier shortcut "always allow in `import.meta.env.DEV`" leaked into
+ * Storybook builds and any local production-ish setup with `pnpm dev`, so
+ * the admin UI rendered for unprivileged users. Bypassing the role check now
+ * requires an explicit opt-in via `VITE_FORCE_ADMIN_ACCESS=1` so each
+ * developer turning it on is doing so deliberately. */
export function hasAdminAccess(user: User | null): boolean {
- // In dev mode, always show admin pages for easier development
- if (import.meta.env.DEV) return true;
+ if (import.meta.env.VITE_FORCE_ADMIN_ACCESS === "1") return true;
if (!user?.roles) return false;
return user.roles.some((role) => ADMIN_ROLES.includes(role as (typeof ADMIN_ROLES)[number]));
From dfc9e4e2c1230558a0848d6b4459969f98f32eda Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:26:46 +1000
Subject: [PATCH 054/172] Cancel superseded CEL validation requests with
AbortController
---
.../RbacPolicy/CelExpressionInput.tsx | 20 ++++++++++++++++---
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/ui/src/components/RbacPolicy/CelExpressionInput.tsx b/ui/src/components/RbacPolicy/CelExpressionInput.tsx
index 20d91f5..a926f3e 100644
--- a/ui/src/components/RbacPolicy/CelExpressionInput.tsx
+++ b/ui/src/components/RbacPolicy/CelExpressionInput.tsx
@@ -1,4 +1,4 @@
-import { useEffect, useState } from "react";
+import { useEffect, useRef, useState } from "react";
import { useMutation } from "@tanstack/react-query";
import { CheckCircle2, XCircle, Loader2, Info } from "lucide-react";
import { useDebouncedCallback } from "use-debounce";
@@ -50,6 +50,12 @@ export function CelExpressionInput({
}>({ valid: null, error: null, checking: false });
const [showHelp, setShowHelp] = useState(false);
+ // Newer keystrokes abort older in-flight validations so out-of-order
+ // responses can't paint stale state, and unmount cancels everything.
+ const abortRef = useRef<AbortController | null>(null);
+ useEffect(() => {
+ return () => abortRef.current?.abort();
+ }, []);
const validateMutation = useMutation({
...orgRbacPolicyValidateMutation(),
@@ -60,7 +66,10 @@ export function CelExpressionInput({
checking: false,
});
},
- onError: () => {
+ onError: (error) => {
+ // Suppress aborted-request errors: they only mean a newer keystroke
+ // superseded this validation, not that the expression is invalid.
+ if (error instanceof DOMException && error.name === "AbortError") return;
setValidationState({
valid: null,
error: "Failed to validate expression",
@@ -75,7 +84,12 @@ export function CelExpressionInput({
return;
}
setValidationState((prev) => ({ ...prev, checking: true }));
- validateMutation.mutate({ body: { condition } });
+ abortRef.current?.abort();
+ abortRef.current = new AbortController();
+ validateMutation.mutate({
+ body: { condition },
+ signal: abortRef.current.signal,
+ });
}, 500);
useEffect(() => {
From 30316b9daf8d4b51e33566229719ba7bd96e5314 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:29:17 +1000
Subject: [PATCH 055/172] Skip HTTPS image preprocessing for providers that
pass through
---
src/config/features.rs | 2 ++
src/providers/anthropic/mod.rs | 5 +++++
src/providers/image.rs | 15 +++++++++++++++
3 files changed, 22 insertions(+)
diff --git a/src/config/features.rs b/src/config/features.rs
index 965dda5..3d0d799 100644
--- a/src/config/features.rs
+++ b/src/config/features.rs
@@ -2126,6 +2126,8 @@ impl ImageFetchingConfig {
max_size_bytes: self.max_size_mb * 1024 * 1024,
timeout: std::time::Duration::from_secs(self.timeout_secs),
allowed_content_types: self.allowed_content_types.clone(),
+ // Per-provider; Anthropic's constructor sets this on its own copy.
+ pass_through_https: false,
}
}
}
diff --git a/src/providers/anthropic/mod.rs b/src/providers/anthropic/mod.rs
index 39898a8..c8740d9 100644
--- a/src/providers/anthropic/mod.rs
+++ b/src/providers/anthropic/mod.rs
@@ -100,6 +100,11 @@ impl AnthropicProvider {
) -> Self {
let circuit_breaker = registry.get_or_create(provider_name, &config.circuit_breaker);
+ // Anthropic supports HTTPS image URLs natively, so don't waste cycles
+ // re-encoding them as base64 data URLs in the preprocess step.
+ let mut image_fetch_config = image_fetch_config;
+ image_fetch_config.pass_through_https = true;
+
Self {
api_key: config.api_key.clone(),
base_url: config.base_url.trim_end_matches('/').to_string(),
diff --git a/src/providers/image.rs b/src/providers/image.rs
index 765d231..1f48137 100644
--- a/src/providers/image.rs
+++ b/src/providers/image.rs
@@ -31,6 +31,12 @@ pub struct ImageFetchConfig {
pub timeout: Duration,
/// Allowed content types (empty = allow all image types)
pub allowed_content_types: Vec<String>,
+ /// Skip preprocessing for `https://` URLs (default: false). Set this for
+ /// providers that natively support HTTPS image URLs (e.g. Anthropic), so
+ /// we don't waste bandwidth fetching and re-encoding images the upstream
+ /// can pull itself. `http://` URLs are still preprocessed because most
+ /// providers reject plain HTTP.
+ pub pass_through_https: bool,
}
impl Default for ImageFetchConfig {
@@ -45,6 +51,7 @@ impl Default for ImageFetchConfig {
"image/gif".to_string(),
"image/webp".to_string(),
],
+ pass_through_https: false,
}
}
}
@@ -380,6 +387,14 @@ async fn preprocess_content_for_images(
continue;
}
+ // Providers like Anthropic accept HTTPS URLs directly;
+ // fetching and re-encoding them is wasted work.
+ if image_url.url.starts_with("https://")
+ && config.is_some_and(|c| c.pass_through_https)
+ {
+ continue;
+ }
+
// Try to fetch HTTP URL
if is_http_url(&image_url.url) {
match resolve_image_url(client, &image_url.url, config).await {
From 1c848d31756f470a5ef982104ed69213857cc94e Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:30:24 +1000
Subject: [PATCH 056/172] Wire DataTable filtered row model unconditionally
---
ui/src/components/DataTable/DataTable.tsx | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/ui/src/components/DataTable/DataTable.tsx b/ui/src/components/DataTable/DataTable.tsx
index e0b6e41..b577de6 100644
--- a/ui/src/components/DataTable/DataTable.tsx
+++ b/ui/src/components/DataTable/DataTable.tsx
@@ -62,10 +62,11 @@ export function DataTable({
getSortedRowModel: getSortedRowModel(),
onSortingChange: setSorting,
}),
- ...(searchColumn && {
- getFilteredRowModel: getFilteredRowModel(),
- onColumnFiltersChange: setColumnFilters,
- }),
+ // Always enable the filtered row model when filtering is possible —
+ // either column-scoped (searchColumn) or via globalFilter — so the
+ // search input doesn't silently no-op when `searchColumn` is unset.
+ getFilteredRowModel: getFilteredRowModel(),
+ onColumnFiltersChange: setColumnFilters,
onColumnVisibilityChange: setColumnVisibility,
onGlobalFilterChange: setGlobalFilter,
state: {
From 18bd86d6bc6033974197af9acca49f4e7d4f2eb2 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:32:44 +1000
Subject: [PATCH 057/172] Prefer configured public_url for SCIM base URL
---
src/routes/scim/users.rs | 46 +++++++++++++++++++++++++++++++++-------
1 file changed, 38 insertions(+), 8 deletions(-)
diff --git a/src/routes/scim/users.rs b/src/routes/scim/users.rs
index 4c516f9..90e5f33 100644
--- a/src/routes/scim/users.rs
+++ b/src/routes/scim/users.rs
@@ -72,19 +72,49 @@ impl IntoResponse for ScimJsonWithStatus {
// =============================================================================
/// Extract the SCIM base URL from the request.
-fn get_base_url(request: &Request) -> String {
+///
+/// Prefers the operator-configured `auth.oauth_pkce.public_url` so we don't
+/// trust forwarded headers from arbitrary callers — RFC 7644 endpoints are
+/// authenticated by a bearer token, but a misconfigured deployment could
+/// still let a client poison the `Location` URLs we mint by spoofing
+/// `X-Forwarded-Host`. The configured URL is authoritative when present;
+/// otherwise build from the server's bound host/port.
+fn get_base_url(state: &AppState, request: &Request) -> String {
+ if let Some(public_url) = state.config.auth.oauth_pkce.public_url.as_deref()
+ && !public_url.is_empty()
+ {
+ return format!("{}/scim/v2", public_url.trim_end_matches('/'));
+ }
+
+ // Fall back to whatever the request claims, then finally to localhost so
+ // a SCIM list response is at least syntactically valid in dev/test.
let scheme = request
.headers()
.get("x-forwarded-proto")
.and_then(|v| v.to_str().ok())
- .unwrap_or("https");
+ .unwrap_or_else(|| {
+ if state.config.server.tls.is_some() {
+ "https"
+ } else {
+ "http"
+ }
+ });
let host = request
.headers()
.get("x-forwarded-host")
.or_else(|| request.headers().get(header::HOST))
.and_then(|v| v.to_str().ok())
- .unwrap_or("localhost");
+ .map(str::to_string)
+ .unwrap_or_else(|| {
+ let server = &state.config.server;
+ if (scheme == "https" && server.port == 443) || (scheme == "http" && server.port == 80)
+ {
+ server.host.to_string()
+ } else {
+ format!("{}:{}", server.host, server.port)
+ }
+ });
format!("{}://{}/scim/v2", scheme, host)
}
@@ -123,7 +153,7 @@ pub async fn list_users(
Query(params): Query,
request: Request,
) -> Response {
- let base_url = get_base_url(&request);
+ let base_url = get_base_url(&state, &request);
let service = match get_provisioning_service(&state) {
Ok(s) => s,
Err(e) => return e.into_response(),
@@ -150,7 +180,7 @@ pub async fn create_user(
Extension(scim_auth): Extension,
request: Request,
) -> Response {
- let base_url = get_base_url(&request);
+ let base_url = get_base_url(&state, &request);
let service = match get_provisioning_service(&state) {
Ok(s) => s,
Err(e) => return e.into_response(),
@@ -199,7 +229,7 @@ pub async fn get_user(
Path(id): Path,
request: Request,
) -> Response {
- let base_url = get_base_url(&request);
+ let base_url = get_base_url(&state, &request);
let service = match get_provisioning_service(&state) {
Ok(s) => s,
Err(e) => return e.into_response(),
@@ -228,7 +258,7 @@ pub async fn replace_user(
Path(id): Path,
request: Request,
) -> Response {
- let base_url = get_base_url(&request);
+ let base_url = get_base_url(&state, &request);
let service = match get_provisioning_service(&state) {
Ok(s) => s,
Err(e) => return e.into_response(),
@@ -286,7 +316,7 @@ pub async fn patch_user(
Path(id): Path,
request: Request,
) -> Response {
- let base_url = get_base_url(&request);
+ let base_url = get_base_url(&state, &request);
let service = match get_provisioning_service(&state) {
Ok(s) => s,
Err(e) => return e.into_response(),
From b302de88cba60b2ee4c688639388c584ca771260 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:33:33 +1000
Subject: [PATCH 058/172] Replace per-token Markdown pre-tagging with
MutationObserver
---
ui/src/components/Markdown/Markdown.tsx | 35 +++++++++++++++++++++----
1 file changed, 30 insertions(+), 5 deletions(-)
diff --git a/ui/src/components/Markdown/Markdown.tsx b/ui/src/components/Markdown/Markdown.tsx
index 83eff81..a7c4ba2 100644
--- a/ui/src/components/Markdown/Markdown.tsx
+++ b/ui/src/components/Markdown/Markdown.tsx
@@ -26,15 +26,40 @@ export function Markdown({ content, className }: MarkdownProps) {
// Streamdown renders <pre> elements that we can't control directly.
// Post-render fixup: set tabIndex="0" on all <pre> children so keyboard
// users can scroll them (fixes axe-core scrollable-region-focusable).
+ //
+ // Use a MutationObserver instead of re-querying on every token: streaming
+ // content changes hundreds of times per response, and `querySelectorAll`
+ // walks the entire markdown subtree each call. The observer only fires
+ // when the DOM actually changes, and we only need to attribute newly
+ // mounted <pre> nodes.
useEffect(() => {
const container = containerRef.current;
if (!container) return;
- for (const pre of container.querySelectorAll("pre")) {
- if (!pre.hasAttribute("tabindex")) {
- pre.setAttribute("tabindex", "0");
+
+ const tagPre = (node: Element) => {
+ if (node.tagName === "PRE" && !node.hasAttribute("tabindex")) {
+ node.setAttribute("tabindex", "0");
+ }
+ for (const pre of node.querySelectorAll("pre")) {
+ if (!pre.hasAttribute("tabindex")) {
+ pre.setAttribute("tabindex", "0");
+ }
+ }
+ };
+ tagPre(container);
+
+ const observer = new MutationObserver((records) => {
+ for (const record of records) {
+ for (const node of record.addedNodes) {
+ if (node.nodeType === Node.ELEMENT_NODE) {
+ tagPre(node as Element);
+ }
+ }
}
- }
- }, [content]);
+ });
+ observer.observe(container, { childList: true, subtree: true });
+ return () => observer.disconnect();
+ }, []);
const mermaidOptions: MermaidOptions = {
config: {
From 670f439542e16c6bdb0a6c1ae09e2a1b39ace0c3 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:34:50 +1000
Subject: [PATCH 059/172] Generate a session secret in wizard-rendered IdP
configs
---
src/wizard.rs | 16 +++++++++++++++-
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/src/wizard.rs b/src/wizard.rs
index 03be401..93e314a 100644
--- a/src/wizard.rs
+++ b/src/wizard.rs
@@ -1068,7 +1068,10 @@ fn generate_config(mode: DeploymentMode, wizard_config: &WizardConfig) -> String
));
config.push('\n');
config.push_str("[auth.session]\n");
- config.push_str("secret = \"${SESSION_SECRET}\"\n");
+ config.push_str("# Sessions are signed with this 256-bit secret. Override via the\n");
+ config.push_str("# SESSION_SECRET env var in multi-replica setups so every node\n");
+ config.push_str("# accepts the others' cookies.\n");
+ config.push_str(&format!("secret = \"{}\"\n", generate_session_secret()));
config.push('\n');
}
}
@@ -1149,6 +1152,17 @@ fn escape_toml_string(s: &str) -> String {
s.replace('\\', "\\\\").replace('"', "\\\"")
}
+/// Generate a fresh 256-bit URL-safe base64 session-signing secret. Called
+/// from the wizard so a freshly-installed deployment has a stable secret
+/// without the operator having to remember to set `SESSION_SECRET`.
+fn generate_session_secret() -> String {
+ use base64::{Engine, engine::general_purpose::URL_SAFE_NO_PAD};
+ use rand::RngCore;
+ let mut bytes = [0u8; 32];
+ rand::thread_rng().fill_bytes(&mut bytes);
+ URL_SAFE_NO_PAD.encode(bytes)
+}
+
#[cfg(test)]
mod tests {
use super::*;
From 38103ebf227cfafb32947bccad0c30389f34f460 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:36:04 +1000
Subject: [PATCH 060/172] Validate branding colors, fonts, and favicon URL
before injecting
---
ui/src/config/ConfigProvider.tsx | 84 ++++++++++++++++++++++++--------
1 file changed, 63 insertions(+), 21 deletions(-)
diff --git a/ui/src/config/ConfigProvider.tsx b/ui/src/config/ConfigProvider.tsx
index c2515d5..a620d4e 100644
--- a/ui/src/config/ConfigProvider.tsx
+++ b/ui/src/config/ConfigProvider.tsx
@@ -14,43 +14,74 @@ const ConfigContext = createContext(null);
const BRANDING_STYLE_ID = "hadrian-branding-colors";
const BRANDING_FONTS_STYLE_ID = "hadrian-branding-fonts";
+/** Permissive color literal: hex, rgb()/hsl()/oklch()/var(), CSS keyword.
+ * Rejects anything containing CSS control chars (`{`, `}`, `;`, `<`, etc.)
+ * so a misconfigured branding payload can't break out of the rule and
+ * inject arbitrary CSS into the page. */
+const COLOR_RE = /^[a-zA-Z0-9#%(),.\s\-/_]+$/;
+
+function isSafeColor(value: string | undefined): value is string {
+ return typeof value === "string" && value.length > 0 && value.length < 200 && COLOR_RE.test(value);
+}
+
+/** Validate a font-family name. Quotes/braces/semicolons in here would let
+ * an attacker close the `font-family` declaration and inject other rules. */
+const FONT_NAME_RE = /^[a-zA-Z0-9 \-_]+$/;
+
+function isSafeFontName(value: string | undefined): value is string {
+ return (
+ typeof value === "string" && value.length > 0 && value.length < 100 && FONT_NAME_RE.test(value)
+ );
+}
+
+/** Only accept absolute https/data URLs for font sources. */
+function isSafeFontUrl(value: string | undefined): value is string {
+ if (typeof value !== "string" || value.length === 0 || value.length > 2048) return false;
+ try {
+ const url = new URL(value, window.location.origin);
+ return url.protocol === "https:" || url.protocol === "data:";
+ } catch {
+ return false;
+ }
+}
+
/**
* Generates CSS variable overrides from a color palette
*/
function generateColorCss(colors: ColorPalette, selector: string): string {
const rules: string[] = [];
- if (colors.primary) {
+ if (isSafeColor(colors.primary)) {
rules.push(`--color-primary: ${colors.primary};`);
rules.push(`--color-ring: ${colors.primary};`);
// Set accent-foreground to primary color for consistent branding on selected items
rules.push(`--color-accent-foreground: ${colors.primary};`);
}
- if (colors.primary_foreground) {
+ if (isSafeColor(colors.primary_foreground)) {
rules.push(`--color-primary-foreground: ${colors.primary_foreground};`);
- } else if (colors.primary) {
+ } else if (isSafeColor(colors.primary)) {
// Default to white if primary is set but primary_foreground is not
rules.push(`--color-primary-foreground: #ffffff;`);
}
- if (colors.secondary) {
+ if (isSafeColor(colors.secondary)) {
rules.push(`--color-secondary: ${colors.secondary};`);
}
- if (colors.secondary_foreground) {
+ if (isSafeColor(colors.secondary_foreground)) {
rules.push(`--color-secondary-foreground: ${colors.secondary_foreground};`);
}
- if (colors.accent) {
+ if (isSafeColor(colors.accent)) {
rules.push(`--color-accent: ${colors.accent};`);
}
- if (colors.background) {
+ if (isSafeColor(colors.background)) {
rules.push(`--color-background: ${colors.background};`);
}
- if (colors.foreground) {
+ if (isSafeColor(colors.foreground)) {
rules.push(`--color-foreground: ${colors.foreground};`);
}
- if (colors.muted) {
+ if (isSafeColor(colors.muted)) {
rules.push(`--color-muted: ${colors.muted};`);
}
- if (colors.border) {
+ if (isSafeColor(colors.border)) {
rules.push(`--color-border: ${colors.border};`);
rules.push(`--color-input: ${colors.border};`);
}
@@ -82,19 +113,30 @@ function injectBrandingColors(colors: ColorPalette, colorsDark: ColorPalette | n
}
/**
- * Generates @font-face rules for custom fonts
+ * Generates @font-face rules for custom fonts. Skips entries whose name or URL
+ * fails validation; an invalid entry is logged and dropped rather than
+ * inlined verbatim into the stylesheet (where it could break out of the rule).
*/
function generateFontFaceRules(customFonts: CustomFont[]): string {
return customFonts
- .map(
- (font) => `@font-face {
+ .filter((font) => {
+ const ok = isSafeFontName(font.name) && isSafeFontUrl(font.url);
+ if (!ok) {
+ console.warn("Ignoring branded custom font with unsafe name or URL", font);
+ }
+ return ok;
+ })
+ .map((font) => {
+ const weight = Number.isFinite(Number(font.weight)) ? Number(font.weight) : 400;
+ const style = font.style === "italic" || font.style === "oblique" ? font.style : "normal";
+ return `@font-face {
font-family: "${font.name}";
src: url("${font.url}");
- font-weight: ${font.weight};
- font-style: ${font.style};
+ font-weight: ${weight};
+ font-style: ${style};
font-display: swap;
-}`
- )
+}`;
+ })
.join("\n\n");
}
@@ -110,13 +152,13 @@ function generateFontCss(fonts: FontsConfig): string {
const monoStack =
'ui-monospace, SFMono-Regular, "SF Mono", Menlo, Monaco, Consolas, "Liberation Mono", monospace';
- if (fonts.body) {
+ if (isSafeFontName(fonts.body)) {
rules.push(`--font-sans: "${fonts.body}", ${sansStack};`);
}
- if (fonts.heading) {
+ if (isSafeFontName(fonts.heading)) {
rules.push(`--font-heading: "${fonts.heading}", ${sansStack};`);
}
- if (fonts.mono) {
+ if (isSafeFontName(fonts.mono)) {
rules.push(`--font-mono: "${fonts.mono}", ${monoStack};`);
}
@@ -190,7 +232,7 @@ export function ConfigProvider({ children }: ConfigProviderProps) {
// Update document title, favicon, colors, and fonts based on config
useEffect(() => {
document.title = config.branding.title;
- if (config.branding.favicon_url) {
+ if (config.branding.favicon_url && isSafeFontUrl(config.branding.favicon_url)) {
const favicon = document.querySelector('link[rel="icon"]');
if (favicon) {
favicon.href = config.branding.favicon_url;
From 57690a94780ef44bd12510e360d192c21488e9f4 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:37:09 +1000
Subject: [PATCH 061/172] Record cache_operation error metric on semantic-match
lookup failure
---
src/cache/semantic_cache.rs | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/cache/semantic_cache.rs b/src/cache/semantic_cache.rs
index 7838583..1acdb90 100644
--- a/src/cache/semantic_cache.rs
+++ b/src/cache/semantic_cache.rs
@@ -337,6 +337,7 @@ impl SemanticCache {
);
}
Err(e) => {
+ metrics::record_cache_operation("semantic", "get", "error");
tracing::warn!(
matched_key = %best_match.metadata.cache_key,
error = %e,
From 28dd9fe1277b4c2dc2e89e4d79b04bb734d7ff35 Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sat, 25 Apr 2026 23:57:06 +1000
Subject: [PATCH 062/172] Drop inner stream when IdleTimeoutStream times out to
release upstream resources
---
src/streaming/mod.rs | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/src/streaming/mod.rs b/src/streaming/mod.rs
index 726ed3c..5178461 100644
--- a/src/streaming/mod.rs
+++ b/src/streaming/mod.rs
@@ -45,13 +45,13 @@ pub struct IdleTimeoutError(Duration);
/// The timeout resets after each successful chunk, so long-running streams
/// that are actively producing data will not timeout.
pub struct IdleTimeoutStream<S> {
- inner: S,
+ /// `None` once the stream has terminated, dropping the inner stream so any
+ /// upstream resources (sockets, channels) are released immediately.
+ inner: Option<S>,
timeout: Duration,
/// Sleep future for the current timeout period.
/// Pinned because Sleep requires pinning.
sleep: Pin<Box<Sleep>>,
- /// Whether the stream has already timed out or ended
- terminated: bool,
}
impl<S> IdleTimeoutStream<S>
@@ -63,10 +63,9 @@ where
/// If `timeout` is zero, the wrapper is effectively a no-op pass-through.
pub fn new(inner: S, timeout: Duration) -> Self {
Self {
- inner,
+ inner: Some(inner),
timeout,
sleep: Box::pin(tokio::time::sleep(timeout)),
- terminated: false,
}
}
@@ -84,17 +83,18 @@ where
type Item = Result;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
);
}
From ff9427790c355fe4dd1ef15e85cd000b3fc75e7d Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sun, 26 Apr 2026 00:44:51 +1000
Subject: [PATCH 079/172] Accept optional zod schema in useLocalStorage to
validate cross-tab writes
---
ui/src/hooks/useLocalStorage.ts | 46 +++++++++++++++++++++++----------
1 file changed, 32 insertions(+), 14 deletions(-)
diff --git a/ui/src/hooks/useLocalStorage.ts b/ui/src/hooks/useLocalStorage.ts
index 16abe7a..a909113 100644
--- a/ui/src/hooks/useLocalStorage.ts
+++ b/ui/src/hooks/useLocalStorage.ts
@@ -1,4 +1,5 @@
import { useState, useEffect, useCallback } from "react";
+import type { ZodType } from "zod";
// `storage` events only fire in *other* tabs. To keep multiple hook instances
// of the same key inside the same tab in sync, mirror writes onto a custom
@@ -10,20 +11,41 @@ interface SameTabPayload {
newValue: string | null;
}
+/**
+ * Persist state to `localStorage` with same-tab and cross-tab sync.
+ *
+ * Pass an optional zod `schema` to validate values arriving from
+ * `localStorage` (initial read, `storage` events, same-tab broadcasts).
+ * Anything that fails validation is discarded — without a schema, a
+ * malicious or stale tab could write any JSON-shaped value into the key
+ * and surface it as a typed `T`. Callers handling user-controlled keys
+ * (auth tokens, preferences, settings) should always supply a schema.
+ */
export function useLocalStorage<T>(
key: string,
- initialValue: T
+ initialValue: T,
+ schema?: ZodType<T>
): [T, (value: T | ((prev: T) => T)) => void] {
+ const parse = useCallback(
+ (raw: string | null): T | undefined => {
+ if (raw === null) return undefined;
+ try {
+ const parsed: unknown = JSON.parse(raw);
+ if (!schema) return parsed as T;
+ const result = schema.safeParse(parsed);
+ return result.success ? result.data : undefined;
+ } catch {
+ return undefined;
+ }
+ },
+ [schema]
+ );
+
const [storedValue, setStoredValue] = useState<T>(() => {
if (typeof window === "undefined") {
return initialValue;
}
- try {
- const item = window.localStorage.getItem(key);
- return item ? (JSON.parse(item) as T) : initialValue;
- } catch {
- return initialValue;
- }
+ return parse(window.localStorage.getItem(key)) ?? initialValue;
});
const setValue = useCallback(
@@ -47,12 +69,8 @@ export function useLocalStorage(
useEffect(() => {
const apply = (newValue: string | null) => {
- if (newValue === null) return;
- try {
- setStoredValue(JSON.parse(newValue) as T);
- } catch {
- // Ignore parse errors
- }
+ const next = parse(newValue);
+ if (next !== undefined) setStoredValue(next);
};
const handleStorageChange = (e: StorageEvent) => {
@@ -69,7 +87,7 @@ export function useLocalStorage(
window.removeEventListener("storage", handleStorageChange);
window.removeEventListener(SAME_TAB_EVENT, handleSameTabChange);
};
- }, [key]);
+ }, [key, parse]);
return [storedValue, setValue];
}
From f31b16cb72497edf94aac666bc3048feb0d630ea Mon Sep 17 00:00:00 2001
From: ScriptSmith
Date: Sun, 26 Apr 2026 00:45:27 +1000
Subject: [PATCH 080/172] Replace per-token streaming aria-live with hidden
status region announcement
---
ui/src/components/ChatMessage/ChatMessage.tsx | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/ui/src/components/ChatMessage/ChatMessage.tsx b/ui/src/components/ChatMessage/ChatMessage.tsx
index 8603569..74f2336 100644
--- a/ui/src/components/ChatMessage/ChatMessage.tsx
+++ b/ui/src/components/ChatMessage/ChatMessage.tsx
@@ -289,9 +289,14 @@ function ChatMessageComponent({
)}
+ {/* Streaming status announcement. Marking the whole content div as
+ `aria-live="polite"` floods screen readers with every token —
+ this hidden status region instead announces start/finish only. */}
+ <div role="status" aria-live="polite" className="sr-only">
+ {isStreaming ? "Assistant is responding" : ""}
+ </div>