committed by
GitHub
18 changed files with 1019 additions and 10 deletions
@ -0,0 +1,120 @@ |
|||
use rocket::{ |
|||
http::Status, |
|||
request::{FromRequest, Outcome, Request}, |
|||
response::content::RawText, |
|||
Route, |
|||
}; |
|||
|
|||
use crate::{auth::ClientIp, db::DbConn, CONFIG}; |
|||
|
|||
use log::error; |
|||
|
|||
// Metrics endpoint routes
|
|||
pub fn routes() -> Vec<Route> { |
|||
if CONFIG.enable_metrics() { |
|||
routes![get_metrics] |
|||
} else { |
|||
Vec::new() |
|||
} |
|||
} |
|||
|
|||
// Metrics authentication token guard
|
|||
// Request guard proving the caller is authorized to read metrics
// (valid token presented, or no token configured). The client IP is
// retained for audit logging.
#[allow(dead_code)]
pub struct MetricsToken {
    ip: ClientIp,
}
|||
|
|||
#[rocket::async_trait]
impl<'r> FromRequest<'r> for MetricsToken {
    type Error = &'static str;

    // Authorize access to the metrics endpoint.
    //
    // Resolution order:
    //   1. Resolve the client IP (needed for audit logging on failures).
    //   2. If no METRICS_TOKEN is configured, access is open.
    //   3. Otherwise accept a token from `Authorization: Bearer <token>`
    //      or from a `?token=` query parameter, and validate it.
    async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
        let ip = match ClientIp::from_request(request).await {
            Outcome::Success(ip) => ip,
            _ => return Outcome::Error((Status::InternalServerError, "Error getting Client IP")),
        };

        // If no metrics token is configured, allow access
        let Some(configured_token) = CONFIG.metrics_token() else {
            return Outcome::Success(Self {
                ip,
            });
        };

        // Check for token in Authorization header or query parameter
        let provided_token = request
            .headers()
            .get_one("Authorization")
            .and_then(|auth| auth.strip_prefix("Bearer "))
            .or_else(|| request.query_value::<&str>("token").and_then(|result| result.ok()));

        match provided_token {
            Some(token) => {
                if validate_metrics_token(token, &configured_token) {
                    Outcome::Success(Self {
                        ip,
                    })
                } else {
                    // Log the source IP of the failed attempt for auditing.
                    error!("Invalid metrics token. IP: {}", ip.ip);
                    Outcome::Error((Status::Unauthorized, "Invalid metrics token"))
                }
            }
            None => {
                error!("Missing metrics token. IP: {}", ip.ip);
                Outcome::Error((Status::Unauthorized, "Metrics token required"))
            }
        }
    }
}
|||
|
|||
fn validate_metrics_token(provided: &str, configured: &str) -> bool { |
|||
if configured.starts_with("$argon2") { |
|||
use argon2::password_hash::PasswordVerifier; |
|||
match argon2::password_hash::PasswordHash::new(configured) { |
|||
Ok(hash) => argon2::Argon2::default().verify_password(provided.trim().as_bytes(), &hash).is_ok(), |
|||
Err(e) => { |
|||
error!("Invalid Argon2 PHC in METRICS_TOKEN: {e}"); |
|||
false |
|||
} |
|||
} |
|||
} else { |
|||
crate::crypto::ct_eq(configured.trim(), provided.trim()) |
|||
} |
|||
} |
|||
|
|||
/// Prometheus metrics endpoint
///
/// Guarded by `MetricsToken` (see above). Refreshes the DB-derived
/// business metrics first, then returns every registered metric in the
/// Prometheus text exposition format.
#[get("/")]
async fn get_metrics(_token: MetricsToken, mut conn: DbConn) -> Result<RawText<String>, Status> {
    // Update business metrics from database
    if let Err(e) = crate::metrics::update_business_metrics(&mut conn).await {
        error!("Failed to update business metrics: {e}");
        return Err(Status::InternalServerError);
    }

    // Gather all Prometheus metrics
    match crate::metrics::gather_metrics() {
        Ok(metrics) => Ok(RawText(metrics)),
        Err(e) => {
            error!("Failed to gather metrics: {e}");
            Err(Status::InternalServerError)
        }
    }
}
|||
|
|||
/// Health check endpoint that also updates some basic metrics
///
/// Records process uptime (measured from the first call to this function,
/// not actual process start — NOTE(review): confirm this approximation is
/// acceptable) and placeholder DB-connection gauges.
#[cfg(feature = "enable_metrics")]
pub async fn update_health_metrics(_conn: &mut DbConn) {
    // Update basic system metrics
    use std::time::SystemTime;
    // Lazily captured on first invocation; serves as the uptime baseline.
    static START_TIME: std::sync::OnceLock<SystemTime> = std::sync::OnceLock::new();
    let start_time = *START_TIME.get_or_init(SystemTime::now);

    crate::metrics::update_uptime(start_time);

    // Update database connection metrics
    // Note: This is a simplified version - in production you'd want to get actual pool stats
    crate::metrics::update_db_connections("main", 1, 0);
}
|||
|
|||
// No-op when the `enable_metrics` feature is compiled out; keeps callers
// feature-agnostic.
#[cfg(not(feature = "enable_metrics"))]
pub async fn update_health_metrics(_conn: &mut DbConn) {}
|||
@ -0,0 +1,105 @@ |
|||
/// Metrics middleware for automatic HTTP request instrumentation
|
|||
use rocket::{ |
|||
fairing::{Fairing, Info, Kind}, |
|||
Data, Request, Response, |
|||
}; |
|||
use std::time::Instant; |
|||
|
|||
// Rocket fairing recording count and latency for every handled request.
pub struct MetricsFairing;
|||
|
|||
#[rocket::async_trait]
impl Fairing for MetricsFairing {
    fn info(&self) -> Info {
        Info {
            name: "Metrics Collection",
            // Hook both phases: request start (timer) and response (record).
            kind: Kind::Request | Kind::Response,
        }
    }

    // Stash a start timestamp in the request-local cache.
    async fn on_request(&self, req: &mut Request<'_>, _: &mut Data<'_>) {
        req.local_cache(|| RequestTimer {
            start_time: Instant::now(),
        });
    }

    // Record request count and duration once the response is ready.
    async fn on_response<'r>(&self, req: &'r Request<'_>, res: &mut Response<'r>) {
        // If on_request did not run for some reason, this initializes the
        // timer now and the measured duration is ~0.
        let timer = req.local_cache(|| RequestTimer {
            start_time: Instant::now(),
        });
        let duration = timer.start_time.elapsed();
        let method = req.method().as_str();
        // Normalized to keep metric label cardinality bounded.
        let path = normalize_path(req.uri().path().as_str());
        let status = res.status().code;

        // Record metrics
        crate::metrics::increment_http_requests(method, &path, status);
        crate::metrics::observe_http_request_duration(method, &path, duration.as_secs_f64());
    }
}
|||
|
|||
// Request-local state: instant at which the request entered the fairing.
struct RequestTimer {
    start_time: Instant,
}
|||
|
|||
/// Normalize paths to avoid high cardinality metrics
/// Convert dynamic segments to static labels
///
/// Replacement rules per segment:
///   * UUIDs                 -> `{id}`
///   * all-digit ids         -> `{number}`
///   * long hex strings >10  -> `{hash}`
fn normalize_path(path: &str) -> String {
    let segments: Vec<&str> = path.split('/').collect();
    let mut normalized = Vec::new();

    for segment in segments {
        if segment.is_empty() {
            continue;
        }

        // Common patterns in Vaultwarden routes
        let normalized_segment = if is_uuid(segment) {
            "{id}"
        } else if segment.chars().all(|c| c.is_ascii_digit()) {
            // Checked BEFORE the hex rule: every digit string is also a
            // valid hex string, so long numeric ids (>10 digits) would
            // otherwise be misclassified as `{hash}`.
            "{number}"
        } else if segment.chars().all(|c| c.is_ascii_hexdigit()) && segment.len() > 10 {
            "{hash}"
        } else {
            segment
        };

        normalized.push(normalized_segment);
    }

    if normalized.is_empty() {
        "/".to_string()
    } else {
        format!("/{}", normalized.join("/"))
    }
}

/// Check if a string looks like a UUID
/// (8-4-4-4-12 hex groups separated by dashes, 36 chars total).
fn is_uuid(s: &str) -> bool {
    s.len() == 36
        && s.chars().enumerate().all(|(i, c)| match i {
            8 | 13 | 18 | 23 => c == '-',
            _ => c.is_ascii_hexdigit(),
        })
}
|||
|
|||
#[cfg(test)]
mod tests {
    use super::*;

    // Covers each normalization rule: static path, UUID, hex hash,
    // short numeric id, and the root path.
    #[test]
    fn test_normalize_path() {
        assert_eq!(normalize_path("/api/accounts"), "/api/accounts");
        assert_eq!(normalize_path("/api/accounts/12345678-1234-5678-9012-123456789012"), "/api/accounts/{id}");
        assert_eq!(normalize_path("/attachments/abc123def456"), "/attachments/{hash}");
        assert_eq!(normalize_path("/api/organizations/123"), "/api/organizations/{number}");
        assert_eq!(normalize_path("/"), "/");
    }

    // Positive and negative UUID shapes (wrong chars, missing dashes,
    // wrong length).
    #[test]
    fn test_is_uuid() {
        assert!(is_uuid("12345678-1234-5678-9012-123456789012"));
        assert!(!is_uuid("not-a-uuid"));
        assert!(!is_uuid("12345678123456781234567812345678")); // No dashes
        assert!(!is_uuid("123")); // Too short
    }
}
|||
@ -0,0 +1,80 @@ |
|||
#![allow(dead_code, unused_imports)] |
|||
/// Database metrics collection utilities
|
|||
|
|||
use std::time::Instant; |
|||
|
|||
/// Database operation tracker for metrics
|
|||
pub struct DbOperationTimer { |
|||
start_time: Instant, |
|||
operation: String, |
|||
} |
|||
|
|||
impl DbOperationTimer { |
|||
pub fn new(operation: &str) -> Self { |
|||
Self { |
|||
start_time: Instant::now(), |
|||
operation: operation.to_string(), |
|||
} |
|||
} |
|||
|
|||
pub fn finish(self) { |
|||
let duration = self.start_time.elapsed(); |
|||
crate::metrics::observe_db_query_duration(&self.operation, duration.as_secs_f64()); |
|||
} |
|||
} |
|||
|
|||
/// Macro to instrument database operations
///
/// Usage: `db_metric!("select", { run_query() })`. Times the block and
/// records its duration under the given operation label, evaluating to the
/// block's value. When the `enable_metrics` feature is off, the block runs
/// untimed with zero overhead (the timer lines are compiled out).
#[macro_export]
macro_rules! db_metric {
    ($operation:expr, $code:block) => {{
        #[cfg(feature = "enable_metrics")]
        let timer = crate::db::metrics::DbOperationTimer::new($operation);

        let result = $code;

        #[cfg(feature = "enable_metrics")]
        timer.finish();

        result
    }};
}
|||
|
|||
/// Track database connection pool statistics
///
/// Publishes active/idle connection gauges labelled by backend type
/// (sqlite / mysql / postgresql). Currently uses placeholder values, not
/// real pool statistics; no-op when metrics are disabled.
pub async fn update_pool_metrics(_pool: &crate::db::DbPool) {
    #[cfg(feature = "enable_metrics")]
    {
        // Note: This is a simplified implementation
        // In a real implementation, you'd want to get actual pool statistics
        // from the connection pool (r2d2 provides some stats)

        // For now, we'll just update with basic info
        let db_type = crate::db::DbConnType::from_url(&crate::CONFIG.database_url())
            .map(|t| match t {
                crate::db::DbConnType::sqlite => "sqlite",
                crate::db::DbConnType::mysql => "mysql",
                crate::db::DbConnType::postgresql => "postgresql",
            })
            .unwrap_or("unknown");

        // These would be actual pool statistics in a real implementation
        let active_connections = 1; // placeholder
        // Idle = configured maximum minus the (placeholder) active count.
        let idle_connections = crate::CONFIG.database_max_conns() as i64 - active_connections;

        crate::metrics::update_db_connections(db_type, active_connections, idle_connections);
    }
}
|||
|
|||
#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;
    use std::time::Duration;

    // Smoke test: the timer API runs start-to-finish without panicking.
    #[test]
    fn test_db_operation_timer() {
        let timer = DbOperationTimer::new("test_query");
        thread::sleep(Duration::from_millis(1));
        timer.finish();
        // In a real test, we'd verify the metric was recorded
    }
}
|||
@ -0,0 +1,319 @@ |
|||
#![allow(dead_code, unused_imports)] |
|||
|
|||
use std::time::SystemTime; |
|||
|
|||
#[cfg(feature = "enable_metrics")] |
|||
use once_cell::sync::Lazy; |
|||
#[cfg(feature = "enable_metrics")] |
|||
use prometheus::{ |
|||
register_gauge_vec, register_histogram_vec, register_int_counter_vec, register_int_gauge_vec, Encoder, GaugeVec, |
|||
HistogramVec, IntCounterVec, IntGaugeVec, TextEncoder, |
|||
}; |
|||
|
|||
use crate::{db::DbConn, error::Error, CONFIG}; |
|||
#[cfg(feature = "enable_metrics")] |
|||
use std::sync::{Arc, RwLock}; |
|||
#[cfg(feature = "enable_metrics")] |
|||
use std::time::UNIX_EPOCH; |
|||
|
|||
// HTTP request metrics
|
|||
#[cfg(feature = "enable_metrics")] |
|||
static HTTP_REQUESTS_TOTAL: Lazy<IntCounterVec> = Lazy::new(|| { |
|||
register_int_counter_vec!( |
|||
"vaultwarden_http_requests_total", |
|||
"Total number of HTTP requests processed", |
|||
&["method", "path", "status"] |
|||
) |
|||
.unwrap() |
|||
}); |
|||
|
|||
#[cfg(feature = "enable_metrics")] |
|||
static HTTP_REQUEST_DURATION_SECONDS: Lazy<HistogramVec> = Lazy::new(|| { |
|||
register_histogram_vec!( |
|||
"vaultwarden_http_request_duration_seconds", |
|||
"HTTP request duration in seconds", |
|||
&["method", "path"], |
|||
vec![0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0] |
|||
) |
|||
.unwrap() |
|||
}); |
|||
|
|||
// Database metrics
|
|||
#[cfg(feature = "enable_metrics")] |
|||
static DB_CONNECTIONS_ACTIVE: Lazy<IntGaugeVec> = Lazy::new(|| { |
|||
register_int_gauge_vec!("vaultwarden_db_connections_active", "Number of active database connections", &["database"]) |
|||
.unwrap() |
|||
}); |
|||
|
|||
#[cfg(feature = "enable_metrics")] |
|||
static DB_CONNECTIONS_IDLE: Lazy<IntGaugeVec> = Lazy::new(|| { |
|||
register_int_gauge_vec!("vaultwarden_db_connections_idle", "Number of idle database connections", &["database"]) |
|||
.unwrap() |
|||
}); |
|||
|
|||
#[cfg(feature = "enable_metrics")] |
|||
static DB_QUERY_DURATION_SECONDS: Lazy<HistogramVec> = Lazy::new(|| { |
|||
register_histogram_vec!( |
|||
"vaultwarden_db_query_duration_seconds", |
|||
"Database query duration in seconds", |
|||
&["operation"], |
|||
vec![0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0] |
|||
) |
|||
.unwrap() |
|||
}); |
|||
|
|||
// Authentication metrics
|
|||
#[cfg(feature = "enable_metrics")] |
|||
static AUTH_ATTEMPTS_TOTAL: Lazy<IntCounterVec> = Lazy::new(|| { |
|||
register_int_counter_vec!( |
|||
"vaultwarden_auth_attempts_total", |
|||
"Total number of authentication attempts", |
|||
&["method", "status"] |
|||
) |
|||
.unwrap() |
|||
}); |
|||
|
|||
#[cfg(feature = "enable_metrics")] |
|||
static USER_SESSIONS_ACTIVE: Lazy<IntGaugeVec> = Lazy::new(|| { |
|||
register_int_gauge_vec!("vaultwarden_user_sessions_active", "Number of active user sessions", &["user_type"]) |
|||
.unwrap() |
|||
}); |
|||
|
|||
// Business metrics
|
|||
#[cfg(feature = "enable_metrics")] |
|||
static USERS_TOTAL: Lazy<IntGaugeVec> = |
|||
Lazy::new(|| register_int_gauge_vec!("vaultwarden_users_total", "Total number of users", &["status"]).unwrap()); |
|||
|
|||
#[cfg(feature = "enable_metrics")] |
|||
static ORGANIZATIONS_TOTAL: Lazy<IntGaugeVec> = Lazy::new(|| { |
|||
register_int_gauge_vec!("vaultwarden_organizations_total", "Total number of organizations", &["status"]).unwrap() |
|||
}); |
|||
|
|||
#[cfg(feature = "enable_metrics")] |
|||
static VAULT_ITEMS_TOTAL: Lazy<IntGaugeVec> = Lazy::new(|| { |
|||
register_int_gauge_vec!("vaultwarden_vault_items_total", "Total number of vault items", &["type", "organization"]) |
|||
.unwrap() |
|||
}); |
|||
|
|||
#[cfg(feature = "enable_metrics")] |
|||
static COLLECTIONS_TOTAL: Lazy<IntGaugeVec> = Lazy::new(|| { |
|||
register_int_gauge_vec!("vaultwarden_collections_total", "Total number of collections", &["organization"]).unwrap() |
|||
}); |
|||
|
|||
// System metrics
|
|||
#[cfg(feature = "enable_metrics")] |
|||
static UPTIME_SECONDS: Lazy<GaugeVec> = |
|||
Lazy::new(|| register_gauge_vec!("vaultwarden_uptime_seconds", "Uptime in seconds", &["version"]).unwrap()); |
|||
|
|||
#[cfg(feature = "enable_metrics")] |
|||
static BUILD_INFO: Lazy<IntGaugeVec> = Lazy::new(|| { |
|||
register_int_gauge_vec!("vaultwarden_build_info", "Build information", &["version", "revision", "branch"]).unwrap() |
|||
}); |
|||
|
|||
/// Increment HTTP request counter
|
|||
#[cfg(feature = "enable_metrics")] |
|||
pub fn increment_http_requests(method: &str, path: &str, status: u16) { |
|||
HTTP_REQUESTS_TOTAL.with_label_values(&[method, path, &status.to_string()]).inc(); |
|||
} |
|||
|
|||
/// Observe HTTP request duration
|
|||
#[cfg(feature = "enable_metrics")] |
|||
pub fn observe_http_request_duration(method: &str, path: &str, duration_seconds: f64) { |
|||
HTTP_REQUEST_DURATION_SECONDS.with_label_values(&[method, path]).observe(duration_seconds); |
|||
} |
|||
|
|||
/// Update database connection metrics
|
|||
#[cfg(feature = "enable_metrics")] |
|||
pub fn update_db_connections(database: &str, active: i64, idle: i64) { |
|||
DB_CONNECTIONS_ACTIVE.with_label_values(&[database]).set(active); |
|||
DB_CONNECTIONS_IDLE.with_label_values(&[database]).set(idle); |
|||
} |
|||
|
|||
/// Observe database query duration
|
|||
#[cfg(feature = "enable_metrics")] |
|||
pub fn observe_db_query_duration(operation: &str, duration_seconds: f64) { |
|||
DB_QUERY_DURATION_SECONDS.with_label_values(&[operation]).observe(duration_seconds); |
|||
} |
|||
|
|||
/// Increment authentication attempts
|
|||
#[cfg(feature = "enable_metrics")] |
|||
pub fn increment_auth_attempts(method: &str, status: &str) { |
|||
AUTH_ATTEMPTS_TOTAL.with_label_values(&[method, status]).inc(); |
|||
} |
|||
|
|||
/// Update active user sessions
|
|||
#[cfg(feature = "enable_metrics")] |
|||
pub fn update_user_sessions(user_type: &str, count: i64) { |
|||
USER_SESSIONS_ACTIVE.with_label_values(&[user_type]).set(count); |
|||
} |
|||
|
|||
/// Cached business metrics data
|
|||
#[cfg(feature = "enable_metrics")] |
|||
#[derive(Clone)] |
|||
struct BusinessMetricsCache { |
|||
timestamp: u64, |
|||
users_enabled: i64, |
|||
users_disabled: i64, |
|||
organizations: i64, |
|||
vault_counts: std::collections::HashMap<(String, String), i64>, |
|||
collection_counts: std::collections::HashMap<String, i64>, |
|||
} |
|||
|
|||
#[cfg(feature = "enable_metrics")] |
|||
static BUSINESS_METRICS_CACHE: Lazy<RwLock<Option<BusinessMetricsCache>>> = Lazy::new(|| RwLock::new(None)); |
|||
|
|||
/// Check if business metrics cache is still valid
///
/// Returns true when a cached snapshot exists and is younger than the
/// configured `metrics_business_cache_seconds` timeout. A poisoned lock is
/// treated as "invalid" so metrics are recomputed from the database.
#[cfg(feature = "enable_metrics")]
fn is_cache_valid() -> bool {
    let cache_timeout = CONFIG.metrics_business_cache_seconds();
    if let Ok(cache) = BUSINESS_METRICS_CACHE.read() {
        if let Some(ref cached) = *cache {
            let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
            // saturating_sub: if the system clock moves backwards between the
            // cache write and this read, `now - cached.timestamp` would
            // underflow (panicking in debug builds); treat it as age 0.
            return now.saturating_sub(cached.timestamp) < cache_timeout;
        }
    }
    false
}
|||
|
|||
/// Update cached business metrics
///
/// Stores the snapshot behind the global RwLock. A poisoned lock is
/// silently ignored: the cache is a best-effort optimization and metrics
/// will simply be recomputed from the database on the next scrape.
#[cfg(feature = "enable_metrics")]
fn update_cached_metrics(cache: BusinessMetricsCache) {
    if let Ok(mut guard) = BUSINESS_METRICS_CACHE.write() {
        guard.replace(cache);
    }
}
|||
|
|||
/// Apply cached metrics to Prometheus gauges
#[cfg(feature = "enable_metrics")]
fn apply_cached_metrics(cache: &BusinessMetricsCache) {
    // User counts, split by enabled/disabled status.
    USERS_TOTAL.with_label_values(&["enabled"]).set(cache.users_enabled);
    USERS_TOTAL.with_label_values(&["disabled"]).set(cache.users_disabled);
    ORGANIZATIONS_TOTAL.with_label_values(&["active"]).set(cache.organizations);

    // Vault items, labelled by cipher type and owning organization.
    for ((cipher_type, org_label), count) in &cache.vault_counts {
        VAULT_ITEMS_TOTAL.with_label_values(&[cipher_type, org_label]).set(*count);
    }

    // Collections per organization.
    for (org_id, count) in &cache.collection_counts {
        COLLECTIONS_TOTAL.with_label_values(&[org_id]).set(*count);
    }
}
|||
|
|||
/// Update business metrics from database (with caching)
///
/// Refreshes user / organization / vault-item / collection gauges from the
/// database. Results are cached for `metrics_business_cache_seconds` so
/// frequent scrapes do not repeatedly query the database.
#[cfg(feature = "enable_metrics")]
pub async fn update_business_metrics(conn: &mut DbConn) -> Result<(), Error> {
    // Check if cache is still valid
    if is_cache_valid() {
        // Apply cached metrics without DB query
        if let Ok(cache) = BUSINESS_METRICS_CACHE.read() {
            if let Some(ref cached) = *cache {
                apply_cached_metrics(cached);
                return Ok(());
            }
        }
    }

    use crate::db::models::*;
    use std::collections::HashMap;

    // Count users
    let enabled_users = User::count_enabled(conn).await;
    let disabled_users = User::count_disabled(conn).await;

    // Count organizations
    let organizations_vec = Organization::get_all(conn).await;
    let active_orgs = organizations_vec.len() as i64;

    // Count vault items by type and organization
    let vault_counts = Cipher::count_by_type_and_org(conn).await;

    // Count collections per organization
    // NOTE(review): one query per organization — O(n) DB round-trips. Fine
    // for small installs; consider a single grouped query for large ones.
    let mut collection_counts: HashMap<String, i64> = HashMap::new();
    for org in &organizations_vec {
        let count = Collection::count_by_org(&org.uuid, conn).await;
        collection_counts.insert(org.uuid.to_string(), count);
    }

    // Create cache entry
    let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    let cache = BusinessMetricsCache {
        timestamp: now,
        users_enabled: enabled_users,
        users_disabled: disabled_users,
        organizations: active_orgs,
        vault_counts,
        collection_counts,
    };

    // Update cache and apply metrics
    update_cached_metrics(cache.clone());
    apply_cached_metrics(&cache);

    Ok(())
}
|||
|
|||
/// Initialize build info metrics
///
/// Sets the build-info gauge to 1 with the version label. Revision and
/// branch are not captured at build time here, so "unknown" is used.
#[cfg(feature = "enable_metrics")]
pub fn init_build_info() {
    let version = crate::VERSION.unwrap_or("unknown");
    BUILD_INFO.with_label_values(&[version, "unknown", "unknown"]).set(1);
}
|||
|
|||
/// Update system uptime
///
/// `elapsed()` fails if the clock has moved backwards past `start_time`;
/// in that case the gauge is left at its previous value.
#[cfg(feature = "enable_metrics")]
pub fn update_uptime(start_time: SystemTime) {
    if let Ok(elapsed) = start_time.elapsed() {
        let version = crate::VERSION.unwrap_or("unknown");
        UPTIME_SECONDS.with_label_values(&[version]).set(elapsed.as_secs_f64());
    }
}
|||
|
|||
/// Gather all metrics and return as Prometheus text format
///
/// Encodes every metric registered in the default Prometheus registry into
/// the text exposition format. Errors from encoding or UTF-8 conversion are
/// wrapped into the crate's `Error` type.
#[cfg(feature = "enable_metrics")]
pub fn gather_metrics() -> Result<String, Error> {
    let mut buffer = Vec::new();
    TextEncoder::new()
        .encode(&prometheus::gather(), &mut buffer)
        .map_err(|e| Error::new(format!("Failed to encode metrics: {}", e), ""))?;
    String::from_utf8(buffer).map_err(|e| Error::new(format!("Failed to convert metrics to string: {}", e), ""))
}
|||
|
|||
// No-op implementations when metrics are disabled
|
|||
#[cfg(not(feature = "enable_metrics"))] |
|||
pub fn increment_http_requests(_method: &str, _path: &str, _status: u16) {} |
|||
|
|||
#[cfg(not(feature = "enable_metrics"))] |
|||
pub fn observe_http_request_duration(_method: &str, _path: &str, _duration_seconds: f64) {} |
|||
|
|||
#[cfg(not(feature = "enable_metrics"))] |
|||
pub fn update_db_connections(_database: &str, _active: i64, _idle: i64) {} |
|||
|
|||
#[cfg(not(feature = "enable_metrics"))] |
|||
pub fn observe_db_query_duration(_operation: &str, _duration_seconds: f64) {} |
|||
|
|||
#[cfg(not(feature = "enable_metrics"))] |
|||
pub fn increment_auth_attempts(_method: &str, _status: &str) {} |
|||
|
|||
#[cfg(not(feature = "enable_metrics"))] |
|||
pub fn update_user_sessions(_user_type: &str, _count: i64) {} |
|||
|
|||
#[cfg(not(feature = "enable_metrics"))] |
|||
pub async fn update_business_metrics(_conn: &mut DbConn) -> Result<(), Error> { |
|||
Ok(()) |
|||
} |
|||
|
|||
#[cfg(not(feature = "enable_metrics"))] |
|||
pub fn init_build_info() {} |
|||
|
|||
#[cfg(not(feature = "enable_metrics"))] |
|||
pub fn update_uptime(_start_time: SystemTime) {} |
|||
|
|||
#[cfg(not(feature = "enable_metrics"))] |
|||
pub fn gather_metrics() -> Result<String, Error> { |
|||
Ok("Metrics not enabled".to_string()) |
|||
} |
|||
@ -0,0 +1,196 @@ |
|||
#[cfg(test)] |
|||
mod tests { |
|||
use super::*; |
|||
use std::time::Duration; |
|||
use tokio::time::sleep; |
|||
|
|||
#[cfg(feature = "enable_metrics")] |
|||
mod metrics_enabled_tests { |
|||
use super::*; |
|||
|
|||
#[test] |
|||
fn test_http_metrics_collection() { |
|||
// Test HTTP request metrics
|
|||
increment_http_requests("GET", "/api/sync", 200); |
|||
increment_http_requests("POST", "/api/accounts/register", 201); |
|||
increment_http_requests("GET", "/api/sync", 500); |
|||
|
|||
// Test HTTP duration metrics
|
|||
observe_http_request_duration("GET", "/api/sync", 0.150); |
|||
observe_http_request_duration("POST", "/api/accounts/register", 0.300); |
|||
|
|||
// In a real test environment, we would verify these metrics
|
|||
// were actually recorded by checking the prometheus registry
|
|||
} |
|||
|
|||
#[test] |
|||
fn test_database_metrics_collection() { |
|||
// Test database connection metrics
|
|||
update_db_connections("sqlite", 5, 10); |
|||
update_db_connections("postgresql", 8, 2); |
|||
|
|||
// Test database query duration metrics
|
|||
observe_db_query_duration("select", 0.025); |
|||
observe_db_query_duration("insert", 0.045); |
|||
observe_db_query_duration("update", 0.030); |
|||
} |
|||
|
|||
#[test] |
|||
fn test_authentication_metrics() { |
|||
// Test authentication attempt metrics
|
|||
increment_auth_attempts("password", "success"); |
|||
increment_auth_attempts("password", "failed"); |
|||
increment_auth_attempts("webauthn", "success"); |
|||
increment_auth_attempts("2fa", "failed"); |
|||
|
|||
// Test user session metrics
|
|||
update_user_sessions("authenticated", 150); |
|||
update_user_sessions("anonymous", 5); |
|||
} |
|||
|
|||
#[test] |
|||
fn test_build_info_initialization() { |
|||
// Test build info metrics initialization
|
|||
init_build_info(); |
|||
|
|||
// Test uptime metrics
|
|||
let start_time = std::time::SystemTime::now(); |
|||
update_uptime(start_time); |
|||
} |
|||
|
|||
#[test] |
|||
fn test_metrics_gathering() { |
|||
// Initialize some metrics
|
|||
increment_http_requests("GET", "/api/sync", 200); |
|||
update_db_connections("sqlite", 1, 5); |
|||
init_build_info(); |
|||
|
|||
// Test gathering all metrics
|
|||
let metrics_output = gather_metrics(); |
|||
assert!(metrics_output.is_ok()); |
|||
|
|||
let metrics_text = metrics_output.unwrap(); |
|||
assert!(!metrics_text.is_empty()); |
|||
|
|||
// Should contain Prometheus format headers
|
|||
assert!(metrics_text.contains("# HELP")); |
|||
assert!(metrics_text.contains("# TYPE")); |
|||
} |
|||
|
|||
#[tokio::test] |
|||
async fn test_business_metrics_collection() { |
|||
// This test would require a mock database connection
|
|||
// For now, we just test that the function doesn't panic
|
|||
|
|||
// In a real test, you would:
|
|||
// 1. Create a test database
|
|||
// 2. Insert test data (users, organizations, ciphers)
|
|||
// 3. Call update_business_metrics
|
|||
// 4. Verify the metrics were updated correctly
|
|||
|
|||
// Placeholder test - in production this would use a mock DbConn
|
|||
assert!(true); |
|||
} |
|||
|
|||
#[test] |
|||
fn test_path_normalization() { |
|||
// Test that path normalization works for metric cardinality control
|
|||
increment_http_requests("GET", "/api/sync", 200); |
|||
increment_http_requests("GET", "/api/accounts/123/profile", 200); |
|||
increment_http_requests("POST", "/api/organizations/456/users", 201); |
|||
increment_http_requests("PUT", "/api/ciphers/789", 200); |
|||
|
|||
// Test that gather_metrics works
|
|||
let result = gather_metrics(); |
|||
assert!(result.is_ok()); |
|||
|
|||
let metrics_text = result.unwrap(); |
|||
// Paths should be normalized in the actual implementation
|
|||
// This test verifies the collection doesn't panic
|
|||
assert!(!metrics_text.is_empty()); |
|||
} |
|||
|
|||
#[test] |
|||
fn test_concurrent_metrics_collection() { |
|||
use std::sync::Arc; |
|||
use std::thread; |
|||
|
|||
// Test concurrent access to metrics
|
|||
let handles: Vec<_> = (0..10).map(|i| { |
|||
thread::spawn(move || { |
|||
increment_http_requests("GET", "/api/sync", 200); |
|||
observe_http_request_duration("GET", "/api/sync", 0.1 + (i as f64 * 0.01)); |
|||
update_db_connections("sqlite", i, 10 - i); |
|||
}) |
|||
}).collect(); |
|||
|
|||
// Wait for all threads to complete
|
|||
for handle in handles { |
|||
handle.join().unwrap(); |
|||
} |
|||
|
|||
// Verify metrics collection still works
|
|||
let result = gather_metrics(); |
|||
assert!(result.is_ok()); |
|||
} |
|||
} |
|||
|
|||
#[cfg(not(feature = "enable_metrics"))] |
|||
mod metrics_disabled_tests { |
|||
use super::*; |
|||
|
|||
#[test] |
|||
fn test_no_op_implementations() { |
|||
// When metrics are disabled, all functions should be no-ops
|
|||
increment_http_requests("GET", "/api/sync", 200); |
|||
observe_http_request_duration("GET", "/api/sync", 0.150); |
|||
update_db_connections("sqlite", 5, 10); |
|||
observe_db_query_duration("select", 0.025); |
|||
increment_auth_attempts("password", "success"); |
|||
update_user_sessions("authenticated", 150); |
|||
init_build_info(); |
|||
|
|||
let start_time = std::time::SystemTime::now(); |
|||
update_uptime(start_time); |
|||
|
|||
// Test that gather_metrics returns a disabled message
|
|||
let result = gather_metrics(); |
|||
assert!(result.is_ok()); |
|||
assert_eq!(result.unwrap(), "Metrics not enabled"); |
|||
} |
|||
|
|||
#[tokio::test] |
|||
async fn test_business_metrics_no_op() { |
|||
// This should also be a no-op when metrics are disabled
|
|||
// We can't test with a real DbConn without significant setup,
|
|||
// but we can verify it doesn't panic
|
|||
|
|||
// In a real implementation, you'd mock DbConn
|
|||
assert!(true); |
|||
} |
|||
|
|||
#[test] |
|||
fn test_concurrent_no_op_calls() { |
|||
use std::thread; |
|||
|
|||
// Test that concurrent calls to disabled metrics don't cause issues
|
|||
let handles: Vec<_> = (0..5).map(|i| { |
|||
thread::spawn(move || { |
|||
increment_http_requests("GET", "/test", 200); |
|||
observe_http_request_duration("GET", "/test", 0.1); |
|||
update_db_connections("test", i, 5 - i); |
|||
increment_auth_attempts("password", "success"); |
|||
}) |
|||
}).collect(); |
|||
|
|||
for handle in handles { |
|||
handle.join().unwrap(); |
|||
} |
|||
|
|||
// All calls should be no-ops
|
|||
let result = gather_metrics(); |
|||
assert!(result.is_ok()); |
|||
assert_eq!(result.unwrap(), "Metrics not enabled"); |
|||
} |
|||
} |
|||
} |
|||
Loading…
Reference in new issue