From 2fdcfa66ebea647959b9bd271367f1250117a5c1 Mon Sep 17 00:00:00 2001 From: g-roliveira <125938946+g-roliveira@users.noreply.github.com> Date: Sun, 3 Aug 2025 13:12:26 -0700 Subject: [PATCH 01/15] feat: add S3-compatible OpenDAL URI parameter parsing --- src/config.rs | 287 ++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 276 insertions(+), 11 deletions(-) diff --git a/src/config.rs b/src/config.rs index 4fb103fa..1e4b7186 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1404,16 +1404,166 @@ fn opendal_operator_for_path(path: &str) -> Result { Ok(operator) } +#[cfg(s3)] +fn is_s3_secret_param(param_name: &str) -> bool { + matches!(param_name, "access_key_id" | "secret_access_key" | "session_token") +} + +#[cfg(s3)] +fn parse_s3_bool(value: &str) -> Option { + match value.to_ascii_lowercase().as_str() { + "true" | "1" | "yes" => Some(true), + "false" | "0" | "no" => Some(false), + _ => None, + } +} + +#[cfg(s3)] +fn is_s3_implicit_bool_param(param_name: &str) -> bool { + param_name.starts_with("enable_") || param_name.starts_with("disable_") || param_name.starts_with("allow_") +} + +#[cfg(s3)] +/// Set S3Config fields from query parameters using serde. 
+fn set_s3_config_param( + config: opendal::services::S3Config, + param_name: &str, + param_value: Option<&str>, +) -> Result { + use serde_json::{json, Value}; + + // Special handling for blocked parameters + const BLOCKED_PARAMS: &[&str] = &["bucket", "root"]; + if BLOCKED_PARAMS.contains(&param_name) { + return Err(format!("S3 OpenDAL Parameter '{param_name}' cannot be overridden via query string").into()); + } + + // Parse the parameter value + let json_value = match param_value { + None => { + // For boolean fields that default to true when present without value + // This includes fields starting with enable_, disable_, or allow_ + if is_s3_implicit_bool_param(param_name) { + json!(true) + } else { + return Err(format!("S3 OpenDAL Parameter '{param_name}' requires a value").into()); + } + } + Some(value) => { + // Try to parse as boolean first + if let Some(bool_value) = parse_s3_bool(value) { + json!(bool_value) + } else if let Ok(num) = value.parse::() { + // Try to parse as number (for fields like delete_max_size, batch_max_operations) + json!(num) + } else { + // Default to string + json!(value) + } + } + }; + + // Convert current config to JSON + let config_json = + serde_json::to_value(config).map_err(|e| Error::from(format!("Failed to serialize S3Config to JSON: {e}")))?; + + // Merge with the new field and deserialize + if let Value::Object(mut config_obj) = config_json { + // Insert the new field + config_obj.insert(param_name.to_string(), json_value.clone()); + + // Try to deserialize with the new field + let display_json_value = if is_s3_secret_param(param_name) { + json!("***") + } else { + json_value.clone() + }; + let new_config = serde_json::from_value::(Value::Object(config_obj)) + .map_err(|e| Error::from(format!("Failed to deserialize S3Config from JSON after updating parameter '{param_name}' to value {display_json_value}: {e}")))?; + + Ok(new_config) + } else { + unreachable!("S3Config should always serialize to an object"); + } +} + +#[cfg(s3)] 
+fn parse_s3_config_for_path(path: &str) -> Result { + use opendal::services::S3Config; + + let url = Url::parse(path).map_err(|e| format!("Invalid path S3 URL path {path:?}: {e}"))?; + let bucket = url.host_str().ok_or_else(|| format!("Missing Bucket name in data folder S3 URL {path:?}"))?; + + // Create S3Config and set base configuration based on best practices for + // the official AWS S3 service. + let mut config = S3Config::default(); + config.bucket = bucket.to_string(); + config.root = Some(url.path().to_string()); + + // Default to virtual host style enabled (AWS S3 has deprecated path style) + // + // Note: Some providers may not support virtual host style + config.enable_virtual_host_style = true; + + // Default to AWS S3's Intelligent Tiering storage class for optimal + // cost/performance + // + // Note: Some providers may not support this storage class + config.default_storage_class = Some("INTELLIGENT_TIERING".to_string()); + + // Process query parameters + for (param_name, param_value) in url.query_pairs() { + let param_name = param_name.as_ref(); + let mut param_value = if param_value.is_empty() { + None + } else { + Some(param_value.as_ref()) + }; + + if param_name == "disable_virtual_host_style" { + let value = param_value.unwrap_or("true"); + let bool_value = parse_s3_bool(value) + .ok_or_else(|| format!("S3 OpenDAL Parameter 'disable_virtual_host_style' has invalid boolean value {value:?}"))?; + + let enabled_value = if bool_value { "false" } else { "true" }; + config = set_s3_config_param(config, "enable_virtual_host_style", Some(enabled_value))?; + continue; + } + + if param_name == "default_storage_class" && param_value.is_none() { + param_value = Some(""); + } + + // Use the generated setter function to handle parameters + config = set_s3_config_param(config, param_name, param_value)?; + } + + if config.access_key_id.is_some() || config.secret_access_key.is_some() || config.session_token.is_some() { + warn!( + "S3 static credentials 
provided through path query parameters. This works, but using environment credentials or IAM is recommended." + ); + } + + if config.default_storage_class.as_deref() == Some("") { + config.default_storage_class = None; + } + + Ok(config) +} + #[cfg(s3)] fn opendal_s3_operator_for_path(path: &str) -> Result { use crate::http_client::aws::AwsReqwestConnector; use aws_config::{default_provider::credentials::DefaultCredentialsChain, provider_config::ProviderConfig}; + use opendal::{services::S3Config, Configurator}; // This is a custom AWS credential loader that uses the official AWS Rust // SDK config crate to load credentials. This ensures maximum compatibility // with AWS credential configurations. For example, OpenDAL doesn't support // AWS SSO temporary credentials yet. - struct OpenDALS3CredentialLoader {} + struct OpenDALS3CredentialLoader { + config: S3Config, + } #[async_trait] impl reqsign::AwsCredentialLoad for OpenDALS3CredentialLoader { @@ -1421,6 +1571,23 @@ fn opendal_s3_operator_for_path(path: &str) -> Result use aws_credential_types::provider::ProvideCredentials as _; use tokio::sync::OnceCell; + // If static credentials are provided, use them directly + match (&self.config.access_key_id, &self.config.secret_access_key) { + (Some(access_key_id), Some(secret_access_key)) => { + return Ok(Some(reqsign::AwsCredential { + access_key_id: access_key_id.clone(), + secret_access_key: secret_access_key.clone(), + session_token: self.config.session_token.clone(), + expires_in: None, + })); + } + (None, None) if self.config.session_token.is_none() => (), + _ => anyhow::bail!( + "s3 path must have access_key_id and secret_access_key both set, optionally with session_token set, or all three must be unset" + ), + }; + + // Use the default credentials chain from the AWS SDK (especially useful for SSO) static DEFAULT_CREDENTIAL_CHAIN: OnceCell = OnceCell::const_new(); let chain = DEFAULT_CREDENTIAL_CHAIN @@ -1447,22 +1614,120 @@ fn opendal_s3_operator_for_path(path: 
&str) -> Result } } - const OPEN_DAL_S3_CREDENTIAL_LOADER: OpenDALS3CredentialLoader = OpenDALS3CredentialLoader {}; + let config = parse_s3_config_for_path(path)?; - let url = Url::parse(path).map_err(|e| format!("Invalid path S3 URL path {path:?}: {e}"))?; - - let bucket = url.host_str().ok_or_else(|| format!("Missing Bucket name in data folder S3 URL {path:?}"))?; + let credential_loader = OpenDALS3CredentialLoader { + config: config.clone(), + }; - let builder = opendal::services::S3::default() - .customized_credential_load(Box::new(OPEN_DAL_S3_CREDENTIAL_LOADER)) - .enable_virtual_host_style() - .bucket(bucket) - .root(url.path()) - .default_storage_class("INTELLIGENT_TIERING"); + // Convert config to builder and add custom credential loader + let builder = config.into_builder().customized_credential_load(Box::new(credential_loader)); Ok(opendal::Operator::new(builder)?.finish()) } +#[cfg(all(test, s3))] +mod s3_tests { + use super::{opendal_s3_operator_for_path, parse_s3_config_for_path}; + + #[test] + fn test_parse_s3_config_defaults() { + let config = parse_s3_config_for_path("s3://vaultwarden-data/path/to/root").expect("config should parse"); + + assert_eq!(config.bucket, "vaultwarden-data"); + assert_eq!(config.root.as_deref(), Some("/path/to/root")); + assert!(config.enable_virtual_host_style); + assert_eq!(config.default_storage_class.as_deref(), Some("INTELLIGENT_TIERING")); + } + + #[test] + fn test_parse_s3_config_custom_endpoint_and_path_style() { + let config = parse_s3_config_for_path( + "s3://vw/path?endpoint=http%3A%2F%2F127.0.0.1%3A9000&enable_virtual_host_style=false&default_storage_class=STANDARD&region=us-east-1", + ) + .expect("config should parse"); + + assert_eq!(config.endpoint.as_deref(), Some("http://127.0.0.1:9000")); + assert!(!config.enable_virtual_host_style); + assert_eq!(config.default_storage_class.as_deref(), Some("STANDARD")); + assert_eq!(config.region.as_deref(), Some("us-east-1")); + } + + #[test] + fn 
test_parse_s3_config_disable_virtual_host_style_alias() { + let config = + parse_s3_config_for_path("s3://vw/path?disable_virtual_host_style=true").expect("config should parse"); + assert!(!config.enable_virtual_host_style); + } + + #[test] + fn test_parse_s3_config_storage_class_can_be_omitted() { + let config = parse_s3_config_for_path("s3://vw/path?default_storage_class=").expect("config should parse"); + assert_eq!(config.default_storage_class, None); + } + + #[test] + fn test_parse_s3_config_implicit_boolean_flag() { + let config = parse_s3_config_for_path("s3://vw/path?enable_virtual_host_style") + .expect("config should parse"); + assert!(config.enable_virtual_host_style); + } + + #[test] + fn test_parse_s3_config_boolean_variants() { + let config = parse_s3_config_for_path("s3://vw/path?enable_virtual_host_style=0") + .expect("config should parse"); + assert!(!config.enable_virtual_host_style); + } + + #[test] + fn test_parse_s3_config_percent_encoded_prefix() { + let config = parse_s3_config_for_path("s3://vw/path%20with%20spaces").expect("config should parse"); + assert_eq!(config.root.as_deref(), Some("/path with spaces")); + } + + #[test] + fn test_parse_s3_config_rejects_unknown_parameter() { + let error = parse_s3_config_for_path("s3://vw/path?unknown_param=value") + .expect_err("unknown params should fail"); + let error_string = error.to_string(); + assert!(error_string.contains("unknown field")); + } + + #[test] + #[ignore] + fn test_s3_minio_integration_put_get_delete() { + let endpoint = std::env::var("VW_S3_MINIO_ENDPOINT").unwrap_or_else(|_| "http://127.0.0.1:9000".to_string()); + let bucket = std::env::var("VW_S3_MINIO_BUCKET").unwrap_or_else(|_| "vaultwarden-test".to_string()); + let mut root = std::env::var("VW_S3_MINIO_ROOT").unwrap_or_else(|_| "/vaultwarden-s3-test".to_string()); + if !root.starts_with('/') { + root = format!("/{root}"); + } + let access_key = std::env::var("VW_S3_MINIO_ACCESS_KEY").unwrap_or_else(|_| 
"minioadmin".to_string()); + let secret_key = std::env::var("VW_S3_MINIO_SECRET_KEY").unwrap_or_else(|_| "minioadmin".to_string()); + + let mut query = url::form_urlencoded::Serializer::new(String::new()); + query.append_pair("endpoint", &endpoint); + query.append_pair("enable_virtual_host_style", "false"); + query.append_pair("default_storage_class", "STANDARD"); + query.append_pair("access_key_id", &access_key); + query.append_pair("secret_access_key", &secret_key); + let s3_path = format!("s3://{bucket}{root}?{}", query.finish()); + + let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().expect("tokio runtime should build"); + rt.block_on(async move { + let operator = opendal_s3_operator_for_path(&s3_path).expect("operator should be created"); + let key = format!("integration/{}.txt", uuid::Uuid::new_v4()); + let payload = b"vaultwarden-opendal-s3-compatible"; + + operator.write(&key, payload.as_slice()).await.expect("object upload should succeed"); + let buffer = operator.read(&key).await.expect("object download should succeed"); + assert_eq!(buffer.to_vec(), payload.as_slice()); + operator.delete(&key).await.expect("object delete should succeed"); + }); + } +} + pub enum PathType { Data, IconCache, From 63b25380d89761068cfad1fae2a563a95c777707 Mon Sep 17 00:00:00 2001 From: g-roliveira <125938946+g-roliveira@users.noreply.github.com> Date: Mon, 16 Feb 2026 10:58:33 -0300 Subject: [PATCH 02/15] ci: add MinIO integration test job for S3-compatible mode --- .github/workflows/build.yml | 59 +++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3e7818ec..bbdd51c7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -227,3 +227,62 @@ jobs: run: | echo "### :tada: Checks Passed!" 
>> "${GITHUB_STEP_SUMMARY}" echo "" >> "${GITHUB_STEP_SUMMARY}" + + s3-compatible-minio: + name: S3-Compatible Integration (MinIO) + runs-on: ubuntu-24.04 + timeout-minutes: 45 + + steps: + - name: "Install dependencies Ubuntu" + run: sudo apt-get update && sudo apt-get install -y --no-install-recommends curl openssl build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config + + - name: "Checkout" + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0 + with: + persist-credentials: false + fetch-depth: 0 + + - name: "Install rust-toolchain version" + uses: dtolnay/rust-toolchain@f7ccc83f9ed1e5b9c81d8a67d7ad1a747e22a561 # master @ Dec 16, 2025, 6:11 PM GMT+1 + with: + toolchain: stable + + - name: "Show environment" + run: | + rustc -vV + cargo -vV + + - name: Rust Caching + uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 + with: + prefix-key: "v2025.09-rust" + + - name: Start MinIO + run: | + docker run -d --name minio \ + -p 9000:9000 \ + -e MINIO_ROOT_USER=minioadmin \ + -e MINIO_ROOT_PASSWORD=minioadmin \ + quay.io/minio/minio:RELEASE.2025-07-23T15-54-02Z \ + server /data --console-address ":9001" + + for i in {1..30}; do + if curl -fsS "http://127.0.0.1:9000/minio/health/live" >/dev/null; then + break + fi + sleep 1 + done + + docker run --rm --network host quay.io/minio/mc:RELEASE.2025-06-13T11-33-47Z \ + sh -c "mc alias set local http://127.0.0.1:9000 minioadmin minioadmin && mc mb --ignore-existing local/vaultwarden-test" + + - name: Run MinIO integration test + env: + VW_S3_MINIO_ENDPOINT: "http://127.0.0.1:9000" + VW_S3_MINIO_BUCKET: "vaultwarden-test" + VW_S3_MINIO_ROOT: "/vaultwarden-integration" + VW_S3_MINIO_ACCESS_KEY: "minioadmin" + VW_S3_MINIO_SECRET_KEY: "minioadmin" + run: | + cargo test --profile ci --features sqlite,s3 test_s3_minio_integration_put_get_delete -- --ignored From 7c95c8c5c32b6904abff23c09ed823177b4dfef3 Mon Sep 17 00:00:00 2001 From: g-roliveira 
<125938946+g-roliveira@users.noreply.github.com> Date: Mon, 16 Feb 2026 10:58:37 -0300 Subject: [PATCH 03/15] docs: document S3-compatible URI parameters and examples --- .env.template | 18 +++++++++++++++++- README.md | 30 ++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/.env.template b/.env.template index 67f531fc..564c7027 100644 --- a/.env.template +++ b/.env.template @@ -18,7 +18,23 @@ ## This can be a path to local folder or a path to an external location ## depending on features enabled at build time. Possible external locations: ## -## - AWS S3 Bucket (via `s3` feature): s3://bucket-name/path/to/folder +## - S3-compatible bucket (via `s3` feature): s3://bucket-name/path/to/folder +## +## Optional query parameters are supported for S3-compatible providers: +## - endpoint (MinIO/R2/Ceph RGW): ?endpoint=https%3A%2F%2Fs3.example.internal +## - enable_virtual_host_style (set false for path-style): ?enable_virtual_host_style=false +## - default_storage_class: ?default_storage_class=STANDARD +## Use an empty value to omit the storage-class header: +## ?default_storage_class= +## - region (provider/signing specific): ?region=us-east-1 +## +## Examples: +## - AWS S3 defaults: s3://bucket-name/path/to/folder +## - MinIO path-style: s3://bucket-name/path/to/folder?endpoint=http%3A%2F%2Fminio%3A9000&enable_virtual_host_style=false&default_storage_class=STANDARD +## - Cloudflare R2: s3://bucket-name/path/to/folder?endpoint=https%3A%2F%2FACCOUNT_ID.r2.cloudflarestorage.com&region=auto&default_storage_class= +## +## Credentials in URI query params are supported as a last resort, but it is +## strongly recommended to use environment credentials/IAM instead. ## ## When using an external location, make sure to set TMP_FOLDER, ## TEMPLATES_FOLDER, and DATABASE_URL to local paths and/or a remote database diff --git a/README.md b/README.md index c84a9c40..0a4e4b4d 100644 --- a/README.md +++ b/README.md @@ -111,6 +111,36 @@ services:
+### S3-Compatible Object Storage + +When built with the `s3` feature, storage paths like `DATA_FOLDER`, `ATTACHMENTS_FOLDER`, `ICON_CACHE_FOLDER` and `SENDS_FOLDER` can use `s3://` URIs with query parameters: + +```text +s3://bucket/prefix?endpoint=https%3A%2F%2Fs3.example.internal&enable_virtual_host_style=false&default_storage_class=STANDARD +``` + +- AWS S3 works with defaults (no extra parameters required). +- MinIO/Ceph usually require `endpoint` and `enable_virtual_host_style=false`. +- Cloudflare R2 usually requires `endpoint` and often `region=auto`. +- To omit `x-amz-storage-class`, set `default_storage_class=` (empty value). + +Kubernetes example: + +```yaml +env: + - name: DATA_FOLDER + value: "s3://vaultwarden-data/prod?endpoint=https%3A%2F%2Fs3.example.internal&enable_virtual_host_style=false&default_storage_class=STANDARD" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: vaultwarden-db + key: url +``` + +Use IAM/service account/environment credentials when possible. URI credentials are supported as a last resort. + +
+ ## Get in touch Have a question, suggestion or need help? Join our community on [Matrix](https://matrix.to/#/#vaultwarden:matrix.org), [GitHub Discussions](https://github.com/dani-garcia/vaultwarden/discussions) or [Discourse Forums](https://vaultwarden.discourse.group/). From b4dff012ec3e0300ee71936d94857192a1ea7e70 Mon Sep 17 00:00:00 2001 From: g-roliveira <125938946+g-roliveira@users.noreply.github.com> Date: Mon, 16 Feb 2026 12:41:10 -0300 Subject: [PATCH 04/15] fix: satisfy clippy and rustfmt for S3 config parsing --- src/config.rs | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/src/config.rs b/src/config.rs index 1e4b7186..6e3032e7 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1476,7 +1476,7 @@ fn set_s3_config_param( let display_json_value = if is_s3_secret_param(param_name) { json!("***") } else { - json_value.clone() + json_value }; let new_config = serde_json::from_value::(Value::Object(config_obj)) .map_err(|e| Error::from(format!("Failed to deserialize S3Config from JSON after updating parameter '{param_name}' to value {display_json_value}: {e}")))?; @@ -1522,10 +1522,15 @@ fn parse_s3_config_for_path(path: &str) -> Result Date: Mon, 16 Feb 2026 12:43:16 -0300 Subject: [PATCH 05/15] ci: harden MinIO startup in S3 integration job --- .github/workflows/build.yml | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index bbdd51c7..8e039d42 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -260,11 +260,14 @@ jobs: - name: Start MinIO run: | + docker pull minio/minio:latest + docker pull minio/mc:latest + docker run -d --name minio \ -p 9000:9000 \ -e MINIO_ROOT_USER=minioadmin \ -e MINIO_ROOT_PASSWORD=minioadmin \ - quay.io/minio/minio:RELEASE.2025-07-23T15-54-02Z \ + minio/minio:latest \ server /data --console-address ":9001" for i in {1..30}; do @@ -274,7 +277,13 @@ jobs: sleep 1 done - 
docker run --rm --network host quay.io/minio/mc:RELEASE.2025-06-13T11-33-47Z \ + if ! curl -fsS "http://127.0.0.1:9000/minio/health/live" >/dev/null; then + docker ps -a + docker logs minio || true + exit 1 + fi + + docker run --rm --network host minio/mc:latest \ sh -c "mc alias set local http://127.0.0.1:9000 minioadmin minioadmin && mc mb --ignore-existing local/vaultwarden-test" - name: Run MinIO integration test From 2b60e5856570a6800870d932f972b215238cfb6f Mon Sep 17 00:00:00 2001 From: g-roliveira <125938946+g-roliveira@users.noreply.github.com> Date: Mon, 16 Feb 2026 12:54:06 -0300 Subject: [PATCH 06/15] fix: enforce unknown S3 params and repair MinIO CI command --- .github/workflows/build.yml | 4 ++-- src/config.rs | 8 ++++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8e039d42..cf4ca85a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -283,8 +283,8 @@ jobs: exit 1 fi - docker run --rm --network host minio/mc:latest \ - sh -c "mc alias set local http://127.0.0.1:9000 minioadmin minioadmin && mc mb --ignore-existing local/vaultwarden-test" + docker run --rm --network host --entrypoint /bin/sh minio/mc:latest -c \ + "mc alias set local http://127.0.0.1:9000 minioadmin minioadmin && mc mb --ignore-existing local/vaultwarden-test" - name: Run MinIO integration test env: diff --git a/src/config.rs b/src/config.rs index 6e3032e7..a08869ca 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1469,6 +1469,10 @@ fn set_s3_config_param( // Merge with the new field and deserialize if let Value::Object(mut config_obj) = config_json { + if !config_obj.contains_key(param_name) { + return Err(format!("Unknown S3 OpenDAL parameter '{param_name}'").into()); + } + // Insert the new field config_obj.insert(param_name.to_string(), json_value.clone()); @@ -1686,7 +1690,7 @@ mod s3_tests { #[test] fn test_parse_s3_config_percent_encoded_prefix() { let config = 
parse_s3_config_for_path("s3://vw/path%20with%20spaces").expect("config should parse"); - assert_eq!(config.root.as_deref(), Some("/path with spaces")); + assert_eq!(config.root.as_deref(), Some("/path%20with%20spaces")); } #[test] @@ -1694,7 +1698,7 @@ mod s3_tests { let error = parse_s3_config_for_path("s3://vw/path?unknown_param=value").expect_err("unknown params should fail"); let error_string = error.to_string(); - assert!(error_string.contains("unknown field")); + assert!(error_string.contains("Unknown S3 OpenDAL parameter")); } #[test] From 5996a78f3277ffd447826952c643e02f59f0c084 Mon Sep 17 00:00:00 2001 From: g-roliveira <125938946+g-roliveira@users.noreply.github.com> Date: Mon, 16 Feb 2026 13:14:12 -0300 Subject: [PATCH 07/15] fix: stabilize unknown-param test and set MinIO region --- .github/workflows/build.yml | 1 + src/config.rs | 13 +++++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index cf4ca85a..a5375282 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -291,6 +291,7 @@ jobs: VW_S3_MINIO_ENDPOINT: "http://127.0.0.1:9000" VW_S3_MINIO_BUCKET: "vaultwarden-test" VW_S3_MINIO_ROOT: "/vaultwarden-integration" + VW_S3_MINIO_REGION: "auto" VW_S3_MINIO_ACCESS_KEY: "minioadmin" VW_S3_MINIO_SECRET_KEY: "minioadmin" run: | diff --git a/src/config.rs b/src/config.rs index a08869ca..497623f0 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1695,10 +1695,13 @@ mod s3_tests { #[test] fn test_parse_s3_config_rejects_unknown_parameter() { - let error = - parse_s3_config_for_path("s3://vw/path?unknown_param=value").expect_err("unknown params should fail"); - let error_string = error.to_string(); - assert!(error_string.contains("Unknown S3 OpenDAL parameter")); + let error = parse_s3_config_for_path("s3://vw/path?region=auto&unknown_param=value") + .expect_err("unknown params should fail"); + let error_message = error.message().to_string(); + assert!( + 
error_message.contains("Unknown S3 OpenDAL parameter") && error_message.contains("unknown_param"), + "error message: {error_message}" + ); } #[test] @@ -1712,9 +1715,11 @@ mod s3_tests { } let access_key = std::env::var("VW_S3_MINIO_ACCESS_KEY").unwrap_or_else(|_| "minioadmin".to_string()); let secret_key = std::env::var("VW_S3_MINIO_SECRET_KEY").unwrap_or_else(|_| "minioadmin".to_string()); + let region = std::env::var("VW_S3_MINIO_REGION").unwrap_or_else(|_| "auto".to_string()); let mut query = url::form_urlencoded::Serializer::new(String::new()); query.append_pair("endpoint", &endpoint); + query.append_pair("region", &region); query.append_pair("enable_virtual_host_style", "false"); query.append_pair("default_storage_class", "STANDARD"); query.append_pair("access_key_id", &access_key); From 2f503010e35ee7cf1a40a58e741de77747362a8f Mon Sep 17 00:00:00 2001 From: g-roliveira <125938946+g-roliveira@users.noreply.github.com> Date: Mon, 16 Feb 2026 15:17:31 -0300 Subject: [PATCH 08/15] test: make unknown S3 parameter assertion robust --- src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config.rs b/src/config.rs index 497623f0..6e69c805 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1697,7 +1697,7 @@ mod s3_tests { fn test_parse_s3_config_rejects_unknown_parameter() { let error = parse_s3_config_for_path("s3://vw/path?region=auto&unknown_param=value") .expect_err("unknown params should fail"); - let error_message = error.message().to_string(); + let error_message = format!("{error:?}"); assert!( error_message.contains("Unknown S3 OpenDAL parameter") && error_message.contains("unknown_param"), "error message: {error_message}" From 7b2b450c077b2d64c41773c1068daf8b502c354f Mon Sep 17 00:00:00 2001 From: g-roliveira <125938946+g-roliveira@users.noreply.github.com> Date: Mon, 16 Feb 2026 15:52:57 -0300 Subject: [PATCH 09/15] ci: add manual R2 integration workflow --- .github/workflows/s3-r2-manual.yml | 50 ++++++++++++++++++++++++++++++ 
1 file changed, 50 insertions(+) create mode 100644 .github/workflows/s3-r2-manual.yml diff --git a/.github/workflows/s3-r2-manual.yml b/.github/workflows/s3-r2-manual.yml new file mode 100644 index 00000000..1a746875 --- /dev/null +++ b/.github/workflows/s3-r2-manual.yml @@ -0,0 +1,50 @@ +name: S3-Compatible Integration (R2 Manual) +permissions: {} + +on: + workflow_dispatch: + +defaults: + run: + shell: bash + +jobs: + s3-compatible-r2: + name: S3-Compatible Integration (R2) + runs-on: ubuntu-24.04 + timeout-minutes: 45 + + steps: + - name: Install dependencies Ubuntu + run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config + + - name: Checkout + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Install rust-toolchain version + uses: dtolnay/rust-toolchain@f7ccc83f9ed1e5b9c81d8a67d7ad1a747e22a561 + with: + toolchain: stable + + - name: Show environment + run: | + rustc -vV + cargo -vV + + - name: Rust Caching + uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 + with: + prefix-key: "v2025.09-rust" + + - name: Run R2 integration test + env: + VW_S3_MINIO_ENDPOINT: ${{ secrets.VW_R2_ENDPOINT }} + VW_S3_MINIO_BUCKET: ${{ secrets.VW_R2_BUCKET }} + VW_S3_MINIO_ROOT: ${{ secrets.VW_R2_ROOT }} + VW_S3_MINIO_REGION: ${{ secrets.VW_R2_REGION }} + VW_S3_MINIO_ACCESS_KEY: ${{ secrets.VW_R2_ACCESS_KEY_ID }} + VW_S3_MINIO_SECRET_KEY: ${{ secrets.VW_R2_SECRET_ACCESS_KEY }} + run: cargo test --profile ci --features s3 config::s3_tests::test_s3_minio_integration_put_get_delete -- --ignored --nocapture From 0f7fea5b4aa5ec04a8c1b66e41a9f1b7973eec08 Mon Sep 17 00:00:00 2001 From: g-roliveira <125938946+g-roliveira@users.noreply.github.com> Date: Mon, 16 Feb 2026 16:04:53 -0300 Subject: [PATCH 10/15] Revert "ci: add manual R2 integration workflow" This reverts commit 
7b2b450c077b2d64c41773c1068daf8b502c354f. --- .github/workflows/s3-r2-manual.yml | 50 ------------------------------ 1 file changed, 50 deletions(-) delete mode 100644 .github/workflows/s3-r2-manual.yml diff --git a/.github/workflows/s3-r2-manual.yml b/.github/workflows/s3-r2-manual.yml deleted file mode 100644 index 1a746875..00000000 --- a/.github/workflows/s3-r2-manual.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: S3-Compatible Integration (R2 Manual) -permissions: {} - -on: - workflow_dispatch: - -defaults: - run: - shell: bash - -jobs: - s3-compatible-r2: - name: S3-Compatible Integration (R2) - runs-on: ubuntu-24.04 - timeout-minutes: 45 - - steps: - - name: Install dependencies Ubuntu - run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config - - - name: Checkout - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - with: - persist-credentials: false - fetch-depth: 0 - - - name: Install rust-toolchain version - uses: dtolnay/rust-toolchain@f7ccc83f9ed1e5b9c81d8a67d7ad1a747e22a561 - with: - toolchain: stable - - - name: Show environment - run: | - rustc -vV - cargo -vV - - - name: Rust Caching - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - with: - prefix-key: "v2025.09-rust" - - - name: Run R2 integration test - env: - VW_S3_MINIO_ENDPOINT: ${{ secrets.VW_R2_ENDPOINT }} - VW_S3_MINIO_BUCKET: ${{ secrets.VW_R2_BUCKET }} - VW_S3_MINIO_ROOT: ${{ secrets.VW_R2_ROOT }} - VW_S3_MINIO_REGION: ${{ secrets.VW_R2_REGION }} - VW_S3_MINIO_ACCESS_KEY: ${{ secrets.VW_R2_ACCESS_KEY_ID }} - VW_S3_MINIO_SECRET_KEY: ${{ secrets.VW_R2_SECRET_ACCESS_KEY }} - run: cargo test --profile ci --features s3 config::s3_tests::test_s3_minio_integration_put_get_delete -- --ignored --nocapture From 8ed63db7f2587566f6fe62ccdcf4392a10101edf Mon Sep 17 00:00:00 2001 From: g-roliveira <125938946+g-roliveira@users.noreply.github.com> Date: Mon, 16 
Feb 2026 16:17:05 -0300 Subject: [PATCH 11/15] ci: add manual docker beta workflow for ghcr --- .github/workflows/docker-beta.yml | 91 +++++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) create mode 100644 .github/workflows/docker-beta.yml diff --git a/.github/workflows/docker-beta.yml b/.github/workflows/docker-beta.yml new file mode 100644 index 00000000..fb9a2c1f --- /dev/null +++ b/.github/workflows/docker-beta.yml @@ -0,0 +1,91 @@ +name: Docker Beta +permissions: {} + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +on: + workflow_dispatch: + +defaults: + run: + shell: bash + +jobs: + docker-beta: + name: Build and Push Beta (${{ matrix.base_image }}) + runs-on: ubuntu-24.04 + permissions: + contents: read + packages: write + strategy: + fail-fast: false + matrix: + base_image: ["debian", "alpine"] + + steps: + - name: Checkout + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Initialize QEMU binfmt support + uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 + with: + platforms: "arm64" + + - name: Setup Docker Buildx + uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 + with: + cache-binary: false + buildkitd-config-inline: | + [worker.oci] + max-parallelism = 2 + driver-opts: | + network=host + + - name: Login to GitHub Container Registry + uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Prepare metadata + run: | + echo "SOURCE_COMMIT=${GITHUB_SHA}" | tee -a "${GITHUB_ENV}" + echo "SOURCE_REPOSITORY_URL=https://github.com/${GITHUB_REPOSITORY}" | tee -a "${GITHUB_ENV}" + echo "SHORT_SHA=${GITHUB_SHA::8}" | tee -a "${GITHUB_ENV}" + echo "BASE_TAGS=beta-${GITHUB_SHA::8}" | tee -a "${GITHUB_ENV}" + echo 
"CONTAINER_REGISTRIES=ghcr.io/${GITHUB_REPOSITORY}" | tee -a "${GITHUB_ENV}" + + - name: Bake ${{ matrix.base_image }} multi-arch image + uses: docker/bake-action@5be5f02ff8819ecd3092ea6b2e6261c31774f2b4 # v6.10.0 + env: + BASE_TAGS: ${{ env.BASE_TAGS }} + SOURCE_COMMIT: ${{ env.SOURCE_COMMIT }} + SOURCE_VERSION: ${{ env.BASE_TAGS }} + SOURCE_REPOSITORY_URL: ${{ env.SOURCE_REPOSITORY_URL }} + CONTAINER_REGISTRIES: ${{ env.CONTAINER_REGISTRIES }} + with: + pull: true + source: . + files: docker/docker-bake.hcl + targets: ${{ matrix.base_image }}-multi + set: | + *.cache-from=type=gha,scope=docker-beta-${{ matrix.base_image }} + *.cache-to=type=gha,mode=max,scope=docker-beta-${{ matrix.base_image }} + *.platform=linux/amd64,linux/arm64 + *.output=type=registry + + - name: Publish summary + run: | + if [[ "${{ matrix.base_image }}" == "debian" ]]; then + echo "### Published beta image" >> "${GITHUB_STEP_SUMMARY}" + echo "- \`ghcr.io/${GITHUB_REPOSITORY}:beta-${SHORT_SHA}\`" >> "${GITHUB_STEP_SUMMARY}" + else + echo "### Published beta image" >> "${GITHUB_STEP_SUMMARY}" + echo "- \`ghcr.io/${GITHUB_REPOSITORY}:beta-${SHORT_SHA}-alpine\`" >> "${GITHUB_STEP_SUMMARY}" + fi From 0841de93adae0489d066ee7032aadae8b191cb71 Mon Sep 17 00:00:00 2001 From: g-roliveira <125938946+g-roliveira@users.noreply.github.com> Date: Mon, 16 Feb 2026 23:24:44 -0300 Subject: [PATCH 12/15] Revert "ci: add manual docker beta workflow for ghcr" This reverts commit 8ed63db7f2587566f6fe62ccdcf4392a10101edf. 
--- .github/workflows/docker-beta.yml | 91 ------------------------------- 1 file changed, 91 deletions(-) delete mode 100644 .github/workflows/docker-beta.yml diff --git a/.github/workflows/docker-beta.yml b/.github/workflows/docker-beta.yml deleted file mode 100644 index fb9a2c1f..00000000 --- a/.github/workflows/docker-beta.yml +++ /dev/null @@ -1,91 +0,0 @@ -name: Docker Beta -permissions: {} - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -on: - workflow_dispatch: - -defaults: - run: - shell: bash - -jobs: - docker-beta: - name: Build and Push Beta (${{ matrix.base_image }}) - runs-on: ubuntu-24.04 - permissions: - contents: read - packages: write - strategy: - fail-fast: false - matrix: - base_image: ["debian", "alpine"] - - steps: - - name: Checkout - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - with: - persist-credentials: false - fetch-depth: 0 - - - name: Initialize QEMU binfmt support - uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 - with: - platforms: "arm64" - - - name: Setup Docker Buildx - uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 - with: - cache-binary: false - buildkitd-config-inline: | - [worker.oci] - max-parallelism = 2 - driver-opts: | - network=host - - - name: Login to GitHub Container Registry - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Prepare metadata - run: | - echo "SOURCE_COMMIT=${GITHUB_SHA}" | tee -a "${GITHUB_ENV}" - echo "SOURCE_REPOSITORY_URL=https://github.com/${GITHUB_REPOSITORY}" | tee -a "${GITHUB_ENV}" - echo "SHORT_SHA=${GITHUB_SHA::8}" | tee -a "${GITHUB_ENV}" - echo "BASE_TAGS=beta-${GITHUB_SHA::8}" | tee -a "${GITHUB_ENV}" - echo "CONTAINER_REGISTRIES=ghcr.io/${GITHUB_REPOSITORY}" | tee -a "${GITHUB_ENV}" - - - name: 
Bake ${{ matrix.base_image }} multi-arch image - uses: docker/bake-action@5be5f02ff8819ecd3092ea6b2e6261c31774f2b4 # v6.10.0 - env: - BASE_TAGS: ${{ env.BASE_TAGS }} - SOURCE_COMMIT: ${{ env.SOURCE_COMMIT }} - SOURCE_VERSION: ${{ env.BASE_TAGS }} - SOURCE_REPOSITORY_URL: ${{ env.SOURCE_REPOSITORY_URL }} - CONTAINER_REGISTRIES: ${{ env.CONTAINER_REGISTRIES }} - with: - pull: true - source: . - files: docker/docker-bake.hcl - targets: ${{ matrix.base_image }}-multi - set: | - *.cache-from=type=gha,scope=docker-beta-${{ matrix.base_image }} - *.cache-to=type=gha,mode=max,scope=docker-beta-${{ matrix.base_image }} - *.platform=linux/amd64,linux/arm64 - *.output=type=registry - - - name: Publish summary - run: | - if [[ "${{ matrix.base_image }}" == "debian" ]]; then - echo "### Published beta image" >> "${GITHUB_STEP_SUMMARY}" - echo "- \`ghcr.io/${GITHUB_REPOSITORY}:beta-${SHORT_SHA}\`" >> "${GITHUB_STEP_SUMMARY}" - else - echo "### Published beta image" >> "${GITHUB_STEP_SUMMARY}" - echo "- \`ghcr.io/${GITHUB_REPOSITORY}:beta-${SHORT_SHA}-alpine\`" >> "${GITHUB_STEP_SUMMARY}" - fi From c242d284ee9de3f96943909b9e1a55dcc7f62173 Mon Sep 17 00:00:00 2001 From: g-roliveira <125938946+g-roliveira@users.noreply.github.com> Date: Mon, 16 Feb 2026 23:24:48 -0300 Subject: [PATCH 13/15] docs: clarify s3 build feature requirements --- .env.template | 4 ++++ README.md | 1 + 2 files changed, 5 insertions(+) diff --git a/.env.template b/.env.template index 564c7027..2d0f0e13 100644 --- a/.env.template +++ b/.env.template @@ -36,6 +36,10 @@ ## Credentials in URI query params are supported as a last resort, but it is ## strongly recommended to use environment credentials/IAM instead. ## +## Note: For S3 paths to work, the container/binary must be built with both +## a DB backend and the `s3` feature (for example: `sqlite,s3`, +## `postgresql,s3`, or `mysql,s3`). 
+## ## When using an external location, make sure to set TMP_FOLDER, ## TEMPLATES_FOLDER, and DATABASE_URL to local paths and/or a remote database ## location. diff --git a/README.md b/README.md index 0a4e4b4d..220d33bf 100644 --- a/README.md +++ b/README.md @@ -123,6 +123,7 @@ s3://bucket/prefix?endpoint=https%3A%2F%2Fs3.example.internal&enable_virtual_hos - MinIO/Ceph usually require `endpoint` and `enable_virtual_host_style=false`. - Cloudflare R2 usually requires `endpoint` and often `region=auto`. - To omit `x-amz-storage-class`, set `default_storage_class=` (empty value). +- Container images must include both a DB backend feature and `s3` (for example `sqlite,s3`, `postgresql,s3`, or `mysql,s3`). Kubernetes example: From caf89052f26c5d110fc97dfbc1fe84a4836161ef Mon Sep 17 00:00:00 2001 From: g-roliveira <125938946+g-roliveira@users.noreply.github.com> Date: Mon, 16 Feb 2026 23:51:09 -0300 Subject: [PATCH 14/15] docs: clarify CSP and CORS requirements for S3 attachment downloads --- .env.template | 6 ++++++ README.md | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/.env.template b/.env.template index 2d0f0e13..b666b555 100644 --- a/.env.template +++ b/.env.template @@ -465,6 +465,12 @@ ## This adds the configured value to the 'Content-Security-Policy' headers 'connect-src' value. ## Multiple values must be separated with a whitespace. And only HTTPS values are allowed. ## Example: "https://my-addy-io.domain.tld https://my-simplelogin.domain.tld" +## For S3-compatible attachment downloads, include your object storage origin +## (for example Cloudflare R2 endpoint): +## "https://<ACCOUNT_ID>.r2.cloudflarestorage.com" +## Note: This only configures CSP on Vaultwarden. You also need a CORS policy +## on the object storage bucket/provider that allows your Vaultwarden DOMAIN +## origin for download requests. 
# ALLOWED_CONNECT_SRC="" ## Number of seconds, on average, between login requests from the same IP address before rate limiting kicks in. diff --git a/README.md b/README.md index 220d33bf..f514b1f5 100644 --- a/README.md +++ b/README.md @@ -140,6 +140,38 @@ env: Use IAM/service account/environment credentials when possible. URI credentials are supported as a last resort. +### Browser Attachment Downloads (CSP + CORS) + +For S3-compatible backends, attachment downloads from the Web Vault use presigned URLs. The browser downloads directly from the object storage endpoint. + +Configure both sides: + +- Vaultwarden CSP: allow the object-storage origin in `ALLOWED_CONNECT_SRC`. +- Object storage CORS policy: allow your Vaultwarden origin (`DOMAIN`) for `GET`/`HEAD`. + +R2 example: + +```text +ALLOWED_CONNECT_SRC="https://<ACCOUNT_ID>.r2.cloudflarestorage.com" +``` + +```json +[ + { + "AllowedOrigins": ["https://vault.example.com"], + "AllowedMethods": ["GET", "HEAD"], + "AllowedHeaders": ["*"], + "ExposeHeaders": ["ETag", "Content-Length", "Content-Type", "Content-Disposition"], + "MaxAgeSeconds": 3600 + } +] +``` + +Troubleshooting: + +- `violates the document's Content Security Policy`: set `ALLOWED_CONNECT_SRC` correctly. +- `No 'Access-Control-Allow-Origin' header`: fix CORS policy on the bucket/provider. +
## Get in touch From 90dfc24ebd074abdf6b71ac64f8ab81d8c162b3a Mon Sep 17 00:00:00 2001 From: g-roliveira <125938946+g-roliveira@users.noreply.github.com> Date: Mon, 16 Feb 2026 23:56:14 -0300 Subject: [PATCH 15/15] docs: add S3-compatible object storage wiki guide --- docs/s3-compatible-object-storage.md | 105 +++++++++++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 100644 docs/s3-compatible-object-storage.md diff --git a/docs/s3-compatible-object-storage.md b/docs/s3-compatible-object-storage.md new file mode 100644 index 00000000..ca518dac --- /dev/null +++ b/docs/s3-compatible-object-storage.md @@ -0,0 +1,105 @@ +# S3-Compatible Object Storage + +This page documents Vaultwarden's S3-compatible storage support based on `s3://` URIs with query parameters (OpenDAL S3 config). + +## Scope + +Supported providers (via S3 API): + +- AWS S3 +- MinIO +- Cloudflare R2 +- Ceph RGW and similar S3-compatible services + +The same URI format applies to: + +- `DATA_FOLDER` +- `ATTACHMENTS_FOLDER` +- `ICON_CACHE_FOLDER` +- `SENDS_FOLDER` + +## URI Format + +```text +s3://bucket/prefix?endpoint=https%3A%2F%2Fs3.example.com&enable_virtual_host_style=false&default_storage_class=STANDARD&region=us-east-1 +``` + +Supported query parameters: + +- `endpoint` +- `region` +- `enable_virtual_host_style` +- `default_storage_class` +- `disable_virtual_host_style` (alias) + +Notes: + +- AWS S3 works with defaults. +- For path-style providers, set `enable_virtual_host_style=false`. +- To omit storage class header, set `default_storage_class=` (empty). +- Unknown parameters are rejected. + +## Build Requirement + +Use images/binaries built with both: + +1. a DB backend feature (`sqlite`, `postgresql`, or `mysql`) +2. 
`s3` + +Examples: + +- `sqlite,s3` +- `postgresql,s3` +- `mysql,s3` + +## Cloudflare R2 Example + +```env +ATTACHMENTS_FOLDER=s3://vaultwarden/attachments?endpoint=https://<ACCOUNT_ID>.r2.cloudflarestorage.com&region=auto&enable_virtual_host_style=false&default_storage_class= +ICON_CACHE_FOLDER=s3://vaultwarden/icon_cache?endpoint=https://<ACCOUNT_ID>.r2.cloudflarestorage.com&region=auto&enable_virtual_host_style=false&default_storage_class= +SENDS_FOLDER=s3://vaultwarden/sends?endpoint=https://<ACCOUNT_ID>.r2.cloudflarestorage.com&region=auto&enable_virtual_host_style=false&default_storage_class= +``` + +## Browser Downloads: CSP + CORS + +When attachments are stored in object storage, Web Vault downloads use presigned URLs and the browser fetches objects directly from the storage endpoint. + +You must configure both sides: + +1. Vaultwarden CSP (`ALLOWED_CONNECT_SRC`) +2. Bucket/provider CORS policy + +### 1) Vaultwarden CSP + +```env +ALLOWED_CONNECT_SRC=https://<ACCOUNT_ID>.r2.cloudflarestorage.com +``` + +### 2) Bucket CORS Policy (example) + +```json +[
  {
    "AllowedOrigins": ["https://vault.example.com"],
    "AllowedMethods": ["GET", "HEAD"],
    "AllowedHeaders": ["*"],
    "ExposeHeaders": ["ETag", "Content-Length", "Content-Type", "Content-Disposition"],
    "MaxAgeSeconds": 3600
  }
] +``` + +## Troubleshooting + +- `violates the document's Content Security Policy` + - Configure/fix `ALLOWED_CONNECT_SRC`. +- `No 'Access-Control-Allow-Origin' header` + - Configure/fix CORS on the bucket/provider. +- `S3 support is not enabled` + - Image/binary was built without `s3` feature. + +## Security Notes + +- Prefer IAM/service account/environment credentials. +- URI credentials are supported only as a last resort. +- If credentials were exposed in logs/chats, rotate them immediately.