
Merge remote-tracking branch 'origin/main'

pull/6166/head
Samoth 4 days ago
commit 5964baa72c
25 changed files:

 .env.template                                  |  2
 Cargo.toml                                     |  2
 docker/DockerSettings.yaml                     |  2
 docker/Dockerfile.alpine                       |  8
 docker/Dockerfile.debian                       |  2
 playwright/README.md                           | 41
 playwright/compose/warden/Dockerfile           |  2
 playwright/test.env                            |  2
 rust-toolchain.toml                            |  2
 src/api/core/accounts.rs                       |  6
 src/api/core/ciphers.rs                        | 98
 src/api/core/organizations.rs                  |  2
 src/api/core/two_factor/email.rs               |  1
 src/api/icons.rs                               |  4
 src/api/identity.rs                            |  6
 src/api/web.rs                                 |  2
 src/auth.rs                                    |  2
 src/config.rs                                  |  7
 src/db/models/auth_request.rs                  |  2
 src/db/models/cipher.rs                        | 51
 src/db/models/device.rs                        |  8
 src/db/models/group.rs                         |  2
 src/sso.rs                                     |  6
 src/sso_client.rs                              |  2
 src/static/templates/scss/vaultwarden.scss.hbs | 12

.env.template
@@ -485,7 +485,7 @@
 # SSO_AUTHORITY=https://auth.example.com
 ## Authorization request scopes. Optional SSO scopes, override if email and profile are not enough (`openid` is implicit).
-#SSO_SCOPES="email profile"
+# SSO_SCOPES="email profile"
 ## Additional authorization url parameters (ex: to obtain a `refresh_token` with Google Auth).
 # SSO_AUTHORIZE_EXTRA_PARAMS="access_type=offline&prompt=consent"

Cargo.toml
@@ -6,7 +6,7 @@ name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2021"
-rust-version = "1.86.0"
+rust-version = "1.87.0"
 resolver = "2"
 repository = "https://github.com/dani-garcia/vaultwarden"

docker/DockerSettings.yaml
@@ -5,7 +5,7 @@ vault_image_digest: "sha256:f6ac819a2cd9e226f2cd2ec26196ede94a41e672e9672a11b5f3
 # We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
 # https://github.com/tonistiigi/xx | https://hub.docker.com/r/tonistiigi/xx/tags
 xx_image_digest: "sha256:9c207bead753dda9430bdd15425c6518fc7a03d866103c516a2c6889188f5894"
-rust_version: 1.88.0 # Rust version to be used
+rust_version: 1.89.0 # Rust version to be used
 debian_version: bookworm # Debian release name to be used
 alpine_version: "3.22" # Alpine version to be used
 # For which platforms/architectures will we try to build images

docker/Dockerfile.alpine
@@ -32,10 +32,10 @@ FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:f6ac819a2cd9e
 ########################## ALPINE BUILD IMAGES ##########################
 ## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
 ## And for Alpine we define all build images here, they will only be loaded when actually used
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.88.0 AS build_amd64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.88.0 AS build_arm64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.88.0 AS build_armv7
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.88.0 AS build_armv6
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.89.0 AS build_amd64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.89.0 AS build_arm64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.89.0 AS build_armv7
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.89.0 AS build_armv6

 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006

docker/Dockerfile.debian
@@ -36,7 +36,7 @@ FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:9c207bead753dda9430bd
 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
-FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.88.0-slim-bookworm AS build
+FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.89.0-slim-bookworm AS build
 COPY --from=xx / /
 ARG TARGETARCH
 ARG TARGETVARIANT

playwright/README.md
@@ -1,18 +1,18 @@
 # Integration tests

 This allows running integration tests using [Playwright](https://playwright.dev/).\
-It usse its own [test.env](/test/scenarios/test.env) with different ports to not collide with a running dev instance.
+It uses its own `test.env` with different ports to not collide with a running dev instance.

 ## Install

-This rely on `docker` and the `compose` [plugin](https://docs.docker.com/compose/install/).
+This relies on `docker` and the `compose` [plugin](https://docs.docker.com/compose/install/).
 Databases (`Mariadb`, `Mysql` and `Postgres`) and `Playwright` will run in containers.

 ### Running Playwright outside docker

-It's possible to run `Playwright` outside of the container, this remove the need to rebuild the image for each change.
-You'll additionally need `nodejs` then run:
+It is possible to run `Playwright` outside of the container, this removes the need to rebuild the image for each change.
+You will additionally need `nodejs` then run:

 ```bash
 npm install
@@ -33,7 +33,7 @@ To force a rebuild of the Playwright image:
 DOCKER_BUILDKIT=1 docker compose --env-file test.env build Playwright
 ```

-To access the ui to easily run test individually and debug if needed (will not work in docker):
+To access the UI to easily run test individually and debug if needed (this will not work in docker):

 ```bash
 npx playwright test --ui
@@ -42,7 +42,7 @@ npx playwright test --ui
 ### DB

 Projects are configured to allow to run tests only on specific database.\
 You can use:

 ```bash
@@ -62,7 +62,7 @@ DOCKER_BUILDKIT=1 docker compose --profile playwright --env-file test.env run Pl
 ### Keep services running

-If you want you can keep the Db and Keycloak runnning (states are not impacted by the tests):
+If you want you can keep the DB and Keycloak runnning (states are not impacted by the tests):

 ```bash
 PW_KEEP_SERVICE_RUNNNING=true npx playwright test
@@ -86,7 +86,8 @@ DOCKER_BUILDKIT=1 docker compose --profile playwright --env-file test.env run Pl
 ## Writing scenario

-When creating new scenario use the recorder to more easily identify elements (in general try to rely on visible hint to identify elements and not hidden ids).
+When creating new scenario use the recorder to more easily identify elements
+(in general try to rely on visible hint to identify elements and not hidden IDs).
 This does not start the server, you will need to start it manually.

 ```bash
@@ -95,7 +96,7 @@ npx playwright codegen "http://127.0.0.1:8000"
 ## Override web-vault

-It's possible to change the `web-vault` used by referencing a different `bw_web_builds` commit.
+It is possible to change the `web-vault` used by referencing a different `bw_web_builds` commit.

 ```bash
 export PW_WV_REPO_URL=https://github.com/Timshel/oidc_web_builds.git
@@ -105,12 +106,13 @@ DOCKER_BUILDKIT=1 docker compose --profile playwright --env-file test.env build 
 # OpenID Connect test setup

-Additionally this `docker-compose` template allow to run locally `VaultWarden`, [Keycloak](https://www.keycloak.org/) and [Maildev](https://github.com/timshel/maildev) to test OIDC.
+Additionally this `docker-compose` template allows to run locally Vaultwarden,
+[Keycloak](https://www.keycloak.org/) and [Maildev](https://github.com/timshel/maildev) to test OIDC.

 ## Setup

 This rely on `docker` and the `compose` [plugin](https://docs.docker.com/compose/install/).

-First create a copy of `.env.template` as `.env` (This is done to prevent commiting your custom settings, Ex `SMTP_`).
+First create a copy of `.env.template` as `.env` (This is done to prevent committing your custom settings, Ex `SMTP_`).

 ## Usage
@@ -125,11 +127,12 @@ keycloakSetup_1 | 74af4933-e386-4e64-ba15-a7b61212c45e
 oidc_keycloakSetup_1 exited with code 0
 ```

-Wait until `oidc_keycloakSetup_1 exited with code 0` which indicate the correct setup of the Keycloak realm, client and user (It's normal for this container to stop once the configuration is done).
+Wait until `oidc_keycloakSetup_1 exited with code 0` which indicates the correct setup of the Keycloak realm, client and user
+(It is normal for this container to stop once the configuration is done).

 Then you can access :

-- `VaultWarden` on http://0.0.0.0:8000 with the default user `test@yopmail.com/test`.
+- `Vaultwarden` on http://0.0.0.0:8000 with the default user `test@yopmail.com/test`.
 - `Keycloak` on http://0.0.0.0:8080/admin/master/console/ with the default user `admin/admin`
 - `Maildev` on http://0.0.0.0:1080
@@ -143,7 +146,7 @@ You can run just `Keycloak` with `--profile keycloak`:
 ```bash
 > docker compose --profile keycloak --env-file .env up
 ```

-When running with a local VaultWarden, you can use a front-end build from [dani-garcia/bw_web_builds](https://github.com/dani-garcia/bw_web_builds/releases).
+When running with a local Vaultwarden, you can use a front-end build from [dani-garcia/bw_web_builds](https://github.com/dani-garcia/bw_web_builds/releases).

 ## Rebuilding the Vaultwarden
@@ -155,12 +158,12 @@ docker compose --profile vaultwarden --env-file .env build VaultwardenPrebuild V
 ## Configuration

-All configuration for `keycloak` / `VaultWarden` / `keycloak_setup.sh` can be found in [.env](.env.template).
+All configuration for `keycloak` / `Vaultwarden` / `keycloak_setup.sh` can be found in [.env](.env.template).
 The content of the file will be loaded as environment variables in all containers.

-- `keycloak` [configuration](https://www.keycloak.org/server/all-config) include `KEYCLOAK_ADMIN` / `KEYCLOAK_ADMIN_PASSWORD` and any variable prefixed `KC_` ([more information](https://www.keycloak.org/server/configuration#_example_configuring_the_db_url_host_parameter)).
+- `keycloak` [configuration](https://www.keycloak.org/server/all-config) includes `KEYCLOAK_ADMIN` / `KEYCLOAK_ADMIN_PASSWORD` and any variable prefixed `KC_` ([more information](https://www.keycloak.org/server/configuration#_example_configuring_the_db_url_host_parameter)).
-- All `VaultWarden` configuration can be set (EX: `SMTP_*`)
+- All `Vaultwarden` configuration can be set (EX: `SMTP_*`)

 ## Cleanup

-Use `docker compose --profile vaultWarden down`.
+Use `docker compose --profile vaultwarden down`.

playwright/compose/warden/Dockerfile
@@ -1,6 +1,6 @@
 FROM playwright_oidc_vaultwarden_prebuilt AS prebuilt

-FROM node:18-bookworm AS build
+FROM node:22-bookworm AS build

 ARG REPO_URL
 ARG COMMIT_HASH

playwright/test.env
@@ -43,7 +43,7 @@ KEYCLOAK_ADMIN_PASSWORD=${KEYCLOAK_ADMIN}
 KC_HTTP_HOST=127.0.0.1
 KC_HTTP_PORT=8081

-# Script parameters (use Keycloak and VaultWarden config too)
+# Script parameters (use Keycloak and Vaultwarden config too)
 TEST_REALM=test
 DUMMY_REALM=dummy
 DUMMY_AUTHORITY=http://${KC_HTTP_HOST}:${KC_HTTP_PORT}/realms/${DUMMY_REALM}

rust-toolchain.toml
@@ -1,4 +1,4 @@
 [toolchain]
-channel = "1.88.0"
+channel = "1.89.0"
 components = [ "rustfmt", "clippy" ]
 profile = "minimal"

src/api/core/accounts.rs
@@ -342,11 +342,11 @@ async fn post_set_password(data: Json<SetPasswordData>, headers: Headers, mut co
     let mut user = headers.user;

     if user.private_key.is_some() {
-        err!("Account already intialized cannot set password")
+        err!("Account already initialized, cannot set password")
     }

-    // Check against the password hint setting here so if it fails, the user
-    // can retry without losing their invitation below.
+    // Check against the password hint setting here so if it fails,
+    // the user can retry without losing their invitation below.
     let password_hint = clean_password_hint(&data.master_password_hint);
     enforce_password_hint_setting(&password_hint)?;

src/api/core/ciphers.rs
@@ -78,6 +78,7 @@ pub fn routes() -> Vec<Route> {
         restore_cipher_put,
         restore_cipher_put_admin,
         restore_cipher_selected,
+        restore_cipher_selected_admin,
         delete_all,
         move_cipher_selected,
         move_cipher_selected_put,
@@ -318,7 +319,7 @@ async fn post_ciphers_create(
     // or otherwise), we can just ignore this field entirely.
     data.cipher.last_known_revision_date = None;

-    share_cipher_by_uuid(&cipher.uuid, data, &headers, &mut conn, &nt).await
+    share_cipher_by_uuid(&cipher.uuid, data, &headers, &mut conn, &nt, None).await
 }

 /// Called when creating a new user-owned cipher.
@@ -920,7 +921,7 @@ async fn post_cipher_share(
 ) -> JsonResult {
     let data: ShareCipherData = data.into_inner();

-    share_cipher_by_uuid(&cipher_id, data, &headers, &mut conn, &nt).await
+    share_cipher_by_uuid(&cipher_id, data, &headers, &mut conn, &nt, None).await
 }

 #[put("/ciphers/<cipher_id>/share", data = "<data>")]
@@ -933,7 +934,7 @@ async fn put_cipher_share(
 ) -> JsonResult {
     let data: ShareCipherData = data.into_inner();

-    share_cipher_by_uuid(&cipher_id, data, &headers, &mut conn, &nt).await
+    share_cipher_by_uuid(&cipher_id, data, &headers, &mut conn, &nt, None).await
 }

 #[derive(Deserialize)]
@@ -973,11 +974,16 @@ async fn put_cipher_share_selected(
         };

         match shared_cipher_data.cipher.id.take() {
-            Some(id) => share_cipher_by_uuid(&id, shared_cipher_data, &headers, &mut conn, &nt).await?,
+            Some(id) => {
+                share_cipher_by_uuid(&id, shared_cipher_data, &headers, &mut conn, &nt, Some(UpdateType::None)).await?
+            }
             None => err!("Request missing ids field"),
         };
     }

+    // Multi share actions do not send out a push for each cipher, we need to send a general sync here
+    nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &mut conn).await;
+
     Ok(())
 }

@@ -987,6 +993,7 @@ async fn share_cipher_by_uuid(
     headers: &Headers,
     conn: &mut DbConn,
     nt: &Notify<'_>,
+    override_ut: Option<UpdateType>,
 ) -> JsonResult {
     let mut cipher = match Cipher::find_by_uuid(cipher_id, conn).await {
         Some(cipher) => {
@@ -1018,7 +1025,10 @@ async fn share_cipher_by_uuid(
     };

     // When LastKnownRevisionDate is None, it is a new cipher, so send CipherCreate.
-    let ut = if data.cipher.last_known_revision_date.is_some() {
+    // If there is an override, like when handling multiple items, we want to prevent a push notification for every single item
+    let ut = if let Some(ut) = override_ut {
+        ut
+    } else if data.cipher.last_known_revision_date.is_some() {
         UpdateType::SyncCipherUpdate
     } else {
         UpdateType::SyncCipherCreate
@@ -1517,7 +1527,7 @@ async fn delete_cipher_selected_put_admin(
 #[put("/ciphers/<cipher_id>/restore")]
 async fn restore_cipher_put(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
-    _restore_cipher_by_uuid(&cipher_id, &headers, &mut conn, &nt).await
+    _restore_cipher_by_uuid(&cipher_id, &headers, false, &mut conn, &nt).await
 }

 #[put("/ciphers/<cipher_id>/restore-admin")]
@@ -1527,7 +1537,17 @@ async fn restore_cipher_put_admin(
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> JsonResult {
-    _restore_cipher_by_uuid(&cipher_id, &headers, &mut conn, &nt).await
+    _restore_cipher_by_uuid(&cipher_id, &headers, false, &mut conn, &nt).await
+}
+
+#[put("/ciphers/restore-admin", data = "<data>")]
+async fn restore_cipher_selected_admin(
+    data: Json<CipherIdsData>,
+    headers: Headers,
+    mut conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
+    _restore_multiple_ciphers(data, &headers, &mut conn, &nt).await
 }

 #[put("/ciphers/restore", data = "<data>")]
@@ -1555,35 +1575,47 @@ async fn move_cipher_selected(
     nt: Notify<'_>,
 ) -> EmptyResult {
     let data = data.into_inner();
-    let user_id = headers.user.uuid;
+    let user_id = &headers.user.uuid;

     if let Some(ref folder_id) = data.folder_id {
-        if Folder::find_by_uuid_and_user(folder_id, &user_id, &mut conn).await.is_none() {
+        if Folder::find_by_uuid_and_user(folder_id, user_id, &mut conn).await.is_none() {
             err!("Invalid folder", "Folder does not exist or belongs to another user");
         }
     }

-    for cipher_id in data.ids {
-        let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else {
-            err!("Cipher doesn't exist")
-        };
-
-        if !cipher.is_accessible_to_user(&user_id, &mut conn).await {
-            err!("Cipher is not accessible by user")
-        }
-
-        // Move cipher
-        cipher.move_to_folder(data.folder_id.clone(), &user_id, &mut conn).await?;
-
-        nt.send_cipher_update(
-            UpdateType::SyncCipherUpdate,
-            &cipher,
-            std::slice::from_ref(&user_id),
-            &headers.device,
-            None,
-            &mut conn,
-        )
-        .await;
+    let cipher_count = data.ids.len();
+    let mut single_cipher: Option<Cipher> = None;
+
+    // TODO: Convert this to use a single query (or at least less) to update all items
+    // Find all ciphers a user has access to, all others will be ignored
+    let accessible_ciphers = Cipher::find_by_user_and_ciphers(user_id, &data.ids, &mut conn).await;
+    let accessible_ciphers_count = accessible_ciphers.len();
+
+    for cipher in accessible_ciphers {
+        cipher.move_to_folder(data.folder_id.clone(), user_id, &mut conn).await?;
+        if cipher_count == 1 {
+            single_cipher = Some(cipher);
+        }
+    }
+
+    if let Some(cipher) = single_cipher {
+        nt.send_cipher_update(
+            UpdateType::SyncCipherUpdate,
+            &cipher,
+            std::slice::from_ref(user_id),
+            &headers.device,
+            None,
+            &mut conn,
+        )
+        .await;
+    } else {
+        // Multi move actions do not send out a push for each cipher, we need to send a general sync here
+        nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &mut conn).await;
+    }
+
+    if cipher_count != accessible_ciphers_count {
+        err!(format!(
+            "Not all ciphers are moved! {accessible_ciphers_count} of the selected {cipher_count} were moved."
+        ))
     }

     Ok(())
@@ -1764,6 +1796,7 @@ async fn _delete_multiple_ciphers(
 async fn _restore_cipher_by_uuid(
     cipher_id: &CipherId,
     headers: &Headers,
+    multi_restore: bool,
     conn: &mut DbConn,
     nt: &Notify<'_>,
 ) -> JsonResult {
@@ -1778,15 +1811,17 @@ async fn _restore_cipher_by_uuid(
     cipher.deleted_at = None;
     cipher.save(conn).await?;

-    nt.send_cipher_update(
-        UpdateType::SyncCipherUpdate,
-        &cipher,
-        &cipher.update_users_revision(conn).await,
-        &headers.device,
-        None,
-        conn,
-    )
-    .await;
+    if !multi_restore {
+        nt.send_cipher_update(
+            UpdateType::SyncCipherUpdate,
+            &cipher,
+            &cipher.update_users_revision(conn).await,
+            &headers.device,
+            None,
+            conn,
+        )
+        .await;
+    }

     if let Some(org_id) = &cipher.organization_uuid {
         log_event(
@@ -1814,12 +1849,15 @@ async fn _restore_multiple_ciphers(
     let mut ciphers: Vec<Value> = Vec::new();
     for cipher_id in data.ids {
-        match _restore_cipher_by_uuid(&cipher_id, headers, conn, nt).await {
+        match _restore_cipher_by_uuid(&cipher_id, headers, true, conn, nt).await {
             Ok(json) => ciphers.push(json.into_inner()),
             err => return err,
         }
     }

+    // Multi move actions do not send out a push for each cipher, we need to send a general sync here
+    nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, conn).await;
+
     Ok(Json(json!({
       "data": ciphers,
       "object": "list",

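The recurring pattern in these hunks — a bulk caller passes `Some(UpdateType::None)` to suppress the per-item push, then sends one `SyncCiphers` update at the end — can be sketched in isolation. A minimal, compilable sketch; `UpdateType`, `Notifier`, and the `existing` flag are hypothetical stand-ins for illustration, not Vaultwarden's actual types:

```rust
// Stand-in for the subset of update types used by the batching pattern.
#[derive(Clone, Copy, Debug, PartialEq)]
enum UpdateType {
    SyncCipherCreate,
    SyncCipherUpdate,
    SyncCiphers,
    None,
}

// Stand-in notifier; the real one pushes over WebSocket/mobile push.
struct Notifier;

impl Notifier {
    fn send_cipher_update(&self, ut: UpdateType, id: &str) {
        println!("per-item push: {ut:?} for {id}");
    }
    fn send_user_update(&self, ut: UpdateType) {
        println!("one general push: {ut:?}");
    }
}

// Mirrors the override logic in share_cipher_by_uuid: a caller-supplied
// override wins; otherwise pick create vs. update from prior state.
fn share_one(nt: &Notifier, id: &str, existing: bool, override_ut: Option<UpdateType>) {
    let ut = if let Some(ut) = override_ut {
        ut
    } else if existing {
        UpdateType::SyncCipherUpdate
    } else {
        UpdateType::SyncCipherCreate
    };
    if ut != UpdateType::None {
        nt.send_cipher_update(ut, id);
    }
}

fn main() {
    let nt = Notifier;
    // Single operations keep their targeted push.
    share_one(&nt, "cipher-0", false, None);
    share_one(&nt, "cipher-1", true, None);
    // Bulk: suppress N per-item pushes, then send one SyncCiphers.
    for id in ["cipher-1", "cipher-2", "cipher-3"] {
        share_one(&nt, id, true, Some(UpdateType::None));
    }
    nt.send_user_update(UpdateType::SyncCiphers);
}
```

The trade-off is one coarse "resync everything" notification instead of N fine-grained ones, which is why the single-item path above still sends the targeted update.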
src/api/core/organizations.rs
@@ -2310,7 +2310,7 @@ struct OrgImportData {
     users: Vec<OrgImportUserData>,
 }

-/// This function seems to be deprected
+/// This function seems to be deprecated
 /// It is only used with older directory connectors
 /// TODO: Cleanup Tech debt
 #[post("/organizations/<org_id>/import", data = "<data>")]

src/api/core/two_factor/email.rs
@@ -24,6 +24,7 @@ pub fn routes() -> Vec<Route> {
 #[derive(Deserialize)]
 #[serde(rename_all = "camelCase")]
 struct SendEmailLoginData {
+    #[serde(alias = "DeviceIdentifier")]
     device_identifier: DeviceId,

     #[allow(unused)]

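The added `#[serde(alias = "DeviceIdentifier")]` makes deserialization accept both the camelCase key produced by `rename_all` and the older PascalCase spelling. A self-contained sketch of that serde behavior, using a local struct and `serde_json` rather than the real `SendEmailLoginData`:

```rust
use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct LoginData {
    // Accepts "deviceIdentifier" (from rename_all) and "DeviceIdentifier" (legacy clients).
    #[serde(alias = "DeviceIdentifier")]
    device_identifier: String,
}

fn main() {
    let camel: LoginData = serde_json::from_str(r#"{"deviceIdentifier":"abc"}"#).unwrap();
    let pascal: LoginData = serde_json::from_str(r#"{"DeviceIdentifier":"abc"}"#).unwrap();
    assert_eq!(camel.device_identifier, pascal.device_identifier);
    println!("both spellings parse: {camel:?}");
}
```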
src/api/icons.rs
@@ -641,9 +641,9 @@ async fn stream_to_bytes_limit(res: Response, max_size: usize) -> Result<Bytes, 
     let mut buf = BytesMut::new();
     let mut size = 0;
     while let Some(chunk) = stream.next().await {
-        // It is possible that there might occure UnexpectedEof errors or others
+        // It is possible that there might occur UnexpectedEof errors or others
         // This is most of the time no issue, and if there is no chunked data anymore or at all parsing the HTML will not happen anyway.
-        // Therfore if chunk is an err, just break and continue with the data be have received.
+        // Therefore if chunk is an err, just break and continue with the data be have received.
         if chunk.is_err() {
             break;
         }

src/api/identity.rs
@@ -293,7 +293,7 @@ async fn _sso_login(
         }
     };

-    // We passed 2FA get full user informations
+    // We passed 2FA get full user information
     let auth_user = sso::redeem(&user_infos.state, conn).await?;

     if sso_user.is_none() {
@@ -1060,12 +1060,12 @@ async fn oidcsignin_redirect(
     wrapper: impl FnOnce(OIDCState) -> sso::OIDCCodeWrapper,
     conn: &DbConn,
 ) -> ApiResult<Redirect> {
-    let state = sso::deocde_state(base64_state)?;
+    let state = sso::decode_state(base64_state)?;
     let code = sso::encode_code_claims(wrapper(state.clone()));

     let nonce = match SsoNonce::find(&state, conn).await {
         Some(n) => n,
-        None => err!(format!("Failed to retrive redirect_uri with {state}")),
+        None => err!(format!("Failed to retrieve redirect_uri with {state}")),
     };

     let mut url = match url::Url::parse(&nonce.redirect_uri) {

src/api/web.rs
@@ -61,7 +61,7 @@ fn vaultwarden_css() -> Cached<Css<String>> {
         "mail_enabled": CONFIG.mail_enabled(),
         "sends_allowed": CONFIG.sends_allowed(),
         "signup_disabled": CONFIG.is_signup_disabled(),
-        "sso_disabled": !CONFIG.sso_enabled(),
+        "sso_enabled": CONFIG.sso_enabled(),
         "sso_only": CONFIG.sso_enabled() && CONFIG.sso_only(),
         "yubico_enabled": CONFIG._enable_yubico() && CONFIG.yubico_client_id().is_some() && CONFIG.yubico_secret_key().is_some(),
     });

src/auth.rs
@@ -1174,7 +1174,7 @@ impl AuthTokens {
         let access_claims = LoginJwtClaims::default(device, user, &sub, client_id);

-        let validity = if DeviceType::is_mobile(&device.atype) {
+        let validity = if device.is_mobile() {
             *MOBILE_REFRESH_VALIDITY
         } else {
             *DEFAULT_REFRESH_VALIDITY

src/config.rs
@@ -283,6 +283,9 @@ macro_rules! make_config {
                 "smtp_host",
                 "smtp_username",
                 "_smtp_img_src",
+                "sso_client_id",
+                "sso_authority",
+                "sso_callback_path",
             ];

             let cfg = {
@@ -715,7 +718,7 @@ make_config! {
     sso_master_password_policy: String, true, option;
     /// Use SSO only for auth not the session lifecycle |> Use default Vaultwarden session lifecycle (Idle refresh token valid for 30days)
     sso_auth_only_not_session: bool, true, def, false;
-    /// Client cache for discovery endpoint. |> Duration in seconds (0 or less to disable). More details: https://github.com/dani-garcia/vaultwarden/blob/sso-support/SSO.md#client-cache
+    /// Client cache for discovery endpoint. |> Duration in seconds (0 or less to disable). More details: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-SSO-support-using-OpenId-Connect#client-cache
     sso_client_cache_expiration: u64, true, def, 0;
     /// Log all tokens |> `LOG_LEVEL=debug` or `LOG_LEVEL=info,vaultwarden::sso=debug` is required
     sso_debug_tokens: bool, true, def, false;
@@ -1145,7 +1148,7 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
 fn validate_internal_sso_issuer_url(sso_authority: &String) -> Result<openidconnect::IssuerUrl, Error> {
     match openidconnect::IssuerUrl::new(sso_authority.clone()) {
-        Err(err) => err!(format!("Invalid sso_authority UR ({sso_authority}): {err}")),
+        Err(err) => err!(format!("Invalid sso_authority URL ({sso_authority}): {err}")),
         Ok(issuer_url) => Ok(issuer_url),
     }
 }

src/db/models/auth_request.rs
@@ -6,7 +6,7 @@ use macros::UuidFromParam;
 use serde_json::Value;

 db_object! {
-    #[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset, Deserialize, Serialize)]
+    #[derive(Identifiable, Queryable, Insertable, AsChangeset, Deserialize, Serialize)]
     #[diesel(table_name = auth_requests)]
     #[diesel(treat_none_as_null = true)]
     #[diesel(primary_key(uuid))]

src/db/models/cipher.rs
@@ -783,7 +783,12 @@ impl Cipher {
     // true, then the non-interesting ciphers will not be returned. As a
     // result, those ciphers will not appear in "My Vault" for the org
     // owner/admin, but they can still be accessed via the org vault view.
-    pub async fn find_by_user(user_uuid: &UserId, visible_only: bool, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_user(
+        user_uuid: &UserId,
+        visible_only: bool,
+        cipher_uuids: &Vec<CipherId>,
+        conn: &mut DbConn,
+    ) -> Vec<Self> {
         if CONFIG.org_groups_enabled() {
             db_run! {conn: {
                 let mut query = ciphers::table
@@ -821,7 +826,14 @@ impl Cipher {
                 if !visible_only {
                     query = query.or_filter(
                         users_organizations::atype.le(MembershipType::Admin as i32) // Org admin/owner
                     );
                 }

+                // Only filter for one specific cipher
+                if !cipher_uuids.is_empty() {
+                    query = query.filter(
+                        ciphers::uuid.eq_any(cipher_uuids)
+                    );
+                }
+
                 query
@@ -850,11 +862,18 @@ impl Cipher {
                 .or_filter(users_collections::user_uuid.eq(user_uuid)) // Access to collection
                 .into_boxed();

                 if !visible_only {
                     query = query.or_filter(
                         users_organizations::atype.le(MembershipType::Admin as i32) // Org admin/owner
                     );
                 }

+                // Only filter for one specific cipher
+                if !cipher_uuids.is_empty() {
+                    query = query.filter(
+                        ciphers::uuid.eq_any(cipher_uuids)
+                    );
+                }
+
                 query
                 .select(ciphers::all_columns)
@@ -866,7 +885,23 @@ impl Cipher {
     // Find all ciphers visible to the specified user.
     pub async fn find_by_user_visible(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
-        Self::find_by_user(user_uuid, true, conn).await
+        Self::find_by_user(user_uuid, true, &vec![], conn).await
     }

+    pub async fn find_by_user_and_ciphers(
+        user_uuid: &UserId,
+        cipher_uuids: &Vec<CipherId>,
+        conn: &mut DbConn,
+    ) -> Vec<Self> {
+        Self::find_by_user(user_uuid, true, cipher_uuids, conn).await
+    }
+
+    pub async fn find_by_user_and_cipher(
+        user_uuid: &UserId,
+        cipher_uuid: &CipherId,
+        conn: &mut DbConn,
+    ) -> Option<Self> {
+        Self::find_by_user(user_uuid, true, &vec![cipher_uuid.clone()], conn).await.pop()
+    }

     // Find all ciphers directly owned by the specified user.

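The new `cipher_uuids` parameter uses an empty list to mean "no filter", so existing callers keep their behavior while the bulk endpoints restrict the query with an `eq_any`-style clause. A sketch of the same convention on plain vectors (hypothetical local types, no Diesel involved):

```rust
#[derive(Clone, Debug)]
struct Cipher {
    uuid: String,
}

// Mirrors find_by_user's convention: an empty `uuids` slice means "all
// ciphers"; a non-empty one acts like Diesel's `ciphers::uuid.eq_any(...)`.
fn find_by_user(all: &[Cipher], uuids: &[String]) -> Vec<Cipher> {
    all.iter()
        .filter(|c| uuids.is_empty() || uuids.contains(&c.uuid))
        .cloned()
        .collect()
}

fn main() {
    let vault = vec![Cipher { uuid: "a".into() }, Cipher { uuid: "b".into() }];
    assert_eq!(find_by_user(&vault, &[]).len(), 2); // old, unfiltered behavior
    assert_eq!(find_by_user(&vault, &["b".into()]).len(), 1); // new filtered path
    println!("empty list = unfiltered, non-empty = eq_any-style filter");
}
```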
src/db/models/device.rs
@@ -70,6 +70,10 @@ impl Device {
     pub fn is_cli(&self) -> bool {
         matches!(DeviceType::from_i32(self.atype), DeviceType::WindowsCLI | DeviceType::MacOsCLI | DeviceType::LinuxCLI)
     }
+
+    pub fn is_mobile(&self) -> bool {
+        matches!(DeviceType::from_i32(self.atype), DeviceType::Android | DeviceType::Ios)
+    }
 }

 pub struct DeviceWithAuthRequest {
@@ -353,10 +357,6 @@ impl DeviceType {
             _ => DeviceType::UnknownBrowser,
         }
     }
-
-    pub fn is_mobile(value: &i32) -> bool {
-        *value == DeviceType::Android as i32 || *value == DeviceType::Ios as i32
-    }
 }

 #[derive(

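Moving `is_mobile` from a free function on a raw `i32` to a method on `Device` keeps the `DeviceType` decoding in one place and matches the existing `is_cli` style. A compilable sketch of the resulting shape, with local stand-in types and assumed discriminant values rather than the real models:

```rust
enum DeviceType {
    Android,
    Ios,
    UnknownBrowser,
}

impl DeviceType {
    // Assumed mapping for the sketch; the real table covers many more types.
    fn from_i32(value: i32) -> DeviceType {
        match value {
            0 => DeviceType::Android,
            1 => DeviceType::Ios,
            _ => DeviceType::UnknownBrowser,
        }
    }
}

struct Device {
    atype: i32,
}

impl Device {
    // Callers now write `device.is_mobile()` instead of passing the raw atype around.
    fn is_mobile(&self) -> bool {
        matches!(DeviceType::from_i32(self.atype), DeviceType::Android | DeviceType::Ios)
    }
}

fn main() {
    assert!(Device { atype: 0 }.is_mobile());
    assert!(!Device { atype: 7 }.is_mobile());
}
```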
src/db/models/group.rs
@@ -135,7 +135,7 @@ impl CollectionGroup {
         // If both read_only and hide_passwords are false, then manage should be true
         // You can't have an entry with read_only and manage, or hide_passwords and manage
         // Or an entry with everything to false
-        // For backwards compaibility and migration proposes we keep checking read_only and hide_password
+        // For backwards compatibility and migration proposes we keep checking read_only and hide_password
         json!({
             "id": self.groups_uuid,
             "readOnly": self.read_only,

src/sso.rs
@@ -151,7 +151,7 @@ fn decode_token_claims(token_name: &str, token: &str) -> ApiResult<BasicTokenCla
     }
 }

-pub fn deocde_state(base64_state: String) -> ApiResult<OIDCState> {
+pub fn decode_state(base64_state: String) -> ApiResult<OIDCState> {
     let state = match data_encoding::BASE64.decode(base64_state.as_bytes()) {
         Ok(vec) => match String::from_utf8(vec) {
             Ok(valid) => OIDCState(valid),
@@ -316,7 +316,7 @@ pub async fn exchange_code(wrapped_code: &str, conn: &mut DbConn) -> ApiResult<U
         user_name: user_name.clone(),
     };

-    debug!("Authentified user {authenticated_user:?}");
+    debug!("Authenticated user {authenticated_user:?}");

     AC_CACHE.insert(state.clone(), authenticated_user);
@@ -443,7 +443,7 @@ pub async fn exchange_refresh_token(
             err_silent!("Access token is close to expiration but we have no refresh token")
         }

-        Client::check_validaty(access_token.clone()).await?;
+        Client::check_validity(access_token.clone()).await?;

         let access_claims = auth::LoginJwtClaims::new(
             device,

src/sso_client.rs
@@ -203,7 +203,7 @@ impl Client {
         }
     }

-    pub async fn check_validaty(access_token: String) -> EmptyResult {
+    pub async fn check_validity(access_token: String) -> EmptyResult {
         let client = Client::cached().await?;
         match client.user_info(AccessToken::new(access_token)).await {
             Err(err) => {

src/static/templates/scss/vaultwarden.scss.hbs
@@ -21,21 +21,21 @@ a[href$="/settings/sponsored-families"] {
 }

 /* Hide the sso `Email` input field */
-{{#if sso_disabled}}
+{{#if (not sso_enabled)}}
 .vw-email-sso {
   @extend %vw-hide;
 }
 {{/if}}

 /* Hide the default/continue `Email` input field */
-{{#if (not sso_disabled)}}
+{{#if sso_enabled}}
 .vw-email-continue {
   @extend %vw-hide;
 }
 {{/if}}

 /* Hide the `Continue` button on the login page */
-{{#if (not sso_disabled)}}
+{{#if sso_enabled}}
 .vw-continue-login {
   @extend %vw-hide;
 }
@@ -43,7 +43,7 @@ a[href$="/settings/sponsored-families"] {
 /* Hide the `Enterprise Single Sign-On` button on the login page */
 {{#if (webver ">=2025.5.1")}}
-{{#if sso_disabled}}
+{{#if (not sso_enabled)}}
 .vw-sso-login {
   @extend %vw-hide;
 }
@@ -71,7 +71,7 @@ app-root ng-component > form > div:nth-child(1) > div > button[buttontype="secon
 /* Hide the or text followed by the two buttons hidden above */
 {{#if (webver ">=2025.5.1")}}
-{{#if (or sso_disabled sso_only)}}
+{{#if (or (not sso_enabled) sso_only)}}
 .vw-or-text {
   @extend %vw-hide;
 }
@@ -83,7 +83,7 @@ app-root ng-component > form > div:nth-child(1) > div:nth-child(3) > div:nth-chi
 {{/if}}

 /* Hide the `Other` button on the login page */
-{{#if (or sso_disabled sso_only)}}
+{{#if (or (not sso_enabled) sso_only)}}
 .vw-other-login {
   @extend %vw-hide;
 }
