
Merge branch 'main' into static-const

pull/5260/head
Daniel García 3 months ago
committed by GitHub
commit 126fc4e6dd
67 changed files (number of changed lines shown in parentheses):

  1. .dockerignore (1)
  2. .env.template (9)
  3. .github/workflows/build.yml (7)
  4. .github/workflows/hadolint.yml (2)
  5. .github/workflows/release.yml (12)
  6. Cargo.lock (461)
  7. Cargo.toml (39)
  8. docker/DockerSettings.yaml (10)
  9. docker/Dockerfile.alpine (21)
 10. docker/Dockerfile.debian (17)
 11. docker/Dockerfile.j2 (1)
 12. macros/Cargo.toml (13)
 13. macros/src/lib.rs (58)
 14. rust-toolchain.toml (2)
 15. src/api/admin.rs (123)
 16. src/api/core/accounts.rs (248)
 17. src/api/core/ciphers.rs (553)
 18. src/api/core/emergency_access.rs (229)
 19. src/api/core/events.rs (85)
 20. src/api/core/folders.rs (62)
 21. src/api/core/mod.rs (14)
 22. src/api/core/organizations.rs (1684)
 23. src/api/core/public.rs (106)
 24. src/api/core/sends.rs (121)
 25. src/api/core/two_factor/authenticator.rs (69)
 26. src/api/core/two_factor/duo.rs (19)
 27. src/api/core/two_factor/duo_oidc.rs (6)
 28. src/api/core/two_factor/email.rs (32)
 29. src/api/core/two_factor/mod.rs (30)
 30. src/api/core/two_factor/protected_actions.rs (6)
 31. src/api/core/two_factor/webauthn.rs (37)
 32. src/api/core/two_factor/yubikey.rs (4)
 33. src/api/identity.rs (87)
 34. src/api/notifications.rs (125)
 35. src/api/push.rs (75)
 36. src/api/web.rs (48)
 37. src/auth.rs (141)
 38. src/config.rs (56)
 39. src/crypto.rs (9)
 40. src/db/models/attachment.rs (52)
 41. src/db/models/auth_request.rs (52)
 42. src/db/models/cipher.rs (237)
 43. src/db/models/collection.rs (215)
 44. src/db/models/device.rs (101)
 45. src/db/models/emergency_access.rs (72)
 46. src/db/models/event.rs (45)
 47. src/db/models/favorite.rs (23)
 48. src/db/models/folder.rs (63)
 49. src/db/models/group.rs (151)
 50. src/db/models/mod.rs (30)
 51. src/db/models/org_policy.rs (80)
 52. src/db/models/organization.rs (476)
 53. src/db/models/send.rs (87)
 54. src/db/models/two_factor.rs (18)
 55. src/db/models/two_factor_incomplete.rs (35)
 56. src/db/models/user.rs (68)
 57. src/mail.rs (58)
 58. src/main.rs (2)
 59. src/static/scripts/admin.css (4)
 60. src/static/scripts/admin_diagnostics.js (212)
 61. src/static/scripts/admin_users.js (2)
 62. src/static/scripts/datatables.css (42)
 63. src/static/scripts/datatables.js (1372)
 64. src/static/templates/admin/diagnostics.hbs (23)
 65. src/static/templates/admin/users.hbs (2)
 66. src/static/templates/scss/vaultwarden.scss.hbs (55)
 67. src/util.rs (46)

.dockerignore (1)

@@ -5,6 +5,7 @@
 !.git
 !docker/healthcheck.sh
 !docker/start.sh
+!macros
 !migrations
 !src

.env.template (9)

@@ -350,6 +350,7 @@
 ## - "browser-fileless-import": Directly import credentials from other providers without a file.
 ## - "extension-refresh": Temporarily enable the new extension design until general availability (should be used with the beta Chrome extension)
 ## - "fido2-vault-credentials": Enable the use of FIDO2 security keys as second factor.
+## - "inline-menu-positioning-improvements": Enable the use of inline menu password generator and identity suggestions in the browser extension.
 ## - "ssh-key-vault-item": Enable the creation and use of SSH key vault items. (Needs clients >=2024.12.0)
 ## - "ssh-agent": Enable SSH agent support on Desktop. (Needs desktop >=2024.12.0)
 # EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials
@@ -410,6 +411,14 @@
 ## Multiple values must be separated with a whitespace.
 # ALLOWED_IFRAME_ANCESTORS=
+## Allowed connect-src (Know the risks!)
+## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/connect-src
+## Allows other domains to URLs which can be loaded using script interfaces like the Forwarded email alias feature
+## This adds the configured value to the 'Content-Security-Policy' headers 'connect-src' value.
+## Multiple values must be separated with a whitespace. And only HTTPS values are allowed.
+## Example: "https://my-addy-io.domain.tld https://my-simplelogin.domain.tld"
+# ALLOWED_CONNECT_SRC=""
 ## Number of seconds, on average, between login requests from the same IP address before rate limiting kicks in.
 # LOGIN_RATELIMIT_SECONDS=60
 ## Allow a burst of requests of up to this size, while maintaining the average indicated by `LOGIN_RATELIMIT_SECONDS`.

.github/workflows/build.yml (7)

@@ -75,7 +75,7 @@ jobs:
 # Only install the clippy and rustfmt components on the default rust-toolchain
 - name: "Install rust-toolchain version"
-uses: dtolnay/rust-toolchain@315e265cd78dad1e1dcf3a5074f6d6c47029d5aa # master @ Nov 18, 2024, 5:36 AM GMT+1
+uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203 # master @ Dec 14, 2024, 5:49 AM GMT+1
 if: ${{ matrix.channel == 'rust-toolchain' }}
 with:
 toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -85,7 +85,7 @@ jobs:
 # Install the any other channel to be used for which we do not execute clippy and rustfmt
 - name: "Install MSRV version"
-uses: dtolnay/rust-toolchain@315e265cd78dad1e1dcf3a5074f6d6c47029d5aa # master @ Nov 18, 2024, 5:36 AM GMT+1
+uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203 # master @ Dec 14, 2024, 5:49 AM GMT+1
 if: ${{ matrix.channel != 'rust-toolchain' }}
 with:
 toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -107,7 +107,8 @@ jobs:
 # End Show environment
 # Enable Rust Caching
-- uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5
+- name: Rust Caching
+  uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7
 with:
 # Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
 # Like changing the build host from Ubuntu 20.04 to 22.04 for example.

.github/workflows/hadolint.yml (2)

@@ -18,7 +18,7 @@ jobs:
 # Start Docker Buildx
 - name: Setup Docker Buildx
-uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1
+uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 # v3.8.0
 # https://github.com/moby/buildkit/issues/3969
 # Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
 with:

.github/workflows/release.yml (12)

@@ -69,7 +69,7 @@ jobs:
 # Start Docker Buildx
 - name: Setup Docker Buildx
-uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1
+uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 # v3.8.0
 # https://github.com/moby/buildkit/issues/3969
 # Also set max parallelism to 2, the default of 4 breaks GitHub Actions and causes OOMKills
 with:
@@ -165,7 +165,7 @@ jobs:
 echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"
 - name: Bake ${{ matrix.base_image }} containers
-uses: docker/bake-action@2e3d19baedb14545e5d41222653874f25d5b4dfb # v5.10.0
+uses: docker/bake-action@3fc70e1131fee40a422dd8dd0ff22014ae20a1f3 # v5.11.0
 env:
 BASE_TAGS: "${{ env.BASE_TAGS }}"
 SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
@@ -223,28 +223,28 @@ jobs:
 # Upload artifacts to Github Actions
 - name: "Upload amd64 artifact"
-uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
+uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b #v4.5.0
 if: ${{ matrix.base_image == 'alpine' }}
 with:
 name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
 path: vaultwarden-amd64
 - name: "Upload arm64 artifact"
-uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
+uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b #v4.5.0
 if: ${{ matrix.base_image == 'alpine' }}
 with:
 name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
 path: vaultwarden-arm64
 - name: "Upload armv7 artifact"
-uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
+uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b #v4.5.0
 if: ${{ matrix.base_image == 'alpine' }}
 with:
 name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
 path: vaultwarden-armv7
 - name: "Upload armv6 artifact"
-uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
+uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b #v4.5.0
 if: ${{ matrix.base_image == 'alpine' }}
 with:
 name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6

Cargo.lock (461)

File diff suppressed because it is too large

Cargo.toml (39)

@@ -1,9 +1,11 @@
+workspace = { members = ["macros"] }
 [package]
 name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2021"
-rust-version = "1.82.0"
+rust-version = "1.83.0"
 resolver = "2"
 repository = "https://github.com/dani-garcia/vaultwarden"
@@ -39,9 +41,11 @@ unstable = []
 syslog = "7.0.0"
 [dependencies]
+macros = { path = "./macros" }
 # Logging
 log = "0.4.22"
-fern = { version = "0.7.0", features = ["syslog-7", "reopen-1"] }
+fern = { version = "0.7.1", features = ["syslog-7", "reopen-1"] }
 tracing = { version = "0.1.41", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work
 # A `dotenv` implementation for Rust
@@ -53,7 +57,7 @@ once_cell = "1.20.2"
 # Numerical libraries
 num-traits = "0.2.19"
 num-derive = "0.4.2"
-bigdecimal = "0.4.6"
+bigdecimal = "0.4.7"
 # Web framework
 rocket = { version = "0.5.1", features = ["tls", "json"], default-features = false }
@@ -70,14 +74,17 @@ futures = "0.3.31"
 tokio = { version = "1.42.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
 # A generic serialization/deserialization framework
-serde = { version = "1.0.215", features = ["derive"] }
-serde_json = "1.0.133"
+serde = { version = "1.0.217", features = ["derive"] }
+serde_json = "1.0.135"
 # A safe, extensible ORM and Query builder
 diesel = { version = "2.2.6", features = ["chrono", "r2d2", "numeric"] }
 diesel_migrations = "2.2.0"
 diesel_logger = { version = "0.4.0", optional = true }
+derive_more = { version = "1.0.0", features = ["from", "into", "as_ref", "deref", "display"] }
+diesel-derive-newtype = "2.1.2"
 # Bundled/Static SQLite
 libsqlite3-sys = { version = "0.30.1", features = ["bundled"], optional = true }
@@ -89,7 +96,7 @@ ring = "0.17.8"
 uuid = { version = "1.11.0", features = ["v4"] }
 # Date and time libraries
-chrono = { version = "0.4.38", features = ["clock", "serde"], default-features = false }
+chrono = { version = "0.4.39", features = ["clock", "serde"], default-features = false }
 chrono-tz = "0.10.0"
 time = "0.3.37"
@@ -115,16 +122,16 @@ webauthn-rs = "0.3.2"
 url = "2.5.4"
 # Email libraries
-lettre = { version = "0.11.10", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
+lettre = { version = "0.11.11", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
 percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
 email_address = "0.2.9"
 # HTML Template library
-handlebars = { version = "6.2.0", features = ["dir_source"] }
+handlebars = { version = "6.3.0", features = ["dir_source"] }
 # HTTP client (Used for favicons, version check, DUO and HIBP API)
-reqwest = { version = "0.12.9", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
-hickory-resolver = "0.24.1"
+reqwest = { version = "0.12.12", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
+hickory-resolver = "0.24.2"
 # Favicon extraction libraries
 html5gum = "0.7.0"
@@ -147,15 +154,15 @@ pico-args = "0.5.0"
 # Macro ident concatenation
 paste = "1.0.15"
-governor = "0.7.0"
+governor = "0.8.0"
 # Check client versions for specific features.
-semver = "1.0.23"
+semver = "1.0.24"
 # Allow overriding the default memory allocator
 # Mainly used for the musl builds, since the default musl malloc is very slow
 mimalloc = { version = "0.1.43", features = ["secure"], default-features = false, optional = true }
-which = "7.0.0"
+which = "7.0.1"
 # Argon2 library with support for the PHC format
 argon2 = "0.5.3"
@@ -167,8 +174,6 @@ rpassword = "7.3.1"
 grass_compiler = { version = "0.13.4", default-features = false }
 [patch.crates-io]
-# Patch fern to support syslog v7
-fern = { git = "https://github.com/daboross/fern", rev = "3e775ccfafe7d24baee39826d38011981b2e55b5" }
 # Patch yubico to remove duplicate crates of older versions
 yubico = { git = "https://github.com/BlackDex/yubico-rs", rev = "00df14811f58155c0f02e3ab10f1570ed3e115c6" }
@@ -232,6 +237,10 @@ unused_import_braces = "deny"
 unused_lifetimes = "deny"
 unused_qualifications = "deny"
 variant_size_differences = "deny"
+# Allow the following lints since these cause issues with Rust v1.84.0 or newer
+# Building Vaultwarden with Rust v1.85.0 and edition 2024 also works without issues
+if_let_rescope = "allow"
+tail_expr_drop_order = "allow"
 # https://rust-lang.github.io/rust-clippy/stable/index.html
 [lints.clippy]
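Side note on the dependency additions above: together with the new `macros` workspace member, `derive_more` and `diesel-derive-newtype` are what make the typed-ID newtypes used throughout the rest of this diff (`UserId`, `OrganizationId`, `MembershipId`, `CipherId`, ...) practical. A minimal, hypothetical sketch of how such a newtype could be declared inside a crate that, like Vaultwarden, already depends on diesel, rocket, uuid and serde — the exact derive list in the real models may differ, and `UserId` here is only an illustration:

use derive_more::{AsRef, Deref, Display, From, Into};
use diesel_derive_newtype::DieselNewType;
use macros::UuidFromParam;
use serde::{Deserialize, Serialize};

// DieselNewType lets Diesel read/write the wrapper as its inner String column,
// derive_more supplies the usual conversions, and UuidFromParam (from the new
// macros crate) generates the Rocket FromParam impl shown later in this diff.
#[derive(Clone, Debug, PartialEq, Eq, Hash, AsRef, Deref, Display, From, Into, DieselNewType, Serialize, Deserialize, UuidFromParam)]
pub struct UserId(String);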

docker/DockerSettings.yaml (10)

@@ -1,11 +1,11 @@
 ---
-vault_version: "v2024.6.2c"
-vault_image_digest: "sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b"
-# Cross Compile Docker Helper Scripts v1.5.0
+vault_version: "v2025.1.0"
+vault_image_digest: "sha256:72d636334b4ad6fe9ba1d12e0cda562cd31772cf28772f6b2fe4121a537b72a8"
+# Cross Compile Docker Helper Scripts v1.6.1
 # We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
 # https://github.com/tonistiigi/xx | https://hub.docker.com/r/tonistiigi/xx/tags
-xx_image_digest: "sha256:1978e7a58a1777cb0ef0dde76bad60b7914b21da57cfa88047875e4f364297aa"
-rust_version: 1.83.0 # Rust version to be used
+xx_image_digest: "sha256:9c207bead753dda9430bdd15425c6518fc7a03d866103c516a2c6889188f5894"
+rust_version: 1.84.0 # Rust version to be used
 debian_version: bookworm # Debian release name to be used
 alpine_version: "3.21" # Alpine version to be used
 # For which platforms/architectures will we try to build images

docker/Dockerfile.alpine (21)

@@ -19,23 +19,23 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull docker.io/vaultwarden/web-vault:v2024.6.2c
-# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.6.2c
-# [docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b]
+# $ docker pull docker.io/vaultwarden/web-vault:v2025.1.0
+# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2025.1.0
+# [docker.io/vaultwarden/web-vault@sha256:72d636334b4ad6fe9ba1d12e0cda562cd31772cf28772f6b2fe4121a537b72a8]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b
-# [docker.io/vaultwarden/web-vault:v2024.6.2c]
+# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:72d636334b4ad6fe9ba1d12e0cda562cd31772cf28772f6b2fe4121a537b72a8
+# [docker.io/vaultwarden/web-vault:v2025.1.0]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b AS vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:72d636334b4ad6fe9ba1d12e0cda562cd31772cf28772f6b2fe4121a537b72a8 AS vault
 ########################## ALPINE BUILD IMAGES ##########################
 ## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
 ## And for Alpine we define all build images here, they will only be loaded when actually used
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.83.0 AS build_amd64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.83.0 AS build_arm64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.83.0 AS build_armv7
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.83.0 AS build_armv6
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.84.0 AS build_amd64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.84.0 AS build_arm64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.84.0 AS build_armv7
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.84.0 AS build_armv6
 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
@@ -76,6 +76,7 @@ RUN source /env-cargo && \
 # Copies over *only* your manifests and build files
 COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
+COPY ./macros ./macros
 ARG CARGO_PROFILE=release

docker/Dockerfile.debian (17)

@@ -19,24 +19,24 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull docker.io/vaultwarden/web-vault:v2024.6.2c
-# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.6.2c
-# [docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b]
+# $ docker pull docker.io/vaultwarden/web-vault:v2025.1.0
+# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2025.1.0
+# [docker.io/vaultwarden/web-vault@sha256:72d636334b4ad6fe9ba1d12e0cda562cd31772cf28772f6b2fe4121a537b72a8]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b
-# [docker.io/vaultwarden/web-vault:v2024.6.2c]
+# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:72d636334b4ad6fe9ba1d12e0cda562cd31772cf28772f6b2fe4121a537b72a8
+# [docker.io/vaultwarden/web-vault:v2025.1.0]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:409ab328ca931439cb916b388a4bb784bd44220717aaf74cf71620c23e34fc2b AS vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:72d636334b4ad6fe9ba1d12e0cda562cd31772cf28772f6b2fe4121a537b72a8 AS vault
 ########################## Cross Compile Docker Helper Scripts ##########################
 ## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
 ## And these bash scripts do not have any significant difference if at all
-FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:1978e7a58a1777cb0ef0dde76bad60b7914b21da57cfa88047875e4f364297aa AS xx
+FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:9c207bead753dda9430bdd15425c6518fc7a03d866103c516a2c6889188f5894 AS xx
 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
-FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.83.0-slim-bookworm AS build
+FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.84.0-slim-bookworm AS build
 COPY --from=xx / /
 ARG TARGETARCH
 ARG TARGETVARIANT
@@ -116,6 +116,7 @@ RUN source /env-cargo && \
 # Copies over *only* your manifests and build files
 COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
+COPY ./macros ./macros
 ARG CARGO_PROFILE=release

docker/Dockerfile.j2 (1)

@@ -143,6 +143,7 @@ RUN source /env-cargo && \
 # Copies over *only* your manifests and build files
 COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
+COPY ./macros ./macros
 ARG CARGO_PROFILE=release

macros/Cargo.toml (13)

@@ -0,0 +1,13 @@
+[package]
+name = "macros"
+version = "0.1.0"
+edition = "2021"
+
+[lib]
+name = "macros"
+path = "src/lib.rs"
+proc-macro = true
+
+[dependencies]
+quote = "1.0.38"
+syn = "2.0.94"

macros/src/lib.rs (58)

@@ -0,0 +1,58 @@
+extern crate proc_macro;
+
+use proc_macro::TokenStream;
+use quote::quote;
+
+#[proc_macro_derive(UuidFromParam)]
+pub fn derive_uuid_from_param(input: TokenStream) -> TokenStream {
+    let ast = syn::parse(input).unwrap();
+
+    impl_derive_uuid_macro(&ast)
+}
+
+fn impl_derive_uuid_macro(ast: &syn::DeriveInput) -> TokenStream {
+    let name = &ast.ident;
+    let gen = quote! {
+        #[automatically_derived]
+        impl<'r> rocket::request::FromParam<'r> for #name {
+            type Error = ();
+
+            #[inline(always)]
+            fn from_param(param: &'r str) -> Result<Self, Self::Error> {
+                if uuid::Uuid::parse_str(param).is_ok() {
+                    Ok(Self(param.to_string()))
+                } else {
+                    Err(())
+                }
+            }
+        }
+    };
+    gen.into()
+}
+
+#[proc_macro_derive(IdFromParam)]
+pub fn derive_id_from_param(input: TokenStream) -> TokenStream {
+    let ast = syn::parse(input).unwrap();
+
+    impl_derive_safestring_macro(&ast)
+}
+
+fn impl_derive_safestring_macro(ast: &syn::DeriveInput) -> TokenStream {
+    let name = &ast.ident;
+    let gen = quote! {
+        #[automatically_derived]
+        impl<'r> rocket::request::FromParam<'r> for #name {
+            type Error = ();
+
+            #[inline(always)]
+            fn from_param(param: &'r str) -> Result<Self, Self::Error> {
+                if param.chars().all(|c| matches!(c, 'a'..='z' | 'A'..='Z' | '0'..='9' | '-')) {
+                    Ok(Self(param.to_string()))
+                } else {
+                    Err(())
+                }
+            }
+        }
+    };
+    gen.into()
+}
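The two derives above differ only in validation: `UuidFromParam` accepts a path segment only if it parses as a UUID, while `IdFromParam` accepts any string restricted to `[a-zA-Z0-9-]`. Either way the payoff is that Rocket routes can bind the strongly typed IDs directly, as the route changes later in this diff do. A rough, hypothetical usage sketch (this `UserId` stands in for the newtypes defined in src/db/models, inside a crate that already depends on rocket and uuid):

use macros::UuidFromParam;

#[derive(UuidFromParam)]
pub struct UserId(String);

// A request such as GET /users/not-a-uuid fails the FromParam guard,
// so this handler is never entered and Rocket falls through to its
// error catchers instead.
#[rocket::get("/users/<user_id>")]
fn get_user(user_id: UserId) -> String {
    format!("requested user {}", user_id.0)
}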

rust-toolchain.toml (2)

@@ -1,4 +1,4 @@
 [toolchain]
-channel = "1.83.0"
+channel = "1.84.0"
 components = [ "rustfmt", "clippy" ]
 profile = "minimal"

src/api/admin.rs (123)

@@ -50,7 +50,7 @@ pub fn routes() -> Vec<Route> {
 disable_user,
 enable_user,
 remove_2fa,
-update_user_org_type,
+update_membership_type,
 update_revision_users,
 post_config,
 delete_config,
@@ -62,6 +62,7 @@ pub fn routes() -> Vec<Route> {
 diagnostics,
 get_diagnostics_config,
 resend_user_invite,
+get_diagnostics_http,
 ]
 }
@@ -279,8 +280,8 @@ struct InviteData {
 email: String,
 }
-async fn get_user_or_404(uuid: &str, conn: &mut DbConn) -> ApiResult<User> {
-if let Some(user) = User::find_by_uuid(uuid, conn).await {
+async fn get_user_or_404(user_id: &UserId, conn: &mut DbConn) -> ApiResult<User> {
+if let Some(user) = User::find_by_uuid(user_id, conn).await {
 Ok(user)
 } else {
 err_code!("User doesn't exist", Status::NotFound.code);
@@ -380,29 +381,29 @@ async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn)
 }
 }
-#[get("/users/<uuid>")]
-async fn get_user_json(uuid: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
-let u = get_user_or_404(uuid, &mut conn).await?;
+#[get("/users/<user_id>")]
+async fn get_user_json(user_id: UserId, _token: AdminToken, mut conn: DbConn) -> JsonResult {
+let u = get_user_or_404(&user_id, &mut conn).await?;
 let mut usr = u.to_json(&mut conn).await;
 usr["userEnabled"] = json!(u.enabled);
 usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
 Ok(Json(usr))
 }
-#[post("/users/<uuid>/delete")]
-async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
-let user = get_user_or_404(uuid, &mut conn).await?;
-// Get the user_org records before deleting the actual user
-let user_orgs = UserOrganization::find_any_state_by_user(uuid, &mut conn).await;
+#[post("/users/<user_id>/delete")]
+async fn delete_user(user_id: UserId, token: AdminToken, mut conn: DbConn) -> EmptyResult {
+let user = get_user_or_404(&user_id, &mut conn).await?;
+// Get the membership records before deleting the actual user
+let memberships = Membership::find_any_state_by_user(&user_id, &mut conn).await;
 let res = user.delete(&mut conn).await;
-for user_org in user_orgs {
+for membership in memberships {
 log_event(
 EventType::OrganizationUserRemoved as i32,
-&user_org.uuid,
-&user_org.org_uuid,
-ACTING_ADMIN_USER,
+&membership.uuid,
+&membership.org_uuid,
+&ACTING_ADMIN_USER.into(),
 14, // Use UnknownBrowser type
 &token.ip.ip,
 &mut conn,
@@ -413,9 +414,9 @@ async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyRe
 res
 }
-#[post("/users/<uuid>/deauth")]
-async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-let mut user = get_user_or_404(uuid, &mut conn).await?;
+#[post("/users/<user_id>/deauth")]
+async fn deauth_user(user_id: UserId, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+let mut user = get_user_or_404(&user_id, &mut conn).await?;
 nt.send_logout(&user, None).await;
@@ -434,9 +435,9 @@ async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notif
 user.save(&mut conn).await
 }
-#[post("/users/<uuid>/disable")]
-async fn disable_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-let mut user = get_user_or_404(uuid, &mut conn).await?;
+#[post("/users/<user_id>/disable")]
+async fn disable_user(user_id: UserId, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+let mut user = get_user_or_404(&user_id, &mut conn).await?;
 Device::delete_all_by_user(&user.uuid, &mut conn).await?;
 user.reset_security_stamp();
 user.enabled = false;
@@ -448,26 +449,26 @@ async fn disable_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Noti
 save_result
 }
-#[post("/users/<uuid>/enable")]
-async fn enable_user(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
-let mut user = get_user_or_404(uuid, &mut conn).await?;
+#[post("/users/<user_id>/enable")]
+async fn enable_user(user_id: UserId, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+let mut user = get_user_or_404(&user_id, &mut conn).await?;
 user.enabled = true;
 user.save(&mut conn).await
 }
-#[post("/users/<uuid>/remove-2fa")]
-async fn remove_2fa(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
-let mut user = get_user_or_404(uuid, &mut conn).await?;
+#[post("/users/<user_id>/remove-2fa")]
+async fn remove_2fa(user_id: UserId, token: AdminToken, mut conn: DbConn) -> EmptyResult {
+let mut user = get_user_or_404(&user_id, &mut conn).await?;
 TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
-two_factor::enforce_2fa_policy(&user, ACTING_ADMIN_USER, 14, &token.ip.ip, &mut conn).await?;
+two_factor::enforce_2fa_policy(&user, &ACTING_ADMIN_USER.into(), 14, &token.ip.ip, &mut conn).await?;
 user.totp_recover = None;
 user.save(&mut conn).await
 }
-#[post("/users/<uuid>/invite/resend")]
-async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
-if let Some(user) = User::find_by_uuid(uuid, &mut conn).await {
+#[post("/users/<user_id>/invite/resend")]
+async fn resend_user_invite(user_id: UserId, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+if let Some(user) = User::find_by_uuid(&user_id, &mut conn).await {
 //TODO: replace this with user.status check when it will be available (PR#3397)
 if !user.password_hash.is_empty() {
 err_code!("User already accepted invitation", Status::BadRequest.code);
@@ -484,42 +485,41 @@ async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) ->
 }
 #[derive(Debug, Deserialize)]
-struct UserOrgTypeData {
+struct MembershipTypeData {
 user_type: NumberOrString,
-user_uuid: String,
-org_uuid: String,
+user_uuid: UserId,
+org_uuid: OrganizationId,
 }
 #[post("/users/org_type", data = "<data>")]
-async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mut conn: DbConn) -> EmptyResult {
-let data: UserOrgTypeData = data.into_inner();
-let mut user_to_edit =
-match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &mut conn).await {
-Some(user) => user,
-None => err!("The specified user isn't member of the organization"),
-};
+async fn update_membership_type(data: Json<MembershipTypeData>, token: AdminToken, mut conn: DbConn) -> EmptyResult {
+let data: MembershipTypeData = data.into_inner();
+let Some(mut member_to_edit) = Membership::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &mut conn).await
+else {
+err!("The specified user isn't member of the organization")
+};
-let new_type = match UserOrgType::from_str(&data.user_type.into_string()) {
+let new_type = match MembershipType::from_str(&data.user_type.into_string()) {
 Some(new_type) => new_type as i32,
 None => err!("Invalid type"),
 };
-if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
+if member_to_edit.atype == MembershipType::Owner && new_type != MembershipType::Owner {
 // Removing owner permission, check that there is at least one other confirmed owner
-if UserOrganization::count_confirmed_by_org_and_type(&data.org_uuid, UserOrgType::Owner, &mut conn).await <= 1 {
+if Membership::count_confirmed_by_org_and_type(&data.org_uuid, MembershipType::Owner, &mut conn).await <= 1 {
 err!("Can't change the type of the last owner")
 }
 }
-// This check is also done at api::organizations::{accept_invite(), _confirm_invite, _activate_user(), edit_user()}, update_user_org_type
+// This check is also done at api::organizations::{accept_invite, _confirm_invite, _activate_member, edit_member}, update_membership_type
 // It returns different error messages per function.
-if new_type < UserOrgType::Admin {
-match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, &user_to_edit.org_uuid, true, &mut conn).await {
+if new_type < MembershipType::Admin {
+match OrgPolicy::is_user_allowed(&member_to_edit.user_uuid, &member_to_edit.org_uuid, true, &mut conn).await {
 Ok(_) => {}
 Err(OrgPolicyErr::TwoFactorMissing) => {
 if CONFIG.email_2fa_auto_fallback() {
-two_factor::email::find_and_activate_email_2fa(&user_to_edit.user_uuid, &mut conn).await?;
+two_factor::email::find_and_activate_email_2fa(&member_to_edit.user_uuid, &mut conn).await?;
 } else {
 err!("You cannot modify this user to this type because they have not setup 2FA");
 }
@@ -532,17 +532,17 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mu
 log_event(
 EventType::OrganizationUserUpdated as i32,
-&user_to_edit.uuid,
+&member_to_edit.uuid,
 &data.org_uuid,
-ACTING_ADMIN_USER,
+&ACTING_ADMIN_USER.into(),
 14, // Use UnknownBrowser type
 &token.ip.ip,
 &mut conn,
 )
 .await;
-user_to_edit.atype = new_type;
-user_to_edit.save(&mut conn).await
+member_to_edit.atype = new_type;
+member_to_edit.save(&mut conn).await
 }
 #[post("/users/update_revision")]
@@ -556,7 +556,7 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu
 let mut organizations_json = Vec::with_capacity(organizations.len());
 for o in organizations {
 let mut org = o.to_json();
-org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &mut conn).await);
+org["user_count"] = json!(Membership::count_by_org(&o.uuid, &mut conn).await);
 org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &mut conn).await);
 org["collection_count"] = json!(Collection::count_by_org(&o.uuid, &mut conn).await);
 org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
@@ -570,9 +570,9 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu
 Ok(Html(text))
 }
-#[post("/organizations/<uuid>/delete")]
-async fn delete_organization(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
-let org = Organization::find_by_uuid(uuid, &mut conn).await.map_res("Organization doesn't exist")?;
+#[post("/organizations/<org_id>/delete")]
+async fn delete_organization(org_id: OrganizationId, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+let org = Organization::find_by_uuid(&org_id, &mut conn).await.map_res("Organization doesn't exist")?;
 org.delete(&mut conn).await
 }
@@ -601,9 +601,8 @@ async fn get_json_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
 }
 async fn has_http_access() -> bool {
-let req = match make_http_request(Method::HEAD, "https://github.com/dani-garcia/vaultwarden") {
-Ok(r) => r,
-Err(_) => return false,
+let Ok(req) = make_http_request(Method::HEAD, "https://github.com/dani-garcia/vaultwarden") else {
+return false;
 };
 match req.send().await {
 Ok(r) => r.status().is_success(),
@@ -713,6 +712,7 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
 "ip_header_name": ip_header_name,
 "ip_header_config": &CONFIG.ip_header(),
 "uses_proxy": uses_proxy,
+"enable_websocket": &CONFIG.enable_websocket(),
 "db_type": *DB_TYPE,
 "db_version": get_sql_server_version(&mut conn).await,
 "admin_url": format!("{}/diagnostics", admin_url()),
@@ -734,6 +734,11 @@ fn get_diagnostics_config(_token: AdminToken) -> Json<Value> {
 Json(support_json)
 }
+#[get("/diagnostics/http?<code>")]
+fn get_diagnostics_http(code: u16, _token: AdminToken) -> EmptyResult {
+err_code!(format!("Testing error {code} response"), code);
+}
 #[post("/config", data = "<data>")]
 fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
 let data: ConfigBuilder = data.into_inner();
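One usage note on the `get_diagnostics_http` route added above: it does nothing but return an error response carrying the status code taken from the query string (via `err_code!`), which gives an administrator a quick way to see how a reverse proxy or custom error pages treat arbitrary HTTP errors. Assuming the default admin mount point, a request along the lines of `GET /admin/diagnostics/http?code=404` would be expected to come back as a 404.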

248
src/api/core/accounts.rs

@ -79,7 +79,7 @@ pub struct RegisterData {
name: Option<String>, name: Option<String>,
token: Option<String>, token: Option<String>,
#[allow(dead_code)] #[allow(dead_code)]
organization_user_id: Option<String>, organization_user_id: Option<MembershipId>,
} }
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
@ -106,15 +106,15 @@ fn enforce_password_hint_setting(password_hint: &Option<String>) -> EmptyResult
} }
Ok(()) Ok(())
} }
async fn is_email_2fa_required(org_user_uuid: Option<String>, conn: &mut DbConn) -> bool { async fn is_email_2fa_required(member_id: Option<MembershipId>, conn: &mut DbConn) -> bool {
if !CONFIG._enable_email_2fa() { if !CONFIG._enable_email_2fa() {
return false; return false;
} }
if CONFIG.email_2fa_enforce_on_verified_invite() { if CONFIG.email_2fa_enforce_on_verified_invite() {
return true; return true;
} }
if org_user_uuid.is_some() { if member_id.is_some() {
return OrgPolicy::is_enabled_for_member(&org_user_uuid.unwrap(), OrgPolicyType::TwoFactorAuthentication, conn) return OrgPolicy::is_enabled_for_member(&member_id.unwrap(), OrgPolicyType::TwoFactorAuthentication, conn)
.await; .await;
} }
false false
@ -161,9 +161,9 @@ pub async fn _register(data: Json<RegisterData>, mut conn: DbConn) -> JsonResult
err!("Registration email does not match invite email") err!("Registration email does not match invite email")
} }
} else if Invitation::take(&email, &mut conn).await { } else if Invitation::take(&email, &mut conn).await {
for user_org in UserOrganization::find_invited_by_user(&user.uuid, &mut conn).await.iter_mut() { for membership in Membership::find_invited_by_user(&user.uuid, &mut conn).await.iter_mut() {
user_org.status = UserOrgStatus::Accepted as i32; membership.status = MembershipStatus::Accepted as i32;
user_org.save(&mut conn).await?; membership.save(&mut conn).await?;
} }
user user
} else if CONFIG.is_signup_allowed(&email) } else if CONFIG.is_signup_allowed(&email)
@ -305,9 +305,9 @@ async fn put_avatar(data: Json<AvatarData>, headers: Headers, mut conn: DbConn)
Ok(Json(user.to_json(&mut conn).await)) Ok(Json(user.to_json(&mut conn).await))
} }
#[get("/users/<uuid>/public-key")] #[get("/users/<user_id>/public-key")]
async fn get_public_keys(uuid: &str, _headers: Headers, mut conn: DbConn) -> JsonResult { async fn get_public_keys(user_id: UserId, _headers: Headers, mut conn: DbConn) -> JsonResult {
let user = match User::find_by_uuid(uuid, &mut conn).await { let user = match User::find_by_uuid(&user_id, &mut conn).await {
Some(user) if user.public_key.is_some() => user, Some(user) if user.public_key.is_some() => user,
Some(_) => err_code!("User has no public_key", Status::NotFound.code), Some(_) => err_code!("User has no public_key", Status::NotFound.code),
None => err_code!("User doesn't exist", Status::NotFound.code), None => err_code!("User doesn't exist", Status::NotFound.code),
@ -366,7 +366,12 @@ async fn post_password(data: Json<ChangePassData>, headers: Headers, mut conn: D
&data.new_master_password_hash, &data.new_master_password_hash,
Some(data.key), Some(data.key),
true, true,
Some(vec![String::from("post_rotatekey"), String::from("get_contacts"), String::from("get_public_keys")]), Some(vec![
String::from("post_rotatekey"),
String::from("get_contacts"),
String::from("get_public_keys"),
String::from("get_api_webauthn"),
]),
); );
let save_result = user.save(&mut conn).await; let save_result = user.save(&mut conn).await;
@ -374,7 +379,7 @@ async fn post_password(data: Json<ChangePassData>, headers: Headers, mut conn: D
// Prevent logging out the client where the user requested this endpoint from. // Prevent logging out the client where the user requested this endpoint from.
// If you do logout the user it will causes issues at the client side. // If you do logout the user it will causes issues at the client side.
// Adding the device uuid will prevent this. // Adding the device uuid will prevent this.
nt.send_logout(&user, Some(headers.device.uuid)).await; nt.send_logout(&user, Some(headers.device.uuid.clone())).await;
save_result save_result
} }
@ -434,7 +439,7 @@ async fn post_kdf(data: Json<ChangeKdfData>, headers: Headers, mut conn: DbConn,
user.set_password(&data.new_master_password_hash, Some(data.key), true, None); user.set_password(&data.new_master_password_hash, Some(data.key), true, None);
let save_result = user.save(&mut conn).await; let save_result = user.save(&mut conn).await;
nt.send_logout(&user, Some(headers.device.uuid)).await; nt.send_logout(&user, Some(headers.device.uuid.clone())).await;
save_result save_result
} }
@ -445,21 +450,21 @@ struct UpdateFolderData {
// There is a bug in 2024.3.x which adds a `null` item. // There is a bug in 2024.3.x which adds a `null` item.
// To bypass this we allow a Option here, but skip it during the updates // To bypass this we allow a Option here, but skip it during the updates
// See: https://github.com/bitwarden/clients/issues/8453 // See: https://github.com/bitwarden/clients/issues/8453
id: Option<String>, id: Option<FolderId>,
name: String, name: String,
} }
#[derive(Deserialize)] #[derive(Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
struct UpdateEmergencyAccessData { struct UpdateEmergencyAccessData {
id: String, id: EmergencyAccessId,
key_encrypted: String, key_encrypted: String,
} }
#[derive(Deserialize)] #[derive(Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
struct UpdateResetPasswordData { struct UpdateResetPasswordData {
organization_id: String, organization_id: OrganizationId,
reset_password_key: String, reset_password_key: String,
} }
@ -484,48 +489,49 @@ fn validate_keydata(
existing_ciphers: &[Cipher], existing_ciphers: &[Cipher],
existing_folders: &[Folder], existing_folders: &[Folder],
existing_emergency_access: &[EmergencyAccess], existing_emergency_access: &[EmergencyAccess],
existing_user_orgs: &[UserOrganization], existing_memberships: &[Membership],
existing_sends: &[Send], existing_sends: &[Send],
) -> EmptyResult { ) -> EmptyResult {
// Check that we're correctly rotating all the user's ciphers // Check that we're correctly rotating all the user's ciphers
let existing_cipher_ids = existing_ciphers.iter().map(|c| c.uuid.as_str()).collect::<HashSet<_>>(); let existing_cipher_ids = existing_ciphers.iter().map(|c| &c.uuid).collect::<HashSet<&CipherId>>();
let provided_cipher_ids = data let provided_cipher_ids = data
.ciphers .ciphers
.iter() .iter()
.filter(|c| c.organization_id.is_none()) .filter(|c| c.organization_id.is_none())
.filter_map(|c| c.id.as_deref()) .filter_map(|c| c.id.as_ref())
.collect::<HashSet<_>>(); .collect::<HashSet<&CipherId>>();
if !provided_cipher_ids.is_superset(&existing_cipher_ids) { if !provided_cipher_ids.is_superset(&existing_cipher_ids) {
err!("All existing ciphers must be included in the rotation") err!("All existing ciphers must be included in the rotation")
} }
// Check that we're correctly rotating all the user's folders // Check that we're correctly rotating all the user's folders
let existing_folder_ids = existing_folders.iter().map(|f| f.uuid.as_str()).collect::<HashSet<_>>(); let existing_folder_ids = existing_folders.iter().map(|f| &f.uuid).collect::<HashSet<&FolderId>>();
let provided_folder_ids = data.folders.iter().filter_map(|f| f.id.as_deref()).collect::<HashSet<_>>(); let provided_folder_ids = data.folders.iter().filter_map(|f| f.id.as_ref()).collect::<HashSet<&FolderId>>();
if !provided_folder_ids.is_superset(&existing_folder_ids) { if !provided_folder_ids.is_superset(&existing_folder_ids) {
err!("All existing folders must be included in the rotation") err!("All existing folders must be included in the rotation")
} }
// Check that we're correctly rotating all the user's emergency access keys // Check that we're correctly rotating all the user's emergency access keys
let existing_emergency_access_ids = let existing_emergency_access_ids =
existing_emergency_access.iter().map(|ea| ea.uuid.as_str()).collect::<HashSet<_>>(); existing_emergency_access.iter().map(|ea| &ea.uuid).collect::<HashSet<&EmergencyAccessId>>();
let provided_emergency_access_ids = let provided_emergency_access_ids =
data.emergency_access_keys.iter().map(|ea| ea.id.as_str()).collect::<HashSet<_>>(); data.emergency_access_keys.iter().map(|ea| &ea.id).collect::<HashSet<&EmergencyAccessId>>();
if !provided_emergency_access_ids.is_superset(&existing_emergency_access_ids) { if !provided_emergency_access_ids.is_superset(&existing_emergency_access_ids) {
err!("All existing emergency access keys must be included in the rotation") err!("All existing emergency access keys must be included in the rotation")
} }
// Check that we're correctly rotating all the user's reset password keys // Check that we're correctly rotating all the user's reset password keys
let existing_reset_password_ids = existing_user_orgs.iter().map(|uo| uo.org_uuid.as_str()).collect::<HashSet<_>>(); let existing_reset_password_ids =
existing_memberships.iter().map(|m| &m.org_uuid).collect::<HashSet<&OrganizationId>>();
let provided_reset_password_ids = let provided_reset_password_ids =
data.reset_password_keys.iter().map(|rp| rp.organization_id.as_str()).collect::<HashSet<_>>(); data.reset_password_keys.iter().map(|rp| &rp.organization_id).collect::<HashSet<&OrganizationId>>();
if !provided_reset_password_ids.is_superset(&existing_reset_password_ids) { if !provided_reset_password_ids.is_superset(&existing_reset_password_ids) {
err!("All existing reset password keys must be included in the rotation") err!("All existing reset password keys must be included in the rotation")
} }
// Check that we're correctly rotating all the user's sends // Check that we're correctly rotating all the user's sends
let existing_send_ids = existing_sends.iter().map(|s| s.uuid.as_str()).collect::<HashSet<_>>(); let existing_send_ids = existing_sends.iter().map(|s| &s.uuid).collect::<HashSet<&SendId>>();
let provided_send_ids = data.sends.iter().filter_map(|s| s.id.as_deref()).collect::<HashSet<_>>(); let provided_send_ids = data.sends.iter().filter_map(|s| s.id.as_ref()).collect::<HashSet<&SendId>>();
if !provided_send_ids.is_superset(&existing_send_ids) { if !provided_send_ids.is_superset(&existing_send_ids) {
err!("All existing sends must be included in the rotation") err!("All existing sends must be included in the rotation")
} }
@ -548,24 +554,24 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks. // TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
Cipher::validate_cipher_data(&data.ciphers)?; Cipher::validate_cipher_data(&data.ciphers)?;
let user_uuid = &headers.user.uuid; let user_id = &headers.user.uuid;
// TODO: Ideally we'd do everything after this point in a single transaction. // TODO: Ideally we'd do everything after this point in a single transaction.
let mut existing_ciphers = Cipher::find_owned_by_user(user_uuid, &mut conn).await; let mut existing_ciphers = Cipher::find_owned_by_user(user_id, &mut conn).await;
let mut existing_folders = Folder::find_by_user(user_uuid, &mut conn).await; let mut existing_folders = Folder::find_by_user(user_id, &mut conn).await;
let mut existing_emergency_access = EmergencyAccess::find_all_by_grantor_uuid(user_uuid, &mut conn).await; let mut existing_emergency_access = EmergencyAccess::find_all_by_grantor_uuid(user_id, &mut conn).await;
let mut existing_user_orgs = UserOrganization::find_by_user(user_uuid, &mut conn).await; let mut existing_memberships = Membership::find_by_user(user_id, &mut conn).await;
// We only rotate the reset password key if it is set. // We only rotate the reset password key if it is set.
existing_user_orgs.retain(|uo| uo.reset_password_key.is_some()); existing_memberships.retain(|m| m.reset_password_key.is_some());
let mut existing_sends = Send::find_by_user(user_uuid, &mut conn).await; let mut existing_sends = Send::find_by_user(user_id, &mut conn).await;
validate_keydata( validate_keydata(
&data, &data,
&existing_ciphers, &existing_ciphers,
&existing_folders, &existing_folders,
&existing_emergency_access, &existing_emergency_access,
&existing_user_orgs, &existing_memberships,
&existing_sends, &existing_sends,
)?; )?;
@ -574,9 +580,8 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
// Skip `null` folder id entries. // Skip `null` folder id entries.
// See: https://github.com/bitwarden/clients/issues/8453 // See: https://github.com/bitwarden/clients/issues/8453
if let Some(folder_id) = folder_data.id { if let Some(folder_id) = folder_data.id {
let saved_folder = match existing_folders.iter_mut().find(|f| f.uuid == folder_id) { let Some(saved_folder) = existing_folders.iter_mut().find(|f| f.uuid == folder_id) else {
Some(folder) => folder, err!("Folder doesn't exist")
None => err!("Folder doesn't exist"),
}; };
saved_folder.name = folder_data.name; saved_folder.name = folder_data.name;
@ -586,11 +591,11 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
// Update emergency access data // Update emergency access data
for emergency_access_data in data.emergency_access_keys { for emergency_access_data in data.emergency_access_keys {
let saved_emergency_access = let Some(saved_emergency_access) =
match existing_emergency_access.iter_mut().find(|ea| ea.uuid == emergency_access_data.id) { existing_emergency_access.iter_mut().find(|ea| ea.uuid == emergency_access_data.id)
Some(emergency_access) => emergency_access, else {
None => err!("Emergency access doesn't exist or is not owned by the user"), err!("Emergency access doesn't exist or is not owned by the user")
}; };
saved_emergency_access.key_encrypted = Some(emergency_access_data.key_encrypted); saved_emergency_access.key_encrypted = Some(emergency_access_data.key_encrypted);
saved_emergency_access.save(&mut conn).await? saved_emergency_access.save(&mut conn).await?
@ -598,21 +603,20 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
// Update reset password data // Update reset password data
for reset_password_data in data.reset_password_keys { for reset_password_data in data.reset_password_keys {
let user_org = match existing_user_orgs.iter_mut().find(|uo| uo.org_uuid == reset_password_data.organization_id) let Some(membership) =
{ existing_memberships.iter_mut().find(|m| m.org_uuid == reset_password_data.organization_id)
Some(reset_password) => reset_password, else {
None => err!("Reset password doesn't exist"), err!("Reset password doesn't exist")
}; };
user_org.reset_password_key = Some(reset_password_data.reset_password_key); membership.reset_password_key = Some(reset_password_data.reset_password_key);
user_org.save(&mut conn).await? membership.save(&mut conn).await?
} }
// Update send data // Update send data
for send_data in data.sends { for send_data in data.sends {
let send = match existing_sends.iter_mut().find(|s| &s.uuid == send_data.id.as_ref().unwrap()) { let Some(send) = existing_sends.iter_mut().find(|s| &s.uuid == send_data.id.as_ref().unwrap()) else {
Some(send) => send, err!("Send doesn't exist")
None => err!("Send doesn't exist"),
}; };
update_send_from_data(send, send_data, &headers, &mut conn, &nt, UpdateType::None).await?; update_send_from_data(send, send_data, &headers, &mut conn, &nt, UpdateType::None).await?;
@ -623,9 +627,9 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
for cipher_data in data.ciphers { for cipher_data in data.ciphers {
if cipher_data.organization_id.is_none() { if cipher_data.organization_id.is_none() {
let saved_cipher = match existing_ciphers.iter_mut().find(|c| &c.uuid == cipher_data.id.as_ref().unwrap()) { let Some(saved_cipher) = existing_ciphers.iter_mut().find(|c| &c.uuid == cipher_data.id.as_ref().unwrap())
Some(cipher) => cipher, else {
None => err!("Cipher doesn't exist"), err!("Cipher doesn't exist")
}; };
// Prevent triggering cipher updates via WebSockets by settings UpdateType::None // Prevent triggering cipher updates via WebSockets by settings UpdateType::None
@ -647,7 +651,7 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
// Prevent logging out the client where the user requested this endpoint from. // Prevent logging out the client where the user requested this endpoint from.
// If you do logout the user it will causes issues at the client side. // If you do logout the user it will causes issues at the client side.
// Adding the device uuid will prevent this. // Adding the device uuid will prevent this.
nt.send_logout(&user, Some(headers.device.uuid)).await; nt.send_logout(&user, Some(headers.device.uuid.clone())).await;
save_result save_result
} }
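
The rotate-key handlers above repeatedly swap the old match-on-Option lookups for Rust's `let ... else` syntax. As an illustration only, here is a minimal, compilable sketch of that pattern; `Folder`, `rename_folder`, and the plain `Result<(), String>` error are simplified stand-ins for the real Vaultwarden model and its `err!` macro:

    struct Folder {
        uuid: String,
        name: String,
    }

    fn rename_folder(existing: &mut [Folder], folder_id: &str, new_name: String) -> Result<(), String> {
        // The diverging `else` replaces the old `match { Some(..) => .., None => err!(..) }` shape.
        let Some(folder) = existing.iter_mut().find(|f| f.uuid == folder_id) else {
            return Err("Folder doesn't exist".into());
        };
        folder.name = new_name;
        Ok(())
    }

    fn main() {
        let mut folders = vec![Folder { uuid: "abc".into(), name: "Old".into() }];
        assert!(rename_folder(&mut folders, "abc", "New".into()).is_ok());
        assert_eq!(folders[0].name, "New");
        assert!(rename_folder(&mut folders, "missing", "X".into()).is_err());
    }

The diverging `else` keeps the happy path unindented and makes it impossible to fall through with an unbound `folder`.
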
@ -794,7 +798,7 @@ async fn post_verify_email(headers: Headers) -> EmptyResult {
#[derive(Deserialize)] #[derive(Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
struct VerifyEmailTokenData { struct VerifyEmailTokenData {
user_id: String, user_id: UserId,
token: String, token: String,
} }
@ -802,16 +806,14 @@ struct VerifyEmailTokenData {
async fn post_verify_email_token(data: Json<VerifyEmailTokenData>, mut conn: DbConn) -> EmptyResult { async fn post_verify_email_token(data: Json<VerifyEmailTokenData>, mut conn: DbConn) -> EmptyResult {
let data: VerifyEmailTokenData = data.into_inner(); let data: VerifyEmailTokenData = data.into_inner();
let mut user = match User::find_by_uuid(&data.user_id, &mut conn).await { let Some(mut user) = User::find_by_uuid(&data.user_id, &mut conn).await else {
Some(user) => user, err!("User doesn't exist")
None => err!("User doesn't exist"),
}; };
let claims = match decode_verify_email(&data.token) { let Ok(claims) = decode_verify_email(&data.token) else {
Ok(claims) => claims, err!("Invalid claim")
Err(_) => err!("Invalid claim"),
}; };
if claims.sub != user.uuid { if claims.sub != *user.uuid {
err!("Invalid claim"); err!("Invalid claim");
} }
user.verified_at = Some(Utc::now().naive_utc()); user.verified_at = Some(Utc::now().naive_utc());
@ -853,7 +855,7 @@ async fn post_delete_recover(data: Json<DeleteRecoverData>, mut conn: DbConn) ->
#[derive(Deserialize)] #[derive(Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
struct DeleteRecoverTokenData { struct DeleteRecoverTokenData {
user_id: String, user_id: UserId,
token: String, token: String,
} }
@ -861,16 +863,15 @@ struct DeleteRecoverTokenData {
async fn post_delete_recover_token(data: Json<DeleteRecoverTokenData>, mut conn: DbConn) -> EmptyResult { async fn post_delete_recover_token(data: Json<DeleteRecoverTokenData>, mut conn: DbConn) -> EmptyResult {
let data: DeleteRecoverTokenData = data.into_inner(); let data: DeleteRecoverTokenData = data.into_inner();
let user = match User::find_by_uuid(&data.user_id, &mut conn).await { let Ok(claims) = decode_delete(&data.token) else {
Some(user) => user, err!("Invalid claim")
None => err!("User doesn't exist"),
}; };
let claims = match decode_delete(&data.token) { let Some(user) = User::find_by_uuid(&data.user_id, &mut conn).await else {
Ok(claims) => claims, err!("User doesn't exist")
Err(_) => err!("Invalid claim"),
}; };
if claims.sub != user.uuid {
if claims.sub != *user.uuid {
err!("Invalid claim"); err!("Invalid claim");
} }
user.delete(&mut conn).await user.delete(&mut conn).await
@ -1032,7 +1033,7 @@ async fn get_known_device(device: KnownDevice, mut conn: DbConn) -> JsonResult {
struct KnownDevice { struct KnownDevice {
email: String, email: String,
uuid: String, uuid: DeviceId,
} }
#[rocket::async_trait] #[rocket::async_trait]
@ -1041,11 +1042,8 @@ impl<'r> FromRequest<'r> for KnownDevice {
async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> { async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let email = if let Some(email_b64) = req.headers().get_one("X-Request-Email") { let email = if let Some(email_b64) = req.headers().get_one("X-Request-Email") {
let email_bytes = match data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()) { let Ok(email_bytes) = data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()) else {
Ok(bytes) => bytes, return Outcome::Error((Status::BadRequest, "X-Request-Email value failed to decode as base64url"));
Err(_) => {
return Outcome::Error((Status::BadRequest, "X-Request-Email value failed to decode as base64url"));
}
}; };
match String::from_utf8(email_bytes) { match String::from_utf8(email_bytes) {
Ok(email) => email, Ok(email) => email,
@ -1058,7 +1056,7 @@ impl<'r> FromRequest<'r> for KnownDevice {
}; };
let uuid = if let Some(uuid) = req.headers().get_one("X-Device-Identifier") { let uuid = if let Some(uuid) = req.headers().get_one("X-Device-Identifier") {
uuid.to_string() uuid.to_string().into()
} else { } else {
return Outcome::Error((Status::BadRequest, "X-Device-Identifier value is required")); return Outcome::Error((Status::BadRequest, "X-Device-Identifier value is required"));
}; };
@ -1076,26 +1074,31 @@ struct PushToken {
push_token: String, push_token: String,
} }
#[post("/devices/identifier/<uuid>/token", data = "<data>")] #[post("/devices/identifier/<device_id>/token", data = "<data>")]
async fn post_device_token(uuid: &str, data: Json<PushToken>, headers: Headers, conn: DbConn) -> EmptyResult { async fn post_device_token(device_id: DeviceId, data: Json<PushToken>, headers: Headers, conn: DbConn) -> EmptyResult {
put_device_token(uuid, data, headers, conn).await put_device_token(device_id, data, headers, conn).await
} }
#[put("/devices/identifier/<uuid>/token", data = "<data>")] #[put("/devices/identifier/<device_id>/token", data = "<data>")]
async fn put_device_token(uuid: &str, data: Json<PushToken>, headers: Headers, mut conn: DbConn) -> EmptyResult { async fn put_device_token(
device_id: DeviceId,
data: Json<PushToken>,
headers: Headers,
mut conn: DbConn,
) -> EmptyResult {
let data = data.into_inner(); let data = data.into_inner();
let token = data.push_token; let token = data.push_token;
let mut device = match Device::find_by_uuid_and_user(&headers.device.uuid, &headers.user.uuid, &mut conn).await { let Some(mut device) = Device::find_by_uuid_and_user(&headers.device.uuid, &headers.user.uuid, &mut conn).await
Some(device) => device, else {
None => err!(format!("Error: device {uuid} should be present before a token can be assigned")), err!(format!("Error: device {device_id} should be present before a token can be assigned"))
}; };
// if the device already has been registered // if the device already has been registered
if device.is_registered() { if device.is_registered() {
// check if the new token is the same as the registered token // check if the new token is the same as the registered token
if device.push_token.is_some() && device.push_token.unwrap() == token.clone() { if device.push_token.is_some() && device.push_token.unwrap() == token.clone() {
debug!("Device {} is already registered and token is the same", uuid); debug!("Device {} is already registered and token is the same", device_id);
return Ok(()); return Ok(());
} else { } else {
// Try to unregister already registered device // Try to unregister already registered device
@ -1114,8 +1117,8 @@ async fn put_device_token(uuid: &str, data: Json<PushToken>, headers: Headers, m
Ok(()) Ok(())
} }
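
The hunk above keeps the existing push-token behaviour: if the device is already registered with the same token nothing happens, otherwise the old registration is dropped before the new token is stored. A rough, self-contained sketch of that control flow, with a trimmed `Device` stand-in and no real push-relay calls:

    struct Device {
        push_token: Option<String>,
    }

    impl Device {
        fn is_registered(&self) -> bool {
            self.push_token.is_some()
        }
    }

    fn update_push_token(device: &mut Device, new_token: String) {
        if device.is_registered() {
            if device.push_token.as_deref() == Some(new_token.as_str()) {
                // Already registered with the same token: nothing to do.
                return;
            }
            // Different token: drop the old registration before saving the new one.
            // The real handler also unregisters the device with the push relay here.
            device.push_token = None;
        }
        device.push_token = Some(new_token);
    }

    fn main() {
        let mut device = Device { push_token: Some("old".into()) };
        update_push_token(&mut device, "new".into());
        assert_eq!(device.push_token.as_deref(), Some("new"));
    }
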
#[put("/devices/identifier/<uuid>/clear-token")] #[put("/devices/identifier/<device_id>/clear-token")]
async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult { async fn put_clear_device_token(device_id: DeviceId, mut conn: DbConn) -> EmptyResult {
// This only clears push token // This only clears push token
// https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109 // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
// https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37 // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
@ -1124,8 +1127,8 @@ async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult {
return Ok(()); return Ok(());
} }
if let Some(device) = Device::find_by_uuid(uuid, &mut conn).await { if let Some(device) = Device::find_by_uuid(&device_id, &mut conn).await {
Device::clear_push_token_by_uuid(uuid, &mut conn).await?; Device::clear_push_token_by_uuid(&device_id, &mut conn).await?;
unregister_push_device(device.push_uuid).await?; unregister_push_device(device.push_uuid).await?;
} }
@ -1133,16 +1136,16 @@ async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult {
} }
// On upstream server, both PUT and POST are declared. Implementing the POST method in case it would be useful somewhere // On upstream server, both PUT and POST are declared. Implementing the POST method in case it would be useful somewhere
#[post("/devices/identifier/<uuid>/clear-token")] #[post("/devices/identifier/<device_id>/clear-token")]
async fn post_clear_device_token(uuid: &str, conn: DbConn) -> EmptyResult { async fn post_clear_device_token(device_id: DeviceId, conn: DbConn) -> EmptyResult {
put_clear_device_token(uuid, conn).await put_clear_device_token(device_id, conn).await
} }
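
Several of these routes now accept a `DeviceId` instead of a raw `&str`. The project generates such wrappers with a small newtype helper; the hand-rolled sketch below is only an illustration of the idea, and the `From`/`Display` impls here are assumptions rather than the generated code:

    use std::fmt;

    // Simplified stand-in for a generated typed-ID wrapper.
    #[derive(Clone, Debug, PartialEq, Eq, Hash)]
    struct DeviceId(String);

    impl From<String> for DeviceId {
        fn from(raw: String) -> Self {
            DeviceId(raw)
        }
    }

    impl fmt::Display for DeviceId {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            self.0.fmt(f)
        }
    }

    // A function that takes `&DeviceId` can no longer be called with an arbitrary
    // string, so device and user identifiers cannot be mixed up by accident.
    fn clear_push_token(device_id: &DeviceId) {
        println!("clearing push token for device {device_id}");
    }

    fn main() {
        let id: DeviceId = "8f9b7a1c".to_string().into();
        clear_push_token(&id);
    }

The real wrappers additionally need the Rocket and serde trait impls that the route attributes rely on; those are omitted here.
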
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
struct AuthRequestRequest { struct AuthRequestRequest {
access_code: String, access_code: String,
device_identifier: String, device_identifier: DeviceId,
email: String, email: String,
public_key: String, public_key: String,
// Not used for now // Not used for now
@ -1159,9 +1162,8 @@ async fn post_auth_request(
) -> JsonResult { ) -> JsonResult {
let data = data.into_inner(); let data = data.into_inner();
let user = match User::find_by_mail(&data.email, &mut conn).await { let Some(user) = User::find_by_mail(&data.email, &mut conn).await else {
Some(user) => user, err!("AuthRequest doesn't exist", "User not found")
None => err!("AuthRequest doesn't exist", "User not found"),
}; };
// Validate device uuid and type // Validate device uuid and type
@ -1197,21 +1199,17 @@ async fn post_auth_request(
}))) })))
} }
#[get("/auth-requests/<uuid>")] #[get("/auth-requests/<auth_request_id>")]
async fn get_auth_request(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult { async fn get_auth_request(auth_request_id: AuthRequestId, headers: Headers, mut conn: DbConn) -> JsonResult {
let auth_request = match AuthRequest::find_by_uuid(uuid, &mut conn).await { let Some(auth_request) = AuthRequest::find_by_uuid_and_user(&auth_request_id, &headers.user.uuid, &mut conn).await
Some(auth_request) => auth_request, else {
None => err!("AuthRequest doesn't exist", "Record not found"), err!("AuthRequest doesn't exist", "Record not found or user uuid does not match")
}; };
if headers.user.uuid != auth_request.user_uuid {
err!("AuthRequest doesn't exist", "User uuid's do not match")
}
let response_date_utc = auth_request.response_date.map(|response_date| format_date(&response_date)); let response_date_utc = auth_request.response_date.map(|response_date| format_date(&response_date));
Ok(Json(json!({ Ok(Json(json!({
"id": uuid, "id": &auth_request_id,
"publicKey": auth_request.public_key, "publicKey": auth_request.public_key,
"requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(), "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
"requestIpAddress": auth_request.request_ip, "requestIpAddress": auth_request.request_ip,
@ -1228,15 +1226,15 @@ async fn get_auth_request(uuid: &str, headers: Headers, mut conn: DbConn) -> Jso
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
struct AuthResponseRequest { struct AuthResponseRequest {
device_identifier: String, device_identifier: DeviceId,
key: String, key: String,
master_password_hash: Option<String>, master_password_hash: Option<String>,
request_approved: bool, request_approved: bool,
} }
#[put("/auth-requests/<uuid>", data = "<data>")] #[put("/auth-requests/<auth_request_id>", data = "<data>")]
async fn put_auth_request( async fn put_auth_request(
uuid: &str, auth_request_id: AuthRequestId,
data: Json<AuthResponseRequest>, data: Json<AuthResponseRequest>,
headers: Headers, headers: Headers,
mut conn: DbConn, mut conn: DbConn,
@ -1244,15 +1242,12 @@ async fn put_auth_request(
nt: Notify<'_>, nt: Notify<'_>,
) -> JsonResult { ) -> JsonResult {
let data = data.into_inner(); let data = data.into_inner();
let mut auth_request: AuthRequest = match AuthRequest::find_by_uuid(uuid, &mut conn).await { let Some(mut auth_request) =
Some(auth_request) => auth_request, AuthRequest::find_by_uuid_and_user(&auth_request_id, &headers.user.uuid, &mut conn).await
None => err!("AuthRequest doesn't exist", "Record not found"), else {
err!("AuthRequest doesn't exist", "Record not found or user uuid does not match")
}; };
if headers.user.uuid != auth_request.user_uuid {
err!("AuthRequest doesn't exist", "User uuid's do not match")
}
if auth_request.approved.is_some() { if auth_request.approved.is_some() {
err!("An authentication request with the same device already exists") err!("An authentication request with the same device already exists")
} }
@ -1276,7 +1271,7 @@ async fn put_auth_request(
} }
Ok(Json(json!({ Ok(Json(json!({
"id": uuid, "id": &auth_request_id,
"publicKey": auth_request.public_key, "publicKey": auth_request.public_key,
"requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(), "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
"requestIpAddress": auth_request.request_ip, "requestIpAddress": auth_request.request_ip,
@ -1290,16 +1285,15 @@ async fn put_auth_request(
}))) })))
} }
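
Both auth-request handlers above fold the separate "does this record belong to the caller?" check into the lookup itself. A small in-memory sketch of that shape, keeping in mind that the real `find_by_uuid_and_user` is a Diesel query and `AuthRequest` here is a trimmed stand-in:

    struct AuthRequest {
        uuid: String,
        user_uuid: String,
    }

    // Ownership is part of the lookup, so a record that belongs to another user
    // is indistinguishable from a missing record.
    fn find_by_uuid_and_user<'a>(
        requests: &'a [AuthRequest],
        uuid: &str,
        user_uuid: &str,
    ) -> Option<&'a AuthRequest> {
        requests.iter().find(|r| r.uuid == uuid && r.user_uuid == user_uuid)
    }

    fn main() {
        let requests = vec![AuthRequest { uuid: "r1".into(), user_uuid: "u1".into() }];
        assert!(find_by_uuid_and_user(&requests, "r1", "u1").is_some());
        // Same record, wrong user: treated exactly like "not found".
        assert!(find_by_uuid_and_user(&requests, "r1", "u2").is_none());
    }

Collapsing the two checks is also why the error text now covers both cases in one message.
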
#[get("/auth-requests/<uuid>/response?<code>")] #[get("/auth-requests/<auth_request_id>/response?<code>")]
async fn get_auth_request_response( async fn get_auth_request_response(
uuid: &str, auth_request_id: AuthRequestId,
code: &str, code: &str,
client_headers: ClientHeaders, client_headers: ClientHeaders,
mut conn: DbConn, mut conn: DbConn,
) -> JsonResult { ) -> JsonResult {
let auth_request = match AuthRequest::find_by_uuid(uuid, &mut conn).await { let Some(auth_request) = AuthRequest::find_by_uuid(&auth_request_id, &mut conn).await else {
Some(auth_request) => auth_request, err!("AuthRequest doesn't exist", "User not found")
None => err!("AuthRequest doesn't exist", "User not found"),
}; };
if auth_request.device_type != client_headers.device_type if auth_request.device_type != client_headers.device_type
@ -1312,7 +1306,7 @@ async fn get_auth_request_response(
let response_date_utc = auth_request.response_date.map(|response_date| format_date(&response_date)); let response_date_utc = auth_request.response_date.map(|response_date| format_date(&response_date));
Ok(Json(json!({ Ok(Json(json!({
"id": uuid, "id": &auth_request_id,
"publicKey": auth_request.public_key, "publicKey": auth_request.public_key,
"requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(), "requestDeviceType": DeviceType::from_i32(auth_request.device_type).to_string(),
"requestIpAddress": auth_request.request_ip, "requestIpAddress": auth_request.request_ip,

553 src/api/core/ciphers.rs (file diff suppressed because it is too large)

229 src/api/core/emergency_access.rs

@ -93,10 +93,10 @@ async fn get_grantees(headers: Headers, mut conn: DbConn) -> Json<Value> {
} }
#[get("/emergency-access/<emer_id>")] #[get("/emergency-access/<emer_id>")]
async fn get_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { async fn get_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_enabled()?; check_emergency_access_enabled()?;
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await { match EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await {
Some(emergency_access) => Ok(Json( Some(emergency_access) => Ok(Json(
emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but does not!"), emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but does not!"),
)), )),
@ -118,7 +118,7 @@ struct EmergencyAccessUpdateData {
#[put("/emergency-access/<emer_id>", data = "<data>")] #[put("/emergency-access/<emer_id>", data = "<data>")]
async fn put_emergency_access( async fn put_emergency_access(
emer_id: &str, emer_id: EmergencyAccessId,
data: Json<EmergencyAccessUpdateData>, data: Json<EmergencyAccessUpdateData>,
headers: Headers, headers: Headers,
conn: DbConn, conn: DbConn,
@ -128,7 +128,7 @@ async fn put_emergency_access(
#[post("/emergency-access/<emer_id>", data = "<data>")] #[post("/emergency-access/<emer_id>", data = "<data>")]
async fn post_emergency_access( async fn post_emergency_access(
emer_id: &str, emer_id: EmergencyAccessId,
data: Json<EmergencyAccessUpdateData>, data: Json<EmergencyAccessUpdateData>,
headers: Headers, headers: Headers,
mut conn: DbConn, mut conn: DbConn,
@ -137,11 +137,11 @@ async fn post_emergency_access(
let data: EmergencyAccessUpdateData = data.into_inner(); let data: EmergencyAccessUpdateData = data.into_inner();
let mut emergency_access = let Some(mut emergency_access) =
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await { EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await
Some(emergency_access) => emergency_access, else {
None => err!("Emergency access not valid."), err!("Emergency access not valid.")
}; };
let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) { let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) {
Some(new_type) => new_type as i32, Some(new_type) => new_type as i32,
@ -163,12 +163,12 @@ async fn post_emergency_access(
// region delete // region delete
#[delete("/emergency-access/<emer_id>")] #[delete("/emergency-access/<emer_id>")]
async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult { async fn delete_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> EmptyResult {
check_emergency_access_enabled()?; check_emergency_access_enabled()?;
let emergency_access = match ( let emergency_access = match (
EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await, EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await,
EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &headers.user.uuid, &mut conn).await, EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &headers.user.uuid, &mut conn).await,
) { ) {
(Some(grantor_emer), None) => { (Some(grantor_emer), None) => {
info!("Grantor deleted emergency access {emer_id}"); info!("Grantor deleted emergency access {emer_id}");
@ -186,7 +186,7 @@ async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
} }
#[post("/emergency-access/<emer_id>/delete")] #[post("/emergency-access/<emer_id>/delete")]
async fn post_delete_emergency_access(emer_id: &str, headers: Headers, conn: DbConn) -> EmptyResult { async fn post_delete_emergency_access(emer_id: EmergencyAccessId, headers: Headers, conn: DbConn) -> EmptyResult {
delete_emergency_access(emer_id, headers, conn).await delete_emergency_access(emer_id, headers, conn).await
} }
@ -266,8 +266,8 @@ async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mu
if CONFIG.mail_enabled() { if CONFIG.mail_enabled() {
mail::send_emergency_access_invite( mail::send_emergency_access_invite(
&new_emergency_access.email.expect("Grantee email does not exists"), &new_emergency_access.email.expect("Grantee email does not exists"),
&grantee_user.uuid, grantee_user.uuid,
&new_emergency_access.uuid, new_emergency_access.uuid,
&grantor_user.name, &grantor_user.name,
&grantor_user.email, &grantor_user.email,
) )
@ -281,27 +281,25 @@ async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mu
} }
#[post("/emergency-access/<emer_id>/reinvite")] #[post("/emergency-access/<emer_id>/reinvite")]
async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult { async fn resend_invite(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> EmptyResult {
check_emergency_access_enabled()?; check_emergency_access_enabled()?;
let mut emergency_access = let Some(mut emergency_access) =
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await { EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await
Some(emer) => emer, else {
None => err!("Emergency access not valid."), err!("Emergency access not valid.")
}; };
if emergency_access.status != EmergencyAccessStatus::Invited as i32 { if emergency_access.status != EmergencyAccessStatus::Invited as i32 {
err!("The grantee user is already accepted or confirmed to the organization"); err!("The grantee user is already accepted or confirmed to the organization");
} }
let email = match emergency_access.email.clone() { let Some(email) = emergency_access.email.clone() else {
Some(email) => email, err!("Email not valid.")
None => err!("Email not valid."),
}; };
let grantee_user = match User::find_by_mail(&email, &mut conn).await { let Some(grantee_user) = User::find_by_mail(&email, &mut conn).await else {
Some(user) => user, err!("Grantee user not found.")
None => err!("Grantee user not found."),
}; };
let grantor_user = headers.user; let grantor_user = headers.user;
@ -309,8 +307,8 @@ async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> Emp
if CONFIG.mail_enabled() { if CONFIG.mail_enabled() {
mail::send_emergency_access_invite( mail::send_emergency_access_invite(
&email, &email,
&grantor_user.uuid, grantor_user.uuid,
&emergency_access.uuid, emergency_access.uuid,
&grantor_user.name, &grantor_user.name,
&grantor_user.email, &grantor_user.email,
) )
@ -333,7 +331,12 @@ struct AcceptData {
} }
#[post("/emergency-access/<emer_id>/accept", data = "<data>")] #[post("/emergency-access/<emer_id>/accept", data = "<data>")]
async fn accept_invite(emer_id: &str, data: Json<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult { async fn accept_invite(
emer_id: EmergencyAccessId,
data: Json<AcceptData>,
headers: Headers,
mut conn: DbConn,
) -> EmptyResult {
check_emergency_access_enabled()?; check_emergency_access_enabled()?;
let data: AcceptData = data.into_inner(); let data: AcceptData = data.into_inner();
@ -356,16 +359,15 @@ async fn accept_invite(emer_id: &str, data: Json<AcceptData>, headers: Headers,
// We need to search for the uuid in combination with the email, since we do not yet store the uuid of the grantee in the database. // We need to search for the uuid in combination with the email, since we do not yet store the uuid of the grantee in the database.
// The uuid of the grantee gets stored once accepted. // The uuid of the grantee gets stored once accepted.
let mut emergency_access = let Some(mut emergency_access) =
match EmergencyAccess::find_by_uuid_and_grantee_email(emer_id, &headers.user.email, &mut conn).await { EmergencyAccess::find_by_uuid_and_grantee_email(&emer_id, &headers.user.email, &mut conn).await
Some(emer) => emer, else {
None => err!("Emergency access not valid."), err!("Emergency access not valid.")
}; };
// get grantor user to send Accepted email // get grantor user to send Accepted email
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await { let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else {
Some(user) => user, err!("Grantor user not found.")
None => err!("Grantor user not found."),
}; };
if emer_id == claims.emer_id if emer_id == claims.emer_id
@ -392,7 +394,7 @@ struct ConfirmData {
#[post("/emergency-access/<emer_id>/confirm", data = "<data>")] #[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
async fn confirm_emergency_access( async fn confirm_emergency_access(
emer_id: &str, emer_id: EmergencyAccessId,
data: Json<ConfirmData>, data: Json<ConfirmData>,
headers: Headers, headers: Headers,
mut conn: DbConn, mut conn: DbConn,
@ -403,11 +405,11 @@ async fn confirm_emergency_access(
let data: ConfirmData = data.into_inner(); let data: ConfirmData = data.into_inner();
let key = data.key; let key = data.key;
let mut emergency_access = let Some(mut emergency_access) =
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &confirming_user.uuid, &mut conn).await { EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &confirming_user.uuid, &mut conn).await
Some(emer) => emer, else {
None => err!("Emergency access not valid."), err!("Emergency access not valid.")
}; };
if emergency_access.status != EmergencyAccessStatus::Accepted as i32 if emergency_access.status != EmergencyAccessStatus::Accepted as i32
|| emergency_access.grantor_uuid != confirming_user.uuid || emergency_access.grantor_uuid != confirming_user.uuid
@ -415,15 +417,13 @@ async fn confirm_emergency_access(
err!("Emergency access not valid.") err!("Emergency access not valid.")
} }
let grantor_user = match User::find_by_uuid(&confirming_user.uuid, &mut conn).await { let Some(grantor_user) = User::find_by_uuid(&confirming_user.uuid, &mut conn).await else {
Some(user) => user, err!("Grantor user not found.")
None => err!("Grantor user not found."),
}; };
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() { if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await { let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &mut conn).await else {
Some(user) => user, err!("Grantee user not found.")
None => err!("Grantee user not found."),
}; };
emergency_access.status = EmergencyAccessStatus::Confirmed as i32; emergency_access.status = EmergencyAccessStatus::Confirmed as i32;
@ -446,23 +446,22 @@ async fn confirm_emergency_access(
// region access emergency access // region access emergency access
#[post("/emergency-access/<emer_id>/initiate")] #[post("/emergency-access/<emer_id>/initiate")]
async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { async fn initiate_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_enabled()?; check_emergency_access_enabled()?;
let initiating_user = headers.user; let initiating_user = headers.user;
let mut emergency_access = let Some(mut emergency_access) =
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &initiating_user.uuid, &mut conn).await { EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &initiating_user.uuid, &mut conn).await
Some(emer) => emer, else {
None => err!("Emergency access not valid."), err!("Emergency access not valid.")
}; };
if emergency_access.status != EmergencyAccessStatus::Confirmed as i32 { if emergency_access.status != EmergencyAccessStatus::Confirmed as i32 {
err!("Emergency access not valid.") err!("Emergency access not valid.")
} }
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await { let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else {
Some(user) => user, err!("Grantor user not found.")
None => err!("Grantor user not found."),
}; };
let now = Utc::now().naive_utc(); let now = Utc::now().naive_utc();
@ -485,28 +484,26 @@ async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
} }
#[post("/emergency-access/<emer_id>/approve")] #[post("/emergency-access/<emer_id>/approve")]
async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { async fn approve_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_enabled()?; check_emergency_access_enabled()?;
let mut emergency_access = let Some(mut emergency_access) =
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await { EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await
Some(emer) => emer, else {
None => err!("Emergency access not valid."), err!("Emergency access not valid.")
}; };
if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32 { if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32 {
err!("Emergency access not valid.") err!("Emergency access not valid.")
} }
let grantor_user = match User::find_by_uuid(&headers.user.uuid, &mut conn).await { let Some(grantor_user) = User::find_by_uuid(&headers.user.uuid, &mut conn).await else {
Some(user) => user, err!("Grantor user not found.")
None => err!("Grantor user not found."),
}; };
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() { if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await { let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &mut conn).await else {
Some(user) => user, err!("Grantee user not found.")
None => err!("Grantee user not found."),
}; };
emergency_access.status = EmergencyAccessStatus::RecoveryApproved as i32; emergency_access.status = EmergencyAccessStatus::RecoveryApproved as i32;
@ -522,14 +519,14 @@ async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbC
} }
#[post("/emergency-access/<emer_id>/reject")] #[post("/emergency-access/<emer_id>/reject")]
async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { async fn reject_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_enabled()?; check_emergency_access_enabled()?;
let mut emergency_access = let Some(mut emergency_access) =
match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await { EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await
Some(emer) => emer, else {
None => err!("Emergency access not valid."), err!("Emergency access not valid.")
}; };
if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32 if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
&& emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32 && emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32
@ -538,9 +535,8 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
} }
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() { if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await { let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &mut conn).await else {
Some(user) => user, err!("Grantee user not found.")
None => err!("Grantee user not found."),
}; };
emergency_access.status = EmergencyAccessStatus::Confirmed as i32; emergency_access.status = EmergencyAccessStatus::Confirmed as i32;
@ -560,14 +556,14 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
// region action // region action
#[post("/emergency-access/<emer_id>/view")] #[post("/emergency-access/<emer_id>/view")]
async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { async fn view_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_enabled()?; check_emergency_access_enabled()?;
let emergency_access = let Some(emergency_access) =
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &headers.user.uuid, &mut conn).await { EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &headers.user.uuid, &mut conn).await
Some(emer) => emer, else {
None => err!("Emergency access not valid."), err!("Emergency access not valid.")
}; };
if !is_valid_request(&emergency_access, &headers.user.uuid, EmergencyAccessType::View) { if !is_valid_request(&emergency_access, &headers.user.uuid, EmergencyAccessType::View) {
err!("Emergency access not valid.") err!("Emergency access not valid.")
@ -598,23 +594,22 @@ async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn
} }
#[post("/emergency-access/<emer_id>/takeover")] #[post("/emergency-access/<emer_id>/takeover")]
async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { async fn takeover_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
check_emergency_access_enabled()?; check_emergency_access_enabled()?;
let requesting_user = headers.user; let requesting_user = headers.user;
let emergency_access = let Some(emergency_access) =
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await { EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &mut conn).await
Some(emer) => emer, else {
None => err!("Emergency access not valid."), err!("Emergency access not valid.")
}; };
if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) { if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
err!("Emergency access not valid.") err!("Emergency access not valid.")
} }
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await { let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else {
Some(user) => user, err!("Grantor user not found.")
None => err!("Grantor user not found."),
}; };
let result = json!({ let result = json!({
@ -638,7 +633,7 @@ struct EmergencyAccessPasswordData {
#[post("/emergency-access/<emer_id>/password", data = "<data>")] #[post("/emergency-access/<emer_id>/password", data = "<data>")]
async fn password_emergency_access( async fn password_emergency_access(
emer_id: &str, emer_id: EmergencyAccessId,
data: Json<EmergencyAccessPasswordData>, data: Json<EmergencyAccessPasswordData>,
headers: Headers, headers: Headers,
mut conn: DbConn, mut conn: DbConn,
@ -650,19 +645,18 @@ async fn password_emergency_access(
//let key = &data.Key; //let key = &data.Key;
let requesting_user = headers.user; let requesting_user = headers.user;
let emergency_access = let Some(emergency_access) =
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await { EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &mut conn).await
Some(emer) => emer, else {
None => err!("Emergency access not valid."), err!("Emergency access not valid.")
}; };
if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) { if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
err!("Emergency access not valid.") err!("Emergency access not valid.")
} }
let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await { let Some(mut grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else {
Some(user) => user, err!("Grantor user not found.")
None => err!("Grantor user not found."),
}; };
// change grantor_user password // change grantor_user password
@ -673,9 +667,9 @@ async fn password_emergency_access(
TwoFactor::delete_all_by_user(&grantor_user.uuid, &mut conn).await?; TwoFactor::delete_all_by_user(&grantor_user.uuid, &mut conn).await?;
// Remove grantor from all organisations unless Owner // Remove grantor from all organisations unless Owner
for user_org in UserOrganization::find_any_state_by_user(&grantor_user.uuid, &mut conn).await { for member in Membership::find_any_state_by_user(&grantor_user.uuid, &mut conn).await {
if user_org.atype != UserOrgType::Owner as i32 { if member.atype != MembershipType::Owner as i32 {
user_org.delete(&mut conn).await?; member.delete(&mut conn).await?;
} }
} }
Ok(()) Ok(())
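
During the password takeover above, the grantor is removed from every organisation in which they are not an Owner. A toy sketch of that filter, using simplified `Membership` and `MembershipType` stand-ins with placeholder discriminants (the real handler deletes each row from the database instead of filtering a Vec):

    enum MembershipType {
        Owner = 0,
        User = 2,
    }

    struct Membership {
        org: &'static str,
        atype: i32,
    }

    fn drop_non_owner_memberships(memberships: Vec<Membership>) -> Vec<Membership> {
        memberships
            .into_iter()
            // Keep only the rows where the grantor is an Owner; every other
            // membership would be deleted in the real handler.
            .filter(|m| m.atype == MembershipType::Owner as i32)
            .collect()
    }

    fn main() {
        let memberships = vec![
            Membership { org: "org-a", atype: MembershipType::Owner as i32 },
            Membership { org: "org-b", atype: MembershipType::User as i32 },
        ];
        let kept = drop_non_owner_memberships(memberships);
        assert_eq!(kept.len(), 1);
        assert_eq!(kept[0].org, "org-a");
    }
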
@ -684,21 +678,20 @@ async fn password_emergency_access(
// endregion // endregion
#[get("/emergency-access/<emer_id>/policies")] #[get("/emergency-access/<emer_id>/policies")]
async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { async fn policies_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult {
let requesting_user = headers.user; let requesting_user = headers.user;
let emergency_access = let Some(emergency_access) =
match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await { EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &mut conn).await
Some(emer) => emer, else {
None => err!("Emergency access not valid."), err!("Emergency access not valid.")
}; };
if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) { if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) {
err!("Emergency access not valid.") err!("Emergency access not valid.")
} }
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await { let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else {
Some(user) => user, err!("Grantor user not found.")
None => err!("Grantor user not found."),
}; };
let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &mut conn); let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &mut conn);
@ -713,11 +706,11 @@ async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
fn is_valid_request( fn is_valid_request(
emergency_access: &EmergencyAccess, emergency_access: &EmergencyAccess,
requesting_user_uuid: &str, requesting_user_id: &UserId,
requested_access_type: EmergencyAccessType, requested_access_type: EmergencyAccessType,
) -> bool { ) -> bool {
emergency_access.grantee_uuid.is_some() emergency_access.grantee_uuid.is_some()
&& emergency_access.grantee_uuid.as_ref().unwrap() == requesting_user_uuid && emergency_access.grantee_uuid.as_ref().unwrap() == requesting_user_id
&& emergency_access.status == EmergencyAccessStatus::RecoveryApproved as i32 && emergency_access.status == EmergencyAccessStatus::RecoveryApproved as i32
&& emergency_access.atype == requested_access_type as i32 && emergency_access.atype == requested_access_type as i32
} }
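
`is_valid_request` above gates every grantee-side action on three conditions: the caller is the stored grantee, recovery has been approved, and the requested access level matches what was granted. A self-contained version with simplified types and placeholder status/type values (the real discriminants live in the Vaultwarden models and may differ):

    // Placeholder values; the real constants are defined in the models.
    #[derive(Clone, Copy)]
    enum EmergencyAccessType {
        View = 0,
        Takeover = 1,
    }

    const STATUS_RECOVERY_APPROVED: i32 = 4;

    struct EmergencyAccess {
        grantee_uuid: Option<String>,
        status: i32,
        atype: i32,
    }

    fn is_valid_request(
        emergency_access: &EmergencyAccess,
        requesting_user_id: &str,
        requested_access_type: EmergencyAccessType,
    ) -> bool {
        emergency_access.grantee_uuid.as_deref() == Some(requesting_user_id)
            && emergency_access.status == STATUS_RECOVERY_APPROVED
            && emergency_access.atype == requested_access_type as i32
    }

    fn main() {
        let access = EmergencyAccess {
            grantee_uuid: Some("grantee-1".into()),
            status: STATUS_RECOVERY_APPROVED,
            atype: EmergencyAccessType::Takeover as i32,
        };
        assert!(is_valid_request(&access, "grantee-1", EmergencyAccessType::Takeover));
        // A different caller, or a lower access level than granted, is rejected.
        assert!(!is_valid_request(&access, "other-user", EmergencyAccessType::Takeover));
        assert!(!is_valid_request(&access, "grantee-1", EmergencyAccessType::View));
    }
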

85 src/api/core/events.rs

@ -8,7 +8,7 @@ use crate::{
api::{EmptyResult, JsonResult}, api::{EmptyResult, JsonResult},
auth::{AdminHeaders, Headers}, auth::{AdminHeaders, Headers},
db::{ db::{
models::{Cipher, Event, UserOrganization}, models::{Cipher, CipherId, Event, Membership, MembershipId, OrganizationId, UserId},
DbConn, DbPool, DbConn, DbPool,
}, },
util::parse_date, util::parse_date,
@ -31,7 +31,12 @@ struct EventRange {
// Upstream: https://github.com/bitwarden/server/blob/9ecf69d9cabce732cf2c57976dd9afa5728578fb/src/Api/Controllers/EventsController.cs#LL84C35-L84C41 // Upstream: https://github.com/bitwarden/server/blob/9ecf69d9cabce732cf2c57976dd9afa5728578fb/src/Api/Controllers/EventsController.cs#LL84C35-L84C41
#[get("/organizations/<org_id>/events?<data..>")] #[get("/organizations/<org_id>/events?<data..>")]
async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult { async fn get_org_events(
org_id: OrganizationId,
data: EventRange,
_headers: AdminHeaders,
mut conn: DbConn,
) -> JsonResult {
// Return an empty vec when we org events are disabled. // Return an empty vec when we org events are disabled.
// This prevents client errors // This prevents client errors
let events_json: Vec<Value> = if !CONFIG.org_events_enabled() { let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
@ -44,7 +49,7 @@ async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders,
parse_date(&data.end) parse_date(&data.end)
}; };
Event::find_by_organization_uuid(org_id, &start_date, &end_date, &mut conn) Event::find_by_organization_uuid(&org_id, &start_date, &end_date, &mut conn)
.await .await
.iter() .iter()
.map(|e| e.to_json()) .map(|e| e.to_json())
@ -59,14 +64,14 @@ async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders,
} }
#[get("/ciphers/<cipher_id>/events?<data..>")] #[get("/ciphers/<cipher_id>/events?<data..>")]
async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult { async fn get_cipher_events(cipher_id: CipherId, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult {
// Return an empty vec when we org events are disabled. // Return an empty vec when we org events are disabled.
// This prevents client errors // This prevents client errors
let events_json: Vec<Value> = if !CONFIG.org_events_enabled() { let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
Vec::with_capacity(0) Vec::with_capacity(0)
} else { } else {
let mut events_json = Vec::with_capacity(0); let mut events_json = Vec::with_capacity(0);
if UserOrganization::user_has_ge_admin_access_to_cipher(&headers.user.uuid, cipher_id, &mut conn).await { if Membership::user_has_ge_admin_access_to_cipher(&headers.user.uuid, &cipher_id, &mut conn).await {
let start_date = parse_date(&data.start); let start_date = parse_date(&data.start);
let end_date = if let Some(before_date) = &data.continuation_token { let end_date = if let Some(before_date) = &data.continuation_token {
parse_date(before_date) parse_date(before_date)
@ -74,7 +79,7 @@ async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers,
parse_date(&data.end) parse_date(&data.end)
}; };
events_json = Event::find_by_cipher_uuid(cipher_id, &start_date, &end_date, &mut conn) events_json = Event::find_by_cipher_uuid(&cipher_id, &start_date, &end_date, &mut conn)
.await .await
.iter() .iter()
.map(|e| e.to_json()) .map(|e| e.to_json())
@ -90,10 +95,10 @@ async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers,
}))) })))
} }
#[get("/organizations/<org_id>/users/<user_org_id>/events?<data..>")] #[get("/organizations/<org_id>/users/<member_id>/events?<data..>")]
async fn get_user_events( async fn get_user_events(
org_id: &str, org_id: OrganizationId,
user_org_id: &str, member_id: MembershipId,
data: EventRange, data: EventRange,
_headers: AdminHeaders, _headers: AdminHeaders,
mut conn: DbConn, mut conn: DbConn,
@ -110,7 +115,7 @@ async fn get_user_events(
parse_date(&data.end) parse_date(&data.end)
}; };
Event::find_by_org_and_user_org(org_id, user_org_id, &start_date, &end_date, &mut conn) Event::find_by_org_and_member(&org_id, &member_id, &start_date, &end_date, &mut conn)
.await .await
.iter() .iter()
.map(|e| e.to_json()) .map(|e| e.to_json())
@ -152,8 +157,8 @@ struct EventCollection {
date: String, date: String,
// Optional // Optional
cipher_id: Option<String>, cipher_id: Option<CipherId>,
organization_id: Option<String>, organization_id: Option<OrganizationId>,
} }
// Upstream: // Upstream:
@ -180,11 +185,11 @@ async fn post_events_collect(data: Json<Vec<EventCollection>>, headers: Headers,
.await; .await;
} }
1600..=1699 => { 1600..=1699 => {
if let Some(org_uuid) = &event.organization_id { if let Some(org_id) = &event.organization_id {
_log_event( _log_event(
event.r#type, event.r#type,
org_uuid, org_id,
org_uuid, org_id,
&headers.user.uuid, &headers.user.uuid,
headers.device.atype, headers.device.atype,
Some(event_date), Some(event_date),
@ -197,11 +202,11 @@ async fn post_events_collect(data: Json<Vec<EventCollection>>, headers: Headers,
_ => { _ => {
if let Some(cipher_uuid) = &event.cipher_id { if let Some(cipher_uuid) = &event.cipher_id {
if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await { if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
if let Some(org_uuid) = cipher.organization_uuid { if let Some(org_id) = cipher.organization_uuid {
_log_event( _log_event(
event.r#type, event.r#type,
cipher_uuid, cipher_uuid,
&org_uuid, &org_id,
&headers.user.uuid, &headers.user.uuid,
headers.device.atype, headers.device.atype,
Some(event_date), Some(event_date),
@ -218,38 +223,38 @@ async fn post_events_collect(data: Json<Vec<EventCollection>>, headers: Headers,
Ok(()) Ok(())
} }
pub async fn log_user_event(event_type: i32, user_uuid: &str, device_type: i32, ip: &IpAddr, conn: &mut DbConn) { pub async fn log_user_event(event_type: i32, user_id: &UserId, device_type: i32, ip: &IpAddr, conn: &mut DbConn) {
if !CONFIG.org_events_enabled() { if !CONFIG.org_events_enabled() {
return; return;
} }
_log_user_event(event_type, user_uuid, device_type, None, ip, conn).await; _log_user_event(event_type, user_id, device_type, None, ip, conn).await;
} }
async fn _log_user_event( async fn _log_user_event(
event_type: i32, event_type: i32,
user_uuid: &str, user_id: &UserId,
device_type: i32, device_type: i32,
event_date: Option<NaiveDateTime>, event_date: Option<NaiveDateTime>,
ip: &IpAddr, ip: &IpAddr,
conn: &mut DbConn, conn: &mut DbConn,
) { ) {
let orgs = UserOrganization::get_org_uuid_by_user(user_uuid, conn).await; let orgs = Membership::get_orgs_by_user(user_id, conn).await;
let mut events: Vec<Event> = Vec::with_capacity(orgs.len() + 1); // We need an event per org and one without an org let mut events: Vec<Event> = Vec::with_capacity(orgs.len() + 1); // We need an event per org and one without an org
// Upstream saves the event also without any org_uuid. // Upstream saves the event also without any org_id.
let mut event = Event::new(event_type, event_date); let mut event = Event::new(event_type, event_date);
event.user_uuid = Some(String::from(user_uuid)); event.user_uuid = Some(user_id.clone());
event.act_user_uuid = Some(String::from(user_uuid)); event.act_user_uuid = Some(user_id.clone());
event.device_type = Some(device_type); event.device_type = Some(device_type);
event.ip_address = Some(ip.to_string()); event.ip_address = Some(ip.to_string());
events.push(event); events.push(event);
// For each org a user is a member of store these events per org // For each org a user is a member of store these events per org
for org_uuid in orgs { for org_id in orgs {
let mut event = Event::new(event_type, event_date); let mut event = Event::new(event_type, event_date);
event.user_uuid = Some(String::from(user_uuid)); event.user_uuid = Some(user_id.clone());
event.org_uuid = Some(org_uuid); event.org_uuid = Some(org_id);
event.act_user_uuid = Some(String::from(user_uuid)); event.act_user_uuid = Some(user_id.clone());
event.device_type = Some(device_type); event.device_type = Some(device_type);
event.ip_address = Some(ip.to_string()); event.ip_address = Some(ip.to_string());
events.push(event); events.push(event);
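
`_log_user_event` above stores the same user event once without an organisation and once per organisation the user belongs to. A plain-data sketch of that fan-out, using strings instead of the new typed IDs and a trimmed `Event`:

    #[derive(Debug)]
    struct Event {
        event_type: i32,
        user_uuid: Option<String>,
        org_uuid: Option<String>,
    }

    fn fan_out_user_event(event_type: i32, user_id: &str, org_ids: &[String]) -> Vec<Event> {
        let mut events = Vec::with_capacity(org_ids.len() + 1);
        // One record that is not tied to any organization ...
        events.push(Event { event_type, user_uuid: Some(user_id.to_string()), org_uuid: None });
        // ... and one per organization the user is a member of.
        for org_id in org_ids {
            events.push(Event {
                event_type,
                user_uuid: Some(user_id.to_string()),
                org_uuid: Some(org_id.clone()),
            });
        }
        events
    }

    fn main() {
        let events = fan_out_user_event(1000, "user-1", &["org-a".to_string(), "org-b".to_string()]);
        assert_eq!(events.len(), 3);
        println!("{events:#?}");
    }
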
@ -261,8 +266,8 @@ async fn _log_user_event(
pub async fn log_event( pub async fn log_event(
event_type: i32, event_type: i32,
source_uuid: &str, source_uuid: &str,
org_uuid: &str, org_id: &OrganizationId,
act_user_uuid: &str, act_user_id: &UserId,
device_type: i32, device_type: i32,
ip: &IpAddr, ip: &IpAddr,
conn: &mut DbConn, conn: &mut DbConn,
@ -270,15 +275,15 @@ pub async fn log_event(
if !CONFIG.org_events_enabled() { if !CONFIG.org_events_enabled() {
return; return;
} }
_log_event(event_type, source_uuid, org_uuid, act_user_uuid, device_type, None, ip, conn).await; _log_event(event_type, source_uuid, org_id, act_user_id, device_type, None, ip, conn).await;
} }
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
async fn _log_event( async fn _log_event(
event_type: i32, event_type: i32,
source_uuid: &str, source_uuid: &str,
org_uuid: &str, org_id: &OrganizationId,
act_user_uuid: &str, act_user_id: &UserId,
device_type: i32, device_type: i32,
event_date: Option<NaiveDateTime>, event_date: Option<NaiveDateTime>,
ip: &IpAddr, ip: &IpAddr,
@ -290,31 +295,31 @@ async fn _log_event(
// 1000..=1099 Are user events, they need to be logged via log_user_event() // 1000..=1099 Are user events, they need to be logged via log_user_event()
// Cipher Events // Cipher Events
1100..=1199 => { 1100..=1199 => {
event.cipher_uuid = Some(String::from(source_uuid)); event.cipher_uuid = Some(source_uuid.to_string().into());
} }
// Collection Events // Collection Events
1300..=1399 => { 1300..=1399 => {
event.collection_uuid = Some(String::from(source_uuid)); event.collection_uuid = Some(source_uuid.to_string().into());
} }
// Group Events // Group Events
1400..=1499 => { 1400..=1499 => {
event.group_uuid = Some(String::from(source_uuid)); event.group_uuid = Some(source_uuid.to_string().into());
} }
// Org User Events // Org User Events
1500..=1599 => { 1500..=1599 => {
event.org_user_uuid = Some(String::from(source_uuid)); event.org_user_uuid = Some(source_uuid.to_string().into());
} }
// 1600..=1699 Are organizational events, and they do not need the source_uuid // 1600..=1699 Are organizational events, and they do not need the source_uuid
// Policy Events // Policy Events
1700..=1799 => { 1700..=1799 => {
event.policy_uuid = Some(String::from(source_uuid)); event.policy_uuid = Some(source_uuid.to_string().into());
} }
// Ignore others // Ignore others
_ => {} _ => {}
} }
event.org_uuid = Some(String::from(org_uuid)); event.org_uuid = Some(org_id.clone());
event.act_user_uuid = Some(String::from(act_user_uuid)); event.act_user_uuid = Some(act_user_id.clone());
event.device_type = Some(device_type); event.device_type = Some(device_type);
event.ip_address = Some(ip.to_string()); event.ip_address = Some(ip.to_string());
event.save(conn).await.unwrap_or(()); event.save(conn).await.unwrap_or(());
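
The range match above decides which id column the source uuid lands in, based purely on the numeric event type. A standalone sketch of that dispatch with plain `String` fields instead of the typed IDs:

    #[derive(Default, Debug)]
    struct Event {
        cipher_uuid: Option<String>,
        collection_uuid: Option<String>,
        group_uuid: Option<String>,
        org_user_uuid: Option<String>,
        policy_uuid: Option<String>,
    }

    fn attach_source(event: &mut Event, event_type: i32, source_uuid: &str) {
        match event_type {
            // 1000..=1099 are user events and are handled elsewhere.
            1100..=1199 => event.cipher_uuid = Some(source_uuid.to_string()),
            1300..=1399 => event.collection_uuid = Some(source_uuid.to_string()),
            1400..=1499 => event.group_uuid = Some(source_uuid.to_string()),
            1500..=1599 => event.org_user_uuid = Some(source_uuid.to_string()),
            // 1600..=1699 are organizational events and carry no source uuid.
            1700..=1799 => event.policy_uuid = Some(source_uuid.to_string()),
            _ => {}
        }
    }

    fn main() {
        let mut event = Event::default();
        attach_source(&mut event, 1101, "cipher-uuid");
        assert!(event.cipher_uuid.is_some());
        println!("{event:?}");
    }
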

62 src/api/core/folders.rs

@ -23,25 +23,19 @@ async fn get_folders(headers: Headers, mut conn: DbConn) -> Json<Value> {
})) }))
} }
#[get("/folders/<uuid>")] #[get("/folders/<folder_id>")]
async fn get_folder(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult { async fn get_folder(folder_id: FolderId, headers: Headers, mut conn: DbConn) -> JsonResult {
let folder = match Folder::find_by_uuid(uuid, &mut conn).await { match Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &mut conn).await {
Some(folder) => folder, Some(folder) => Ok(Json(folder.to_json())),
_ => err!("Invalid folder"), _ => err!("Invalid folder", "Folder does not exist or belongs to another user"),
};
if folder.user_uuid != headers.user.uuid {
err!("Folder belongs to another user")
} }
Ok(Json(folder.to_json()))
} }
#[derive(Deserialize)] #[derive(Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct FolderData { pub struct FolderData {
pub name: String, pub name: String,
pub id: Option<String>, pub id: Option<FolderId>,
} }
#[post("/folders", data = "<data>")] #[post("/folders", data = "<data>")]
@ -56,14 +50,20 @@ async fn post_folders(data: Json<FolderData>, headers: Headers, mut conn: DbConn
Ok(Json(folder.to_json())) Ok(Json(folder.to_json()))
} }
#[post("/folders/<uuid>", data = "<data>")] #[post("/folders/<folder_id>", data = "<data>")]
async fn post_folder(uuid: &str, data: Json<FolderData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { async fn post_folder(
put_folder(uuid, data, headers, conn, nt).await folder_id: FolderId,
data: Json<FolderData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
put_folder(folder_id, data, headers, conn, nt).await
} }
#[put("/folders/<uuid>", data = "<data>")] #[put("/folders/<folder_id>", data = "<data>")]
async fn put_folder( async fn put_folder(
uuid: &str, folder_id: FolderId,
data: Json<FolderData>, data: Json<FolderData>,
headers: Headers, headers: Headers,
mut conn: DbConn, mut conn: DbConn,
@ -71,15 +71,10 @@ async fn put_folder(
) -> JsonResult { ) -> JsonResult {
let data: FolderData = data.into_inner(); let data: FolderData = data.into_inner();
let mut folder = match Folder::find_by_uuid(uuid, &mut conn).await { let Some(mut folder) = Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &mut conn).await else {
Some(folder) => folder, err!("Invalid folder", "Folder does not exist or belongs to another user")
_ => err!("Invalid folder"),
}; };
if folder.user_uuid != headers.user.uuid {
err!("Folder belongs to another user")
}
folder.name = data.name; folder.name = data.name;
folder.save(&mut conn).await?; folder.save(&mut conn).await?;
@ -88,22 +83,17 @@ async fn put_folder(
Ok(Json(folder.to_json())) Ok(Json(folder.to_json()))
} }
#[post("/folders/<uuid>/delete")] #[post("/folders/<folder_id>/delete")]
async fn delete_folder_post(uuid: &str, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { async fn delete_folder_post(folder_id: FolderId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
delete_folder(uuid, headers, conn, nt).await delete_folder(folder_id, headers, conn, nt).await
} }
#[delete("/folders/<uuid>")] #[delete("/folders/<folder_id>")]
async fn delete_folder(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { async fn delete_folder(folder_id: FolderId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
let folder = match Folder::find_by_uuid(uuid, &mut conn).await { let Some(folder) = Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &mut conn).await else {
Some(folder) => folder, err!("Invalid folder", "Folder does not exist or belongs to another user")
_ => err!("Invalid folder"),
}; };
if folder.user_uuid != headers.user.uuid {
err!("Folder belongs to another user")
}
// Delete the actual folder entry // Delete the actual folder entry
folder.delete(&mut conn).await?; folder.delete(&mut conn).await?;

14 src/api/core/mod.rs

@@ -18,7 +18,7 @@ pub use sends::purge_sends
 pub fn routes() -> Vec<Route> {
     let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
     let mut hibp_routes = routes![hibp_breach];
-    let mut meta_routes = routes![alive, now, version, config];
+    let mut meta_routes = routes![alive, now, version, config, get_api_webauthn];

     let mut routes = Vec::new();
     routes.append(&mut accounts::routes());
@@ -184,6 +184,18 @@ fn version() -> Json<&'static str> {
     Json(crate::VERSION.unwrap_or_default())
 }

+#[get("/webauthn")]
+fn get_api_webauthn(_headers: Headers) -> Json<Value> {
+    // Prevent a 404 error, which also causes key-rotation issues
+    // It looks like this is used when login with passkeys is enabled, which Vaultwarden does not (yet) support
+    // An empty list/data also works fine
+    Json(json!({
+        "object": "list",
+        "data": [],
+        "continuationToken": null
+    }))
+}
+
 #[get("/config")]
 fn config() -> Json<Value> {
     let domain = crate::CONFIG.domain();
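The new `get_api_webauthn` route exists only to keep newer clients happy: they probe `/api/webauthn` during sync and key rotation, and a 404 breaks that flow. A tiny sketch of the stubbed response shape built with plain `serde_json`, outside of Rocket, just to show what clients receive:

```rust
use serde_json::{json, Value};

// The same empty Bitwarden-style list object the handler returns; nothing is
// persisted because login with passkeys is not supported (yet).
fn empty_webauthn_list() -> Value {
    json!({
        "object": "list",
        "data": [],
        "continuationToken": null
    })
}

fn main() {
    // Prints the stub as JSON; key order depends on serde_json's map implementation.
    println!("{}", empty_webauthn_list());
}
```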

1684
src/api/core/organizations.rs

File diff suppressed because it is too large

106
src/api/core/public.rs

@ -52,40 +52,36 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
let data = data.into_inner(); let data = data.into_inner();
for user_data in &data.members { for user_data in &data.members {
let mut user_created: bool = false;
if user_data.deleted { if user_data.deleted {
// If user is marked for deletion and it exists, revoke it // If user is marked for deletion and it exists, revoke it
if let Some(mut user_org) = if let Some(mut member) = Membership::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await {
UserOrganization::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await
{
// Only revoke a user if it is not the last confirmed owner // Only revoke a user if it is not the last confirmed owner
let revoked = if user_org.atype == UserOrgType::Owner let revoked = if member.atype == MembershipType::Owner
&& user_org.status == UserOrgStatus::Confirmed as i32 && member.status == MembershipStatus::Confirmed as i32
{ {
if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn).await if Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &mut conn).await <= 1
<= 1
{ {
warn!("Can't revoke the last owner"); warn!("Can't revoke the last owner");
false false
} else { } else {
user_org.revoke() member.revoke()
} }
} else { } else {
user_org.revoke() member.revoke()
}; };
let ext_modified = user_org.set_external_id(Some(user_data.external_id.clone())); let ext_modified = member.set_external_id(Some(user_data.external_id.clone()));
if revoked || ext_modified { if revoked || ext_modified {
user_org.save(&mut conn).await?; member.save(&mut conn).await?;
} }
} }
// If user is part of the organization, restore it // If user is part of the organization, restore it
} else if let Some(mut user_org) = } else if let Some(mut member) = Membership::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await {
UserOrganization::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await let restored = member.restore();
{ let ext_modified = member.set_external_id(Some(user_data.external_id.clone()));
let restored = user_org.restore();
let ext_modified = user_org.set_external_id(Some(user_data.external_id.clone()));
if restored || ext_modified { if restored || ext_modified {
user_org.save(&mut conn).await?; member.save(&mut conn).await?;
} }
} else { } else {
// If user is not part of the organization // If user is not part of the organization
@ -97,25 +93,25 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
new_user.save(&mut conn).await?; new_user.save(&mut conn).await?;
if !CONFIG.mail_enabled() { if !CONFIG.mail_enabled() {
let invitation = Invitation::new(&new_user.email); Invitation::new(&new_user.email).save(&mut conn).await?;
invitation.save(&mut conn).await?;
} }
user_created = true;
new_user new_user
} }
}; };
let user_org_status = if CONFIG.mail_enabled() || user.password_hash.is_empty() { let member_status = if CONFIG.mail_enabled() || user.password_hash.is_empty() {
UserOrgStatus::Invited as i32 MembershipStatus::Invited as i32
} else { } else {
UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites MembershipStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
}; };
let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone()); let mut new_member = Membership::new(user.uuid.clone(), org_id.clone());
new_org_user.set_external_id(Some(user_data.external_id.clone())); new_member.set_external_id(Some(user_data.external_id.clone()));
new_org_user.access_all = false; new_member.access_all = false;
new_org_user.atype = UserOrgType::User as i32; new_member.atype = MembershipType::User as i32;
new_org_user.status = user_org_status; new_member.status = member_status;
new_org_user.save(&mut conn).await?; new_member.save(&mut conn).await?;
if CONFIG.mail_enabled() { if CONFIG.mail_enabled() {
let (org_name, org_email) = match Organization::find_by_uuid(&org_id, &mut conn).await { let (org_name, org_email) = match Organization::find_by_uuid(&org_id, &mut conn).await {
@ -123,8 +119,24 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
None => err!("Error looking up organization"), None => err!("Error looking up organization"),
}; };
mail::send_invite(&user, Some(org_id.clone()), Some(new_org_user.uuid), &org_name, Some(org_email)) if let Err(e) = mail::send_invite(
.await?; &user,
Some(org_id.clone()),
Some(new_member.uuid.clone()),
&org_name,
Some(org_email),
)
.await
{
// Upon error delete the user, invite and org member records when needed
if user_created {
user.delete(&mut conn).await?;
} else {
new_member.delete(&mut conn).await?;
}
err!(format!("Error sending invite: {e:?} "));
}
} }
} }
} }
@ -149,9 +161,8 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?; GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?;
for ext_id in &group_data.member_external_ids { for ext_id in &group_data.member_external_ids {
if let Some(user_org) = UserOrganization::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await if let Some(member) = Membership::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await {
{ let mut group_user = GroupUser::new(group_uuid.clone(), member.uuid.clone());
let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
group_user.save(&mut conn).await?; group_user.save(&mut conn).await?;
} }
} }
@ -164,20 +175,19 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
if data.overwrite_existing { if data.overwrite_existing {
// Generate a HashSet to quickly verify if a member is listed or not. // Generate a HashSet to quickly verify if a member is listed or not.
let sync_members: HashSet<String> = data.members.into_iter().map(|m| m.external_id).collect(); let sync_members: HashSet<String> = data.members.into_iter().map(|m| m.external_id).collect();
for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await { for member in Membership::find_by_org(&org_id, &mut conn).await {
if let Some(ref user_external_id) = user_org.external_id { if let Some(ref user_external_id) = member.external_id {
if !sync_members.contains(user_external_id) { if !sync_members.contains(user_external_id) {
if user_org.atype == UserOrgType::Owner && user_org.status == UserOrgStatus::Confirmed as i32 { if member.atype == MembershipType::Owner && member.status == MembershipStatus::Confirmed as i32 {
// Removing owner, check that there is at least one other confirmed owner // Removing owner, check that there is at least one other confirmed owner
if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn) if Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &mut conn).await
.await
<= 1 <= 1
{ {
warn!("Can't delete the last owner"); warn!("Can't delete the last owner");
continue; continue;
} }
} }
user_org.delete(&mut conn).await?; member.delete(&mut conn).await?;
} }
} }
} }
@ -186,7 +196,7 @@ async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: Db
Ok(()) Ok(())
} }
pub struct PublicToken(String); pub struct PublicToken(OrganizationId);
#[rocket::async_trait] #[rocket::async_trait]
impl<'r> FromRequest<'r> for PublicToken { impl<'r> FromRequest<'r> for PublicToken {
@ -203,9 +213,8 @@ impl<'r> FromRequest<'r> for PublicToken {
None => err_handler!("No access token provided"), None => err_handler!("No access token provided"),
}; };
// Check JWT token is valid and get device and user from it // Check JWT token is valid and get device and user from it
let claims = match auth::decode_api_org(access_token) { let Ok(claims) = auth::decode_api_org(access_token) else {
Ok(claims) => claims, err_handler!("Invalid claim")
Err(_) => err_handler!("Invalid claim"),
}; };
// Check if time is between claims.nbf and claims.exp // Check if time is between claims.nbf and claims.exp
let time_now = Utc::now().timestamp(); let time_now = Utc::now().timestamp();
@ -227,13 +236,12 @@ impl<'r> FromRequest<'r> for PublicToken {
Outcome::Success(conn) => conn, Outcome::Success(conn) => conn,
_ => err_handler!("Error getting DB"), _ => err_handler!("Error getting DB"),
}; };
let org_uuid = match claims.client_id.strip_prefix("organization.") { let Some(org_id) = claims.client_id.strip_prefix("organization.") else {
Some(uuid) => uuid, err_handler!("Malformed client_id")
None => err_handler!("Malformed client_id"),
}; };
let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_uuid, &conn).await { let org_id: OrganizationId = org_id.to_string().into();
Some(org_api_key) => org_api_key, let Some(org_api_key) = OrganizationApiKey::find_by_org_uuid(&org_id, &conn).await else {
None => err_handler!("Invalid client_id"), err_handler!("Invalid client_id")
}; };
if org_api_key.org_uuid != claims.client_sub { if org_api_key.org_uuid != claims.client_sub {
err_handler!("Token not issued for this org"); err_handler!("Token not issued for this org");

121
src/api/core/sends.rs

@ -12,7 +12,7 @@ use crate::{
api::{ApiResult, EmptyResult, JsonResult, Notify, UpdateType}, api::{ApiResult, EmptyResult, JsonResult, Notify, UpdateType},
auth::{ClientIp, Headers, Host}, auth::{ClientIp, Headers, Host},
db::{models::*, DbConn, DbPool}, db::{models::*, DbConn, DbPool},
util::{NumberOrString, SafeString}, util::NumberOrString,
CONFIG, CONFIG,
}; };
@ -67,7 +67,7 @@ pub struct SendData {
file_length: Option<NumberOrString>, file_length: Option<NumberOrString>,
// Used for key rotations // Used for key rotations
pub id: Option<String>, pub id: Option<SendId>,
} }
/// Enforces the `Disable Send` policy. A non-owner/admin user belonging to /// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
@ -79,9 +79,9 @@ pub struct SendData {
/// There is also a Vaultwarden-specific `sends_allowed` config setting that /// There is also a Vaultwarden-specific `sends_allowed` config setting that
/// controls this policy globally. /// controls this policy globally.
async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> EmptyResult { async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> EmptyResult {
let user_uuid = &headers.user.uuid; let user_id = &headers.user.uuid;
if !CONFIG.sends_allowed() if !CONFIG.sends_allowed()
|| OrgPolicy::is_applicable_to_user(user_uuid, OrgPolicyType::DisableSend, None, conn).await || OrgPolicy::is_applicable_to_user(user_id, OrgPolicyType::DisableSend, None, conn).await
{ {
err!("Due to an Enterprise Policy, you are only able to delete an existing Send.") err!("Due to an Enterprise Policy, you are only able to delete an existing Send.")
} }
@ -95,9 +95,9 @@ async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> Em
/// ///
/// Ref: https://bitwarden.com/help/article/policies/#send-options /// Ref: https://bitwarden.com/help/article/policies/#send-options
async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &mut DbConn) -> EmptyResult { async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &mut DbConn) -> EmptyResult {
let user_uuid = &headers.user.uuid; let user_id = &headers.user.uuid;
let hide_email = data.hide_email.unwrap_or(false); let hide_email = data.hide_email.unwrap_or(false);
if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn).await { if hide_email && OrgPolicy::is_hide_email_disabled(user_id, conn).await {
err!( err!(
"Due to an Enterprise Policy, you are not allowed to hide your email address \ "Due to an Enterprise Policy, you are not allowed to hide your email address \
from recipients when creating or editing a Send." from recipients when creating or editing a Send."
@ -106,7 +106,7 @@ async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, c
Ok(()) Ok(())
} }
fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> { fn create_send(data: SendData, user_id: UserId) -> ApiResult<Send> {
let data_val = if data.r#type == SendType::Text as i32 { let data_val = if data.r#type == SendType::Text as i32 {
data.text data.text
} else if data.r#type == SendType::File as i32 { } else if data.r#type == SendType::File as i32 {
@ -129,7 +129,7 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
} }
let mut send = Send::new(data.r#type, data.name, data_str, data.key, data.deletion_date.naive_utc()); let mut send = Send::new(data.r#type, data.name, data_str, data.key, data.deletion_date.naive_utc());
send.user_uuid = Some(user_uuid); send.user_uuid = Some(user_id);
send.notes = data.notes; send.notes = data.notes;
send.max_access_count = match data.max_access_count { send.max_access_count = match data.max_access_count {
Some(m) => Some(m.into_i32()?), Some(m) => Some(m.into_i32()?),
@ -157,18 +157,12 @@ async fn get_sends(headers: Headers, mut conn: DbConn) -> Json<Value> {
})) }))
} }
#[get("/sends/<uuid>")] #[get("/sends/<send_id>")]
async fn get_send(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult { async fn get_send(send_id: SendId, headers: Headers, mut conn: DbConn) -> JsonResult {
let send = match Send::find_by_uuid(uuid, &mut conn).await { match Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await {
Some(send) => send, Some(send) => Ok(Json(send.to_json())),
None => err!("Send not found"), None => err!("Send not found", "Invalid send uuid or does not belong to user"),
};
if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
err!("Send is not owned by user")
} }
Ok(Json(send.to_json()))
} }
#[post("/sends", data = "<data>")] #[post("/sends", data = "<data>")]
@ -255,7 +249,7 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
err!("Send content is not a file"); err!("Send content is not a file");
} }
let file_id = crate::crypto::generate_send_id(); let file_id = crate::crypto::generate_send_file_id();
let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid); let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
let file_path = folder_path.join(&file_id); let file_path = folder_path.join(&file_id);
tokio::fs::create_dir_all(&folder_path).await?; tokio::fs::create_dir_all(&folder_path).await?;
@ -330,7 +324,7 @@ async fn post_send_file_v2(data: Json<SendData>, headers: Headers, mut conn: DbC
let mut send = create_send(data, headers.user.uuid)?; let mut send = create_send(data, headers.user.uuid)?;
let file_id = crate::crypto::generate_send_id(); let file_id = crate::crypto::generate_send_file_id();
let mut data_value: Value = serde_json::from_str(&send.data)?; let mut data_value: Value = serde_json::from_str(&send.data)?;
if let Some(o) = data_value.as_object_mut() { if let Some(o) = data_value.as_object_mut() {
@ -352,16 +346,16 @@ async fn post_send_file_v2(data: Json<SendData>, headers: Headers, mut conn: DbC
#[derive(Deserialize)] #[derive(Deserialize)]
#[allow(non_snake_case)] #[allow(non_snake_case)]
pub struct SendFileData { pub struct SendFileData {
id: String, id: SendFileId,
size: u64, size: u64,
fileName: String, fileName: String,
} }
// https://github.com/bitwarden/server/blob/66f95d1c443490b653e5a15d32977e2f5a3f9e32/src/Api/Tools/Controllers/SendsController.cs#L250 // https://github.com/bitwarden/server/blob/66f95d1c443490b653e5a15d32977e2f5a3f9e32/src/Api/Tools/Controllers/SendsController.cs#L250
#[post("/sends/<send_uuid>/file/<file_id>", format = "multipart/form-data", data = "<data>")] #[post("/sends/<send_id>/file/<file_id>", format = "multipart/form-data", data = "<data>")]
async fn post_send_file_v2_data( async fn post_send_file_v2_data(
send_uuid: &str, send_id: SendId,
file_id: &str, file_id: SendFileId,
data: Form<UploadDataV2<'_>>, data: Form<UploadDataV2<'_>>,
headers: Headers, headers: Headers,
mut conn: DbConn, mut conn: DbConn,
@ -371,22 +365,14 @@ async fn post_send_file_v2_data(
let mut data = data.into_inner(); let mut data = data.into_inner();
let Some(send) = Send::find_by_uuid(send_uuid, &mut conn).await else { let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
err!("Send not found. Unable to save the file.") err!("Send not found. Unable to save the file.", "Invalid send uuid or does not belong to user.")
}; };
if send.atype != SendType::File as i32 { if send.atype != SendType::File as i32 {
err!("Send is not a file type send."); err!("Send is not a file type send.");
} }
let Some(send_user_id) = &send.user_uuid else {
err!("Sends are only supported for users at the moment.")
};
if send_user_id != &headers.user.uuid {
err!("Send doesn't belong to user.");
}
let Ok(send_data) = serde_json::from_str::<SendFileData>(&send.data) else { let Ok(send_data) = serde_json::from_str::<SendFileData>(&send.data) else {
err!("Unable to decode send data as json.") err!("Unable to decode send data as json.")
}; };
@ -416,7 +402,7 @@ async fn post_send_file_v2_data(
err!("Send file size does not match.", format!("Expected a file size of {} got {size}", send_data.size)); err!("Send file size does not match.", format!("Expected a file size of {} got {size}", send_data.size));
} }
let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_uuid); let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_id);
let file_path = folder_path.join(file_id); let file_path = folder_path.join(file_id);
// Check if the file already exists, if that is the case do not overwrite it // Check if the file already exists, if that is the case do not overwrite it
@ -456,9 +442,8 @@ async fn post_access(
ip: ClientIp, ip: ClientIp,
nt: Notify<'_>, nt: Notify<'_>,
) -> JsonResult { ) -> JsonResult {
let mut send = match Send::find_by_access_id(access_id, &mut conn).await { let Some(mut send) = Send::find_by_access_id(access_id, &mut conn).await else {
Some(s) => s, err_code!(SEND_INACCESSIBLE_MSG, 404)
None => err_code!(SEND_INACCESSIBLE_MSG, 404),
}; };
if let Some(max_access_count) = send.max_access_count { if let Some(max_access_count) = send.max_access_count {
@ -500,7 +485,7 @@ async fn post_access(
UpdateType::SyncSendUpdate, UpdateType::SyncSendUpdate,
&send, &send,
&send.update_users_revision(&mut conn).await, &send.update_users_revision(&mut conn).await,
&String::from("00000000-0000-0000-0000-000000000000"), &String::from("00000000-0000-0000-0000-000000000000").into(),
&mut conn, &mut conn,
) )
.await; .await;
@ -510,16 +495,15 @@ async fn post_access(
#[post("/sends/<send_id>/access/file/<file_id>", data = "<data>")] #[post("/sends/<send_id>/access/file/<file_id>", data = "<data>")]
async fn post_access_file( async fn post_access_file(
send_id: &str, send_id: SendId,
file_id: &str, file_id: SendFileId,
data: Json<SendAccessData>, data: Json<SendAccessData>,
host: Host, host: Host,
mut conn: DbConn, mut conn: DbConn,
nt: Notify<'_>, nt: Notify<'_>,
) -> JsonResult { ) -> JsonResult {
let mut send = match Send::find_by_uuid(send_id, &mut conn).await { let Some(mut send) = Send::find_by_uuid(&send_id, &mut conn).await else {
Some(s) => s, err_code!(SEND_INACCESSIBLE_MSG, 404)
None => err_code!(SEND_INACCESSIBLE_MSG, 404),
}; };
if let Some(max_access_count) = send.max_access_count { if let Some(max_access_count) = send.max_access_count {
@ -558,12 +542,12 @@ async fn post_access_file(
UpdateType::SyncSendUpdate, UpdateType::SyncSendUpdate,
&send, &send,
&send.update_users_revision(&mut conn).await, &send.update_users_revision(&mut conn).await,
&String::from("00000000-0000-0000-0000-000000000000"), &String::from("00000000-0000-0000-0000-000000000000").into(),
&mut conn, &mut conn,
) )
.await; .await;
let token_claims = crate::auth::generate_send_claims(send_id, file_id); let token_claims = crate::auth::generate_send_claims(&send_id, &file_id);
let token = crate::auth::encode_jwt(&token_claims); let token = crate::auth::encode_jwt(&token_claims);
Ok(Json(json!({ Ok(Json(json!({
"object": "send-fileDownload", "object": "send-fileDownload",
@ -573,7 +557,7 @@ async fn post_access_file(
} }
#[get("/sends/<send_id>/<file_id>?<t>")] #[get("/sends/<send_id>/<file_id>?<t>")]
async fn download_send(send_id: SafeString, file_id: SafeString, t: &str) -> Option<NamedFile> { async fn download_send(send_id: SendId, file_id: SendFileId, t: &str) -> Option<NamedFile> {
if let Ok(claims) = crate::auth::decode_send(t) { if let Ok(claims) = crate::auth::decode_send(t) {
if claims.sub == format!("{send_id}/{file_id}") { if claims.sub == format!("{send_id}/{file_id}") {
return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).await.ok(); return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).await.ok();
@ -582,16 +566,21 @@ async fn download_send(send_id: SafeString, file_id: SafeString, t: &str) -> Opt
None None
} }
#[put("/sends/<id>", data = "<data>")] #[put("/sends/<send_id>", data = "<data>")]
async fn put_send(id: &str, data: Json<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { async fn put_send(
send_id: SendId,
data: Json<SendData>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
enforce_disable_send_policy(&headers, &mut conn).await?; enforce_disable_send_policy(&headers, &mut conn).await?;
let data: SendData = data.into_inner(); let data: SendData = data.into_inner();
enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?; enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;
let mut send = match Send::find_by_uuid(id, &mut conn).await { let Some(mut send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
Some(s) => s, err!("Send not found", "Send send_id is invalid or does not belong to user")
None => err!("Send not found"),
}; };
update_send_from_data(&mut send, data, &headers, &mut conn, &nt, UpdateType::SyncSendUpdate).await?; update_send_from_data(&mut send, data, &headers, &mut conn, &nt, UpdateType::SyncSendUpdate).await?;
@ -657,17 +646,12 @@ pub async fn update_send_from_data(
Ok(()) Ok(())
} }
#[delete("/sends/<id>")] #[delete("/sends/<send_id>")]
async fn delete_send(id: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { async fn delete_send(send_id: SendId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
let send = match Send::find_by_uuid(id, &mut conn).await { let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
Some(s) => s, err!("Send not found", "Invalid send uuid, or does not belong to user")
None => err!("Send not found"),
}; };
if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
err!("Send is not owned by user")
}
send.delete(&mut conn).await?; send.delete(&mut conn).await?;
nt.send_send_update( nt.send_send_update(
UpdateType::SyncSendDelete, UpdateType::SyncSendDelete,
@ -681,19 +665,14 @@ async fn delete_send(id: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_
Ok(()) Ok(())
} }
#[put("/sends/<id>/remove-password")] #[put("/sends/<send_id>/remove-password")]
async fn put_remove_password(id: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { async fn put_remove_password(send_id: SendId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
enforce_disable_send_policy(&headers, &mut conn).await?; enforce_disable_send_policy(&headers, &mut conn).await?;
let mut send = match Send::find_by_uuid(id, &mut conn).await { let Some(mut send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
Some(s) => s, err!("Send not found", "Invalid send uuid, or does not belong to user")
None => err!("Send not found"),
}; };
if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
err!("Send is not owned by user")
}
send.set_password(None); send.set_password(None);
send.save(&mut conn).await?; send.save(&mut conn).await?;
nt.send_send_update( nt.send_send_update(
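Most of the churn in this file (and in the modules below) comes from replacing raw `&str`/`String` identifiers with dedicated ID types such as `SendId`, `SendFileId`, `UserId` and `OrganizationId`, so a send id can no longer be passed where a user id is expected. The PR's new `macros` crate presumably derives the boilerplate; here is a hand-written sketch of the underlying newtype pattern (names and impls are illustrative, not the actual generated code):

```rust
use std::fmt;

// Minimal newtype wrapper around a UUID string.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct SendId(String);

impl From<String> for SendId {
    fn from(raw: String) -> Self {
        SendId(raw)
    }
}

impl fmt::Display for SendId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct UserId(String);

// A signature like this can no longer be called with the arguments swapped.
fn find_send(send_id: &SendId, user_id: &UserId) {
    println!("looking up send {send_id} for user {}", user_id.0);
}

fn main() {
    let send_id: SendId = String::from("00000000-0000-0000-0000-000000000000").into();
    let user_id = UserId("11111111-1111-1111-1111-111111111111".into());
    find_send(&send_id, &user_id);
}
```

For handlers such as `#[get("/sends/<send_id>")]` to take `SendId` directly as a path parameter, the type also needs Rocket's `FromParam`, which the macros crate presumably provides alongside the conversions shown here.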

69
src/api/core/two_factor/authenticator.rs

@@ -7,7 +7,7 @@ use crate::{
     auth::{ClientIp, Headers},
     crypto,
     db::{
-        models::{EventType, TwoFactor, TwoFactorType},
+        models::{EventType, TwoFactor, TwoFactorType, UserId},
         DbConn,
     },
     util::NumberOrString,
@@ -16,7 +16,7 @@ use crate::{
 pub use crate::config::CONFIG;

 pub fn routes() -> Vec<Route> {
-    routes![generate_authenticator, activate_authenticator, activate_authenticator_put,]
+    routes![generate_authenticator, activate_authenticator, activate_authenticator_put, disable_authenticator]
 }

 #[post("/two-factor/get-authenticator", data = "<data>")]
@@ -95,7 +95,7 @@ async fn activate_authenticator_put(data: Json<EnableAuthenticatorData>, headers
 }

 pub async fn validate_totp_code_str(
-    user_uuid: &str,
+    user_id: &UserId,
     totp_code: &str,
     secret: &str,
     ip: &ClientIp,
@@ -105,11 +105,11 @@ pub async fn validate_totp_code_str(
         err!("TOTP code is not a number");
     }

-    validate_totp_code(user_uuid, totp_code, secret, ip, conn).await
+    validate_totp_code(user_id, totp_code, secret, ip, conn).await
 }

 pub async fn validate_totp_code(
-    user_uuid: &str,
+    user_id: &UserId,
     totp_code: &str,
     secret: &str,
     ip: &ClientIp,
@@ -117,16 +117,15 @@ pub async fn validate_totp_code(
 ) -> EmptyResult {
     use totp_lite::{totp_custom, Sha1};

-    let decoded_secret = match BASE32.decode(secret.as_bytes()) {
-        Ok(s) => s,
-        Err(_) => err!("Invalid TOTP secret"),
+    let Ok(decoded_secret) = BASE32.decode(secret.as_bytes()) else {
+        err!("Invalid TOTP secret")
     };

-    let mut twofactor =
-        match TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn).await {
+    let mut twofactor = match TwoFactor::find_by_user_and_type(user_id, TwoFactorType::Authenticator as i32, conn).await
+    {
         Some(tf) => tf,
-        _ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()),
+        _ => TwoFactor::new(user_id.clone(), TwoFactorType::Authenticator, secret.to_string()),
     };

     // The amount of steps back and forward in time
     // Also check if we need to disable time drifted TOTP codes.
@@ -176,3 +175,47 @@ pub async fn validate_totp_code(
         }
     );
 }
+
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
+struct DisableAuthenticatorData {
+    key: String,
+    master_password_hash: String,
+    r#type: NumberOrString,
+}
+
+#[delete("/two-factor/authenticator", data = "<data>")]
+async fn disable_authenticator(data: Json<DisableAuthenticatorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let user = headers.user;
+    let type_ = data.r#type.into_i32()?;
+
+    if !user.check_valid_password(&data.master_password_hash) {
+        err!("Invalid password");
+    }
+
+    if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
+        if twofactor.data == data.key {
+            twofactor.delete(&mut conn).await?;
+            log_user_event(
+                EventType::UserDisabled2fa as i32,
+                &user.uuid,
+                headers.device.atype,
+                &headers.ip.ip,
+                &mut conn,
+            )
+            .await;
+        } else {
+            err!(format!("TOTP key for user {} does not match recorded value, cannot deactivate", &user.email));
+        }
+    }
+
+    if TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty() {
+        super::enforce_2fa_policy(&user, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await?;
+    }
+
+    Ok(Json(json!({
+        "enabled": false,
+        "keys": type_,
+        "object": "twoFactorProvider"
+    })))
+}
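For context on `validate_totp_code`: the secret is Base32-decoded and the submitted code is compared against `totp_custom::<Sha1>` for the current 30-second step plus a small drift window; the real function additionally records the last used step to reject replays, applies the configured drift limit, and logs events. A compact sketch of just the drift-window comparison, with the `totp_custom(step, digits, secret, time)` call shape assumed from totp_lite's documented API:

```rust
use data_encoding::BASE32;
use totp_lite::{totp_custom, Sha1};

/// Minimal sketch: check a submitted code against the current step and one
/// step of clock drift in each direction.
fn totp_matches(secret_b32: &str, submitted: &str, now: u64) -> bool {
    let Ok(secret) = BASE32.decode(secret_b32.as_bytes()) else {
        return false; // invalid secret encoding
    };
    // 30-second steps, 6 digits, as used by standard authenticator apps.
    (-1i64..=1).any(|drift| {
        let t = now.saturating_add_signed(drift * 30);
        totp_custom::<Sha1>(30, 6, &secret, t) == submitted
    })
}

fn main() {
    use std::time::{SystemTime, UNIX_EPOCH};
    let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    // "JBSWY3DPEHPK3PXP" is the classic demo secret used in TOTP examples.
    println!("{}", totp_matches("JBSWY3DPEHPK3PXP", "000000", now));
}
```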

19
src/api/core/two_factor/duo.rs

@@ -11,7 +11,7 @@ use crate::{
     auth::Headers,
     crypto,
     db::{
-        models::{EventType, TwoFactor, TwoFactorType, User},
+        models::{EventType, TwoFactor, TwoFactorType, User, UserId},
         DbConn,
     },
     error::MapResult,
@@ -228,13 +228,12 @@ const AUTH_PREFIX: &str = "AUTH";
 const DUO_PREFIX: &str = "TX";
 const APP_PREFIX: &str = "APP";

-async fn get_user_duo_data(uuid: &str, conn: &mut DbConn) -> DuoStatus {
+async fn get_user_duo_data(user_id: &UserId, conn: &mut DbConn) -> DuoStatus {
     let type_ = TwoFactorType::Duo as i32;

     // If the user doesn't have an entry, disabled
-    let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, conn).await {
-        Some(t) => t,
-        None => return DuoStatus::Disabled(DuoData::global().is_some()),
+    let Some(twofactor) = TwoFactor::find_by_user_and_type(user_id, type_, conn).await else {
+        return DuoStatus::Disabled(DuoData::global().is_some());
     };

     // If the user has the required values, we use those
@@ -333,14 +332,12 @@ fn parse_duo_values(key: &str, val: &str, ikey: &str, prefix: &str, time: i64) -
         err!("Prefixes don't match")
     }

-    let cookie_vec = match BASE64.decode(u_b64.as_bytes()) {
-        Ok(c) => c,
-        Err(_) => err!("Invalid Duo cookie encoding"),
+    let Ok(cookie_vec) = BASE64.decode(u_b64.as_bytes()) else {
+        err!("Invalid Duo cookie encoding")
     };

-    let cookie = match String::from_utf8(cookie_vec) {
-        Ok(c) => c,
-        Err(_) => err!("Invalid Duo cookie encoding"),
+    let Ok(cookie) = String::from_utf8(cookie_vec) else {
+        err!("Invalid Duo cookie encoding")
     };

     let cookie_split: Vec<&str> = cookie.split('|').collect();
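The `parse_duo_values` cleanup keeps the same decode chain, just written with `let ... else`: Base64 to bytes, bytes to UTF-8, then `|`-separated fields. As a standalone sketch (error messages illustrative, not the handler's exact wording):

```rust
use data_encoding::BASE64;

// Base64 -> Vec<u8> -> String -> '|'-separated fields, with each fallible step
// handled by a let-else early return instead of a match.
fn decode_duo_cookie(u_b64: &str) -> Result<Vec<String>, &'static str> {
    let Ok(cookie_vec) = BASE64.decode(u_b64.as_bytes()) else {
        return Err("Invalid Duo cookie encoding");
    };
    let Ok(cookie) = String::from_utf8(cookie_vec) else {
        return Err("Invalid Duo cookie encoding");
    };
    Ok(cookie.split('|').map(str::to_owned).collect())
}

fn main() {
    let encoded = BASE64.encode(b"TX|something|sig");
    println!("{:?}", decode_duo_cookie(&encoded)); // Ok(["TX", "something", "sig"])
}
```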

6
src/api/core/two_factor/duo_oidc.rs

@@ -10,7 +10,7 @@ use crate::{
     api::{core::two_factor::duo::get_duo_keys_email, EmptyResult},
     crypto,
     db::{
-        models::{EventType, TwoFactorDuoContext},
+        models::{DeviceId, EventType, TwoFactorDuoContext},
         DbConn, DbPool,
     },
     error::Error,
@@ -379,7 +379,7 @@ fn make_callback_url(client_name: &str) -> Result<String, Error> {
 pub async fn get_duo_auth_url(
     email: &str,
     client_id: &str,
-    device_identifier: &String,
+    device_identifier: &DeviceId,
     conn: &mut DbConn,
 ) -> Result<String, Error> {
     let (ik, sk, _, host) = get_duo_keys_email(email, conn).await?;
@@ -417,7 +417,7 @@ pub async fn validate_duo_login(
     email: &str,
     two_factor_token: &str,
     client_id: &str,
-    device_identifier: &str,
+    device_identifier: &DeviceId,
     conn: &mut DbConn,
 ) -> EmptyResult {
     // Result supplied to us by clients in the form "<authz code>|<state>"

32
src/api/core/two_factor/email.rs

@ -10,7 +10,7 @@ use crate::{
auth::Headers, auth::Headers,
crypto, crypto,
db::{ db::{
models::{EventType, TwoFactor, TwoFactorType, User}, models::{EventType, TwoFactor, TwoFactorType, User, UserId},
DbConn, DbConn,
}, },
error::{Error, MapResult}, error::{Error, MapResult},
@ -40,9 +40,8 @@ async fn send_email_login(data: Json<SendEmailLoginData>, mut conn: DbConn) -> E
use crate::db::models::User; use crate::db::models::User;
// Get the user // Get the user
let user = match User::find_by_mail(&data.email, &mut conn).await { let Some(user) = User::find_by_mail(&data.email, &mut conn).await else {
Some(user) => user, err!("Username or password is incorrect. Try again.")
None => err!("Username or password is incorrect. Try again."),
}; };
// Check password // Check password
@ -60,10 +59,9 @@ async fn send_email_login(data: Json<SendEmailLoginData>, mut conn: DbConn) -> E
} }
/// Generate the token, save the data for later verification and send email to user /// Generate the token, save the data for later verification and send email to user
pub async fn send_token(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn send_token(user_id: &UserId, conn: &mut DbConn) -> EmptyResult {
let type_ = TwoFactorType::Email as i32; let type_ = TwoFactorType::Email as i32;
let mut twofactor = let mut twofactor = TwoFactor::find_by_user_and_type(user_id, type_, conn).await.map_res("Two factor not found")?;
TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await.map_res("Two factor not found")?;
let generated_token = crypto::generate_email_token(CONFIG.email_token_size()); let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
@ -174,9 +172,8 @@ async fn email(data: Json<EmailData>, headers: Headers, mut conn: DbConn) -> Jso
let mut email_data = EmailTokenData::from_json(&twofactor.data)?; let mut email_data = EmailTokenData::from_json(&twofactor.data)?;
let issued_token = match &email_data.last_token { let Some(issued_token) = &email_data.last_token else {
Some(t) => t, err!("No token available")
_ => err!("No token available"),
}; };
if !crypto::ct_eq(issued_token, data.token) { if !crypto::ct_eq(issued_token, data.token) {
@ -200,19 +197,18 @@ async fn email(data: Json<EmailData>, headers: Headers, mut conn: DbConn) -> Jso
} }
/// Validate the email code when used as TwoFactor token mechanism /// Validate the email code when used as TwoFactor token mechanism
pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &mut DbConn) -> EmptyResult { pub async fn validate_email_code_str(user_id: &UserId, token: &str, data: &str, conn: &mut DbConn) -> EmptyResult {
let mut email_data = EmailTokenData::from_json(data)?; let mut email_data = EmailTokenData::from_json(data)?;
let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Email as i32, conn) let mut twofactor = TwoFactor::find_by_user_and_type(user_id, TwoFactorType::Email as i32, conn)
.await .await
.map_res("Two factor not found")?; .map_res("Two factor not found")?;
let issued_token = match &email_data.last_token { let Some(issued_token) = &email_data.last_token else {
Some(t) => t, err!(
_ => err!(
"No token available", "No token available",
ErrorEvent { ErrorEvent {
event: EventType::UserFailedLogIn2fa event: EventType::UserFailedLogIn2fa
} }
), )
}; };
if !crypto::ct_eq(issued_token, token) { if !crypto::ct_eq(issued_token, token) {
@ -330,8 +326,8 @@ pub fn obscure_email(email: &str) -> String {
format!("{}@{}", new_name, &domain) format!("{}@{}", new_name, &domain)
} }
pub async fn find_and_activate_email_2fa(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn find_and_activate_email_2fa(user_id: &UserId, conn: &mut DbConn) -> EmptyResult {
if let Some(user) = User::find_by_uuid(user_uuid, conn).await { if let Some(user) = User::find_by_uuid(user_id, conn).await {
activate_email_2fa(&user, conn).await activate_email_2fa(&user, conn).await
} else { } else {
err!("User not found!"); err!("User not found!");

30
src/api/core/two_factor/mod.rs

@ -85,9 +85,8 @@ async fn recover(data: Json<RecoverTwoFactor>, client_headers: ClientHeaders, mu
use crate::db::models::User; use crate::db::models::User;
// Get the user // Get the user
let mut user = match User::find_by_mail(&data.email, &mut conn).await { let Some(mut user) = User::find_by_mail(&data.email, &mut conn).await else {
Some(user) => user, err!("Username or password is incorrect. Try again.")
None => err!("Username or password is incorrect. Try again."),
}; };
// Check password // Check password
@ -174,17 +173,16 @@ async fn disable_twofactor_put(data: Json<DisableTwoFactorData>, headers: Header
pub async fn enforce_2fa_policy( pub async fn enforce_2fa_policy(
user: &User, user: &User,
act_uuid: &str, act_user_id: &UserId,
device_type: i32, device_type: i32,
ip: &std::net::IpAddr, ip: &std::net::IpAddr,
conn: &mut DbConn, conn: &mut DbConn,
) -> EmptyResult { ) -> EmptyResult {
for member in UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, conn) for member in
.await Membership::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, conn).await.into_iter()
.into_iter()
{ {
// Policy only applies to non-Owner/non-Admin members who have accepted joining the org // Policy only applies to non-Owner/non-Admin members who have accepted joining the org
if member.atype < UserOrgType::Admin { if member.atype < MembershipType::Admin {
if CONFIG.mail_enabled() { if CONFIG.mail_enabled() {
let org = Organization::find_by_uuid(&member.org_uuid, conn).await.unwrap(); let org = Organization::find_by_uuid(&member.org_uuid, conn).await.unwrap();
mail::send_2fa_removed_from_org(&user.email, &org.name).await?; mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
@ -197,7 +195,7 @@ pub async fn enforce_2fa_policy(
EventType::OrganizationUserRevoked as i32, EventType::OrganizationUserRevoked as i32,
&member.uuid, &member.uuid,
&member.org_uuid, &member.org_uuid,
act_uuid, act_user_id,
device_type, device_type,
ip, ip,
conn, conn,
@ -210,16 +208,16 @@ pub async fn enforce_2fa_policy(
} }
pub async fn enforce_2fa_policy_for_org( pub async fn enforce_2fa_policy_for_org(
org_uuid: &str, org_id: &OrganizationId,
act_uuid: &str, act_user_id: &UserId,
device_type: i32, device_type: i32,
ip: &std::net::IpAddr, ip: &std::net::IpAddr,
conn: &mut DbConn, conn: &mut DbConn,
) -> EmptyResult { ) -> EmptyResult {
let org = Organization::find_by_uuid(org_uuid, conn).await.unwrap(); let org = Organization::find_by_uuid(org_id, conn).await.unwrap();
for member in UserOrganization::find_confirmed_by_org(org_uuid, conn).await.into_iter() { for member in Membership::find_confirmed_by_org(org_id, conn).await.into_iter() {
// Don't enforce the policy for Admins and Owners. // Don't enforce the policy for Admins and Owners.
if member.atype < UserOrgType::Admin && TwoFactor::find_by_user(&member.user_uuid, conn).await.is_empty() { if member.atype < MembershipType::Admin && TwoFactor::find_by_user(&member.user_uuid, conn).await.is_empty() {
if CONFIG.mail_enabled() { if CONFIG.mail_enabled() {
let user = User::find_by_uuid(&member.user_uuid, conn).await.unwrap(); let user = User::find_by_uuid(&member.user_uuid, conn).await.unwrap();
mail::send_2fa_removed_from_org(&user.email, &org.name).await?; mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
@ -231,8 +229,8 @@ pub async fn enforce_2fa_policy_for_org(
log_event( log_event(
EventType::OrganizationUserRevoked as i32, EventType::OrganizationUserRevoked as i32,
&member.uuid, &member.uuid,
org_uuid, org_id,
act_uuid, act_user_id,
device_type, device_type,
ip, ip,
conn, conn,
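Both `enforce_2fa_policy` variants follow the same shape: walk the relevant memberships, skip anyone at `Admin` level or above, and revoke (plus optionally e-mail and log) the rest. A reduced sketch of that control flow with hypothetical stand-in types, since the real code works on `Membership` rows, a mailer and the event log, and compares `member.atype < MembershipType::Admin`:

```rust
// Role ordering here is only meant to make `< Role::Admin` read naturally;
// discriminants and the stored representation in the real code differ.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum Role {
    User,
    Manager,
    Admin,
    Owner,
}

struct Member {
    email: String,
    role: Role,
    has_second_factor: bool,
    revoked: bool,
}

fn enforce_2fa_policy_for_org(members: &mut [Member]) {
    for member in members.iter_mut() {
        // The policy only applies below Admin, and only when no 2FA is registered.
        if member.role < Role::Admin && !member.has_second_factor {
            member.revoked = true;
            println!("revoked {} until they enable 2FA", member.email);
        }
    }
}

fn main() {
    let mut members = vec![
        Member { email: "owner@example.com".into(), role: Role::Owner, has_second_factor: false, revoked: false },
        Member { email: "user@example.com".into(), role: Role::User, has_second_factor: false, revoked: false },
    ];
    enforce_2fa_policy_for_org(&mut members);
    assert!(!members[0].revoked && members[1].revoked);
}
```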

6
src/api/core/two_factor/protected_actions.rs

@@ -6,7 +6,7 @@ use crate::{
     auth::Headers,
     crypto,
     db::{
-        models::{TwoFactor, TwoFactorType},
+        models::{TwoFactor, TwoFactorType, UserId},
         DbConn,
     },
     error::{Error, MapResult},
@@ -104,11 +104,11 @@ async fn verify_otp(data: Json<ProtectedActionVerify>, headers: Headers, mut con
 pub async fn validate_protected_action_otp(
     otp: &str,
-    user_uuid: &str,
+    user_id: &UserId,
     delete_if_valid: bool,
     conn: &mut DbConn,
 ) -> EmptyResult {
-    let pa = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::ProtectedActions as i32, conn)
+    let pa = TwoFactor::find_by_user_and_type(user_id, TwoFactorType::ProtectedActions as i32, conn)
         .await
         .map_res("Protected action token not found, try sending the code again or restart the process")?;
     let mut pa_data = ProtectedActionData::from_json(&pa.data)?;

37
src/api/core/two_factor/webauthn.rs

@ -11,7 +11,7 @@ use crate::{
}, },
auth::Headers, auth::Headers,
db::{ db::{
models::{EventType, TwoFactor, TwoFactorType}, models::{EventType, TwoFactor, TwoFactorType, UserId},
DbConn, DbConn,
}, },
error::Error, error::Error,
@ -148,7 +148,7 @@ async fn generate_webauthn_challenge(data: Json<PasswordOrOtpData>, headers: Hea
)?; )?;
let type_ = TwoFactorType::WebauthnRegisterChallenge; let type_ = TwoFactorType::WebauthnRegisterChallenge;
TwoFactor::new(user.uuid, type_, serde_json::to_string(&state)?).save(&mut conn).await?; TwoFactor::new(user.uuid.clone(), type_, serde_json::to_string(&state)?).save(&mut conn).await?;
let mut challenge_value = serde_json::to_value(challenge.public_key)?; let mut challenge_value = serde_json::to_value(challenge.public_key)?;
challenge_value["status"] = "ok".into(); challenge_value["status"] = "ok".into();
@ -309,17 +309,16 @@ async fn delete_webauthn(data: Json<DeleteU2FData>, headers: Headers, mut conn:
err!("Invalid password"); err!("Invalid password");
} }
let mut tf = let Some(mut tf) =
match TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &mut conn).await { TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &mut conn).await
Some(tf) => tf, else {
None => err!("Webauthn data not found!"), err!("Webauthn data not found!")
}; };
let mut data: Vec<WebauthnRegistration> = serde_json::from_str(&tf.data)?; let mut data: Vec<WebauthnRegistration> = serde_json::from_str(&tf.data)?;
let item_pos = match data.iter().position(|r| r.id == id) { let Some(item_pos) = data.iter().position(|r| r.id == id) else {
Some(p) => p, err!("Webauthn entry not found")
None => err!("Webauthn entry not found"),
}; };
let removed_item = data.remove(item_pos); let removed_item = data.remove(item_pos);
@ -353,20 +352,20 @@ async fn delete_webauthn(data: Json<DeleteU2FData>, headers: Headers, mut conn:
} }
pub async fn get_webauthn_registrations( pub async fn get_webauthn_registrations(
user_uuid: &str, user_id: &UserId,
conn: &mut DbConn, conn: &mut DbConn,
) -> Result<(bool, Vec<WebauthnRegistration>), Error> { ) -> Result<(bool, Vec<WebauthnRegistration>), Error> {
let type_ = TwoFactorType::Webauthn as i32; let type_ = TwoFactorType::Webauthn as i32;
match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await { match TwoFactor::find_by_user_and_type(user_id, type_, conn).await {
Some(tf) => Ok((tf.enabled, serde_json::from_str(&tf.data)?)), Some(tf) => Ok((tf.enabled, serde_json::from_str(&tf.data)?)),
None => Ok((false, Vec::new())), // If no data, return empty list None => Ok((false, Vec::new())), // If no data, return empty list
} }
} }
pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> JsonResult { pub async fn generate_webauthn_login(user_id: &UserId, conn: &mut DbConn) -> JsonResult {
// Load saved credentials // Load saved credentials
let creds: Vec<Credential> = let creds: Vec<Credential> =
get_webauthn_registrations(user_uuid, conn).await?.1.into_iter().map(|r| r.credential).collect(); get_webauthn_registrations(user_id, conn).await?.1.into_iter().map(|r| r.credential).collect();
if creds.is_empty() { if creds.is_empty() {
err!("No Webauthn devices registered") err!("No Webauthn devices registered")
@ -377,7 +376,7 @@ pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> Json
let (response, state) = WebauthnConfig::load().generate_challenge_authenticate_options(creds, Some(ext))?; let (response, state) = WebauthnConfig::load().generate_challenge_authenticate_options(creds, Some(ext))?;
// Save the challenge state for later validation // Save the challenge state for later validation
TwoFactor::new(user_uuid.into(), TwoFactorType::WebauthnLoginChallenge, serde_json::to_string(&state)?) TwoFactor::new(user_id.clone(), TwoFactorType::WebauthnLoginChallenge, serde_json::to_string(&state)?)
.save(conn) .save(conn)
.await?; .await?;
@ -385,9 +384,9 @@ pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> Json
Ok(Json(serde_json::to_value(response.public_key)?)) Ok(Json(serde_json::to_value(response.public_key)?))
} }
pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut DbConn) -> EmptyResult { pub async fn validate_webauthn_login(user_id: &UserId, response: &str, conn: &mut DbConn) -> EmptyResult {
let type_ = TwoFactorType::WebauthnLoginChallenge as i32; let type_ = TwoFactorType::WebauthnLoginChallenge as i32;
let state = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await { let state = match TwoFactor::find_by_user_and_type(user_id, type_, conn).await {
Some(tf) => { Some(tf) => {
let state: AuthenticationState = serde_json::from_str(&tf.data)?; let state: AuthenticationState = serde_json::from_str(&tf.data)?;
tf.delete(conn).await?; tf.delete(conn).await?;
@ -404,7 +403,7 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut
let rsp: PublicKeyCredentialCopy = serde_json::from_str(response)?; let rsp: PublicKeyCredentialCopy = serde_json::from_str(response)?;
let rsp: PublicKeyCredential = rsp.into(); let rsp: PublicKeyCredential = rsp.into();
let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1; let mut registrations = get_webauthn_registrations(user_id, conn).await?.1;
// If the credential we received is migrated from U2F, enable the U2F compatibility // If the credential we received is migrated from U2F, enable the U2F compatibility
//let use_u2f = registrations.iter().any(|r| r.migrated && r.credential.cred_id == rsp.raw_id.0); //let use_u2f = registrations.iter().any(|r| r.migrated && r.credential.cred_id == rsp.raw_id.0);
@ -414,7 +413,7 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut
if &reg.credential.cred_id == cred_id { if &reg.credential.cred_id == cred_id {
reg.credential.counter = auth_data.counter; reg.credential.counter = auth_data.counter;
TwoFactor::new(user_uuid.to_string(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?) TwoFactor::new(user_id.clone(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?)
.save(conn) .save(conn)
.await?; .await?;
return Ok(()); return Ok(());
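`validate_webauthn_login` ends by writing the authenticator's new signature counter back into the stored registration (`reg.credential.counter = auth_data.counter`). The counter is WebAuthn's clone-detection signal; the handler relies on the webauthn-rs state machine for the actual validation and merely persists the value, but the bookkeeping idea looks roughly like this conceptual sketch:

```rust
// Conceptual sketch only: a monotonically increasing signature counter lets a
// relying party notice cloned authenticators.
struct StoredCredential {
    counter: u32,
}

fn update_counter(stored: &mut StoredCredential, reported: u32) -> Result<(), &'static str> {
    // A counter that does not move forward (and is not the "always 0" value used
    // by authenticators without counter support) hints that the credential was cloned.
    if reported != 0 && reported <= stored.counter {
        return Err("possible cloned authenticator: counter did not increase");
    }
    stored.counter = reported;
    Ok(())
}

fn main() {
    let mut cred = StoredCredential { counter: 41 };
    assert!(update_counter(&mut cred, 42).is_ok());
    assert!(update_counter(&mut cred, 42).is_err());
}
```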

4
src/api/core/two_factor/yubikey.rs

@@ -92,10 +92,10 @@ async fn generate_yubikey(data: Json<PasswordOrOtpData>, headers: Headers, mut c
     data.validate(&user, false, &mut conn).await?;

-    let user_uuid = &user.uuid;
+    let user_id = &user.uuid;
     let yubikey_type = TwoFactorType::YubiKey as i32;

-    let r = TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &mut conn).await;
+    let r = TwoFactor::find_by_user_and_type(user_id, yubikey_type, &mut conn).await;

     if let Some(r) = r {
         let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?;

87
src/api/identity.rs

@ -31,7 +31,7 @@ pub fn routes() -> Vec<Route> {
async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn: DbConn) -> JsonResult { async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn: DbConn) -> JsonResult {
let data: ConnectData = data.into_inner(); let data: ConnectData = data.into_inner();
let mut user_uuid: Option<String> = None; let mut user_id: Option<UserId> = None;
let login_result = match data.grant_type.as_ref() { let login_result = match data.grant_type.as_ref() {
"refresh_token" => { "refresh_token" => {
@ -48,7 +48,7 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
_check_is_some(&data.device_name, "device_name cannot be blank")?; _check_is_some(&data.device_name, "device_name cannot be blank")?;
_check_is_some(&data.device_type, "device_type cannot be blank")?; _check_is_some(&data.device_type, "device_type cannot be blank")?;
_password_login(data, &mut user_uuid, &mut conn, &client_header.ip).await _password_login(data, &mut user_id, &mut conn, &client_header.ip).await
} }
"client_credentials" => { "client_credentials" => {
_check_is_some(&data.client_id, "client_id cannot be blank")?; _check_is_some(&data.client_id, "client_id cannot be blank")?;
@ -59,17 +59,17 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
_check_is_some(&data.device_name, "device_name cannot be blank")?; _check_is_some(&data.device_name, "device_name cannot be blank")?;
_check_is_some(&data.device_type, "device_type cannot be blank")?; _check_is_some(&data.device_type, "device_type cannot be blank")?;
_api_key_login(data, &mut user_uuid, &mut conn, &client_header.ip).await _api_key_login(data, &mut user_id, &mut conn, &client_header.ip).await
} }
t => err!("Invalid type", t), t => err!("Invalid type", t),
}; };
if let Some(user_uuid) = user_uuid { if let Some(user_id) = user_id {
match &login_result { match &login_result {
Ok(_) => { Ok(_) => {
log_user_event( log_user_event(
EventType::UserLoggedIn as i32, EventType::UserLoggedIn as i32,
&user_uuid, &user_id,
client_header.device_type, client_header.device_type,
&client_header.ip.ip, &client_header.ip.ip,
&mut conn, &mut conn,
@ -80,7 +80,7 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn:
if let Some(ev) = e.get_event() { if let Some(ev) = e.get_event() {
log_user_event( log_user_event(
ev.event as i32, ev.event as i32,
&user_uuid, &user_id,
client_header.device_type, client_header.device_type,
&client_header.ip.ip, &client_header.ip.ip,
&mut conn, &mut conn,
@ -111,7 +111,7 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
// Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out // Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
// See: https://github.com/dani-garcia/vaultwarden/issues/4156 // See: https://github.com/dani-garcia/vaultwarden/issues/4156
// --- // ---
// let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await; // let members = Membership::find_confirmed_by_user(&user.uuid, conn).await;
let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec); let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
device.save(conn).await?; device.save(conn).await?;
@ -141,7 +141,7 @@ struct MasterPasswordPolicy {
async fn _password_login( async fn _password_login(
data: ConnectData, data: ConnectData,
user_uuid: &mut Option<String>, user_id: &mut Option<UserId>,
conn: &mut DbConn, conn: &mut DbConn,
ip: &ClientIp, ip: &ClientIp,
) -> JsonResult { ) -> JsonResult {
@ -157,13 +157,12 @@ async fn _password_login(
// Get the user // Get the user
let username = data.username.as_ref().unwrap().trim(); let username = data.username.as_ref().unwrap().trim();
let mut user = match User::find_by_mail(username, conn).await { let Some(mut user) = User::find_by_mail(username, conn).await else {
Some(user) => user, err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username))
None => err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)),
}; };
// Set the user_uuid here to be passed back used for event logging. // Set the user_id here to be passed back used for event logging.
*user_uuid = Some(user.uuid.clone()); *user_id = Some(user.uuid.clone());
// Check if the user is disabled // Check if the user is disabled
if !user.enabled { if !user.enabled {
@ -179,8 +178,8 @@ async fn _password_login(
let password = data.password.as_ref().unwrap(); let password = data.password.as_ref().unwrap();
// If we get an auth request, we don't check the user's password, but the access code of the auth request // If we get an auth request, we don't check the user's password, but the access code of the auth request
if let Some(ref auth_request_uuid) = data.auth_request { if let Some(ref auth_request_id) = data.auth_request {
let Some(auth_request) = AuthRequest::find_by_uuid(auth_request_uuid.as_str(), conn).await else { let Some(auth_request) = AuthRequest::find_by_uuid_and_user(auth_request_id, &user.uuid, conn).await else {
err!( err!(
"Auth request not found. Try again.", "Auth request not found. Try again.",
format!("IP: {}. Username: {}.", ip.ip, username), format!("IP: {}. Username: {}.", ip.ip, username),
@ -291,7 +290,7 @@ async fn _password_login(
// Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out // Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
// See: https://github.com/dani-garcia/vaultwarden/issues/4156 // See: https://github.com/dani-garcia/vaultwarden/issues/4156
// --- // ---
// let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await; // let members = Membership::find_confirmed_by_user(&user.uuid, conn).await;
let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec); let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
device.save(conn).await?; device.save(conn).await?;
@ -359,7 +358,7 @@ async fn _password_login(
async fn _api_key_login( async fn _api_key_login(
data: ConnectData, data: ConnectData,
user_uuid: &mut Option<String>, user_id: &mut Option<UserId>,
conn: &mut DbConn, conn: &mut DbConn,
ip: &ClientIp, ip: &ClientIp,
) -> JsonResult { ) -> JsonResult {
@ -368,7 +367,7 @@ async fn _api_key_login(
// Validate scope // Validate scope
match data.scope.as_ref().unwrap().as_ref() { match data.scope.as_ref().unwrap().as_ref() {
"api" => _user_api_key_login(data, user_uuid, conn, ip).await, "api" => _user_api_key_login(data, user_id, conn, ip).await,
"api.organization" => _organization_api_key_login(data, conn, ip).await, "api.organization" => _organization_api_key_login(data, conn, ip).await,
_ => err!("Scope not supported"), _ => err!("Scope not supported"),
} }
@ -376,23 +375,22 @@ async fn _api_key_login(
async fn _user_api_key_login( async fn _user_api_key_login(
data: ConnectData, data: ConnectData,
user_uuid: &mut Option<String>, user_id: &mut Option<UserId>,
conn: &mut DbConn, conn: &mut DbConn,
ip: &ClientIp, ip: &ClientIp,
) -> JsonResult { ) -> JsonResult {
// Get the user via the client_id // Get the user via the client_id
let client_id = data.client_id.as_ref().unwrap(); let client_id = data.client_id.as_ref().unwrap();
let client_user_uuid = match client_id.strip_prefix("user.") { let Some(client_user_id) = client_id.strip_prefix("user.") else {
Some(uuid) => uuid, err!("Malformed client_id", format!("IP: {}.", ip.ip))
None => err!("Malformed client_id", format!("IP: {}.", ip.ip)),
}; };
let user = match User::find_by_uuid(client_user_uuid, conn).await { let client_user_id: UserId = client_user_id.into();
Some(user) => user, let Some(user) = User::find_by_uuid(&client_user_id, conn).await else {
None => err!("Invalid client_id", format!("IP: {}.", ip.ip)), err!("Invalid client_id", format!("IP: {}.", ip.ip))
}; };
// Set the user_uuid here to be passed back used for event logging. // Set the user_id here to be passed back used for event logging.
*user_uuid = Some(user.uuid.clone()); *user_id = Some(user.uuid.clone());
// Check if the user is disabled // Check if the user is disabled
if !user.enabled { if !user.enabled {
@ -442,7 +440,7 @@ async fn _user_api_key_login(
// Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out // Because this might get used in the future, and is add by the Bitwarden Server, lets keep it, but then commented out
// See: https://github.com/dani-garcia/vaultwarden/issues/4156 // See: https://github.com/dani-garcia/vaultwarden/issues/4156
// --- // ---
// let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await; // let members = Membership::find_confirmed_by_user(&user.uuid, conn).await;
let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec); let (access_token, expires_in) = device.refresh_tokens(&user, scope_vec);
device.save(conn).await?; device.save(conn).await?;
@ -471,13 +469,12 @@ async fn _user_api_key_login(
async fn _organization_api_key_login(data: ConnectData, conn: &mut DbConn, ip: &ClientIp) -> JsonResult { async fn _organization_api_key_login(data: ConnectData, conn: &mut DbConn, ip: &ClientIp) -> JsonResult {
// Get the org via the client_id // Get the org via the client_id
let client_id = data.client_id.as_ref().unwrap(); let client_id = data.client_id.as_ref().unwrap();
let org_uuid = match client_id.strip_prefix("organization.") { let Some(org_id) = client_id.strip_prefix("organization.") else {
Some(uuid) => uuid, err!("Malformed client_id", format!("IP: {}.", ip.ip))
None => err!("Malformed client_id", format!("IP: {}.", ip.ip)),
}; };
let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_uuid, conn).await { let org_id: OrganizationId = org_id.to_string().into();
Some(org_api_key) => org_api_key, let Some(org_api_key) = OrganizationApiKey::find_by_org_uuid(&org_id, conn).await else {
None => err!("Invalid client_id", format!("IP: {}.", ip.ip)), err!("Invalid client_id", format!("IP: {}.", ip.ip))
}; };
// Check API key. // Check API key.
@ -618,7 +615,7 @@ fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
async fn _json_err_twofactor( async fn _json_err_twofactor(
providers: &[i32], providers: &[i32],
user_uuid: &str, user_id: &UserId,
data: &ConnectData, data: &ConnectData,
conn: &mut DbConn, conn: &mut DbConn,
) -> ApiResult<Value> { ) -> ApiResult<Value> {
@ -639,12 +636,12 @@ async fn _json_err_twofactor(
Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ } Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }
Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => { Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => {
let request = webauthn::generate_webauthn_login(user_uuid, conn).await?; let request = webauthn::generate_webauthn_login(user_id, conn).await?;
result["TwoFactorProviders2"][provider.to_string()] = request.0; result["TwoFactorProviders2"][provider.to_string()] = request.0;
} }
Some(TwoFactorType::Duo) => { Some(TwoFactorType::Duo) => {
let email = match User::find_by_uuid(user_uuid, conn).await { let email = match User::find_by_uuid(user_id, conn).await {
Some(u) => u.email, Some(u) => u.email,
None => err!("User does not exist"), None => err!("User does not exist"),
}; };
@ -676,9 +673,8 @@ async fn _json_err_twofactor(
} }
Some(tf_type @ TwoFactorType::YubiKey) => { Some(tf_type @ TwoFactorType::YubiKey) => {
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await { let Some(twofactor) = TwoFactor::find_by_user_and_type(user_id, tf_type as i32, conn).await else {
Some(tf) => tf, err!("No YubiKey devices registered")
None => err!("No YubiKey devices registered"),
}; };
let yubikey_metadata: yubikey::YubikeyMetadata = serde_json::from_str(&twofactor.data)?; let yubikey_metadata: yubikey::YubikeyMetadata = serde_json::from_str(&twofactor.data)?;
@ -689,14 +685,13 @@ async fn _json_err_twofactor(
} }
Some(tf_type @ TwoFactorType::Email) => { Some(tf_type @ TwoFactorType::Email) => {
let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await { let Some(twofactor) = TwoFactor::find_by_user_and_type(user_id, tf_type as i32, conn).await else {
Some(tf) => tf, err!("No twofactor email registered")
None => err!("No twofactor email registered"),
}; };
// Send email immediately if email is the only 2FA option // Send email immediately if email is the only 2FA option
if providers.len() == 1 { if providers.len() == 1 {
email::send_token(user_uuid, conn).await? email::send_token(user_id, conn).await?
} }
let email_data = email::EmailTokenData::from_json(&twofactor.data)?; let email_data = email::EmailTokenData::from_json(&twofactor.data)?;
@ -751,7 +746,7 @@ struct ConnectData {
#[field(name = uncased("device_identifier"))] #[field(name = uncased("device_identifier"))]
#[field(name = uncased("deviceidentifier"))] #[field(name = uncased("deviceidentifier"))]
device_identifier: Option<String>, device_identifier: Option<DeviceId>,
#[field(name = uncased("device_name"))] #[field(name = uncased("device_name"))]
#[field(name = uncased("devicename"))] #[field(name = uncased("devicename"))]
device_name: Option<String>, device_name: Option<String>,
@ -774,7 +769,7 @@ struct ConnectData {
#[field(name = uncased("twofactorremember"))] #[field(name = uncased("twofactorremember"))]
two_factor_remember: Option<i32>, two_factor_remember: Option<i32>,
#[field(name = uncased("authrequest"))] #[field(name = uncased("authrequest"))]
auth_request: Option<String>, auth_request: Option<AuthRequestId>,
} }
fn _check_is_some<T>(value: &Option<T>, msg: &str) -> EmptyResult { fn _check_is_some<T>(value: &Option<T>, msg: &str) -> EmptyResult {

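Most of the identity.rs changes above follow two patterns: `match ... { Some(x) => x, None => err!(..) }` blocks become `let ... else` bindings, and bare `String` identifiers become typed wrappers (`UserId`, `OrganizationId`, `DeviceId`, `AuthRequestId`). A minimal, self-contained sketch of the `let ... else` rewrite; the lookup function and error type are hypothetical stand-ins, not code from this diff:

fn find_user(client_id: &str) -> Option<String> {
    // Hypothetical lookup, standing in for User::find_by_uuid().
    client_id.strip_prefix("user.").map(str::to_string)
}

fn login(client_id: &str) -> Result<String, String> {
    // The else branch must diverge (return/continue/panic), just like the err!() macro above.
    let Some(user_id) = find_user(client_id) else {
        return Err("Malformed client_id".into());
    };
    Ok(user_id)
}

fn main() {
    assert!(login("user.3fa85f64").is_ok());
    assert!(login("organization.3fa85f64").is_err());
}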
125 src/api/notifications.rs

@ -10,7 +10,7 @@ use rocket_ws::{Message, WebSocket};
use crate::{ use crate::{
auth::{ClientIp, WsAccessTokenHeader}, auth::{ClientIp, WsAccessTokenHeader},
db::{ db::{
models::{Cipher, Folder, Send as DbSend, User}, models::{Cipher, CollectionId, DeviceId, Folder, Send as DbSend, User, UserId},
DbConn, DbConn,
}, },
Error, CONFIG, Error, CONFIG,
@ -53,13 +53,13 @@ struct WsAccessToken {
struct WSEntryMapGuard { struct WSEntryMapGuard {
users: Arc<WebSocketUsers>, users: Arc<WebSocketUsers>,
user_uuid: String, user_uuid: UserId,
entry_uuid: uuid::Uuid, entry_uuid: uuid::Uuid,
addr: IpAddr, addr: IpAddr,
} }
impl WSEntryMapGuard { impl WSEntryMapGuard {
fn new(users: Arc<WebSocketUsers>, user_uuid: String, entry_uuid: uuid::Uuid, addr: IpAddr) -> Self { fn new(users: Arc<WebSocketUsers>, user_uuid: UserId, entry_uuid: uuid::Uuid, addr: IpAddr) -> Self {
Self { Self {
users, users,
user_uuid, user_uuid,
@ -72,7 +72,7 @@ impl WSEntryMapGuard {
impl Drop for WSEntryMapGuard { impl Drop for WSEntryMapGuard {
fn drop(&mut self) { fn drop(&mut self) {
info!("Closing WS connection from {}", self.addr); info!("Closing WS connection from {}", self.addr);
if let Some(mut entry) = self.users.map.get_mut(&self.user_uuid) { if let Some(mut entry) = self.users.map.get_mut(self.user_uuid.as_ref()) {
entry.retain(|(uuid, _)| uuid != &self.entry_uuid); entry.retain(|(uuid, _)| uuid != &self.entry_uuid);
} }
} }
@ -101,6 +101,7 @@ impl Drop for WSAnonymousEntryMapGuard {
} }
} }
#[allow(tail_expr_drop_order)]
#[get("/hub?<data..>")] #[get("/hub?<data..>")]
fn websockets_hub<'r>( fn websockets_hub<'r>(
ws: WebSocket, ws: WebSocket,
@ -129,7 +130,7 @@ fn websockets_hub<'r>(
// Add a channel to send messages to this client to the map // Add a channel to send messages to this client to the map
let entry_uuid = uuid::Uuid::new_v4(); let entry_uuid = uuid::Uuid::new_v4();
let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100); let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100);
users.map.entry(claims.sub.clone()).or_default().push((entry_uuid, tx)); users.map.entry(claims.sub.to_string()).or_default().push((entry_uuid, tx));
// Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map // Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map
(rx, WSEntryMapGuard::new(users, claims.sub, entry_uuid, addr)) (rx, WSEntryMapGuard::new(users, claims.sub, entry_uuid, addr))
@ -186,6 +187,7 @@ fn websockets_hub<'r>(
}) })
} }
#[allow(tail_expr_drop_order)]
#[get("/anonymous-hub?<token..>")] #[get("/anonymous-hub?<token..>")]
fn anonymous_websockets_hub<'r>(ws: WebSocket, token: String, ip: ClientIp) -> Result<rocket_ws::Stream!['r], Error> { fn anonymous_websockets_hub<'r>(ws: WebSocket, token: String, ip: ClientIp) -> Result<rocket_ws::Stream!['r], Error> {
let addr = ip.ip; let addr = ip.ip;
@ -290,7 +292,7 @@ fn serialize(val: Value) -> Vec<u8> {
fn serialize_date(date: NaiveDateTime) -> Value { fn serialize_date(date: NaiveDateTime) -> Value {
let seconds: i64 = date.and_utc().timestamp(); let seconds: i64 = date.and_utc().timestamp();
let nanos: i64 = date.and_utc().timestamp_subsec_nanos().into(); let nanos: i64 = date.and_utc().timestamp_subsec_nanos().into();
let timestamp = nanos << 34 | seconds; let timestamp = (nanos << 34) | seconds;
let bs = timestamp.to_be_bytes(); let bs = timestamp.to_be_bytes();
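The parentheses added in serialize_date above are purely cosmetic: `<<` already binds tighter than `|` in Rust, so the resulting value is unchanged. The packing itself matches the MessagePack "timestamp 64" extension layout, with nanoseconds in the upper 30 bits and seconds in the lower 34 bits. A small round-trip sketch (assuming non-negative values; not code from this diff):

fn pack(seconds: i64, nanos: i64) -> i64 {
    (nanos << 34) | seconds
}

fn unpack(ts: i64) -> (i64, i64) {
    let seconds = ts & ((1i64 << 34) - 1); // low 34 bits
    let nanos = ts >> 34;                  // high 30 bits
    (seconds, nanos)
}

fn main() {
    assert_eq!(unpack(pack(1_700_000_000, 123)), (1_700_000_000, 123));
}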
@ -328,8 +330,8 @@ pub struct WebSocketUsers {
} }
impl WebSocketUsers { impl WebSocketUsers {
async fn send_update(&self, user_uuid: &str, data: &[u8]) { async fn send_update(&self, user_id: &UserId, data: &[u8]) {
if let Some(user) = self.map.get(user_uuid).map(|v| v.clone()) { if let Some(user) = self.map.get(user_id.as_ref()).map(|v| v.clone()) {
for (_, sender) in user.iter() { for (_, sender) in user.iter() {
if let Err(e) = sender.send(Message::binary(data)).await { if let Err(e) = sender.send(Message::binary(data)).await {
error!("Error sending WS update {e}"); error!("Error sending WS update {e}");
@ -345,7 +347,7 @@ impl WebSocketUsers {
return; return;
} }
let data = create_update( let data = create_update(
vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))], vec![("UserId".into(), user.uuid.to_string().into()), ("Date".into(), serialize_date(user.updated_at))],
ut, ut,
None, None,
); );
@ -359,15 +361,15 @@ impl WebSocketUsers {
} }
} }
pub async fn send_logout(&self, user: &User, acting_device_uuid: Option<String>) { pub async fn send_logout(&self, user: &User, acting_device_id: Option<DeviceId>) {
// Skip any processing if both WebSockets and Push are not active // Skip any processing if both WebSockets and Push are not active
if *NOTIFICATIONS_DISABLED { if *NOTIFICATIONS_DISABLED {
return; return;
} }
let data = create_update( let data = create_update(
vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))], vec![("UserId".into(), user.uuid.to_string().into()), ("Date".into(), serialize_date(user.updated_at))],
UpdateType::LogOut, UpdateType::LogOut,
acting_device_uuid.clone(), acting_device_id.clone(),
); );
if CONFIG.enable_websocket() { if CONFIG.enable_websocket() {
@ -375,7 +377,7 @@ impl WebSocketUsers {
} }
if CONFIG.push_enabled() { if CONFIG.push_enabled() {
push_logout(user, acting_device_uuid); push_logout(user, acting_device_id.clone());
} }
} }
@ -383,7 +385,7 @@ impl WebSocketUsers {
&self, &self,
ut: UpdateType, ut: UpdateType,
folder: &Folder, folder: &Folder,
acting_device_uuid: &String, acting_device_id: &DeviceId,
conn: &mut DbConn, conn: &mut DbConn,
) { ) {
// Skip any processing if both WebSockets and Push are not active // Skip any processing if both WebSockets and Push are not active
@ -392,12 +394,12 @@ impl WebSocketUsers {
} }
let data = create_update( let data = create_update(
vec![ vec![
("Id".into(), folder.uuid.clone().into()), ("Id".into(), folder.uuid.to_string().into()),
("UserId".into(), folder.user_uuid.clone().into()), ("UserId".into(), folder.user_uuid.to_string().into()),
("RevisionDate".into(), serialize_date(folder.updated_at)), ("RevisionDate".into(), serialize_date(folder.updated_at)),
], ],
ut, ut,
Some(acting_device_uuid.into()), Some(acting_device_id.clone()),
); );
if CONFIG.enable_websocket() { if CONFIG.enable_websocket() {
@ -405,7 +407,7 @@ impl WebSocketUsers {
} }
if CONFIG.push_enabled() { if CONFIG.push_enabled() {
push_folder_update(ut, folder, acting_device_uuid, conn).await; push_folder_update(ut, folder, acting_device_id, conn).await;
} }
} }
@ -413,48 +415,48 @@ impl WebSocketUsers {
&self, &self,
ut: UpdateType, ut: UpdateType,
cipher: &Cipher, cipher: &Cipher,
user_uuids: &[String], user_ids: &[UserId],
acting_device_uuid: &String, acting_device_id: &DeviceId,
collection_uuids: Option<Vec<String>>, collection_uuids: Option<Vec<CollectionId>>,
conn: &mut DbConn, conn: &mut DbConn,
) { ) {
// Skip any processing if both WebSockets and Push are not active // Skip any processing if both WebSockets and Push are not active
if *NOTIFICATIONS_DISABLED { if *NOTIFICATIONS_DISABLED {
return; return;
} }
let org_uuid = convert_option(cipher.organization_uuid.clone()); let org_id = convert_option(cipher.organization_uuid.as_deref());
// Depending on whether collections are provided, we need different values for the following variables. // Depending on whether collections are provided, we need different values for the following variables.
// The user_uuid should be `null`, and the revision date should be set to now, else the clients won't sync the collection change. // The user_uuid should be `null`, and the revision date should be set to now, else the clients won't sync the collection change.
let (user_uuid, collection_uuids, revision_date) = if let Some(collection_uuids) = collection_uuids { let (user_id, collection_uuids, revision_date) = if let Some(collection_uuids) = collection_uuids {
( (
Value::Nil, Value::Nil,
Value::Array(collection_uuids.into_iter().map(|v| v.into()).collect::<Vec<Value>>()), Value::Array(collection_uuids.into_iter().map(|v| v.to_string().into()).collect::<Vec<Value>>()),
serialize_date(Utc::now().naive_utc()), serialize_date(Utc::now().naive_utc()),
) )
} else { } else {
(convert_option(cipher.user_uuid.clone()), Value::Nil, serialize_date(cipher.updated_at)) (convert_option(cipher.user_uuid.as_deref()), Value::Nil, serialize_date(cipher.updated_at))
}; };
let data = create_update( let data = create_update(
vec![ vec![
("Id".into(), cipher.uuid.clone().into()), ("Id".into(), cipher.uuid.to_string().into()),
("UserId".into(), user_uuid), ("UserId".into(), user_id),
("OrganizationId".into(), org_uuid), ("OrganizationId".into(), org_id),
("CollectionIds".into(), collection_uuids), ("CollectionIds".into(), collection_uuids),
("RevisionDate".into(), revision_date), ("RevisionDate".into(), revision_date),
], ],
ut, ut,
Some(acting_device_uuid.into()), Some(acting_device_id.clone()),
); );
if CONFIG.enable_websocket() { if CONFIG.enable_websocket() {
for uuid in user_uuids { for uuid in user_ids {
self.send_update(uuid, &data).await; self.send_update(uuid, &data).await;
} }
} }
if CONFIG.push_enabled() && user_uuids.len() == 1 { if CONFIG.push_enabled() && user_ids.len() == 1 {
push_cipher_update(ut, cipher, acting_device_uuid, conn).await; push_cipher_update(ut, cipher, acting_device_id, conn).await;
} }
} }
@ -462,20 +464,20 @@ impl WebSocketUsers {
&self, &self,
ut: UpdateType, ut: UpdateType,
send: &DbSend, send: &DbSend,
user_uuids: &[String], user_ids: &[UserId],
acting_device_uuid: &String, acting_device_id: &DeviceId,
conn: &mut DbConn, conn: &mut DbConn,
) { ) {
// Skip any processing if both WebSockets and Push are not active // Skip any processing if both WebSockets and Push are not active
if *NOTIFICATIONS_DISABLED { if *NOTIFICATIONS_DISABLED {
return; return;
} }
let user_uuid = convert_option(send.user_uuid.clone()); let user_id = convert_option(send.user_uuid.as_deref());
let data = create_update( let data = create_update(
vec![ vec![
("Id".into(), send.uuid.clone().into()), ("Id".into(), send.uuid.to_string().into()),
("UserId".into(), user_uuid), ("UserId".into(), user_id),
("RevisionDate".into(), serialize_date(send.revision_date)), ("RevisionDate".into(), serialize_date(send.revision_date)),
], ],
ut, ut,
@ -483,20 +485,20 @@ impl WebSocketUsers {
); );
if CONFIG.enable_websocket() { if CONFIG.enable_websocket() {
for uuid in user_uuids { for uuid in user_ids {
self.send_update(uuid, &data).await; self.send_update(uuid, &data).await;
} }
} }
if CONFIG.push_enabled() && user_uuids.len() == 1 { if CONFIG.push_enabled() && user_ids.len() == 1 {
push_send_update(ut, send, acting_device_uuid, conn).await; push_send_update(ut, send, acting_device_id, conn).await;
} }
} }
pub async fn send_auth_request( pub async fn send_auth_request(
&self, &self,
user_uuid: &String, user_id: &UserId,
auth_request_uuid: &String, auth_request_uuid: &String,
acting_device_uuid: &String, acting_device_id: &DeviceId,
conn: &mut DbConn, conn: &mut DbConn,
) { ) {
// Skip any processing if both WebSockets and Push are not active // Skip any processing if both WebSockets and Push are not active
@ -504,24 +506,24 @@ impl WebSocketUsers {
return; return;
} }
let data = create_update( let data = create_update(
vec![("Id".into(), auth_request_uuid.clone().into()), ("UserId".into(), user_uuid.clone().into())], vec![("Id".into(), auth_request_uuid.clone().into()), ("UserId".into(), user_id.to_string().into())],
UpdateType::AuthRequest, UpdateType::AuthRequest,
Some(acting_device_uuid.to_string()), Some(acting_device_id.clone()),
); );
if CONFIG.enable_websocket() { if CONFIG.enable_websocket() {
self.send_update(user_uuid, &data).await; self.send_update(user_id, &data).await;
} }
if CONFIG.push_enabled() { if CONFIG.push_enabled() {
push_auth_request(user_uuid.to_string(), auth_request_uuid.to_string(), conn).await; push_auth_request(user_id.clone(), auth_request_uuid.to_string(), conn).await;
} }
} }
pub async fn send_auth_response( pub async fn send_auth_response(
&self, &self,
user_uuid: &String, user_id: &UserId,
auth_response_uuid: &str, auth_response_uuid: &str,
approving_device_uuid: String, approving_device_uuid: DeviceId,
conn: &mut DbConn, conn: &mut DbConn,
) { ) {
// Skip any processing if both WebSockets and Push are not active // Skip any processing if both WebSockets and Push are not active
@ -529,17 +531,16 @@ impl WebSocketUsers {
return; return;
} }
let data = create_update( let data = create_update(
vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())], vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_id.to_string().into())],
UpdateType::AuthRequestResponse, UpdateType::AuthRequestResponse,
approving_device_uuid.clone().into(), Some(approving_device_uuid.clone()),
); );
if CONFIG.enable_websocket() { if CONFIG.enable_websocket() {
self.send_update(auth_response_uuid, &data).await; self.send_update(user_id, &data).await;
} }
if CONFIG.push_enabled() { if CONFIG.push_enabled() {
push_auth_response(user_uuid.to_string(), auth_response_uuid.to_string(), approving_device_uuid, conn) push_auth_response(user_id.clone(), auth_response_uuid.to_string(), approving_device_uuid, conn).await;
.await;
} }
} }
} }
@ -558,16 +559,16 @@ impl AnonymousWebSocketSubscriptions {
} }
} }
pub async fn send_auth_response(&self, user_uuid: &String, auth_response_uuid: &str) { pub async fn send_auth_response(&self, user_id: &UserId, auth_response_uuid: &str) {
if !CONFIG.enable_websocket() { if !CONFIG.enable_websocket() {
return; return;
} }
let data = create_anonymous_update( let data = create_anonymous_update(
vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())], vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_id.to_string().into())],
UpdateType::AuthRequestResponse, UpdateType::AuthRequestResponse,
user_uuid.to_string(), user_id.clone(),
); );
self.send_update(auth_response_uuid, &data).await; self.send_update(user_id, &data).await;
} }
} }
@ -579,14 +580,14 @@ impl AnonymousWebSocketSubscriptions {
"ReceiveMessage", // Target "ReceiveMessage", // Target
[ // Arguments [ // Arguments
{ {
"ContextId": acting_device_uuid || Nil, "ContextId": acting_device_id || Nil,
"Type": ut as i32, "Type": ut as i32,
"Payload": {} "Payload": {}
} }
] ]
] ]
*/ */
fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uuid: Option<String>) -> Vec<u8> { fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_id: Option<DeviceId>) -> Vec<u8> {
use rmpv::Value as V; use rmpv::Value as V;
let value = V::Array(vec![ let value = V::Array(vec![
@ -595,7 +596,7 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uui
V::Nil, V::Nil,
"ReceiveMessage".into(), "ReceiveMessage".into(),
V::Array(vec![V::Map(vec![ V::Array(vec![V::Map(vec![
("ContextId".into(), acting_device_uuid.map(|v| v.into()).unwrap_or_else(|| V::Nil)), ("ContextId".into(), acting_device_id.map(|v| v.to_string().into()).unwrap_or_else(|| V::Nil)),
("Type".into(), (ut as i32).into()), ("Type".into(), (ut as i32).into()),
("Payload".into(), payload.into()), ("Payload".into(), payload.into()),
])]), ])]),
@ -604,7 +605,7 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uui
serialize(value) serialize(value)
} }
fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id: String) -> Vec<u8> { fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id: UserId) -> Vec<u8> {
use rmpv::Value as V; use rmpv::Value as V;
let value = V::Array(vec![ let value = V::Array(vec![
@ -615,7 +616,7 @@ fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id
V::Array(vec![V::Map(vec![ V::Array(vec![V::Map(vec![
("Type".into(), (ut as i32).into()), ("Type".into(), (ut as i32).into()),
("Payload".into(), payload.into()), ("Payload".into(), payload.into()),
("UserId".into(), user_id.into()), ("UserId".into(), user_id.to_string().into()),
])]), ])]),
]); ]);

75 src/api/push.rs

@ -7,7 +7,7 @@ use tokio::sync::RwLock;
use crate::{ use crate::{
api::{ApiResult, EmptyResult, UpdateType}, api::{ApiResult, EmptyResult, UpdateType},
db::models::{Cipher, Device, Folder, Send, User}, db::models::{Cipher, Device, DeviceId, Folder, Send, User, UserId},
http_client::make_http_request, http_client::make_http_request,
util::format_date, util::format_date,
CONFIG, CONFIG,
@ -126,15 +126,15 @@ pub async fn register_push_device(device: &mut Device, conn: &mut crate::db::DbC
Ok(()) Ok(())
} }
pub async fn unregister_push_device(push_uuid: Option<String>) -> EmptyResult { pub async fn unregister_push_device(push_id: Option<String>) -> EmptyResult {
if !CONFIG.push_enabled() || push_uuid.is_none() { if !CONFIG.push_enabled() || push_id.is_none() {
return Ok(()); return Ok(());
} }
let auth_push_token = get_auth_push_token().await?; let auth_push_token = get_auth_push_token().await?;
let auth_header = format!("Bearer {}", &auth_push_token); let auth_header = format!("Bearer {}", &auth_push_token);
match make_http_request(Method::DELETE, &(CONFIG.push_relay_uri() + "/push/" + &push_uuid.unwrap()))? match make_http_request(Method::DELETE, &(CONFIG.push_relay_uri() + "/push/" + &push_id.unwrap()))?
.header(AUTHORIZATION, auth_header) .header(AUTHORIZATION, auth_header)
.send() .send()
.await .await
@ -148,27 +148,24 @@ pub async fn unregister_push_device(push_uuid: Option<String>) -> EmptyResult {
pub async fn push_cipher_update( pub async fn push_cipher_update(
ut: UpdateType, ut: UpdateType,
cipher: &Cipher, cipher: &Cipher,
acting_device_uuid: &String, acting_device_id: &DeviceId,
conn: &mut crate::db::DbConn, conn: &mut crate::db::DbConn,
) { ) {
// We shouldn't send a push notification on cipher update if the cipher belongs to an organization; this isn't implemented in the upstream server either. // We shouldn't send a push notification on cipher update if the cipher belongs to an organization; this isn't implemented in the upstream server either.
if cipher.organization_uuid.is_some() { if cipher.organization_uuid.is_some() {
return; return;
}; };
let user_uuid = match &cipher.user_uuid { let Some(user_id) = &cipher.user_uuid else {
Some(c) => c, debug!("Cipher has no uuid");
None => { return;
debug!("Cipher has no uuid");
return;
}
}; };
if Device::check_user_has_push_device(user_uuid, conn).await { if Device::check_user_has_push_device(user_id, conn).await {
send_to_push_relay(json!({ send_to_push_relay(json!({
"userId": user_uuid, "userId": user_id,
"organizationId": (), "organizationId": (),
"deviceId": acting_device_uuid, "deviceId": acting_device_id,
"identifier": acting_device_uuid, "identifier": acting_device_id,
"type": ut as i32, "type": ut as i32,
"payload": { "payload": {
"Id": cipher.uuid, "Id": cipher.uuid,
@ -181,14 +178,14 @@ pub async fn push_cipher_update(
} }
} }
pub fn push_logout(user: &User, acting_device_uuid: Option<String>) { pub fn push_logout(user: &User, acting_device_id: Option<DeviceId>) {
let acting_device_uuid: Value = acting_device_uuid.map(|v| v.into()).unwrap_or_else(|| Value::Null); let acting_device_id: Value = acting_device_id.map(|v| v.to_string().into()).unwrap_or_else(|| Value::Null);
tokio::task::spawn(send_to_push_relay(json!({ tokio::task::spawn(send_to_push_relay(json!({
"userId": user.uuid, "userId": user.uuid,
"organizationId": (), "organizationId": (),
"deviceId": acting_device_uuid, "deviceId": acting_device_id,
"identifier": acting_device_uuid, "identifier": acting_device_id,
"type": UpdateType::LogOut as i32, "type": UpdateType::LogOut as i32,
"payload": { "payload": {
"UserId": user.uuid, "UserId": user.uuid,
@ -214,15 +211,15 @@ pub fn push_user_update(ut: UpdateType, user: &User) {
pub async fn push_folder_update( pub async fn push_folder_update(
ut: UpdateType, ut: UpdateType,
folder: &Folder, folder: &Folder,
acting_device_uuid: &String, acting_device_id: &DeviceId,
conn: &mut crate::db::DbConn, conn: &mut crate::db::DbConn,
) { ) {
if Device::check_user_has_push_device(&folder.user_uuid, conn).await { if Device::check_user_has_push_device(&folder.user_uuid, conn).await {
tokio::task::spawn(send_to_push_relay(json!({ tokio::task::spawn(send_to_push_relay(json!({
"userId": folder.user_uuid, "userId": folder.user_uuid,
"organizationId": (), "organizationId": (),
"deviceId": acting_device_uuid, "deviceId": acting_device_id,
"identifier": acting_device_uuid, "identifier": acting_device_id,
"type": ut as i32, "type": ut as i32,
"payload": { "payload": {
"Id": folder.uuid, "Id": folder.uuid,
@ -233,14 +230,14 @@ pub async fn push_folder_update(
} }
} }
pub async fn push_send_update(ut: UpdateType, send: &Send, acting_device_uuid: &String, conn: &mut crate::db::DbConn) { pub async fn push_send_update(ut: UpdateType, send: &Send, acting_device_id: &DeviceId, conn: &mut crate::db::DbConn) {
if let Some(s) = &send.user_uuid { if let Some(s) = &send.user_uuid {
if Device::check_user_has_push_device(s, conn).await { if Device::check_user_has_push_device(s, conn).await {
tokio::task::spawn(send_to_push_relay(json!({ tokio::task::spawn(send_to_push_relay(json!({
"userId": send.user_uuid, "userId": send.user_uuid,
"organizationId": (), "organizationId": (),
"deviceId": acting_device_uuid, "deviceId": acting_device_id,
"identifier": acting_device_uuid, "identifier": acting_device_id,
"type": ut as i32, "type": ut as i32,
"payload": { "payload": {
"Id": send.uuid, "Id": send.uuid,
@ -287,38 +284,38 @@ async fn send_to_push_relay(notification_data: Value) {
}; };
} }
pub async fn push_auth_request(user_uuid: String, auth_request_uuid: String, conn: &mut crate::db::DbConn) { pub async fn push_auth_request(user_id: UserId, auth_request_id: String, conn: &mut crate::db::DbConn) {
if Device::check_user_has_push_device(user_uuid.as_str(), conn).await { if Device::check_user_has_push_device(&user_id, conn).await {
tokio::task::spawn(send_to_push_relay(json!({ tokio::task::spawn(send_to_push_relay(json!({
"userId": user_uuid, "userId": user_id,
"organizationId": (), "organizationId": (),
"deviceId": null, "deviceId": null,
"identifier": null, "identifier": null,
"type": UpdateType::AuthRequest as i32, "type": UpdateType::AuthRequest as i32,
"payload": { "payload": {
"Id": auth_request_uuid, "Id": auth_request_id,
"UserId": user_uuid, "UserId": user_id,
} }
}))); })));
} }
} }
pub async fn push_auth_response( pub async fn push_auth_response(
user_uuid: String, user_id: UserId,
auth_request_uuid: String, auth_request_id: String,
approving_device_uuid: String, approving_device_id: DeviceId,
conn: &mut crate::db::DbConn, conn: &mut crate::db::DbConn,
) { ) {
if Device::check_user_has_push_device(user_uuid.as_str(), conn).await { if Device::check_user_has_push_device(&user_id, conn).await {
tokio::task::spawn(send_to_push_relay(json!({ tokio::task::spawn(send_to_push_relay(json!({
"userId": user_uuid, "userId": user_id,
"organizationId": (), "organizationId": (),
"deviceId": approving_device_uuid, "deviceId": approving_device_id,
"identifier": approving_device_uuid, "identifier": approving_device_id,
"type": UpdateType::AuthRequestResponse as i32, "type": UpdateType::AuthRequestResponse as i32,
"payload": { "payload": {
"Id": auth_request_uuid, "Id": auth_request_id,
"UserId": user_uuid, "UserId": user_id,
} }
}))); })));
} }

48 src/api/web.rs

@ -1,4 +1,3 @@
use once_cell::sync::Lazy;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use rocket::{ use rocket::{
@ -13,8 +12,9 @@ use serde_json::Value;
use crate::{ use crate::{
api::{core::now, ApiResult, EmptyResult}, api::{core::now, ApiResult, EmptyResult},
auth::decode_file_download, auth::decode_file_download,
db::models::{AttachmentId, CipherId},
error::Error, error::Error,
util::{get_web_vault_version, Cached, SafeString}, util::Cached,
CONFIG, CONFIG,
}; };
@ -54,43 +54,7 @@ fn not_found() -> ApiResult<Html<String>> {
#[get("/css/vaultwarden.css")] #[get("/css/vaultwarden.css")]
fn vaultwarden_css() -> Cached<Css<String>> { fn vaultwarden_css() -> Cached<Css<String>> {
// Configure the web-vault version as an integer so it can be used as a comparison smaller or greater then.
// The default is based upon the version since this feature is added.
static WEB_VAULT_VERSION: Lazy<u32> = Lazy::new(|| {
let re = regex::Regex::new(r"(\d{4})\.(\d{1,2})\.(\d{1,2})").unwrap();
let vault_version = get_web_vault_version();
let (major, minor, patch) = match re.captures(&vault_version) {
Some(c) if c.len() == 4 => (
c.get(1).unwrap().as_str().parse().unwrap(),
c.get(2).unwrap().as_str().parse().unwrap(),
c.get(3).unwrap().as_str().parse().unwrap(),
),
_ => (2024, 6, 2),
};
format!("{major}{minor:02}{patch:02}").parse::<u32>().unwrap()
});
// Configure the Vaultwarden version as an integer so it can be used as a comparison smaller or greater then.
// The default is based upon the version since this feature is added.
static VW_VERSION: Lazy<u32> = Lazy::new(|| {
let re = regex::Regex::new(r"(\d{1})\.(\d{1,2})\.(\d{1,2})").unwrap();
let vw_version = crate::VERSION.unwrap_or("1.32.1");
let (major, minor, patch) = match re.captures(vw_version) {
Some(c) if c.len() == 4 => (
c.get(1).unwrap().as_str().parse().unwrap(),
c.get(2).unwrap().as_str().parse().unwrap(),
c.get(3).unwrap().as_str().parse().unwrap(),
),
_ => (1, 32, 1),
};
format!("{major}{minor:02}{patch:02}").parse::<u32>().unwrap()
});
let css_options = json!({ let css_options = json!({
"web_vault_version": *WEB_VAULT_VERSION,
"vw_version": *VW_VERSION,
"signup_disabled": !CONFIG.signups_allowed() && CONFIG.signups_domains_whitelist().is_empty(), "signup_disabled": !CONFIG.signups_allowed() && CONFIG.signups_domains_whitelist().is_empty(),
"mail_enabled": CONFIG.mail_enabled(), "mail_enabled": CONFIG.mail_enabled(),
"yubico_enabled": CONFIG._enable_yubico() && (CONFIG.yubico_client_id().is_some() == CONFIG.yubico_secret_key().is_some()), "yubico_enabled": CONFIG._enable_yubico() && (CONFIG.yubico_client_id().is_some() == CONFIG.yubico_secret_key().is_some()),
@ -195,16 +159,16 @@ async fn web_files(p: PathBuf) -> Cached<Option<NamedFile>> {
Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).await.ok(), true) Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).await.ok(), true)
} }
#[get("/attachments/<uuid>/<file_id>?<token>")] #[get("/attachments/<cipher_id>/<file_id>?<token>")]
async fn attachments(uuid: SafeString, file_id: SafeString, token: String) -> Option<NamedFile> { async fn attachments(cipher_id: CipherId, file_id: AttachmentId, token: String) -> Option<NamedFile> {
let Ok(claims) = decode_file_download(&token) else { let Ok(claims) = decode_file_download(&token) else {
return None; return None;
}; };
if claims.sub != *uuid || claims.file_id != *file_id { if claims.sub != cipher_id || claims.file_id != file_id {
return None; return None;
} }
NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file_id)).await.ok() NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(cipher_id.as_ref()).join(file_id.as_ref())).await.ok()
} }
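The attachments route above now takes `CipherId` and `AttachmentId` directly as path parameters instead of `SafeString`, which requires those newtypes to implement Rocket's `FromParam` (presumably via the `macros::IdFromParam` derive imported in src/db/models/attachment.rs further below). A rough sketch of what such an impl amounts to; the character validation is an assumption, not the real derive:

use rocket::request::FromParam;

pub struct CipherId(String);

impl<'r> FromParam<'r> for CipherId {
    type Error = &'static str;

    fn from_param(param: &'r str) -> Result<Self, Self::Error> {
        // Hypothetical guard: the real derive likely restricts the character set,
        // much like the SafeString type it replaces.
        if !param.is_empty() && param.chars().all(|c| c.is_ascii_alphanumeric() || c == '-') {
            Ok(CipherId(param.to_string()))
        } else {
            Err("invalid id")
        }
    }
}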
// We use DbConn here to let the alive healthcheck also verify the database connection. // We use DbConn here to let the alive healthcheck also verify the database connection.

141 src/auth.rs

@ -14,6 +14,10 @@ use std::{
net::IpAddr, net::IpAddr,
}; };
use crate::db::models::{
AttachmentId, CipherId, CollectionId, DeviceId, EmergencyAccessId, MembershipId, OrgApiKeyId, OrganizationId,
SendFileId, SendId, UserId,
};
use crate::{error::Error, CONFIG}; use crate::{error::Error, CONFIG};
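src/auth.rs now imports a family of typed IDs instead of passing bare `String`s around. Judging from the derives visible in src/db/models/attachment.rs below (`derive_more::{AsRef, Deref, Display}` plus `macros::IdFromParam`), each of these is a thin newtype over `String`; the exact derive list per type is an assumption. A minimal sketch:

use derive_more::{AsRef, Deref, Display, From};

// Hypothetical reduced version of one ID newtype; the real definitions live in src/db/models.
#[derive(Clone, Debug, PartialEq, Eq, Hash, AsRef, Deref, Display, From)]
pub struct UserId(String);

fn main() {
    let id: UserId = "3fa85f64-5717-4562-b3fc-2c963f66afa6".to_string().into();
    let as_str: &str = &id; // Deref keeps existing &str-based call sites working
    assert_eq!(as_str.len(), 36);
    println!("{id}");
}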
const JWT_ALGORITHM: Algorithm = Algorithm::RS256; const JWT_ALGORITHM: Algorithm = Algorithm::RS256;
@ -150,7 +154,7 @@ pub struct LoginJwtClaims {
// Issuer // Issuer
pub iss: String, pub iss: String,
// Subject // Subject
pub sub: String, pub sub: UserId,
pub premium: bool, pub premium: bool,
pub name: String, pub name: String,
@ -171,7 +175,7 @@ pub struct LoginJwtClaims {
// user security_stamp // user security_stamp
pub sstamp: String, pub sstamp: String,
// device uuid // device uuid
pub device: String, pub device: DeviceId,
// [ "api", "offline_access" ] // [ "api", "offline_access" ]
pub scope: Vec<String>, pub scope: Vec<String>,
// [ "Application" ] // [ "Application" ]
@ -187,19 +191,19 @@ pub struct InviteJwtClaims {
// Issuer // Issuer
pub iss: String, pub iss: String,
// Subject // Subject
pub sub: String, pub sub: UserId,
pub email: String, pub email: String,
pub org_id: Option<String>, pub org_id: Option<OrganizationId>,
pub user_org_id: Option<String>, pub member_id: Option<MembershipId>,
pub invited_by_email: Option<String>, pub invited_by_email: Option<String>,
} }
pub fn generate_invite_claims( pub fn generate_invite_claims(
uuid: String, user_id: UserId,
email: String, email: String,
org_id: Option<String>, org_id: Option<OrganizationId>,
user_org_id: Option<String>, member_id: Option<MembershipId>,
invited_by_email: Option<String>, invited_by_email: Option<String>,
) -> InviteJwtClaims { ) -> InviteJwtClaims {
let time_now = Utc::now(); let time_now = Utc::now();
@ -208,10 +212,10 @@ pub fn generate_invite_claims(
nbf: time_now.timestamp(), nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(), exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
iss: JWT_INVITE_ISSUER.to_string(), iss: JWT_INVITE_ISSUER.to_string(),
sub: uuid, sub: user_id,
email, email,
org_id, org_id,
user_org_id, member_id,
invited_by_email, invited_by_email,
} }
} }
@ -225,18 +229,18 @@ pub struct EmergencyAccessInviteJwtClaims {
// Issuer // Issuer
pub iss: String, pub iss: String,
// Subject // Subject
pub sub: String, pub sub: UserId,
pub email: String, pub email: String,
pub emer_id: String, pub emer_id: EmergencyAccessId,
pub grantor_name: String, pub grantor_name: String,
pub grantor_email: String, pub grantor_email: String,
} }
pub fn generate_emergency_access_invite_claims( pub fn generate_emergency_access_invite_claims(
uuid: String, user_id: UserId,
email: String, email: String,
emer_id: String, emer_id: EmergencyAccessId,
grantor_name: String, grantor_name: String,
grantor_email: String, grantor_email: String,
) -> EmergencyAccessInviteJwtClaims { ) -> EmergencyAccessInviteJwtClaims {
@ -246,7 +250,7 @@ pub fn generate_emergency_access_invite_claims(
nbf: time_now.timestamp(), nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(), exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
iss: JWT_EMERGENCY_ACCESS_INVITE_ISSUER.to_string(), iss: JWT_EMERGENCY_ACCESS_INVITE_ISSUER.to_string(),
sub: uuid, sub: user_id,
email, email,
emer_id, emer_id,
grantor_name, grantor_name,
@ -263,21 +267,24 @@ pub struct OrgApiKeyLoginJwtClaims {
// Issuer // Issuer
pub iss: String, pub iss: String,
// Subject // Subject
pub sub: String, pub sub: OrgApiKeyId,
pub client_id: String, pub client_id: String,
pub client_sub: String, pub client_sub: OrganizationId,
pub scope: Vec<String>, pub scope: Vec<String>,
} }
pub fn generate_organization_api_key_login_claims(uuid: String, org_id: String) -> OrgApiKeyLoginJwtClaims { pub fn generate_organization_api_key_login_claims(
org_api_key_uuid: OrgApiKeyId,
org_id: OrganizationId,
) -> OrgApiKeyLoginJwtClaims {
let time_now = Utc::now(); let time_now = Utc::now();
OrgApiKeyLoginJwtClaims { OrgApiKeyLoginJwtClaims {
nbf: time_now.timestamp(), nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_hours(1).unwrap()).timestamp(), exp: (time_now + TimeDelta::try_hours(1).unwrap()).timestamp(),
iss: JWT_ORG_API_KEY_ISSUER.to_string(), iss: JWT_ORG_API_KEY_ISSUER.to_string(),
sub: uuid, sub: org_api_key_uuid,
client_id: format!("organization.{org_id}"), client_id: format!("organization.{}", org_id),
client_sub: org_id, client_sub: org_id,
scope: vec!["api.organization".into()], scope: vec!["api.organization".into()],
} }
@ -292,18 +299,18 @@ pub struct FileDownloadClaims {
// Issuer // Issuer
pub iss: String, pub iss: String,
// Subject // Subject
pub sub: String, pub sub: CipherId,
pub file_id: String, pub file_id: AttachmentId,
} }
pub fn generate_file_download_claims(uuid: String, file_id: String) -> FileDownloadClaims { pub fn generate_file_download_claims(cipher_id: CipherId, file_id: AttachmentId) -> FileDownloadClaims {
let time_now = Utc::now(); let time_now = Utc::now();
FileDownloadClaims { FileDownloadClaims {
nbf: time_now.timestamp(), nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_minutes(5).unwrap()).timestamp(), exp: (time_now + TimeDelta::try_minutes(5).unwrap()).timestamp(),
iss: JWT_FILE_DOWNLOAD_ISSUER.to_string(), iss: JWT_FILE_DOWNLOAD_ISSUER.to_string(),
sub: uuid, sub: cipher_id,
file_id, file_id,
} }
} }
@ -331,14 +338,14 @@ pub fn generate_delete_claims(uuid: String) -> BasicJwtClaims {
} }
} }
pub fn generate_verify_email_claims(uuid: String) -> BasicJwtClaims { pub fn generate_verify_email_claims(user_id: UserId) -> BasicJwtClaims {
let time_now = Utc::now(); let time_now = Utc::now();
let expire_hours = i64::from(CONFIG.invitation_expiration_hours()); let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
BasicJwtClaims { BasicJwtClaims {
nbf: time_now.timestamp(), nbf: time_now.timestamp(),
exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(), exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
iss: JWT_VERIFYEMAIL_ISSUER.to_string(), iss: JWT_VERIFYEMAIL_ISSUER.to_string(),
sub: uuid, sub: user_id.to_string(),
} }
} }
@ -352,7 +359,7 @@ pub fn generate_admin_claims() -> BasicJwtClaims {
} }
} }
pub fn generate_send_claims(send_id: &str, file_id: &str) -> BasicJwtClaims { pub fn generate_send_claims(send_id: &SendId, file_id: &SendFileId) -> BasicJwtClaims {
let time_now = Utc::now(); let time_now = Utc::now();
BasicJwtClaims { BasicJwtClaims {
nbf: time_now.timestamp(), nbf: time_now.timestamp(),
@ -371,7 +378,7 @@ use rocket::{
}; };
use crate::db::{ use crate::db::{
models::{Collection, Device, User, UserOrgStatus, UserOrgType, UserOrganization, UserStampException}, models::{Collection, Device, Membership, MembershipStatus, MembershipType, User, UserStampException},
DbConn, DbConn,
}; };
@ -471,36 +478,32 @@ impl<'r> FromRequest<'r> for Headers {
}; };
// Check JWT token is valid and get device and user from it // Check JWT token is valid and get device and user from it
let claims = match decode_login(access_token) { let Ok(claims) = decode_login(access_token) else {
Ok(claims) => claims, err_handler!("Invalid claim")
Err(_) => err_handler!("Invalid claim"),
}; };
let device_uuid = claims.device; let device_id = claims.device;
let user_uuid = claims.sub; let user_id = claims.sub;
let mut conn = match DbConn::from_request(request).await { let mut conn = match DbConn::from_request(request).await {
Outcome::Success(conn) => conn, Outcome::Success(conn) => conn,
_ => err_handler!("Error getting DB"), _ => err_handler!("Error getting DB"),
}; };
let device = match Device::find_by_uuid_and_user(&device_uuid, &user_uuid, &mut conn).await { let Some(device) = Device::find_by_uuid_and_user(&device_id, &user_id, &mut conn).await else {
Some(device) => device, err_handler!("Invalid device id")
None => err_handler!("Invalid device id"),
}; };
let user = match User::find_by_uuid(&user_uuid, &mut conn).await { let Some(user) = User::find_by_uuid(&user_id, &mut conn).await else {
Some(user) => user, err_handler!("Device has no user associated")
None => err_handler!("Device has no user associated"),
}; };
if user.security_stamp != claims.sstamp { if user.security_stamp != claims.sstamp {
if let Some(stamp_exception) = if let Some(stamp_exception) =
user.stamp_exception.as_deref().and_then(|s| serde_json::from_str::<UserStampException>(s).ok()) user.stamp_exception.as_deref().and_then(|s| serde_json::from_str::<UserStampException>(s).ok())
{ {
let current_route = match request.route().and_then(|r| r.name.as_deref()) { let Some(current_route) = request.route().and_then(|r| r.name.as_deref()) else {
Some(name) => name, err_handler!("Error getting current route for stamp exception")
_ => err_handler!("Error getting current route for stamp exception"),
}; };
// Check if the stamp exception has expired first. // Check if the stamp exception has expired first.
@ -538,8 +541,8 @@ pub struct OrgHeaders {
pub host: String, pub host: String,
pub device: Device, pub device: Device,
pub user: User, pub user: User,
pub org_user_type: UserOrgType, pub membership_type: MembershipType,
pub org_user: UserOrganization, pub membership: Membership,
pub ip: ClientIp, pub ip: ClientIp,
} }
@ -553,17 +556,17 @@ impl<'r> FromRequest<'r> for OrgHeaders {
// org_id is usually the second path param ("/organizations/<org_id>"), // org_id is usually the second path param ("/organizations/<org_id>"),
// but there are cases where it is a query value. // but there are cases where it is a query value.
// First check the path; if this is not a valid uuid, try the query values. // First check the path; if this is not a valid uuid, try the query values.
let url_org_id: Option<&str> = { let url_org_id: Option<OrganizationId> = {
let mut url_org_id = None; let mut url_org_id = None;
if let Some(Ok(org_id)) = request.param::<&str>(1) { if let Some(Ok(org_id)) = request.param::<&str>(1) {
if uuid::Uuid::parse_str(org_id).is_ok() { if uuid::Uuid::parse_str(org_id).is_ok() {
url_org_id = Some(org_id); url_org_id = Some(org_id.to_string().into());
} }
} }
if let Some(Ok(org_id)) = request.query_value::<&str>("organizationId") { if let Some(Ok(org_id)) = request.query_value::<&str>("organizationId") {
if uuid::Uuid::parse_str(org_id).is_ok() { if uuid::Uuid::parse_str(org_id).is_ok() {
url_org_id = Some(org_id); url_org_id = Some(org_id.to_string().into());
} }
} }
@ -578,10 +581,10 @@ impl<'r> FromRequest<'r> for OrgHeaders {
}; };
let user = headers.user; let user = headers.user;
let org_user = match UserOrganization::find_by_user_and_org(&user.uuid, org_id, &mut conn).await { let membership = match Membership::find_by_user_and_org(&user.uuid, &org_id, &mut conn).await {
Some(user) => { Some(member) => {
if user.status == UserOrgStatus::Confirmed as i32 { if member.status == MembershipStatus::Confirmed as i32 {
user member
} else { } else {
err_handler!("The current user isn't confirmed member of the organization") err_handler!("The current user isn't confirmed member of the organization")
} }
@ -593,15 +596,15 @@ impl<'r> FromRequest<'r> for OrgHeaders {
host: headers.host, host: headers.host,
device: headers.device, device: headers.device,
user, user,
org_user_type: { membership_type: {
if let Some(org_usr_type) = UserOrgType::from_i32(org_user.atype) { if let Some(org_usr_type) = MembershipType::from_i32(membership.atype) {
org_usr_type org_usr_type
} else { } else {
// This should only happen if the DB is corrupted // This should only happen if the DB is corrupted
err_handler!("Unknown user type in the database") err_handler!("Unknown user type in the database")
} }
}, },
org_user, membership,
ip: headers.ip, ip: headers.ip,
}) })
} }
@ -614,7 +617,7 @@ pub struct AdminHeaders {
pub host: String, pub host: String,
pub device: Device, pub device: Device,
pub user: User, pub user: User,
pub org_user_type: UserOrgType, pub membership_type: MembershipType,
pub ip: ClientIp, pub ip: ClientIp,
} }
@ -624,12 +627,12 @@ impl<'r> FromRequest<'r> for AdminHeaders {
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> { async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = try_outcome!(OrgHeaders::from_request(request).await); let headers = try_outcome!(OrgHeaders::from_request(request).await);
if headers.org_user_type >= UserOrgType::Admin { if headers.membership_type >= MembershipType::Admin {
Outcome::Success(Self { Outcome::Success(Self {
host: headers.host, host: headers.host,
device: headers.device, device: headers.device,
user: headers.user, user: headers.user,
org_user_type: headers.org_user_type, membership_type: headers.membership_type,
ip: headers.ip, ip: headers.ip,
}) })
} else { } else {
@ -652,16 +655,16 @@ impl From<AdminHeaders> for Headers {
// col_id is usually the fourth path param ("/organizations/<org_id>/collections/<col_id>"), // col_id is usually the fourth path param ("/organizations/<org_id>/collections/<col_id>"),
// but there could be cases where it is a query value. // but there could be cases where it is a query value.
// First check the path; if this is not a valid uuid, try the query values. // First check the path; if this is not a valid uuid, try the query values.
fn get_col_id(request: &Request<'_>) -> Option<String> { fn get_col_id(request: &Request<'_>) -> Option<CollectionId> {
if let Some(Ok(col_id)) = request.param::<String>(3) { if let Some(Ok(col_id)) = request.param::<String>(3) {
if uuid::Uuid::parse_str(&col_id).is_ok() { if uuid::Uuid::parse_str(&col_id).is_ok() {
return Some(col_id); return Some(col_id.into());
} }
} }
if let Some(Ok(col_id)) = request.query_value::<String>("collectionId") { if let Some(Ok(col_id)) = request.query_value::<String>("collectionId") {
if uuid::Uuid::parse_str(&col_id).is_ok() { if uuid::Uuid::parse_str(&col_id).is_ok() {
return Some(col_id); return Some(col_id.into());
} }
} }
@ -684,7 +687,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> { async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = try_outcome!(OrgHeaders::from_request(request).await); let headers = try_outcome!(OrgHeaders::from_request(request).await);
if headers.org_user_type >= UserOrgType::Manager { if headers.membership_type >= MembershipType::Manager {
match get_col_id(request) { match get_col_id(request) {
Some(col_id) => { Some(col_id) => {
let mut conn = match DbConn::from_request(request).await { let mut conn = match DbConn::from_request(request).await {
@ -692,7 +695,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
_ => err_handler!("Error getting DB"), _ => err_handler!("Error getting DB"),
}; };
if !Collection::can_access_collection(&headers.org_user, &col_id, &mut conn).await { if !Collection::can_access_collection(&headers.membership, &col_id, &mut conn).await {
err_handler!("The current user isn't a manager for this collection") err_handler!("The current user isn't a manager for this collection")
} }
} }
@ -728,7 +731,7 @@ pub struct ManagerHeadersLoose {
pub host: String, pub host: String,
pub device: Device, pub device: Device,
pub user: User, pub user: User,
pub org_user: UserOrganization, pub membership: Membership,
pub ip: ClientIp, pub ip: ClientIp,
} }
@ -738,12 +741,12 @@ impl<'r> FromRequest<'r> for ManagerHeadersLoose {
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> { async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = try_outcome!(OrgHeaders::from_request(request).await); let headers = try_outcome!(OrgHeaders::from_request(request).await);
if headers.org_user_type >= UserOrgType::Manager { if headers.membership_type >= MembershipType::Manager {
Outcome::Success(Self { Outcome::Success(Self {
host: headers.host, host: headers.host,
device: headers.device, device: headers.device,
user: headers.user, user: headers.user,
org_user: headers.org_user, membership: headers.membership,
ip: headers.ip, ip: headers.ip,
}) })
} else { } else {
@ -766,14 +769,14 @@ impl From<ManagerHeadersLoose> for Headers {
impl ManagerHeaders { impl ManagerHeaders {
pub async fn from_loose( pub async fn from_loose(
h: ManagerHeadersLoose, h: ManagerHeadersLoose,
collections: &Vec<String>, collections: &Vec<CollectionId>,
conn: &mut DbConn, conn: &mut DbConn,
) -> Result<ManagerHeaders, Error> { ) -> Result<ManagerHeaders, Error> {
for col_id in collections { for col_id in collections {
if uuid::Uuid::parse_str(col_id).is_err() { if uuid::Uuid::parse_str(col_id.as_ref()).is_err() {
err!("Collection Id is malformed!"); err!("Collection Id is malformed!");
} }
if !Collection::can_access_collection(&h.org_user, col_id, conn).await { if !Collection::can_access_collection(&h.membership, col_id, conn).await {
err!("You don't have access to all collections!"); err!("You don't have access to all collections!");
} }
} }
@ -799,7 +802,7 @@ impl<'r> FromRequest<'r> for OwnerHeaders {
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> { async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = try_outcome!(OrgHeaders::from_request(request).await); let headers = try_outcome!(OrgHeaders::from_request(request).await);
if headers.org_user_type == UserOrgType::Owner { if headers.membership_type == MembershipType::Owner {
Outcome::Success(Self { Outcome::Success(Self {
device: headers.device, device: headers.device,
user: headers.user, user: headers.user,

56 src/config.rs

@ -12,7 +12,7 @@ use reqwest::Url;
use crate::{ use crate::{
db::DbConnType, db::DbConnType,
error::Error, error::Error,
util::{get_env, get_env_bool, parse_experimental_client_feature_flags}, util::{get_env, get_env_bool, get_web_vault_version, parse_experimental_client_feature_flags},
}; };
static CONFIG_FILE: Lazy<String> = Lazy::new(|| { static CONFIG_FILE: Lazy<String> = Lazy::new(|| {
@ -238,6 +238,7 @@ macro_rules! make_config {
// Besides Pass, only String types will be masked via _privacy_mask. // Besides Pass, only String types will be masked via _privacy_mask.
const PRIVACY_CONFIG: &[&str] = &[ const PRIVACY_CONFIG: &[&str] = &[
"allowed_iframe_ancestors", "allowed_iframe_ancestors",
"allowed_connect_src",
"database_url", "database_url",
"domain_origin", "domain_origin",
"domain_path", "domain_path",
@ -248,6 +249,7 @@ macro_rules! make_config {
"smtp_from", "smtp_from",
"smtp_host", "smtp_host",
"smtp_username", "smtp_username",
"_smtp_img_src",
]; ];
let cfg = { let cfg = {
@ -609,6 +611,9 @@ make_config! {
/// Allowed iframe ancestors (Know the risks!) |> Allows other domains to embed the web vault into an iframe, useful for embedding into secure intranets /// Allowed iframe ancestors (Know the risks!) |> Allows other domains to embed the web vault into an iframe, useful for embedding into secure intranets
allowed_iframe_ancestors: String, true, def, String::new(); allowed_iframe_ancestors: String, true, def, String::new();
/// Allowed connect-src (Know the risks!) |> Allows other domains to be added to the connect-src CSP so their URLs can be loaded via script interfaces, e.g. for the Forwarded email alias feature
allowed_connect_src: String, true, def, String::new();
/// Seconds between login requests |> Number of seconds, on average, between login and 2FA requests from the same IP address before rate limiting kicks in /// Seconds between login requests |> Number of seconds, on average, between login and 2FA requests from the same IP address before rate limiting kicks in
login_ratelimit_seconds: u64, false, def, 60; login_ratelimit_seconds: u64, false, def, 60;
/// Max burst size for login requests |> Allow a burst of requests of up to this size, while maintaining the average indicated by `login_ratelimit_seconds`. Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2 /// Max burst size for login requests |> Allow a burst of requests of up to this size, while maintaining the average indicated by `login_ratelimit_seconds`. Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2
@ -760,6 +765,13 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
); );
} }
let connect_src = cfg.allowed_connect_src.to_lowercase();
for url in connect_src.split_whitespace() {
if !url.starts_with("https://") || Url::parse(url).is_err() {
err!("ALLOWED_CONNECT_SRC variable contains one or more invalid URLs. Only FQDNs starting with https:// are allowed");
}
}
let whitelist = &cfg.signups_domains_whitelist; let whitelist = &cfg.signups_domains_whitelist;
if !whitelist.is_empty() && whitelist.split(',').any(|d| d.trim().is_empty()) { if !whitelist.is_empty() && whitelist.split(',').any(|d| d.trim().is_empty()) {
err!("`SIGNUPS_DOMAINS_WHITELIST` contains empty tokens"); err!("`SIGNUPS_DOMAINS_WHITELIST` contains empty tokens");
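The new ALLOWED_CONNECT_SRC validation above accepts a whitespace-separated list and requires every entry to be an https URL that `Url::parse` accepts. A hypothetical value, purely to illustrate the check (these hosts are examples, not defaults from this diff):

use reqwest::Url;

fn main() {
    let allowed_connect_src = "https://app.addy.io https://app.simplelogin.io";
    for url in allowed_connect_src.to_lowercase().split_whitespace() {
        assert!(url.starts_with("https://") && Url::parse(url).is_ok());
    }
}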
@ -817,6 +829,7 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
"browser-fileless-import", "browser-fileless-import",
"extension-refresh", "extension-refresh",
"fido2-vault-credentials", "fido2-vault-credentials",
"inline-menu-positioning-improvements",
"ssh-key-vault-item", "ssh-key-vault-item",
"ssh-agent", "ssh-agent",
]; ];
@ -1314,6 +1327,8 @@ where
// Register helpers // Register helpers
hb.register_helper("case", Box::new(case_helper)); hb.register_helper("case", Box::new(case_helper));
hb.register_helper("to_json", Box::new(to_json)); hb.register_helper("to_json", Box::new(to_json));
hb.register_helper("webver", Box::new(webver));
hb.register_helper("vwver", Box::new(vwver));
macro_rules! reg { macro_rules! reg {
($name:expr) => {{ ($name:expr) => {{
@ -1417,3 +1432,42 @@ fn to_json<'reg, 'rc>(
out.write(&json)?; out.write(&json)?;
Ok(()) Ok(())
} }
// Parse the web-vault version as a semver Version so it can be compared (smaller or greater than).
// The default is based upon the version at the time this feature was added.
static WEB_VAULT_VERSION: Lazy<semver::Version> = Lazy::new(|| {
let vault_version = get_web_vault_version();
// Use a single regex capture to extract version components
let re = regex::Regex::new(r"(\d{4})\.(\d{1,2})\.(\d{1,2})").unwrap();
re.captures(&vault_version)
.and_then(|c| {
(c.len() == 4).then(|| {
format!("{}.{}.{}", c.get(1).unwrap().as_str(), c.get(2).unwrap().as_str(), c.get(3).unwrap().as_str())
})
})
.and_then(|v| semver::Version::parse(&v).ok())
.unwrap_or_else(|| semver::Version::parse("2024.6.2").unwrap())
});
// Parse the Vaultwarden version as a semver Version so it can be compared (smaller or greater than).
// The default is based upon the version at the time this feature was added.
static VW_VERSION: Lazy<semver::Version> = Lazy::new(|| {
let vw_version = crate::VERSION.unwrap_or("1.32.5");
// Use a single regex capture to extract version components
let re = regex::Regex::new(r"(\d{1})\.(\d{1,2})\.(\d{1,2})").unwrap();
re.captures(vw_version)
.and_then(|c| {
(c.len() == 4).then(|| {
format!("{}.{}.{}", c.get(1).unwrap().as_str(), c.get(2).unwrap().as_str(), c.get(3).unwrap().as_str())
})
})
.and_then(|v| semver::Version::parse(&v).ok())
.unwrap_or_else(|| semver::Version::parse("1.32.5").unwrap())
});
handlebars::handlebars_helper!(webver: | web_vault_version: String |
semver::VersionReq::parse(&web_vault_version).expect("Invalid web-vault version compare string").matches(&WEB_VAULT_VERSION)
);
handlebars::handlebars_helper!(vwver: | vw_version: String |
semver::VersionReq::parse(&vw_version).expect("Invalid Vaultwarden version compare string").matches(&VW_VERSION)
);
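The two handlebars helpers registered above let templates branch on version ranges, e.g. a call like (webver ">=2024.6.2") is matched against the parsed web-vault version. The semver comparison they wrap boils down to the following (sketch, not from this diff):

fn main() {
    let installed = semver::Version::parse("2024.6.2").unwrap();
    assert!(semver::VersionReq::parse(">=2024.6.2").unwrap().matches(&installed));
    assert!(!semver::VersionReq::parse(">2024.6.2").unwrap().matches(&installed));
}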

9 src/crypto.rs

@@ -84,14 +84,15 @@ pub fn generate_id<const N: usize>() -> String {
    encode_random_bytes::<N>(HEXLOWER)
 }

-pub fn generate_send_id() -> String {
-    // Send IDs are globally scoped, so make them longer to avoid collisions.
+pub fn generate_send_file_id() -> String {
+    // Send File IDs are globally scoped, so make them longer to avoid collisions.
     generate_id::<32>() // 256 bits
 }

-pub fn generate_attachment_id() -> String {
+use crate::db::models::AttachmentId;
+pub fn generate_attachment_id() -> AttachmentId {
     // Attachment IDs are scoped to a cipher, so they can be smaller.
-    generate_id::<10>() // 80 bits
+    AttachmentId(generate_id::<10>()) // 80 bits
 }

 /// Generates a numeric token for email-based verifications.
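As a brief, hypothetical illustration of why the changed return type matters: once generate_attachment_id returns the AttachmentId newtype (introduced in src/db/models/attachment.rs further down), an attachment id can no longer be handed to code expecting some other string id.

// Hypothetical helper, not part of the codebase; it only illustrates the type check.
fn store_attachment_meta(id: &AttachmentId, file_name: &str) {
    println!("storing {file_name} as attachment {id}");
}

fn example() {
    let id = generate_attachment_id();        // now an AttachmentId, not a bare String
    store_attachment_meta(&id, "notes.txt");  // passing a String or a CipherId here would not compile
}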

52 src/db/models/attachment.rs

@@ -1,9 +1,12 @@
 use std::io::ErrorKind;

 use bigdecimal::{BigDecimal, ToPrimitive};
+use derive_more::{AsRef, Deref, Display};
 use serde_json::Value;

+use super::{CipherId, OrganizationId, UserId};
 use crate::CONFIG;
+use macros::IdFromParam;
db_object! { db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)] #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@@ -11,8 +14,8 @@ db_object! {
    #[diesel(treat_none_as_null = true)]
    #[diesel(primary_key(id))]
    pub struct Attachment {
-       pub id: String,
-       pub cipher_uuid: String,
+       pub id: AttachmentId,
+       pub cipher_uuid: CipherId,
        pub file_name: String, // encrypted
        pub file_size: i64,
        pub akey: Option<String>,
@@ -21,7 +24,13 @@ db_object! {
 /// Local methods
 impl Attachment {
-    pub const fn new(id: String, cipher_uuid: String, file_name: String, file_size: i64, akey: Option<String>) -> Self {
+    pub const fn new(
+        id: AttachmentId,
+        cipher_uuid: CipherId,
+        file_name: String,
+        file_size: i64,
+        akey: Option<String>,
+    ) -> Self {
         Self {
             id,
             cipher_uuid,
@ -117,14 +126,14 @@ impl Attachment {
}} }}
} }
pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult {
for attachment in Attachment::find_by_cipher(cipher_uuid, conn).await { for attachment in Attachment::find_by_cipher(cipher_uuid, conn).await {
attachment.delete(conn).await?; attachment.delete(conn).await?;
} }
Ok(()) Ok(())
} }
pub async fn find_by_id(id: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_id(id: &AttachmentId, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
attachments::table attachments::table
.filter(attachments::id.eq(id.to_lowercase())) .filter(attachments::id.eq(id.to_lowercase()))
@ -134,7 +143,7 @@ impl Attachment {
}} }}
} }
pub async fn find_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
attachments::table attachments::table
.filter(attachments::cipher_uuid.eq(cipher_uuid)) .filter(attachments::cipher_uuid.eq(cipher_uuid))
@ -144,7 +153,7 @@ impl Attachment {
}} }}
} }
pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 { pub async fn size_by_user(user_uuid: &UserId, conn: &mut DbConn) -> i64 {
db_run! { conn: { db_run! { conn: {
let result: Option<BigDecimal> = attachments::table let result: Option<BigDecimal> = attachments::table
.left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@ -161,7 +170,7 @@ impl Attachment {
}} }}
} }
pub async fn count_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 { pub async fn count_by_user(user_uuid: &UserId, conn: &mut DbConn) -> i64 {
db_run! { conn: { db_run! { conn: {
attachments::table attachments::table
.left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@ -172,7 +181,7 @@ impl Attachment {
}} }}
} }
pub async fn size_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 { pub async fn size_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
db_run! { conn: { db_run! { conn: {
let result: Option<BigDecimal> = attachments::table let result: Option<BigDecimal> = attachments::table
.left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@ -189,7 +198,7 @@ impl Attachment {
}} }}
} }
pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 { pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
db_run! { conn: { db_run! { conn: {
attachments::table attachments::table
.left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@ -203,7 +212,11 @@ impl Attachment {
// This will return all attachments linked to the user or org // This will return all attachments linked to the user or org
// There is no filtering done here if the user actually has access! // There is no filtering done here if the user actually has access!
// It is used to speed up the sync process, and the matching is done in a different part. // It is used to speed up the sync process, and the matching is done in a different part.
pub async fn find_all_by_user_and_orgs(user_uuid: &str, org_uuids: &Vec<String>, conn: &mut DbConn) -> Vec<Self> { pub async fn find_all_by_user_and_orgs(
user_uuid: &UserId,
org_uuids: &Vec<OrganizationId>,
conn: &mut DbConn,
) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
attachments::table attachments::table
.left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
@ -216,3 +229,20 @@ impl Attachment {
}} }}
} }
} }
#[derive(
Clone,
Debug,
AsRef,
Deref,
DieselNewType,
Display,
FromForm,
Hash,
PartialEq,
Eq,
Serialize,
Deserialize,
IdFromParam,
)]
pub struct AttachmentId(pub String);
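A short sketch of what the derive list buys, assuming the derives behave like their usual derive_more and serde counterparts; none of the lines below are taken from the diff itself:

fn newtype_ergonomics() {
    let id = AttachmentId(String::from("o4yda3n7x8"));
    println!("attachment {id}");                        // Display
    assert_eq!(id.len(), 10);                           // Deref<Target = String> exposes String methods
    let as_json = serde_json::to_string(&id).unwrap();
    assert_eq!(as_json, "\"o4yda3n7x8\"");              // Serialize keeps it a plain JSON string
}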

52 src/db/models/auth_request.rs

@@ -1,5 +1,8 @@
+use super::{DeviceId, OrganizationId, UserId};
 use crate::crypto::ct_eq;
 use chrono::{NaiveDateTime, Utc};
+use derive_more::{AsRef, Deref, Display, From};
+use macros::UuidFromParam;
db_object! { db_object! {
#[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset, Deserialize, Serialize)] #[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset, Deserialize, Serialize)]
@ -7,15 +10,15 @@ db_object! {
#[diesel(treat_none_as_null = true)] #[diesel(treat_none_as_null = true)]
#[diesel(primary_key(uuid))] #[diesel(primary_key(uuid))]
pub struct AuthRequest { pub struct AuthRequest {
pub uuid: String, pub uuid: AuthRequestId,
pub user_uuid: String, pub user_uuid: UserId,
pub organization_uuid: Option<String>, pub organization_uuid: Option<OrganizationId>,
pub request_device_identifier: String, pub request_device_identifier: DeviceId,
pub device_type: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs pub device_type: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs
pub request_ip: String, pub request_ip: String,
pub response_device_id: Option<String>, pub response_device_id: Option<DeviceId>,
pub access_code: String, pub access_code: String,
pub public_key: String, pub public_key: String,
@ -33,8 +36,8 @@ db_object! {
impl AuthRequest { impl AuthRequest {
pub fn new( pub fn new(
user_uuid: String, user_uuid: UserId,
request_device_identifier: String, request_device_identifier: DeviceId,
device_type: i32, device_type: i32,
request_ip: String, request_ip: String,
access_code: String, access_code: String,
@ -43,7 +46,7 @@ impl AuthRequest {
let now = Utc::now().naive_utc(); let now = Utc::now().naive_utc();
Self { Self {
uuid: crate::util::get_uuid(), uuid: AuthRequestId(crate::util::get_uuid()),
user_uuid, user_uuid,
organization_uuid: None, organization_uuid: None,
@ -101,7 +104,7 @@ impl AuthRequest {
} }
} }
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_uuid(uuid: &AuthRequestId, conn: &mut DbConn) -> Option<Self> {
db_run! {conn: { db_run! {conn: {
auth_requests::table auth_requests::table
.filter(auth_requests::uuid.eq(uuid)) .filter(auth_requests::uuid.eq(uuid))
@ -111,7 +114,18 @@ impl AuthRequest {
}} }}
} }
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_uuid_and_user(uuid: &AuthRequestId, user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
db_run! {conn: {
auth_requests::table
.filter(auth_requests::uuid.eq(uuid))
.filter(auth_requests::user_uuid.eq(user_uuid))
.first::<AuthRequestDb>(conn)
.ok()
.from_db()
}}
}
pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
db_run! {conn: { db_run! {conn: {
auth_requests::table auth_requests::table
.filter(auth_requests::user_uuid.eq(user_uuid)) .filter(auth_requests::user_uuid.eq(user_uuid))
@ -146,3 +160,21 @@ impl AuthRequest {
} }
} }
} }
#[derive(
Clone,
Debug,
AsRef,
Deref,
DieselNewType,
Display,
From,
FromForm,
Hash,
PartialEq,
Eq,
Serialize,
Deserialize,
UuidFromParam,
)]
pub struct AuthRequestId(String);

237 src/db/models/cipher.rs

@@ -1,13 +1,15 @@
 use crate::util::LowerCase;
 use crate::CONFIG;
 use chrono::{NaiveDateTime, TimeDelta, Utc};
+use derive_more::{AsRef, Deref, Display, From};
 use serde_json::Value;

 use super::{
-    Attachment, CollectionCipher, Favorite, FolderCipher, Group, User, UserOrgStatus, UserOrgType, UserOrganization,
+    Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, FolderId, Group, Membership, MembershipStatus,
+    MembershipType, OrganizationId, User, UserId,
 };
 use crate::api::core::{CipherData, CipherSyncData, CipherSyncType};
+use macros::UuidFromParam;

 use std::borrow::Cow;
@ -17,12 +19,12 @@ db_object! {
#[diesel(treat_none_as_null = true)] #[diesel(treat_none_as_null = true)]
#[diesel(primary_key(uuid))] #[diesel(primary_key(uuid))]
pub struct Cipher { pub struct Cipher {
pub uuid: String, pub uuid: CipherId,
pub created_at: NaiveDateTime, pub created_at: NaiveDateTime,
pub updated_at: NaiveDateTime, pub updated_at: NaiveDateTime,
pub user_uuid: Option<String>, pub user_uuid: Option<UserId>,
pub organization_uuid: Option<String>, pub organization_uuid: Option<OrganizationId>,
pub key: Option<String>, pub key: Option<String>,
@ -57,7 +59,7 @@ impl Cipher {
let now = Utc::now().naive_utc(); let now = Utc::now().naive_utc();
Self { Self {
uuid: crate::util::get_uuid(), uuid: CipherId(crate::util::get_uuid()),
created_at: now, created_at: now,
updated_at: now, updated_at: now,
@ -135,7 +137,7 @@ impl Cipher {
pub async fn to_json( pub async fn to_json(
&self, &self,
host: &str, host: &str,
user_uuid: &str, user_uuid: &UserId,
cipher_sync_data: Option<&CipherSyncData>, cipher_sync_data: Option<&CipherSyncData>,
sync_type: CipherSyncType, sync_type: CipherSyncType,
conn: &mut DbConn, conn: &mut DbConn,
@@ -241,12 +243,23 @@ impl Cipher {
        // NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
        // Set the first element of the Uris array as Uri, this is needed by several (mobile) clients.
        if self.atype == 1 {
-           if type_data_json["uris"].is_array() {
-               let uri = type_data_json["uris"][0]["uri"].clone();
-               type_data_json["uri"] = uri;
-           } else {
-               // Upstream always has an Uri key/value
-               type_data_json["uri"] = Value::Null;
+           // Upstream always has an `uri` key/value
+           type_data_json["uri"] = Value::Null;
+           if let Some(uris) = type_data_json["uris"].as_array_mut() {
+               if !uris.is_empty() {
+                   // Fix uri match values first, they are only allowed to be a number or null
+                   // If it is a string, convert it to an int, or null if that fails
+                   for uri in &mut *uris {
+                       if uri["match"].is_string() {
+                           let match_value = match uri["match"].as_str().unwrap_or_default().parse::<u8>() {
+                               Ok(n) => json!(n),
+                               _ => Value::Null,
+                           };
+                           uri["match"] = match_value;
+                       }
+                   }
+                   type_data_json["uri"] = uris[0]["uri"].clone();
+               }
            }
        }
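To make the normalisation above concrete, here is a hedged example of the shape change it performs on login data; the values are invented for illustration:

fn example_uri_normalisation() {
    // Input as stored: "match" arrived as a string.
    let before = serde_json::json!({ "uris": [ { "uri": "https://example.com", "match": "3" } ] });
    // Output after the loop above: "match" coerced to a number (or null if unparsable),
    // and the first uri copied into the legacy top-level "uri" field.
    let after = serde_json::json!({
        "uri": "https://example.com",
        "uris": [ { "uri": "https://example.com", "match": 3 } ],
    });
    let _ = (before, after);
}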
@ -261,6 +274,19 @@ impl Cipher {
} }
} }
// Fix invalid SSH Entries
// This breaks at least the native mobile client if invalid
// The only way to fix this is by setting type_data_json to `null`
// Opening this ssh-key in the mobile client will probably crash the client, but you can edit, save and afterwards delete it
if self.atype == 5
&& (type_data_json["keyFingerprint"].as_str().is_none_or(|v| v.is_empty())
|| type_data_json["privateKey"].as_str().is_none_or(|v| v.is_empty())
|| type_data_json["publicKey"].as_str().is_none_or(|v| v.is_empty()))
{
warn!("Error parsing ssh-key, mandatory fields are invalid for {}", self.uuid);
type_data_json = Value::Null;
}
// Clone the type_data and add some default value. // Clone the type_data and add some default value.
let mut data_json = type_data_json.clone(); let mut data_json = type_data_json.clone();
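A small sketch of the emptiness check being applied, with invented field values; is_none_or treats both a missing field and an empty string as invalid:

fn example_ssh_check() {
    // Invented values; mirrors the mandatory-field check above.
    let key = serde_json::json!({ "privateKey": "", "publicKey": "ssh-ed25519 AAAA", "keyFingerprint": "SHA256:abc" });
    let invalid = key["privateKey"].as_str().is_none_or(|v| v.is_empty());
    assert!(invalid); // an empty or missing mandatory field causes the whole type_data_json to be nulled
}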
@ -278,7 +304,7 @@ impl Cipher {
Cow::from(Vec::with_capacity(0)) Cow::from(Vec::with_capacity(0))
} }
} else { } else {
Cow::from(self.get_admin_collections(user_uuid.to_string(), conn).await) Cow::from(self.get_admin_collections(user_uuid.clone(), conn).await)
}; };
// There are three types of cipher response models in upstream // There are three types of cipher response models in upstream
@ -327,7 +353,7 @@ impl Cipher {
// Skip adding these fields in that case // Skip adding these fields in that case
if sync_type == CipherSyncType::User { if sync_type == CipherSyncType::User {
json_object["folderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data { json_object["folderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string()) cipher_sync_data.cipher_folders.get(&self.uuid).cloned()
} else { } else {
self.get_folder_uuid(user_uuid, conn).await self.get_folder_uuid(user_uuid, conn).await
}); });
@ -356,7 +382,7 @@ impl Cipher {
json_object json_object
} }
pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<String> { pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<UserId> {
let mut user_uuids = Vec::new(); let mut user_uuids = Vec::new();
match self.user_uuid { match self.user_uuid {
Some(ref user_uuid) => { Some(ref user_uuid) => {
@ -367,17 +393,16 @@ impl Cipher {
// Belongs to Organization, need to update affected users // Belongs to Organization, need to update affected users
if let Some(ref org_uuid) = self.organization_uuid { if let Some(ref org_uuid) = self.organization_uuid {
// users having access to the collection // users having access to the collection
let mut collection_users = let mut collection_users = Membership::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await;
UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await;
if CONFIG.org_groups_enabled() { if CONFIG.org_groups_enabled() {
// members of a group having access to the collection // members of a group having access to the collection
let group_users = let group_users =
UserOrganization::find_by_cipher_and_org_with_group(&self.uuid, org_uuid, conn).await; Membership::find_by_cipher_and_org_with_group(&self.uuid, org_uuid, conn).await;
collection_users.extend(group_users); collection_users.extend(group_users);
} }
for user_org in collection_users { for member in collection_users {
User::update_uuid_revision(&user_org.user_uuid, conn).await; User::update_uuid_revision(&member.user_uuid, conn).await;
user_uuids.push(user_org.user_uuid.clone()) user_uuids.push(member.user_uuid.clone())
} }
} }
} }
@ -435,7 +460,7 @@ impl Cipher {
}} }}
} }
pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult {
// TODO: Optimize this by executing a DELETE directly on the database, instead of first fetching. // TODO: Optimize this by executing a DELETE directly on the database, instead of first fetching.
for cipher in Self::find_by_org(org_uuid, conn).await { for cipher in Self::find_by_org(org_uuid, conn).await {
cipher.delete(conn).await?; cipher.delete(conn).await?;
@ -443,7 +468,7 @@ impl Cipher {
Ok(()) Ok(())
} }
pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
for cipher in Self::find_owned_by_user(user_uuid, conn).await { for cipher in Self::find_owned_by_user(user_uuid, conn).await {
cipher.delete(conn).await?; cipher.delete(conn).await?;
} }
@ -461,52 +486,59 @@ impl Cipher {
} }
} }
pub async fn move_to_folder(&self, folder_uuid: Option<String>, user_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn move_to_folder(
&self,
folder_uuid: Option<FolderId>,
user_uuid: &UserId,
conn: &mut DbConn,
) -> EmptyResult {
User::update_uuid_revision(user_uuid, conn).await; User::update_uuid_revision(user_uuid, conn).await;
match (self.get_folder_uuid(user_uuid, conn).await, folder_uuid) { match (self.get_folder_uuid(user_uuid, conn).await, folder_uuid) {
// No changes // No changes
(None, None) => Ok(()), (None, None) => Ok(()),
(Some(ref old), Some(ref new)) if old == new => Ok(()), (Some(ref old_folder), Some(ref new_folder)) if old_folder == new_folder => Ok(()),
// Add to folder // Add to folder
(None, Some(new)) => FolderCipher::new(&new, &self.uuid).save(conn).await, (None, Some(new_folder)) => FolderCipher::new(new_folder, self.uuid.clone()).save(conn).await,
// Remove from folder // Remove from folder
(Some(old), None) => match FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await { (Some(old_folder), None) => {
Some(old) => old.delete(conn).await, match FolderCipher::find_by_folder_and_cipher(&old_folder, &self.uuid, conn).await {
None => err!("Couldn't move from previous folder"), Some(old_folder) => old_folder.delete(conn).await,
}, None => err!("Couldn't move from previous folder"),
}
}
// Move to another folder // Move to another folder
(Some(old), Some(new)) => { (Some(old_folder), Some(new_folder)) => {
if let Some(old) = FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await { if let Some(old_folder) = FolderCipher::find_by_folder_and_cipher(&old_folder, &self.uuid, conn).await {
old.delete(conn).await?; old_folder.delete(conn).await?;
} }
FolderCipher::new(&new, &self.uuid).save(conn).await FolderCipher::new(new_folder, self.uuid.clone()).save(conn).await
} }
} }
} }
/// Returns whether this cipher is directly owned by the user. /// Returns whether this cipher is directly owned by the user.
pub fn is_owned_by_user(&self, user_uuid: &str) -> bool { pub fn is_owned_by_user(&self, user_uuid: &UserId) -> bool {
self.user_uuid.is_some() && self.user_uuid.as_ref().unwrap() == user_uuid self.user_uuid.is_some() && self.user_uuid.as_ref().unwrap() == user_uuid
} }
/// Returns whether this cipher is owned by an org in which the user has full access. /// Returns whether this cipher is owned by an org in which the user has full access.
async fn is_in_full_access_org( async fn is_in_full_access_org(
&self, &self,
user_uuid: &str, user_uuid: &UserId,
cipher_sync_data: Option<&CipherSyncData>, cipher_sync_data: Option<&CipherSyncData>,
conn: &mut DbConn, conn: &mut DbConn,
) -> bool { ) -> bool {
if let Some(ref org_uuid) = self.organization_uuid { if let Some(ref org_uuid) = self.organization_uuid {
if let Some(cipher_sync_data) = cipher_sync_data { if let Some(cipher_sync_data) = cipher_sync_data {
if let Some(cached_user_org) = cipher_sync_data.user_organizations.get(org_uuid) { if let Some(cached_member) = cipher_sync_data.members.get(org_uuid) {
return cached_user_org.has_full_access(); return cached_member.has_full_access();
} }
} else if let Some(user_org) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn).await { } else if let Some(member) = Membership::find_by_user_and_org(user_uuid, org_uuid, conn).await {
return user_org.has_full_access(); return member.has_full_access();
} }
} }
false false
@ -515,7 +547,7 @@ impl Cipher {
/// Returns whether this cipher is owned by a group in which the user has full access.
async fn is_in_full_access_group( async fn is_in_full_access_group(
&self, &self,
user_uuid: &str, user_uuid: &UserId,
cipher_sync_data: Option<&CipherSyncData>, cipher_sync_data: Option<&CipherSyncData>,
conn: &mut DbConn, conn: &mut DbConn,
) -> bool { ) -> bool {
@ -539,7 +571,7 @@ impl Cipher {
/// the access restrictions. /// the access restrictions.
pub async fn get_access_restrictions( pub async fn get_access_restrictions(
&self, &self,
user_uuid: &str, user_uuid: &UserId,
cipher_sync_data: Option<&CipherSyncData>, cipher_sync_data: Option<&CipherSyncData>,
conn: &mut DbConn, conn: &mut DbConn,
) -> Option<(bool, bool)> { ) -> Option<(bool, bool)> {
@ -599,7 +631,7 @@ impl Cipher {
Some((read_only, hide_passwords)) Some((read_only, hide_passwords))
} }
async fn get_user_collections_access_flags(&self, user_uuid: &str, conn: &mut DbConn) -> Vec<(bool, bool)> { async fn get_user_collections_access_flags(&self, user_uuid: &UserId, conn: &mut DbConn) -> Vec<(bool, bool)> {
db_run! {conn: { db_run! {conn: {
// Check whether this cipher is in any collections accessible to the // Check whether this cipher is in any collections accessible to the
// user. If so, retrieve the access flags for each collection. // user. If so, retrieve the access flags for each collection.
@ -616,7 +648,7 @@ impl Cipher {
}} }}
} }
async fn get_group_collections_access_flags(&self, user_uuid: &str, conn: &mut DbConn) -> Vec<(bool, bool)> { async fn get_group_collections_access_flags(&self, user_uuid: &UserId, conn: &mut DbConn) -> Vec<(bool, bool)> {
if !CONFIG.org_groups_enabled() { if !CONFIG.org_groups_enabled() {
return Vec::new(); return Vec::new();
} }
@ -642,43 +674,43 @@ impl Cipher {
}} }}
} }
pub async fn is_write_accessible_to_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool { pub async fn is_write_accessible_to_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool {
match self.get_access_restrictions(user_uuid, None, conn).await { match self.get_access_restrictions(user_uuid, None, conn).await {
Some((read_only, _hide_passwords)) => !read_only, Some((read_only, _hide_passwords)) => !read_only,
None => false, None => false,
} }
} }
pub async fn is_accessible_to_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool { pub async fn is_accessible_to_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool {
self.get_access_restrictions(user_uuid, None, conn).await.is_some() self.get_access_restrictions(user_uuid, None, conn).await.is_some()
} }
// Returns whether this cipher is a favorite of the specified user. // Returns whether this cipher is a favorite of the specified user.
pub async fn is_favorite(&self, user_uuid: &str, conn: &mut DbConn) -> bool { pub async fn is_favorite(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool {
Favorite::is_favorite(&self.uuid, user_uuid, conn).await Favorite::is_favorite(&self.uuid, user_uuid, conn).await
} }
// Sets whether this cipher is a favorite of the specified user. // Sets whether this cipher is a favorite of the specified user.
pub async fn set_favorite(&self, favorite: Option<bool>, user_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn set_favorite(&self, favorite: Option<bool>, user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
match favorite { match favorite {
None => Ok(()), // No change requested. None => Ok(()), // No change requested.
Some(status) => Favorite::set_favorite(status, &self.uuid, user_uuid, conn).await, Some(status) => Favorite::set_favorite(status, &self.uuid, user_uuid, conn).await,
} }
} }
pub async fn get_folder_uuid(&self, user_uuid: &str, conn: &mut DbConn) -> Option<String> { pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &mut DbConn) -> Option<FolderId> {
db_run! {conn: { db_run! {conn: {
folders_ciphers::table folders_ciphers::table
.inner_join(folders::table) .inner_join(folders::table)
.filter(folders::user_uuid.eq(&user_uuid)) .filter(folders::user_uuid.eq(&user_uuid))
.filter(folders_ciphers::cipher_uuid.eq(&self.uuid)) .filter(folders_ciphers::cipher_uuid.eq(&self.uuid))
.select(folders_ciphers::folder_uuid) .select(folders_ciphers::folder_uuid)
.first::<String>(conn) .first::<FolderId>(conn)
.ok() .ok()
}} }}
} }
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_uuid(uuid: &CipherId, conn: &mut DbConn) -> Option<Self> {
db_run! {conn: { db_run! {conn: {
ciphers::table ciphers::table
.filter(ciphers::uuid.eq(uuid)) .filter(ciphers::uuid.eq(uuid))
@ -688,7 +720,11 @@ impl Cipher {
}} }}
} }
pub async fn find_by_uuid_and_org(cipher_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_uuid_and_org(
cipher_uuid: &CipherId,
org_uuid: &OrganizationId,
conn: &mut DbConn,
) -> Option<Self> {
db_run! {conn: { db_run! {conn: {
ciphers::table ciphers::table
.filter(ciphers::uuid.eq(cipher_uuid)) .filter(ciphers::uuid.eq(cipher_uuid))
@ -711,7 +747,7 @@ impl Cipher {
// true, then the non-interesting ciphers will not be returned. As a // true, then the non-interesting ciphers will not be returned. As a
// result, those ciphers will not appear in "My Vault" for the org // result, those ciphers will not appear in "My Vault" for the org
// owner/admin, but they can still be accessed via the org vault view. // owner/admin, but they can still be accessed via the org vault view.
pub async fn find_by_user(user_uuid: &str, visible_only: bool, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_user(user_uuid: &UserId, visible_only: bool, conn: &mut DbConn) -> Vec<Self> {
if CONFIG.org_groups_enabled() { if CONFIG.org_groups_enabled() {
db_run! {conn: { db_run! {conn: {
let mut query = ciphers::table let mut query = ciphers::table
@ -721,7 +757,7 @@ impl Cipher {
.left_join(users_organizations::table.on( .left_join(users_organizations::table.on(
ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable()) ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())
.and(users_organizations::user_uuid.eq(user_uuid)) .and(users_organizations::user_uuid.eq(user_uuid))
.and(users_organizations::status.eq(UserOrgStatus::Confirmed as i32)) .and(users_organizations::status.eq(MembershipStatus::Confirmed as i32))
)) ))
.left_join(users_collections::table.on( .left_join(users_collections::table.on(
ciphers_collections::collection_uuid.eq(users_collections::collection_uuid) ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
@ -748,7 +784,7 @@ impl Cipher {
if !visible_only { if !visible_only {
query = query.or_filter( query = query.or_filter(
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin/owner users_organizations::atype.le(MembershipType::Admin as i32) // Org admin/owner
); );
} }
@ -766,7 +802,7 @@ impl Cipher {
.left_join(users_organizations::table.on( .left_join(users_organizations::table.on(
ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable()) ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())
.and(users_organizations::user_uuid.eq(user_uuid)) .and(users_organizations::user_uuid.eq(user_uuid))
.and(users_organizations::status.eq(UserOrgStatus::Confirmed as i32)) .and(users_organizations::status.eq(MembershipStatus::Confirmed as i32))
)) ))
.left_join(users_collections::table.on( .left_join(users_collections::table.on(
ciphers_collections::collection_uuid.eq(users_collections::collection_uuid) ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
@ -780,7 +816,7 @@ impl Cipher {
if !visible_only { if !visible_only {
query = query.or_filter( query = query.or_filter(
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin/owner users_organizations::atype.le(MembershipType::Admin as i32) // Org admin/owner
); );
} }
@ -793,12 +829,12 @@ impl Cipher {
} }
// Find all ciphers visible to the specified user. // Find all ciphers visible to the specified user.
pub async fn find_by_user_visible(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_user_visible(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
Self::find_by_user(user_uuid, true, conn).await Self::find_by_user(user_uuid, true, conn).await
} }
// Find all ciphers directly owned by the specified user. // Find all ciphers directly owned by the specified user.
pub async fn find_owned_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_owned_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
db_run! {conn: { db_run! {conn: {
ciphers::table ciphers::table
.filter( .filter(
@ -809,7 +845,7 @@ impl Cipher {
}} }}
} }
pub async fn count_owned_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 { pub async fn count_owned_by_user(user_uuid: &UserId, conn: &mut DbConn) -> i64 {
db_run! {conn: { db_run! {conn: {
ciphers::table ciphers::table
.filter(ciphers::user_uuid.eq(user_uuid)) .filter(ciphers::user_uuid.eq(user_uuid))
@ -820,7 +856,7 @@ impl Cipher {
}} }}
} }
pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec<Self> {
db_run! {conn: { db_run! {conn: {
ciphers::table ciphers::table
.filter(ciphers::organization_uuid.eq(org_uuid)) .filter(ciphers::organization_uuid.eq(org_uuid))
@ -828,7 +864,7 @@ impl Cipher {
}} }}
} }
pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 { pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
db_run! {conn: { db_run! {conn: {
ciphers::table ciphers::table
.filter(ciphers::organization_uuid.eq(org_uuid)) .filter(ciphers::organization_uuid.eq(org_uuid))
@ -839,7 +875,7 @@ impl Cipher {
}} }}
} }
pub async fn find_by_folder(folder_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> Vec<Self> {
db_run! {conn: { db_run! {conn: {
folders_ciphers::table.inner_join(ciphers::table) folders_ciphers::table.inner_join(ciphers::table)
.filter(folders_ciphers::folder_uuid.eq(folder_uuid)) .filter(folders_ciphers::folder_uuid.eq(folder_uuid))
@ -857,7 +893,7 @@ impl Cipher {
}} }}
} }
pub async fn get_collections(&self, user_id: String, conn: &mut DbConn) -> Vec<String> { pub async fn get_collections(&self, user_uuid: UserId, conn: &mut DbConn) -> Vec<CollectionId> {
if CONFIG.org_groups_enabled() { if CONFIG.org_groups_enabled() {
db_run! {conn: { db_run! {conn: {
ciphers_collections::table ciphers_collections::table
@ -867,11 +903,11 @@ impl Cipher {
)) ))
.left_join(users_organizations::table.on( .left_join(users_organizations::table.on(
users_organizations::org_uuid.eq(collections::org_uuid) users_organizations::org_uuid.eq(collections::org_uuid)
.and(users_organizations::user_uuid.eq(user_id.clone())) .and(users_organizations::user_uuid.eq(user_uuid.clone()))
)) ))
.left_join(users_collections::table.on( .left_join(users_collections::table.on(
users_collections::collection_uuid.eq(ciphers_collections::collection_uuid) users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
.and(users_collections::user_uuid.eq(user_id.clone())) .and(users_collections::user_uuid.eq(user_uuid.clone()))
)) ))
.left_join(groups_users::table.on( .left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid) groups_users::users_organizations_uuid.eq(users_organizations::uuid)
@ -882,14 +918,14 @@ impl Cipher {
.and(collections_groups::groups_uuid.eq(groups::uuid)) .and(collections_groups::groups_uuid.eq(groups::uuid))
)) ))
.filter(users_organizations::access_all.eq(true) // User has access all .filter(users_organizations::access_all.eq(true) // User has access all
.or(users_collections::user_uuid.eq(user_id) // User has access to collection .or(users_collections::user_uuid.eq(user_uuid) // User has access to collection
.and(users_collections::read_only.eq(false))) .and(users_collections::read_only.eq(false)))
.or(groups::access_all.eq(true)) // Access via groups .or(groups::access_all.eq(true)) // Access via groups
.or(collections_groups::collections_uuid.is_not_null() // Access via groups .or(collections_groups::collections_uuid.is_not_null() // Access via groups
.and(collections_groups::read_only.eq(false))) .and(collections_groups::read_only.eq(false)))
) )
.select(ciphers_collections::collection_uuid) .select(ciphers_collections::collection_uuid)
.load::<String>(conn).unwrap_or_default() .load::<CollectionId>(conn).unwrap_or_default()
}} }}
} else { } else {
db_run! {conn: { db_run! {conn: {
@ -900,23 +936,23 @@ impl Cipher {
)) ))
.inner_join(users_organizations::table.on( .inner_join(users_organizations::table.on(
users_organizations::org_uuid.eq(collections::org_uuid) users_organizations::org_uuid.eq(collections::org_uuid)
.and(users_organizations::user_uuid.eq(user_id.clone())) .and(users_organizations::user_uuid.eq(user_uuid.clone()))
)) ))
.left_join(users_collections::table.on( .left_join(users_collections::table.on(
users_collections::collection_uuid.eq(ciphers_collections::collection_uuid) users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
.and(users_collections::user_uuid.eq(user_id.clone())) .and(users_collections::user_uuid.eq(user_uuid.clone()))
)) ))
.filter(users_organizations::access_all.eq(true) // User has access all .filter(users_organizations::access_all.eq(true) // User has access all
.or(users_collections::user_uuid.eq(user_id) // User has access to collection .or(users_collections::user_uuid.eq(user_uuid) // User has access to collection
.and(users_collections::read_only.eq(false))) .and(users_collections::read_only.eq(false)))
) )
.select(ciphers_collections::collection_uuid) .select(ciphers_collections::collection_uuid)
.load::<String>(conn).unwrap_or_default() .load::<CollectionId>(conn).unwrap_or_default()
}} }}
} }
} }
pub async fn get_admin_collections(&self, user_id: String, conn: &mut DbConn) -> Vec<String> { pub async fn get_admin_collections(&self, user_uuid: UserId, conn: &mut DbConn) -> Vec<CollectionId> {
if CONFIG.org_groups_enabled() { if CONFIG.org_groups_enabled() {
db_run! {conn: { db_run! {conn: {
ciphers_collections::table ciphers_collections::table
@ -926,11 +962,11 @@ impl Cipher {
)) ))
.left_join(users_organizations::table.on( .left_join(users_organizations::table.on(
users_organizations::org_uuid.eq(collections::org_uuid) users_organizations::org_uuid.eq(collections::org_uuid)
.and(users_organizations::user_uuid.eq(user_id.clone())) .and(users_organizations::user_uuid.eq(user_uuid.clone()))
)) ))
.left_join(users_collections::table.on( .left_join(users_collections::table.on(
users_collections::collection_uuid.eq(ciphers_collections::collection_uuid) users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
.and(users_collections::user_uuid.eq(user_id.clone())) .and(users_collections::user_uuid.eq(user_uuid.clone()))
)) ))
.left_join(groups_users::table.on( .left_join(groups_users::table.on(
groups_users::users_organizations_uuid.eq(users_organizations::uuid) groups_users::users_organizations_uuid.eq(users_organizations::uuid)
@ -941,15 +977,15 @@ impl Cipher {
.and(collections_groups::groups_uuid.eq(groups::uuid)) .and(collections_groups::groups_uuid.eq(groups::uuid))
)) ))
.filter(users_organizations::access_all.eq(true) // User has access all .filter(users_organizations::access_all.eq(true) // User has access all
.or(users_collections::user_uuid.eq(user_id) // User has access to collection .or(users_collections::user_uuid.eq(user_uuid) // User has access to collection
.and(users_collections::read_only.eq(false))) .and(users_collections::read_only.eq(false)))
.or(groups::access_all.eq(true)) // Access via groups .or(groups::access_all.eq(true)) // Access via groups
.or(collections_groups::collections_uuid.is_not_null() // Access via groups .or(collections_groups::collections_uuid.is_not_null() // Access via groups
.and(collections_groups::read_only.eq(false))) .and(collections_groups::read_only.eq(false)))
.or(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner .or(users_organizations::atype.le(MembershipType::Admin as i32)) // User is admin or owner
) )
.select(ciphers_collections::collection_uuid) .select(ciphers_collections::collection_uuid)
.load::<String>(conn).unwrap_or_default() .load::<CollectionId>(conn).unwrap_or_default()
}} }}
} else { } else {
db_run! {conn: { db_run! {conn: {
@ -960,26 +996,29 @@ impl Cipher {
)) ))
.inner_join(users_organizations::table.on( .inner_join(users_organizations::table.on(
users_organizations::org_uuid.eq(collections::org_uuid) users_organizations::org_uuid.eq(collections::org_uuid)
.and(users_organizations::user_uuid.eq(user_id.clone())) .and(users_organizations::user_uuid.eq(user_uuid.clone()))
)) ))
.left_join(users_collections::table.on( .left_join(users_collections::table.on(
users_collections::collection_uuid.eq(ciphers_collections::collection_uuid) users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
.and(users_collections::user_uuid.eq(user_id.clone())) .and(users_collections::user_uuid.eq(user_uuid.clone()))
)) ))
.filter(users_organizations::access_all.eq(true) // User has access all .filter(users_organizations::access_all.eq(true) // User has access all
.or(users_collections::user_uuid.eq(user_id) // User has access to collection .or(users_collections::user_uuid.eq(user_uuid) // User has access to collection
.and(users_collections::read_only.eq(false))) .and(users_collections::read_only.eq(false)))
.or(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner .or(users_organizations::atype.le(MembershipType::Admin as i32)) // User is admin or owner
) )
.select(ciphers_collections::collection_uuid) .select(ciphers_collections::collection_uuid)
.load::<String>(conn).unwrap_or_default() .load::<CollectionId>(conn).unwrap_or_default()
}} }}
} }
} }
/// Return a Vec with (cipher_uuid, collection_uuid) /// Return a Vec with (cipher_uuid, collection_uuid)
/// This is used during a full sync so we only need one query for all collections accessible. /// This is used during a full sync so we only need one query for all collections accessible.
pub async fn get_collections_with_cipher_by_user(user_id: String, conn: &mut DbConn) -> Vec<(String, String)> { pub async fn get_collections_with_cipher_by_user(
user_uuid: UserId,
conn: &mut DbConn,
) -> Vec<(CipherId, CollectionId)> {
db_run! {conn: { db_run! {conn: {
ciphers_collections::table ciphers_collections::table
.inner_join(collections::table.on( .inner_join(collections::table.on(
@ -987,12 +1026,12 @@ impl Cipher {
)) ))
.inner_join(users_organizations::table.on( .inner_join(users_organizations::table.on(
users_organizations::org_uuid.eq(collections::org_uuid).and( users_organizations::org_uuid.eq(collections::org_uuid).and(
users_organizations::user_uuid.eq(user_id.clone()) users_organizations::user_uuid.eq(user_uuid.clone())
) )
)) ))
.left_join(users_collections::table.on( .left_join(users_collections::table.on(
users_collections::collection_uuid.eq(ciphers_collections::collection_uuid).and( users_collections::collection_uuid.eq(ciphers_collections::collection_uuid).and(
users_collections::user_uuid.eq(user_id.clone()) users_collections::user_uuid.eq(user_uuid.clone())
) )
)) ))
.left_join(groups_users::table.on( .left_join(groups_users::table.on(
@ -1006,14 +1045,32 @@ impl Cipher {
collections_groups::groups_uuid.eq(groups::uuid) collections_groups::groups_uuid.eq(groups::uuid)
) )
)) ))
.or_filter(users_collections::user_uuid.eq(user_id)) // User has access to collection .or_filter(users_collections::user_uuid.eq(user_uuid)) // User has access to collection
.or_filter(users_organizations::access_all.eq(true)) // User has access all .or_filter(users_organizations::access_all.eq(true)) // User has access all
.or_filter(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner .or_filter(users_organizations::atype.le(MembershipType::Admin as i32)) // User is admin or owner
.or_filter(groups::access_all.eq(true)) //Access via group .or_filter(groups::access_all.eq(true)) //Access via group
.or_filter(collections_groups::collections_uuid.is_not_null()) //Access via group .or_filter(collections_groups::collections_uuid.is_not_null()) //Access via group
.select(ciphers_collections::all_columns) .select(ciphers_collections::all_columns)
.distinct() .distinct()
.load::<(String, String)>(conn).unwrap_or_default() .load::<(CipherId, CollectionId)>(conn).unwrap_or_default()
}} }}
} }
} }
#[derive(
Clone,
Debug,
AsRef,
Deref,
DieselNewType,
Display,
From,
FromForm,
Hash,
PartialEq,
Eq,
Serialize,
Deserialize,
UuidFromParam,
)]
pub struct CipherId(String);
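As a hedged illustration of how the (CipherId, CollectionId) pairs returned by get_collections_with_cipher_by_user are typically consumed during a full sync; the grouping helper below is illustrative and not part of this diff:

use std::collections::HashMap;

// Illustrative only: group the pairs so each cipher's collection ids can be
// looked up without issuing one query per cipher.
fn group_by_cipher(pairs: Vec<(CipherId, CollectionId)>) -> HashMap<CipherId, Vec<CollectionId>> {
    let mut map: HashMap<CipherId, Vec<CollectionId>> = HashMap::new();
    for (cipher_id, collection_id) in pairs {
        map.entry(cipher_id).or_default().push(collection_id);
    }
    map
}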

215 src/db/models/collection.rs

@@ -1,15 +1,20 @@
+use derive_more::{AsRef, Deref, Display, From};
 use serde_json::Value;

-use super::{CollectionGroup, GroupUser, User, UserOrgStatus, UserOrgType, UserOrganization};
+use super::{
+    CipherId, CollectionGroup, GroupUser, Membership, MembershipId, MembershipStatus, MembershipType, OrganizationId,
+    User, UserId,
+};
 use crate::CONFIG;
+use macros::UuidFromParam;
db_object! { db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)] #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = collections)] #[diesel(table_name = collections)]
#[diesel(primary_key(uuid))] #[diesel(primary_key(uuid))]
pub struct Collection { pub struct Collection {
pub uuid: String, pub uuid: CollectionId,
pub org_uuid: String, pub org_uuid: OrganizationId,
pub name: String, pub name: String,
pub external_id: Option<String>, pub external_id: Option<String>,
} }
@ -18,8 +23,8 @@ db_object! {
#[diesel(table_name = users_collections)] #[diesel(table_name = users_collections)]
#[diesel(primary_key(user_uuid, collection_uuid))] #[diesel(primary_key(user_uuid, collection_uuid))]
pub struct CollectionUser { pub struct CollectionUser {
pub user_uuid: String, pub user_uuid: UserId,
pub collection_uuid: String, pub collection_uuid: CollectionId,
pub read_only: bool, pub read_only: bool,
pub hide_passwords: bool, pub hide_passwords: bool,
} }
@ -28,16 +33,16 @@ db_object! {
#[diesel(table_name = ciphers_collections)] #[diesel(table_name = ciphers_collections)]
#[diesel(primary_key(cipher_uuid, collection_uuid))] #[diesel(primary_key(cipher_uuid, collection_uuid))]
pub struct CollectionCipher { pub struct CollectionCipher {
pub cipher_uuid: String, pub cipher_uuid: CipherId,
pub collection_uuid: String, pub collection_uuid: CollectionId,
} }
} }
/// Local methods /// Local methods
impl Collection { impl Collection {
pub fn new(org_uuid: String, name: String, external_id: Option<String>) -> Self { pub fn new(org_uuid: OrganizationId, name: String, external_id: Option<String>) -> Self {
let mut new_model = Self { let mut new_model = Self {
uuid: crate::util::get_uuid(), uuid: CollectionId(crate::util::get_uuid()),
org_uuid, org_uuid,
name, name,
external_id: None, external_id: None,
@ -74,18 +79,18 @@ impl Collection {
pub async fn to_json_details( pub async fn to_json_details(
&self, &self,
user_uuid: &str, user_uuid: &UserId,
cipher_sync_data: Option<&crate::api::core::CipherSyncData>, cipher_sync_data: Option<&crate::api::core::CipherSyncData>,
conn: &mut DbConn, conn: &mut DbConn,
) -> Value { ) -> Value {
let (read_only, hide_passwords, can_manage) = if let Some(cipher_sync_data) = cipher_sync_data { let (read_only, hide_passwords, can_manage) = if let Some(cipher_sync_data) = cipher_sync_data {
match cipher_sync_data.user_organizations.get(&self.org_uuid) { match cipher_sync_data.members.get(&self.org_uuid) {
// Only for Manager types Bitwarden returns true for the can_manage option // Only for Manager types Bitwarden returns true for the can_manage option
// Owners and Admins always have true // Owners and Admins always have true
Some(uo) if uo.has_full_access() => (false, false, uo.atype >= UserOrgType::Manager), Some(m) if m.has_full_access() => (false, false, m.atype >= MembershipType::Manager),
Some(uo) => { Some(m) => {
// Only let a manager manage collections when they have full read/write access
let is_manager = uo.atype == UserOrgType::Manager; let is_manager = m.atype == MembershipType::Manager;
if let Some(uc) = cipher_sync_data.user_collections.get(&self.uuid) { if let Some(uc) = cipher_sync_data.user_collections.get(&self.uuid) {
(uc.read_only, uc.hide_passwords, is_manager && !uc.read_only && !uc.hide_passwords) (uc.read_only, uc.hide_passwords, is_manager && !uc.read_only && !uc.hide_passwords)
} else if let Some(cg) = cipher_sync_data.user_collections_groups.get(&self.uuid) { } else if let Some(cg) = cipher_sync_data.user_collections_groups.get(&self.uuid) {
@ -97,10 +102,10 @@ impl Collection {
_ => (true, true, false), _ => (true, true, false),
} }
} else { } else {
match UserOrganization::find_confirmed_by_user_and_org(user_uuid, &self.org_uuid, conn).await { match Membership::find_confirmed_by_user_and_org(user_uuid, &self.org_uuid, conn).await {
Some(ou) if ou.has_full_access() => (false, false, ou.atype >= UserOrgType::Manager), Some(m) if m.has_full_access() => (false, false, m.atype >= MembershipType::Manager),
Some(ou) => { Some(m) => {
let is_manager = ou.atype == UserOrgType::Manager; let is_manager = m.atype == MembershipType::Manager;
let read_only = !self.is_writable_by_user(user_uuid, conn).await; let read_only = !self.is_writable_by_user(user_uuid, conn).await;
let hide_passwords = self.hide_passwords_for_user(user_uuid, conn).await; let hide_passwords = self.hide_passwords_for_user(user_uuid, conn).await;
(read_only, hide_passwords, is_manager && !read_only && !hide_passwords) (read_only, hide_passwords, is_manager && !read_only && !hide_passwords)
@ -121,13 +126,13 @@ impl Collection {
json_object json_object
} }
pub async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool { pub async fn can_access_collection(member: &Membership, col_id: &CollectionId, conn: &mut DbConn) -> bool {
org_user.has_status(UserOrgStatus::Confirmed) member.has_status(MembershipStatus::Confirmed)
&& (org_user.has_full_access() && (member.has_full_access()
|| CollectionUser::has_access_to_collection_by_user(col_id, &org_user.user_uuid, conn).await || CollectionUser::has_access_to_collection_by_user(col_id, &member.user_uuid, conn).await
|| (CONFIG.org_groups_enabled() || (CONFIG.org_groups_enabled()
&& (GroupUser::has_full_access_by_member(&org_user.org_uuid, &org_user.uuid, conn).await && (GroupUser::has_full_access_by_member(&member.org_uuid, &member.uuid, conn).await
|| GroupUser::has_access_to_collection_by_member(col_id, &org_user.uuid, conn).await))) || GroupUser::has_access_to_collection_by_member(col_id, &member.uuid, conn).await)))
} }
} }
@ -185,7 +190,7 @@ impl Collection {
}} }}
} }
pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult {
for collection in Self::find_by_organization(org_uuid, conn).await { for collection in Self::find_by_organization(org_uuid, conn).await {
collection.delete(conn).await?; collection.delete(conn).await?;
} }
@ -193,12 +198,12 @@ impl Collection {
} }
pub async fn update_users_revision(&self, conn: &mut DbConn) { pub async fn update_users_revision(&self, conn: &mut DbConn) {
for user_org in UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).await.iter() { for member in Membership::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).await.iter() {
User::update_uuid_revision(&user_org.user_uuid, conn).await; User::update_uuid_revision(&member.user_uuid, conn).await;
} }
} }
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_uuid(uuid: &CollectionId, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
collections::table collections::table
.filter(collections::uuid.eq(uuid)) .filter(collections::uuid.eq(uuid))
@ -208,7 +213,7 @@ impl Collection {
}} }}
} }
pub async fn find_by_user_uuid(user_uuid: String, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_user_uuid(user_uuid: UserId, conn: &mut DbConn) -> Vec<Self> {
if CONFIG.org_groups_enabled() { if CONFIG.org_groups_enabled() {
db_run! { conn: { db_run! { conn: {
collections::table collections::table
@ -234,7 +239,7 @@ impl Collection {
) )
)) ))
.filter( .filter(
users_organizations::status.eq(UserOrgStatus::Confirmed as i32) users_organizations::status.eq(MembershipStatus::Confirmed as i32)
) )
.filter( .filter(
users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection
@ -265,7 +270,7 @@ impl Collection {
) )
)) ))
.filter( .filter(
users_organizations::status.eq(UserOrgStatus::Confirmed as i32) users_organizations::status.eq(MembershipStatus::Confirmed as i32)
) )
.filter( .filter(
users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection
@ -279,15 +284,19 @@ impl Collection {
} }
} }
pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_organization_and_user_uuid(
org_uuid: &OrganizationId,
user_uuid: &UserId,
conn: &mut DbConn,
) -> Vec<Self> {
Self::find_by_user_uuid(user_uuid.to_owned(), conn) Self::find_by_user_uuid(user_uuid.to_owned(), conn)
.await .await
.into_iter() .into_iter()
.filter(|c| c.org_uuid == org_uuid) .filter(|c| &c.org_uuid == org_uuid)
.collect() .collect()
} }
pub async fn find_by_organization(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
collections::table collections::table
.filter(collections::org_uuid.eq(org_uuid)) .filter(collections::org_uuid.eq(org_uuid))
@ -297,7 +306,7 @@ impl Collection {
}} }}
} }
pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 { pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
db_run! { conn: { db_run! { conn: {
collections::table collections::table
.filter(collections::org_uuid.eq(org_uuid)) .filter(collections::org_uuid.eq(org_uuid))
@ -308,7 +317,11 @@ impl Collection {
}} }}
} }
pub async fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_uuid_and_org(
uuid: &CollectionId,
org_uuid: &OrganizationId,
conn: &mut DbConn,
) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
collections::table collections::table
.filter(collections::uuid.eq(uuid)) .filter(collections::uuid.eq(uuid))
@ -320,7 +333,7 @@ impl Collection {
}} }}
} }
pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: String, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_uuid_and_user(uuid: &CollectionId, user_uuid: UserId, conn: &mut DbConn) -> Option<Self> {
if CONFIG.org_groups_enabled() { if CONFIG.org_groups_enabled() {
db_run! { conn: { db_run! { conn: {
collections::table collections::table
@ -349,7 +362,7 @@ impl Collection {
.filter( .filter(
users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
users_organizations::access_all.eq(true).or( // access_all in Organization users_organizations::access_all.eq(true).or( // access_all in Organization
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
)).or( )).or(
groups::access_all.eq(true) // access_all in groups groups::access_all.eq(true) // access_all in groups
).or( // access via groups ).or( // access via groups
@ -378,7 +391,7 @@ impl Collection {
.filter( .filter(
users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
users_organizations::access_all.eq(true).or( // access_all in Organization users_organizations::access_all.eq(true).or( // access_all in Organization
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
)) ))
).select(collections::all_columns) ).select(collections::all_columns)
.first::<CollectionDb>(conn).ok() .first::<CollectionDb>(conn).ok()
@ -387,7 +400,7 @@ impl Collection {
} }
} }
pub async fn is_writable_by_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool { pub async fn is_writable_by_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool {
let user_uuid = user_uuid.to_string(); let user_uuid = user_uuid.to_string();
if CONFIG.org_groups_enabled() { if CONFIG.org_groups_enabled() {
db_run! { conn: { db_run! { conn: {
@ -411,7 +424,7 @@ impl Collection {
collections_groups::groups_uuid.eq(groups_users::groups_uuid) collections_groups::groups_uuid.eq(groups_users::groups_uuid)
.and(collections_groups::collections_uuid.eq(collections::uuid)) .and(collections_groups::collections_uuid.eq(collections::uuid))
)) ))
.filter(users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner .filter(users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
.or(users_organizations::access_all.eq(true)) // access_all via membership .or(users_organizations::access_all.eq(true)) // access_all via membership
.or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection .or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection
.and(users_collections::read_only.eq(false))) .and(users_collections::read_only.eq(false)))
@ -436,7 +449,7 @@ impl Collection {
users_collections::collection_uuid.eq(collections::uuid) users_collections::collection_uuid.eq(collections::uuid)
.and(users_collections::user_uuid.eq(user_uuid)) .and(users_collections::user_uuid.eq(user_uuid))
)) ))
.filter(users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner .filter(users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
.or(users_organizations::access_all.eq(true)) // access_all via membership .or(users_organizations::access_all.eq(true)) // access_all via membership
.or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection .or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection
.and(users_collections::read_only.eq(false))) .and(users_collections::read_only.eq(false)))
@ -449,7 +462,7 @@ impl Collection {
} }
} }
pub async fn hide_passwords_for_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool { pub async fn hide_passwords_for_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool {
let user_uuid = user_uuid.to_string(); let user_uuid = user_uuid.to_string();
db_run! { conn: { db_run! { conn: {
collections::table collections::table
@ -478,7 +491,7 @@ impl Collection {
.filter( .filter(
users_collections::collection_uuid.eq(&self.uuid).and(users_collections::hide_passwords.eq(true)).or(// Directly accessed collection users_collections::collection_uuid.eq(&self.uuid).and(users_collections::hide_passwords.eq(true)).or(// Directly accessed collection
users_organizations::access_all.eq(true).or( // access_all in Organization users_organizations::access_all.eq(true).or( // access_all in Organization
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner
)).or( )).or(
groups::access_all.eq(true) // access_all in groups groups::access_all.eq(true) // access_all in groups
).or( // access via groups ).or( // access via groups
@ -498,7 +511,11 @@ impl Collection {
/// Database methods /// Database methods
impl CollectionUser { impl CollectionUser {
pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_organization_and_user_uuid(
org_uuid: &OrganizationId,
user_uuid: &UserId,
conn: &mut DbConn,
) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
users_collections::table users_collections::table
.filter(users_collections::user_uuid.eq(user_uuid)) .filter(users_collections::user_uuid.eq(user_uuid))
@ -511,8 +528,11 @@ impl CollectionUser {
}} }}
} }
pub async fn find_by_organization(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_organization_swap_user_uuid_with_member_uuid(
db_run! { conn: { org_uuid: &OrganizationId,
conn: &mut DbConn,
) -> Vec<CollectionMembership> {
let col_users = db_run! { conn: {
users_collections::table users_collections::table
.inner_join(collections::table.on(collections::uuid.eq(users_collections::collection_uuid))) .inner_join(collections::table.on(collections::uuid.eq(users_collections::collection_uuid)))
.filter(collections::org_uuid.eq(org_uuid)) .filter(collections::org_uuid.eq(org_uuid))
@ -521,12 +541,13 @@ impl CollectionUser {
.load::<CollectionUserDb>(conn) .load::<CollectionUserDb>(conn)
.expect("Error loading users_collections") .expect("Error loading users_collections")
.from_db() .from_db()
}} }};
col_users.into_iter().map(|c| c.into()).collect()
} }
pub async fn save( pub async fn save(
user_uuid: &str, user_uuid: &UserId,
collection_uuid: &str, collection_uuid: &CollectionId,
read_only: bool, read_only: bool,
hide_passwords: bool, hide_passwords: bool,
conn: &mut DbConn, conn: &mut DbConn,
@ -596,7 +617,7 @@ impl CollectionUser {
}} }}
} }
pub async fn find_by_collection(collection_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
users_collections::table users_collections::table
.filter(users_collections::collection_uuid.eq(collection_uuid)) .filter(users_collections::collection_uuid.eq(collection_uuid))
@ -607,11 +628,11 @@ impl CollectionUser {
}} }}
} }
pub async fn find_by_collection_swap_user_uuid_with_org_user_uuid( pub async fn find_by_collection_swap_user_uuid_with_member_uuid(
collection_uuid: &str, collection_uuid: &CollectionId,
conn: &mut DbConn, conn: &mut DbConn,
) -> Vec<Self> { ) -> Vec<CollectionMembership> {
db_run! { conn: { let col_users = db_run! { conn: {
users_collections::table users_collections::table
.filter(users_collections::collection_uuid.eq(collection_uuid)) .filter(users_collections::collection_uuid.eq(collection_uuid))
.inner_join(users_organizations::table.on(users_organizations::user_uuid.eq(users_collections::user_uuid))) .inner_join(users_organizations::table.on(users_organizations::user_uuid.eq(users_collections::user_uuid)))
@ -619,12 +640,13 @@ impl CollectionUser {
.load::<CollectionUserDb>(conn) .load::<CollectionUserDb>(conn)
.expect("Error loading users_collections") .expect("Error loading users_collections")
.from_db() .from_db()
}} }};
col_users.into_iter().map(|c| c.into()).collect()
} }
pub async fn find_by_collection_and_user( pub async fn find_by_collection_and_user(
collection_uuid: &str, collection_uuid: &CollectionId,
user_uuid: &str, user_uuid: &UserId,
conn: &mut DbConn, conn: &mut DbConn,
) -> Option<Self> { ) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
@ -638,7 +660,7 @@ impl CollectionUser {
}} }}
} }
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
users_collections::table users_collections::table
.filter(users_collections::user_uuid.eq(user_uuid)) .filter(users_collections::user_uuid.eq(user_uuid))
@ -649,7 +671,7 @@ impl CollectionUser {
}} }}
} }
pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult {
for collection in CollectionUser::find_by_collection(collection_uuid, conn).await.iter() { for collection in CollectionUser::find_by_collection(collection_uuid, conn).await.iter() {
User::update_uuid_revision(&collection.user_uuid, conn).await; User::update_uuid_revision(&collection.user_uuid, conn).await;
} }
@ -661,7 +683,11 @@ impl CollectionUser {
}} }}
} }
pub async fn delete_all_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_user_and_org(
user_uuid: &UserId,
org_uuid: &OrganizationId,
conn: &mut DbConn,
) -> EmptyResult {
let collectionusers = Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn).await; let collectionusers = Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn).await;
db_run! { conn: { db_run! { conn: {
@ -677,14 +703,18 @@ impl CollectionUser {
}} }}
} }
pub async fn has_access_to_collection_by_user(col_id: &str, user_uuid: &str, conn: &mut DbConn) -> bool { pub async fn has_access_to_collection_by_user(
col_id: &CollectionId,
user_uuid: &UserId,
conn: &mut DbConn,
) -> bool {
Self::find_by_collection_and_user(col_id, user_uuid, conn).await.is_some() Self::find_by_collection_and_user(col_id, user_uuid, conn).await.is_some()
} }
} }
/// Database methods /// Database methods
impl CollectionCipher { impl CollectionCipher {
pub async fn save(cipher_uuid: &str, collection_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn save(cipher_uuid: &CipherId, collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult {
Self::update_users_revision(collection_uuid, conn).await; Self::update_users_revision(collection_uuid, conn).await;
db_run! { conn: db_run! { conn:
@ -714,7 +744,7 @@ impl CollectionCipher {
} }
} }
pub async fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete(cipher_uuid: &CipherId, collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult {
Self::update_users_revision(collection_uuid, conn).await; Self::update_users_revision(collection_uuid, conn).await;
db_run! { conn: { db_run! { conn: {
@ -728,7 +758,7 @@ impl CollectionCipher {
}} }}
} }
pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: { db_run! { conn: {
diesel::delete(ciphers_collections::table.filter(ciphers_collections::cipher_uuid.eq(cipher_uuid))) diesel::delete(ciphers_collections::table.filter(ciphers_collections::cipher_uuid.eq(cipher_uuid)))
.execute(conn) .execute(conn)
@ -736,7 +766,7 @@ impl CollectionCipher {
}} }}
} }
pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: { db_run! { conn: {
diesel::delete(ciphers_collections::table.filter(ciphers_collections::collection_uuid.eq(collection_uuid))) diesel::delete(ciphers_collections::table.filter(ciphers_collections::collection_uuid.eq(collection_uuid)))
.execute(conn) .execute(conn)
@ -744,9 +774,60 @@ impl CollectionCipher {
}} }}
} }
pub async fn update_users_revision(collection_uuid: &str, conn: &mut DbConn) { pub async fn update_users_revision(collection_uuid: &CollectionId, conn: &mut DbConn) {
if let Some(collection) = Collection::find_by_uuid(collection_uuid, conn).await { if let Some(collection) = Collection::find_by_uuid(collection_uuid, conn).await {
collection.update_users_revision(conn).await; collection.update_users_revision(conn).await;
} }
} }
} }
// Added in case we need the membership_uuid instead of the user_uuid
pub struct CollectionMembership {
pub membership_uuid: MembershipId,
pub collection_uuid: CollectionId,
pub read_only: bool,
pub hide_passwords: bool,
}
impl CollectionMembership {
pub fn to_json_details_for_user(&self, membership_type: i32) -> Value {
json!({
"id": self.membership_uuid,
"readOnly": self.read_only,
"hidePasswords": self.hide_passwords,
"manage": membership_type >= MembershipType::Admin
|| (membership_type == MembershipType::Manager
&& !self.read_only
&& !self.hide_passwords),
})
}
}
impl From<CollectionUser> for CollectionMembership {
fn from(c: CollectionUser) -> Self {
Self {
membership_uuid: c.user_uuid.to_string().into(),
collection_uuid: c.collection_uuid,
read_only: c.read_only,
hide_passwords: c.hide_passwords,
}
}
}
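// Editor's sketch (not the crate's own types) of the "manage" rule used in
// to_json_details_for_user above: admins and owners always manage a collection, while a
// manager only manages it when the assignment is neither read-only nor hiding passwords.
// The ordering below is an assumption for illustration; the real MembershipType defines
// its own comparison logic.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum Role {
    User,
    Manager,
    Admin,
    Owner,
}

fn can_manage(role: Role, read_only: bool, hide_passwords: bool) -> bool {
    role >= Role::Admin || (role == Role::Manager && !read_only && !hide_passwords)
}

fn main() {
    assert!(can_manage(Role::Owner, true, true)); // owners always manage
    assert!(can_manage(Role::Manager, false, false)); // a full-rights manager manages
    assert!(!can_manage(Role::Manager, true, false)); // a read-only manager does not
    assert!(!can_manage(Role::User, false, false)); // plain users never manage
}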
#[derive(
Clone,
Debug,
AsRef,
Deref,
DieselNewType,
Display,
From,
FromForm,
Hash,
PartialEq,
Eq,
Serialize,
Deserialize,
UuidFromParam,
)]
pub struct CollectionId(String);
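// Editor's sketch of the newtype-ID pattern this file now uses. CollectionId and UserId are
// stand-ins defined locally; the real types also derive DieselNewType, Display, UuidFromParam,
// etc. so they work directly in queries and route parameters. The payoff is that swapping a
// collection UUID and a user UUID no longer compiles.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct CollectionId(String);

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct UserId(String);

fn is_writable_by(_collection: &CollectionId, _user: &UserId) -> bool {
    // Real code would run the Diesel query shown above; this stub only illustrates the
    // typed signature.
    true
}

fn main() {
    let col = CollectionId("c0ffee00-0000-0000-0000-000000000000".into());
    let user = UserId("deadbeef-0000-0000-0000-000000000000".into());
    assert!(is_writable_by(&col, &user));
    // is_writable_by(&user, &col); // type error: the arguments can no longer be swapped
}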

101 src/db/models/device.rs

@ -1,7 +1,9 @@
use chrono::{NaiveDateTime, Utc}; use chrono::{NaiveDateTime, Utc};
use derive_more::{Display, From};
use super::UserId;
use crate::{crypto, CONFIG}; use crate::{crypto, CONFIG};
use core::fmt; use macros::IdFromParam;
db_object! { db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)] #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@ -9,11 +11,11 @@ db_object! {
#[diesel(treat_none_as_null = true)] #[diesel(treat_none_as_null = true)]
#[diesel(primary_key(uuid, user_uuid))] #[diesel(primary_key(uuid, user_uuid))]
pub struct Device { pub struct Device {
pub uuid: String, pub uuid: DeviceId,
pub created_at: NaiveDateTime, pub created_at: NaiveDateTime,
pub updated_at: NaiveDateTime, pub updated_at: NaiveDateTime,
pub user_uuid: String, pub user_uuid: UserId,
pub name: String, pub name: String,
pub atype: i32, // https://github.com/bitwarden/server/blob/dcc199bcce4aa2d5621f6fab80f1b49d8b143418/src/Core/Enums/DeviceType.cs pub atype: i32, // https://github.com/bitwarden/server/blob/dcc199bcce4aa2d5621f6fab80f1b49d8b143418/src/Core/Enums/DeviceType.cs
@ -28,7 +30,7 @@ db_object! {
/// Local methods /// Local methods
impl Device { impl Device {
pub fn new(uuid: String, user_uuid: String, name: String, atype: i32) -> Self { pub fn new(uuid: DeviceId, user_uuid: UserId, name: String, atype: i32) -> Self {
let now = Utc::now().naive_utc(); let now = Utc::now().naive_utc();
Self { Self {
@ -75,12 +77,12 @@ impl Device {
// Also These key/value pairs are not used anywhere by either Vaultwarden or Bitwarden Clients // Also These key/value pairs are not used anywhere by either Vaultwarden or Bitwarden Clients
// Because these might get used in the future, and they are added by the Bitwarden Server, lets keep it, but then commented out // Because these might get used in the future, and they are added by the Bitwarden Server, lets keep it, but then commented out
// --- // ---
// fn arg: orgs: Vec<super::UserOrganization>, // fn arg: members: Vec<super::Membership>,
// --- // ---
// let orgowner: Vec<_> = orgs.iter().filter(|o| o.atype == 0).map(|o| o.org_uuid.clone()).collect(); // let orgowner: Vec<_> = members.iter().filter(|m| m.atype == 0).map(|o| o.org_uuid.clone()).collect();
// let orgadmin: Vec<_> = orgs.iter().filter(|o| o.atype == 1).map(|o| o.org_uuid.clone()).collect(); // let orgadmin: Vec<_> = members.iter().filter(|m| m.atype == 1).map(|o| o.org_uuid.clone()).collect();
// let orguser: Vec<_> = orgs.iter().filter(|o| o.atype == 2).map(|o| o.org_uuid.clone()).collect(); // let orguser: Vec<_> = members.iter().filter(|m| m.atype == 2).map(|o| o.org_uuid.clone()).collect();
// let orgmanager: Vec<_> = orgs.iter().filter(|o| o.atype == 3).map(|o| o.org_uuid.clone()).collect(); // let orgmanager: Vec<_> = members.iter().filter(|m| m.atype == 3).map(|o| o.org_uuid.clone()).collect();
// Create the JWT claims struct, to send to the client // Create the JWT claims struct, to send to the client
use crate::auth::{encode_jwt, LoginJwtClaims, DEFAULT_VALIDITY, JWT_LOGIN_ISSUER}; use crate::auth::{encode_jwt, LoginJwtClaims, DEFAULT_VALIDITY, JWT_LOGIN_ISSUER};
@ -150,7 +152,7 @@ impl Device {
} }
} }
pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: { db_run! { conn: {
diesel::delete(devices::table.filter(devices::user_uuid.eq(user_uuid))) diesel::delete(devices::table.filter(devices::user_uuid.eq(user_uuid)))
.execute(conn) .execute(conn)
@ -158,7 +160,7 @@ impl Device {
}} }}
} }
pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_uuid_and_user(uuid: &DeviceId, user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
devices::table devices::table
.filter(devices::uuid.eq(uuid)) .filter(devices::uuid.eq(uuid))
@ -169,7 +171,7 @@ impl Device {
}} }}
} }
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
devices::table devices::table
.filter(devices::user_uuid.eq(user_uuid)) .filter(devices::user_uuid.eq(user_uuid))
@ -179,7 +181,7 @@ impl Device {
}} }}
} }
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_uuid(uuid: &DeviceId, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
devices::table devices::table
.filter(devices::uuid.eq(uuid)) .filter(devices::uuid.eq(uuid))
@ -189,7 +191,7 @@ impl Device {
}} }}
} }
pub async fn clear_push_token_by_uuid(uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn clear_push_token_by_uuid(uuid: &DeviceId, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: { db_run! { conn: {
diesel::update(devices::table) diesel::update(devices::table)
.filter(devices::uuid.eq(uuid)) .filter(devices::uuid.eq(uuid))
@ -208,7 +210,7 @@ impl Device {
}} }}
} }
pub async fn find_latest_active_by_user(user_uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_latest_active_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
devices::table devices::table
.filter(devices::user_uuid.eq(user_uuid)) .filter(devices::user_uuid.eq(user_uuid))
@ -219,7 +221,7 @@ impl Device {
}} }}
} }
pub async fn find_push_devices_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_push_devices_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
devices::table devices::table
.filter(devices::user_uuid.eq(user_uuid)) .filter(devices::user_uuid.eq(user_uuid))
@ -230,7 +232,7 @@ impl Device {
}} }}
} }
pub async fn check_user_has_push_device(user_uuid: &str, conn: &mut DbConn) -> bool { pub async fn check_user_has_push_device(user_uuid: &UserId, conn: &mut DbConn) -> bool {
db_run! { conn: { db_run! { conn: {
devices::table devices::table
.filter(devices::user_uuid.eq(user_uuid)) .filter(devices::user_uuid.eq(user_uuid))
@ -243,68 +245,62 @@ impl Device {
} }
} }
#[derive(Display)]
pub enum DeviceType { pub enum DeviceType {
#[display("Android")]
Android = 0, Android = 0,
#[display("iOS")]
Ios = 1, Ios = 1,
#[display("Chrome Extension")]
ChromeExtension = 2, ChromeExtension = 2,
#[display("Firefox Extension")]
FirefoxExtension = 3, FirefoxExtension = 3,
#[display("Opera Extension")]
OperaExtension = 4, OperaExtension = 4,
#[display("Edge Extension")]
EdgeExtension = 5, EdgeExtension = 5,
#[display("Windows")]
WindowsDesktop = 6, WindowsDesktop = 6,
#[display("macOS")]
MacOsDesktop = 7, MacOsDesktop = 7,
#[display("Linux")]
LinuxDesktop = 8, LinuxDesktop = 8,
#[display("Chrome")]
ChromeBrowser = 9, ChromeBrowser = 9,
#[display("Firefox")]
FirefoxBrowser = 10, FirefoxBrowser = 10,
#[display("Opera")]
OperaBrowser = 11, OperaBrowser = 11,
#[display("Edge")]
EdgeBrowser = 12, EdgeBrowser = 12,
#[display("Internet Explorer")]
IEBrowser = 13, IEBrowser = 13,
#[display("Unknown Browser")]
UnknownBrowser = 14, UnknownBrowser = 14,
#[display("Android")]
AndroidAmazon = 15, AndroidAmazon = 15,
#[display("UWP")]
Uwp = 16, Uwp = 16,
#[display("Safari")]
SafariBrowser = 17, SafariBrowser = 17,
#[display("Vivaldi")]
VivaldiBrowser = 18, VivaldiBrowser = 18,
#[display("Vivaldi Extension")]
VivaldiExtension = 19, VivaldiExtension = 19,
#[display("Safari Extension")]
SafariExtension = 20, SafariExtension = 20,
#[display("SDK")]
Sdk = 21, Sdk = 21,
#[display("Server")]
Server = 22, Server = 22,
#[display("Windows CLI")]
WindowsCLI = 23, WindowsCLI = 23,
#[display("macOS CLI")]
MacOsCLI = 24, MacOsCLI = 24,
#[display("Linux CLI")]
LinuxCLI = 25, LinuxCLI = 25,
} }
impl fmt::Display for DeviceType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
DeviceType::Android => write!(f, "Android"),
DeviceType::Ios => write!(f, "iOS"),
DeviceType::ChromeExtension => write!(f, "Chrome Extension"),
DeviceType::FirefoxExtension => write!(f, "Firefox Extension"),
DeviceType::OperaExtension => write!(f, "Opera Extension"),
DeviceType::EdgeExtension => write!(f, "Edge Extension"),
DeviceType::WindowsDesktop => write!(f, "Windows"),
DeviceType::MacOsDesktop => write!(f, "macOS"),
DeviceType::LinuxDesktop => write!(f, "Linux"),
DeviceType::ChromeBrowser => write!(f, "Chrome"),
DeviceType::FirefoxBrowser => write!(f, "Firefox"),
DeviceType::OperaBrowser => write!(f, "Opera"),
DeviceType::EdgeBrowser => write!(f, "Edge"),
DeviceType::IEBrowser => write!(f, "Internet Explorer"),
DeviceType::UnknownBrowser => write!(f, "Unknown Browser"),
DeviceType::AndroidAmazon => write!(f, "Android"),
DeviceType::Uwp => write!(f, "UWP"),
DeviceType::SafariBrowser => write!(f, "Safari"),
DeviceType::VivaldiBrowser => write!(f, "Vivaldi"),
DeviceType::VivaldiExtension => write!(f, "Vivaldi Extension"),
DeviceType::SafariExtension => write!(f, "Safari Extension"),
DeviceType::Sdk => write!(f, "SDK"),
DeviceType::Server => write!(f, "Server"),
DeviceType::WindowsCLI => write!(f, "Windows CLI"),
DeviceType::MacOsCLI => write!(f, "macOS CLI"),
DeviceType::LinuxCLI => write!(f, "Linux CLI"),
}
}
}
impl DeviceType { impl DeviceType {
pub fn from_i32(value: i32) -> DeviceType { pub fn from_i32(value: i32) -> DeviceType {
match value { match value {
@ -338,3 +334,8 @@ impl DeviceType {
} }
} }
} }
#[derive(
Clone, Debug, DieselNewType, Display, From, FromForm, Hash, PartialEq, Eq, Serialize, Deserialize, IdFromParam,
)]
pub struct DeviceId(String);
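// Editor's sketch of the behavior the derive_more #[derive(Display)] above preserves when it
// replaces the manual impl: each variant maps to a fixed label, so for example
// DeviceType::WindowsDesktop still renders as "Windows". Only a few variants are reproduced
// here, and this hand-written impl is for illustration only.
use std::fmt;

enum DeviceType {
    Android = 0,
    Ios = 1,
    WindowsDesktop = 6,
}

impl fmt::Display for DeviceType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let label = match self {
            DeviceType::Android => "Android",
            DeviceType::Ios => "iOS",
            DeviceType::WindowsDesktop => "Windows",
        };
        write!(f, "{label}")
    }
}

fn main() {
    assert_eq!(DeviceType::WindowsDesktop.to_string(), "Windows");
    assert_eq!(DeviceType::Ios.to_string(), "iOS");
    let _ = DeviceType::Android as i32; // discriminants are unchanged by the refactor
}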

72 src/db/models/emergency_access.rs

@ -1,9 +1,10 @@
use chrono::{NaiveDateTime, Utc}; use chrono::{NaiveDateTime, Utc};
use derive_more::{AsRef, Deref, Display, From};
use serde_json::Value; use serde_json::Value;
use super::{User, UserId};
use crate::{api::EmptyResult, db::DbConn, error::MapResult}; use crate::{api::EmptyResult, db::DbConn, error::MapResult};
use macros::UuidFromParam;
use super::User;
db_object! { db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)] #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@ -11,9 +12,9 @@ db_object! {
#[diesel(treat_none_as_null = true)] #[diesel(treat_none_as_null = true)]
#[diesel(primary_key(uuid))] #[diesel(primary_key(uuid))]
pub struct EmergencyAccess { pub struct EmergencyAccess {
pub uuid: String, pub uuid: EmergencyAccessId,
pub grantor_uuid: String, pub grantor_uuid: UserId,
pub grantee_uuid: Option<String>, pub grantee_uuid: Option<UserId>,
pub email: Option<String>, pub email: Option<String>,
pub key_encrypted: Option<String>, pub key_encrypted: Option<String>,
pub atype: i32, //EmergencyAccessType pub atype: i32, //EmergencyAccessType
@ -29,11 +30,11 @@ db_object! {
// Local methods // Local methods
impl EmergencyAccess { impl EmergencyAccess {
pub fn new(grantor_uuid: String, email: String, status: i32, atype: i32, wait_time_days: i32) -> Self { pub fn new(grantor_uuid: UserId, email: String, status: i32, atype: i32, wait_time_days: i32) -> Self {
let now = Utc::now().naive_utc(); let now = Utc::now().naive_utc();
Self { Self {
uuid: crate::util::get_uuid(), uuid: EmergencyAccessId(crate::util::get_uuid()),
grantor_uuid, grantor_uuid,
grantee_uuid: None, grantee_uuid: None,
email: Some(email), email: Some(email),
@ -82,7 +83,7 @@ impl EmergencyAccess {
} }
pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Option<Value> { pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Option<Value> {
let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() { let grantee_user = if let Some(grantee_uuid) = &self.grantee_uuid {
User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.") User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")
} else if let Some(email) = self.email.as_deref() { } else if let Some(email) = self.email.as_deref() {
match User::find_by_mail(email, conn).await { match User::find_by_mail(email, conn).await {
@ -211,7 +212,7 @@ impl EmergencyAccess {
}} }}
} }
pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
for ea in Self::find_all_by_grantor_uuid(user_uuid, conn).await { for ea in Self::find_all_by_grantor_uuid(user_uuid, conn).await {
ea.delete(conn).await?; ea.delete(conn).await?;
} }
@ -239,8 +240,8 @@ impl EmergencyAccess {
} }
pub async fn find_by_grantor_uuid_and_grantee_uuid_or_email( pub async fn find_by_grantor_uuid_and_grantee_uuid_or_email(
grantor_uuid: &str, grantor_uuid: &UserId,
grantee_uuid: &str, grantee_uuid: &UserId,
email: &str, email: &str,
conn: &mut DbConn, conn: &mut DbConn,
) -> Option<Self> { ) -> Option<Self> {
@ -262,7 +263,11 @@ impl EmergencyAccess {
}} }}
} }
pub async fn find_by_uuid_and_grantor_uuid(uuid: &str, grantor_uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_uuid_and_grantor_uuid(
uuid: &EmergencyAccessId,
grantor_uuid: &UserId,
conn: &mut DbConn,
) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
emergency_access::table emergency_access::table
.filter(emergency_access::uuid.eq(uuid)) .filter(emergency_access::uuid.eq(uuid))
@ -272,7 +277,11 @@ impl EmergencyAccess {
}} }}
} }
pub async fn find_by_uuid_and_grantee_uuid(uuid: &str, grantee_uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_uuid_and_grantee_uuid(
uuid: &EmergencyAccessId,
grantee_uuid: &UserId,
conn: &mut DbConn,
) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
emergency_access::table emergency_access::table
.filter(emergency_access::uuid.eq(uuid)) .filter(emergency_access::uuid.eq(uuid))
@ -282,7 +291,11 @@ impl EmergencyAccess {
}} }}
} }
pub async fn find_by_uuid_and_grantee_email(uuid: &str, grantee_email: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_uuid_and_grantee_email(
uuid: &EmergencyAccessId,
grantee_email: &str,
conn: &mut DbConn,
) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
emergency_access::table emergency_access::table
.filter(emergency_access::uuid.eq(uuid)) .filter(emergency_access::uuid.eq(uuid))
@ -292,7 +305,7 @@ impl EmergencyAccess {
}} }}
} }
pub async fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_all_by_grantee_uuid(grantee_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
emergency_access::table emergency_access::table
.filter(emergency_access::grantee_uuid.eq(grantee_uuid)) .filter(emergency_access::grantee_uuid.eq(grantee_uuid))
@ -319,7 +332,7 @@ impl EmergencyAccess {
}} }}
} }
pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_all_by_grantor_uuid(grantor_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
emergency_access::table emergency_access::table
.filter(emergency_access::grantor_uuid.eq(grantor_uuid)) .filter(emergency_access::grantor_uuid.eq(grantor_uuid))
@ -327,7 +340,12 @@ impl EmergencyAccess {
}} }}
} }
pub async fn accept_invite(&mut self, grantee_uuid: &str, grantee_email: &str, conn: &mut DbConn) -> EmptyResult { pub async fn accept_invite(
&mut self,
grantee_uuid: &UserId,
grantee_email: &str,
conn: &mut DbConn,
) -> EmptyResult {
if self.email.is_none() || self.email.as_ref().unwrap() != grantee_email { if self.email.is_none() || self.email.as_ref().unwrap() != grantee_email {
err!("User email does not match invite."); err!("User email does not match invite.");
} }
@ -337,10 +355,28 @@ impl EmergencyAccess {
} }
self.status = EmergencyAccessStatus::Accepted as i32; self.status = EmergencyAccessStatus::Accepted as i32;
self.grantee_uuid = Some(String::from(grantee_uuid)); self.grantee_uuid = Some(grantee_uuid.clone());
self.email = None; self.email = None;
self.save(conn).await self.save(conn).await
} }
} }
// endregion // endregion
#[derive(
Clone,
Debug,
AsRef,
Deref,
DieselNewType,
Display,
From,
FromForm,
Hash,
PartialEq,
Eq,
Serialize,
Deserialize,
UuidFromParam,
)]
pub struct EmergencyAccessId(String);
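// Editor's sketch of the scoped-lookup pattern used above: instead of fetching an
// EmergencyAccess row by uuid alone, callers must also supply the grantor (or grantee) they
// are acting as, so records belonging to other users are simply not found. The types and the
// in-memory store here are stand-ins, not the crate's.
use std::collections::HashMap;

#[derive(Clone, PartialEq, Eq, Hash)]
struct EmergencyAccessId(String);
#[derive(Clone, PartialEq, Eq)]
struct UserId(String);

struct EmergencyAccess {
    grantor_uuid: UserId,
}

fn find_by_uuid_and_grantor(
    store: &HashMap<EmergencyAccessId, EmergencyAccess>,
    uuid: &EmergencyAccessId,
    grantor: &UserId,
) -> Option<&EmergencyAccess> {
    store.get(uuid).filter(|ea| &ea.grantor_uuid == grantor)
}

fn main() {
    let mut store = HashMap::new();
    let id = EmergencyAccessId("ea-1".into());
    store.insert(id.clone(), EmergencyAccess { grantor_uuid: UserId("alice".into()) });

    assert!(find_by_uuid_and_grantor(&store, &id, &UserId("alice".into())).is_some());
    // A different user cannot reach the same record just by knowing its id.
    assert!(find_by_uuid_and_grantor(&store, &id, &UserId("mallory".into())).is_none());
}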

45 src/db/models/event.rs

@ -1,9 +1,9 @@
use crate::db::DbConn; use chrono::{NaiveDateTime, TimeDelta, Utc};
//use derive_more::{AsRef, Deref, Display, From};
use serde_json::Value; use serde_json::Value;
use crate::{api::EmptyResult, error::MapResult, CONFIG}; use super::{CipherId, CollectionId, GroupId, MembershipId, OrgPolicyId, OrganizationId, UserId};
use crate::{api::EmptyResult, db::DbConn, error::MapResult, CONFIG};
use chrono::{NaiveDateTime, TimeDelta, Utc};
// https://bitwarden.com/help/event-logs/ // https://bitwarden.com/help/event-logs/
@ -15,20 +15,20 @@ db_object! {
#[diesel(table_name = event)] #[diesel(table_name = event)]
#[diesel(primary_key(uuid))] #[diesel(primary_key(uuid))]
pub struct Event { pub struct Event {
pub uuid: String, pub uuid: EventId,
pub event_type: i32, // EventType pub event_type: i32, // EventType
pub user_uuid: Option<String>, pub user_uuid: Option<UserId>,
pub org_uuid: Option<String>, pub org_uuid: Option<OrganizationId>,
pub cipher_uuid: Option<String>, pub cipher_uuid: Option<CipherId>,
pub collection_uuid: Option<String>, pub collection_uuid: Option<CollectionId>,
pub group_uuid: Option<String>, pub group_uuid: Option<GroupId>,
pub org_user_uuid: Option<String>, pub org_user_uuid: Option<MembershipId>,
pub act_user_uuid: Option<String>, pub act_user_uuid: Option<UserId>,
// Upstream enum: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Enums/DeviceType.cs // Upstream enum: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Enums/DeviceType.cs
pub device_type: Option<i32>, pub device_type: Option<i32>,
pub ip_address: Option<String>, pub ip_address: Option<String>,
pub event_date: NaiveDateTime, pub event_date: NaiveDateTime,
pub policy_uuid: Option<String>, pub policy_uuid: Option<OrgPolicyId>,
pub provider_uuid: Option<String>, pub provider_uuid: Option<String>,
pub provider_user_uuid: Option<String>, pub provider_user_uuid: Option<String>,
pub provider_org_uuid: Option<String>, pub provider_org_uuid: Option<String>,
@ -128,7 +128,7 @@ impl Event {
}; };
Self { Self {
uuid: crate::util::get_uuid(), uuid: EventId(crate::util::get_uuid()),
event_type, event_type,
user_uuid: None, user_uuid: None,
org_uuid: None, org_uuid: None,
@ -246,7 +246,7 @@ impl Event {
/// ############## /// ##############
/// Custom Queries /// Custom Queries
pub async fn find_by_organization_uuid( pub async fn find_by_organization_uuid(
org_uuid: &str, org_uuid: &OrganizationId,
start: &NaiveDateTime, start: &NaiveDateTime,
end: &NaiveDateTime, end: &NaiveDateTime,
conn: &mut DbConn, conn: &mut DbConn,
@ -263,7 +263,7 @@ impl Event {
}} }}
} }
pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 { pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
db_run! { conn: { db_run! { conn: {
event::table event::table
.filter(event::org_uuid.eq(org_uuid)) .filter(event::org_uuid.eq(org_uuid))
@ -274,16 +274,16 @@ impl Event {
}} }}
} }
pub async fn find_by_org_and_user_org( pub async fn find_by_org_and_member(
org_uuid: &str, org_uuid: &OrganizationId,
user_org_uuid: &str, member_uuid: &MembershipId,
start: &NaiveDateTime, start: &NaiveDateTime,
end: &NaiveDateTime, end: &NaiveDateTime,
conn: &mut DbConn, conn: &mut DbConn,
) -> Vec<Self> { ) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
event::table event::table
.inner_join(users_organizations::table.on(users_organizations::uuid.eq(user_org_uuid))) .inner_join(users_organizations::table.on(users_organizations::uuid.eq(member_uuid)))
.filter(event::org_uuid.eq(org_uuid)) .filter(event::org_uuid.eq(org_uuid))
.filter(event::event_date.between(start, end)) .filter(event::event_date.between(start, end))
.filter(event::user_uuid.eq(users_organizations::user_uuid.nullable()).or(event::act_user_uuid.eq(users_organizations::user_uuid.nullable()))) .filter(event::user_uuid.eq(users_organizations::user_uuid.nullable()).or(event::act_user_uuid.eq(users_organizations::user_uuid.nullable())))
@ -297,7 +297,7 @@ impl Event {
} }
pub async fn find_by_cipher_uuid( pub async fn find_by_cipher_uuid(
cipher_uuid: &str, cipher_uuid: &CipherId,
start: &NaiveDateTime, start: &NaiveDateTime,
end: &NaiveDateTime, end: &NaiveDateTime,
conn: &mut DbConn, conn: &mut DbConn,
@ -327,3 +327,6 @@ impl Event {
} }
} }
} }
#[derive(Clone, Debug, DieselNewType, FromForm, Hash, PartialEq, Eq, Serialize, Deserialize)]
pub struct EventId(String);
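// Editor's sketch of the filter in find_by_org_and_member above: within one organization's
// events, keep those where the member's user appears either as the affected user or as the
// acting user. The real query resolves the member to its user via a join on
// users_organizations; here the user id is passed directly and plain Strings stand in for the
// typed ids.
struct Event {
    org_uuid: String,
    user_uuid: Option<String>,
    act_user_uuid: Option<String>,
}

fn events_for_member<'a>(events: &'a [Event], org: &str, member_user: &str) -> Vec<&'a Event> {
    events
        .iter()
        .filter(|e| e.org_uuid == org)
        .filter(|e| {
            e.user_uuid.as_deref() == Some(member_user)
                || e.act_user_uuid.as_deref() == Some(member_user)
        })
        .collect()
}

fn main() {
    let events = vec![
        Event { org_uuid: "org1".into(), user_uuid: Some("u1".into()), act_user_uuid: None },
        Event { org_uuid: "org1".into(), user_uuid: None, act_user_uuid: Some("u1".into()) },
        Event { org_uuid: "org2".into(), user_uuid: Some("u1".into()), act_user_uuid: None },
    ];
    // Both org1 events mention u1 (as target or as actor); the org2 event is excluded.
    assert_eq!(events_for_member(&events, "org1", "u1").len(), 2);
}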

23 src/db/models/favorite.rs

@ -1,12 +1,12 @@
use super::User; use super::{CipherId, User, UserId};
db_object! { db_object! {
#[derive(Identifiable, Queryable, Insertable)] #[derive(Identifiable, Queryable, Insertable)]
#[diesel(table_name = favorites)] #[diesel(table_name = favorites)]
#[diesel(primary_key(user_uuid, cipher_uuid))] #[diesel(primary_key(user_uuid, cipher_uuid))]
pub struct Favorite { pub struct Favorite {
pub user_uuid: String, pub user_uuid: UserId,
pub cipher_uuid: String, pub cipher_uuid: CipherId,
} }
} }
@ -17,7 +17,7 @@ use crate::error::MapResult;
impl Favorite { impl Favorite {
// Returns whether the specified cipher is a favorite of the specified user. // Returns whether the specified cipher is a favorite of the specified user.
pub async fn is_favorite(cipher_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> bool { pub async fn is_favorite(cipher_uuid: &CipherId, user_uuid: &UserId, conn: &mut DbConn) -> bool {
db_run! { conn: { db_run! { conn: {
let query = favorites::table let query = favorites::table
.filter(favorites::cipher_uuid.eq(cipher_uuid)) .filter(favorites::cipher_uuid.eq(cipher_uuid))
@ -29,7 +29,12 @@ impl Favorite {
} }
// Sets whether the specified cipher is a favorite of the specified user. // Sets whether the specified cipher is a favorite of the specified user.
pub async fn set_favorite(favorite: bool, cipher_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn set_favorite(
favorite: bool,
cipher_uuid: &CipherId,
user_uuid: &UserId,
conn: &mut DbConn,
) -> EmptyResult {
let (old, new) = (Self::is_favorite(cipher_uuid, user_uuid, conn).await, favorite); let (old, new) = (Self::is_favorite(cipher_uuid, user_uuid, conn).await, favorite);
match (old, new) { match (old, new) {
(false, true) => { (false, true) => {
@ -62,7 +67,7 @@ impl Favorite {
} }
// Delete all favorite entries associated with the specified cipher. // Delete all favorite entries associated with the specified cipher.
pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: { db_run! { conn: {
diesel::delete(favorites::table.filter(favorites::cipher_uuid.eq(cipher_uuid))) diesel::delete(favorites::table.filter(favorites::cipher_uuid.eq(cipher_uuid)))
.execute(conn) .execute(conn)
@ -71,7 +76,7 @@ impl Favorite {
} }
// Delete all favorite entries associated with the specified user. // Delete all favorite entries associated with the specified user.
pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: { db_run! { conn: {
diesel::delete(favorites::table.filter(favorites::user_uuid.eq(user_uuid))) diesel::delete(favorites::table.filter(favorites::user_uuid.eq(user_uuid)))
.execute(conn) .execute(conn)
@ -81,12 +86,12 @@ impl Favorite {
/// Return a vec with (cipher_uuid) this will only contain favorite flagged ciphers /// Return a vec with (cipher_uuid) this will only contain favorite flagged ciphers
/// This is used during a full sync so we only need one query for all favorite cipher matches. /// This is used during a full sync so we only need one query for all favorite cipher matches.
pub async fn get_all_cipher_uuid_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<String> { pub async fn get_all_cipher_uuid_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<CipherId> {
db_run! { conn: { db_run! { conn: {
favorites::table favorites::table
.filter(favorites::user_uuid.eq(user_uuid)) .filter(favorites::user_uuid.eq(user_uuid))
.select(favorites::cipher_uuid) .select(favorites::cipher_uuid)
.load::<String>(conn) .load::<CipherId>(conn)
.unwrap_or_default() .unwrap_or_default()
}} }}
} }
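// Editor's sketch of the (old, new) transition handling in set_favorite above: only an actual
// state change touches storage; (false, true) inserts and (true, false) presumably deletes,
// while the two no-change cases fall through without a write. A HashSet stands in for the
// favorites table.
use std::collections::HashSet;

fn set_favorite(favorites: &mut HashSet<(String, String)>, user: &str, cipher: &str, favorite: bool) {
    let key = (user.to_string(), cipher.to_string());
    let old = favorites.contains(&key);
    match (old, favorite) {
        (false, true) => {
            favorites.insert(key); // newly flagged as favorite
        }
        (true, false) => {
            favorites.remove(&key); // un-favorited
        }
        (false, false) | (true, true) => {} // nothing changed, no write needed
    }
}

fn main() {
    let mut favorites = HashSet::new();
    set_favorite(&mut favorites, "u1", "c1", true);
    assert!(favorites.contains(&("u1".to_string(), "c1".to_string())));
    set_favorite(&mut favorites, "u1", "c1", true); // idempotent no-op
    set_favorite(&mut favorites, "u1", "c1", false);
    assert!(favorites.is_empty());
}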

63 src/db/models/folder.rs

@ -1,17 +1,19 @@
use chrono::{NaiveDateTime, Utc}; use chrono::{NaiveDateTime, Utc};
use derive_more::{AsRef, Deref, Display, From};
use serde_json::Value; use serde_json::Value;
use super::User; use super::{CipherId, User, UserId};
use macros::UuidFromParam;
db_object! { db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)] #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = folders)] #[diesel(table_name = folders)]
#[diesel(primary_key(uuid))] #[diesel(primary_key(uuid))]
pub struct Folder { pub struct Folder {
pub uuid: String, pub uuid: FolderId,
pub created_at: NaiveDateTime, pub created_at: NaiveDateTime,
pub updated_at: NaiveDateTime, pub updated_at: NaiveDateTime,
pub user_uuid: String, pub user_uuid: UserId,
pub name: String, pub name: String,
} }
@ -19,18 +21,18 @@ db_object! {
#[diesel(table_name = folders_ciphers)] #[diesel(table_name = folders_ciphers)]
#[diesel(primary_key(cipher_uuid, folder_uuid))] #[diesel(primary_key(cipher_uuid, folder_uuid))]
pub struct FolderCipher { pub struct FolderCipher {
pub cipher_uuid: String, pub cipher_uuid: CipherId,
pub folder_uuid: String, pub folder_uuid: FolderId,
} }
} }
/// Local methods /// Local methods
impl Folder { impl Folder {
pub fn new(user_uuid: String, name: String) -> Self { pub fn new(user_uuid: UserId, name: String) -> Self {
let now = Utc::now().naive_utc(); let now = Utc::now().naive_utc();
Self { Self {
uuid: crate::util::get_uuid(), uuid: FolderId(crate::util::get_uuid()),
created_at: now, created_at: now,
updated_at: now, updated_at: now,
@ -52,10 +54,10 @@ impl Folder {
} }
impl FolderCipher { impl FolderCipher {
pub fn new(folder_uuid: &str, cipher_uuid: &str) -> Self { pub fn new(folder_uuid: FolderId, cipher_uuid: CipherId) -> Self {
Self { Self {
folder_uuid: folder_uuid.to_string(), folder_uuid,
cipher_uuid: cipher_uuid.to_string(), cipher_uuid,
} }
} }
} }
@ -113,24 +115,25 @@ impl Folder {
}} }}
} }
pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
for folder in Self::find_by_user(user_uuid, conn).await { for folder in Self::find_by_user(user_uuid, conn).await {
folder.delete(conn).await?; folder.delete(conn).await?;
} }
Ok(()) Ok(())
} }
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_uuid_and_user(uuid: &FolderId, user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
folders::table folders::table
.filter(folders::uuid.eq(uuid)) .filter(folders::uuid.eq(uuid))
.filter(folders::user_uuid.eq(user_uuid))
.first::<FolderDb>(conn) .first::<FolderDb>(conn)
.ok() .ok()
.from_db() .from_db()
}} }}
} }
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
folders::table folders::table
.filter(folders::user_uuid.eq(user_uuid)) .filter(folders::user_uuid.eq(user_uuid))
@ -176,7 +179,7 @@ impl FolderCipher {
}} }}
} }
pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: { db_run! { conn: {
diesel::delete(folders_ciphers::table.filter(folders_ciphers::cipher_uuid.eq(cipher_uuid))) diesel::delete(folders_ciphers::table.filter(folders_ciphers::cipher_uuid.eq(cipher_uuid)))
.execute(conn) .execute(conn)
@ -184,7 +187,7 @@ impl FolderCipher {
}} }}
} }
pub async fn delete_all_by_folder(folder_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: { db_run! { conn: {
diesel::delete(folders_ciphers::table.filter(folders_ciphers::folder_uuid.eq(folder_uuid))) diesel::delete(folders_ciphers::table.filter(folders_ciphers::folder_uuid.eq(folder_uuid)))
.execute(conn) .execute(conn)
@ -192,7 +195,11 @@ impl FolderCipher {
}} }}
} }
pub async fn find_by_folder_and_cipher(folder_uuid: &str, cipher_uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_folder_and_cipher(
folder_uuid: &FolderId,
cipher_uuid: &CipherId,
conn: &mut DbConn,
) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
folders_ciphers::table folders_ciphers::table
.filter(folders_ciphers::folder_uuid.eq(folder_uuid)) .filter(folders_ciphers::folder_uuid.eq(folder_uuid))
@ -203,7 +210,7 @@ impl FolderCipher {
}} }}
} }
pub async fn find_by_folder(folder_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
folders_ciphers::table folders_ciphers::table
.filter(folders_ciphers::folder_uuid.eq(folder_uuid)) .filter(folders_ciphers::folder_uuid.eq(folder_uuid))
@ -215,14 +222,32 @@ impl FolderCipher {
/// Return a vec with (cipher_uuid, folder_uuid) /// Return a vec with (cipher_uuid, folder_uuid)
/// This is used during a full sync so we only need one query for all folder matches. /// This is used during a full sync so we only need one query for all folder matches.
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<(String, String)> { pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<(CipherId, FolderId)> {
db_run! { conn: { db_run! { conn: {
folders_ciphers::table folders_ciphers::table
.inner_join(folders::table) .inner_join(folders::table)
.filter(folders::user_uuid.eq(user_uuid)) .filter(folders::user_uuid.eq(user_uuid))
.select(folders_ciphers::all_columns) .select(folders_ciphers::all_columns)
.load::<(String, String)>(conn) .load::<(CipherId, FolderId)>(conn)
.unwrap_or_default() .unwrap_or_default()
}} }}
} }
} }
#[derive(
Clone,
Debug,
AsRef,
Deref,
DieselNewType,
Display,
From,
FromForm,
Hash,
PartialEq,
Eq,
Serialize,
Deserialize,
UuidFromParam,
)]
pub struct FolderId(String);
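// Editor's sketch of how the single (CipherId, FolderId) query above can be used during a
// full sync: collect the pairs into a map once, then look up each cipher's folder without a
// per-cipher query. Plain Strings stand in for the typed ids and the data is assumed.
use std::collections::HashMap;

fn main() {
    // Result of the one FolderCipher::find_by_user-style query (assumed data).
    let pairs: Vec<(String, String)> = vec![
        ("cipher-1".into(), "folder-a".into()),
        ("cipher-2".into(), "folder-b".into()),
    ];
    let folder_of: HashMap<String, String> = pairs.into_iter().collect();

    // While serializing ciphers for the sync response, each lookup is now O(1).
    assert_eq!(folder_of.get("cipher-1").map(String::as_str), Some("folder-a"));
    assert_eq!(folder_of.get("cipher-3"), None); // cipher not placed in any folder
}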

151 src/db/models/group.rs

@ -1,8 +1,10 @@
use super::{User, UserOrganization}; use super::{CollectionId, Membership, MembershipId, OrganizationId, User, UserId};
use crate::api::EmptyResult; use crate::api::EmptyResult;
use crate::db::DbConn; use crate::db::DbConn;
use crate::error::MapResult; use crate::error::MapResult;
use chrono::{NaiveDateTime, Utc}; use chrono::{NaiveDateTime, Utc};
use derive_more::{AsRef, Deref, Display, From};
use macros::UuidFromParam;
use serde_json::Value; use serde_json::Value;
db_object! { db_object! {
@ -10,8 +12,8 @@ db_object! {
#[diesel(table_name = groups)] #[diesel(table_name = groups)]
#[diesel(primary_key(uuid))] #[diesel(primary_key(uuid))]
pub struct Group { pub struct Group {
pub uuid: String, pub uuid: GroupId,
pub organizations_uuid: String, pub organizations_uuid: OrganizationId,
pub name: String, pub name: String,
pub access_all: bool, pub access_all: bool,
pub external_id: Option<String>, pub external_id: Option<String>,
@ -23,8 +25,8 @@ db_object! {
#[diesel(table_name = collections_groups)] #[diesel(table_name = collections_groups)]
#[diesel(primary_key(collections_uuid, groups_uuid))] #[diesel(primary_key(collections_uuid, groups_uuid))]
pub struct CollectionGroup { pub struct CollectionGroup {
pub collections_uuid: String, pub collections_uuid: CollectionId,
pub groups_uuid: String, pub groups_uuid: GroupId,
pub read_only: bool, pub read_only: bool,
pub hide_passwords: bool, pub hide_passwords: bool,
} }
@ -33,18 +35,23 @@ db_object! {
#[diesel(table_name = groups_users)] #[diesel(table_name = groups_users)]
#[diesel(primary_key(groups_uuid, users_organizations_uuid))] #[diesel(primary_key(groups_uuid, users_organizations_uuid))]
pub struct GroupUser { pub struct GroupUser {
pub groups_uuid: String, pub groups_uuid: GroupId,
pub users_organizations_uuid: String pub users_organizations_uuid: MembershipId
} }
} }
/// Local methods /// Local methods
impl Group { impl Group {
pub fn new(organizations_uuid: String, name: String, access_all: bool, external_id: Option<String>) -> Self { pub fn new(
organizations_uuid: OrganizationId,
name: String,
access_all: bool,
external_id: Option<String>,
) -> Self {
let now = Utc::now().naive_utc(); let now = Utc::now().naive_utc();
let mut new_model = Self { let mut new_model = Self {
uuid: crate::util::get_uuid(), uuid: GroupId(crate::util::get_uuid()),
organizations_uuid, organizations_uuid,
name, name,
access_all, access_all,
@ -74,6 +81,9 @@ impl Group {
} }
pub async fn to_json_details(&self, conn: &mut DbConn) -> Value { pub async fn to_json_details(&self, conn: &mut DbConn) -> Value {
// If both read_only and hide_passwords are false, then manage should be true.
// An entry can't combine manage with read_only, or manage with hide_passwords,
// and it can't have all three flags set to false.
let collections_groups: Vec<Value> = CollectionGroup::find_by_group(&self.uuid, conn) let collections_groups: Vec<Value> = CollectionGroup::find_by_group(&self.uuid, conn)
.await .await
.iter() .iter()
@ -82,7 +92,7 @@ impl Group {
"id": entry.collections_uuid, "id": entry.collections_uuid,
"readOnly": entry.read_only, "readOnly": entry.read_only,
"hidePasswords": entry.hide_passwords, "hidePasswords": entry.hide_passwords,
"manage": false "manage": !entry.read_only && !entry.hide_passwords,
}) })
}) })
.collect(); .collect();
@ -108,7 +118,7 @@ impl Group {
} }
impl CollectionGroup { impl CollectionGroup {
pub fn new(collections_uuid: String, groups_uuid: String, read_only: bool, hide_passwords: bool) -> Self { pub fn new(collections_uuid: CollectionId, groups_uuid: GroupId, read_only: bool, hide_passwords: bool) -> Self {
Self { Self {
collections_uuid, collections_uuid,
groups_uuid, groups_uuid,
@ -116,10 +126,22 @@ impl CollectionGroup {
hide_passwords, hide_passwords,
} }
} }
pub fn to_json_details_for_group(&self) -> Value {
// If both read_only and hide_passwords are false, then manage should be true.
// An entry can't combine manage with read_only, or manage with hide_passwords,
// and it can't have all three flags set to false.
json!({
"id": self.groups_uuid,
"readOnly": self.read_only,
"hidePasswords": self.hide_passwords,
"manage": !self.read_only && !self.hide_passwords,
})
}
} }
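// Editor's truth-table sketch of the group "manage" rule in to_json_details_for_group above:
// a group assignment reports manage only when it is neither read-only nor hiding passwords,
// matching the comment that an all-false entry is treated as full (manage) access.
fn manage(read_only: bool, hide_passwords: bool) -> bool {
    !read_only && !hide_passwords
}

fn main() {
    assert!(manage(false, false)); // full access  -> manage
    assert!(!manage(true, false)); // read-only    -> no manage
    assert!(!manage(false, true)); // hidden pw    -> no manage
    assert!(!manage(true, true));
}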
impl GroupUser { impl GroupUser {
pub fn new(groups_uuid: String, users_organizations_uuid: String) -> Self { pub fn new(groups_uuid: GroupId, users_organizations_uuid: MembershipId) -> Self {
Self { Self {
groups_uuid, groups_uuid,
users_organizations_uuid, users_organizations_uuid,
@ -163,27 +185,27 @@ impl Group {
} }
} }
pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult {
for group in Self::find_by_organization(org_uuid, conn).await { for group in Self::find_by_organization(org_uuid, conn).await {
group.delete(conn).await?; group.delete(conn).await?;
} }
Ok(()) Ok(())
} }
pub async fn find_by_organization(organizations_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
groups::table groups::table
.filter(groups::organizations_uuid.eq(organizations_uuid)) .filter(groups::organizations_uuid.eq(org_uuid))
.load::<GroupDb>(conn) .load::<GroupDb>(conn)
.expect("Error loading groups") .expect("Error loading groups")
.from_db() .from_db()
}} }}
} }
pub async fn count_by_org(organizations_uuid: &str, conn: &mut DbConn) -> i64 { pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 {
db_run! { conn: { db_run! { conn: {
groups::table groups::table
.filter(groups::organizations_uuid.eq(organizations_uuid)) .filter(groups::organizations_uuid.eq(org_uuid))
.count() .count()
.first::<i64>(conn) .first::<i64>(conn)
.ok() .ok()
@ -191,17 +213,22 @@ impl Group {
}} }}
} }
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_uuid_and_org(uuid: &GroupId, org_uuid: &OrganizationId, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
groups::table groups::table
.filter(groups::uuid.eq(uuid)) .filter(groups::uuid.eq(uuid))
.filter(groups::organizations_uuid.eq(org_uuid))
.first::<GroupDb>(conn) .first::<GroupDb>(conn)
.ok() .ok()
.from_db() .from_db()
}} }}
} }
pub async fn find_by_external_id_and_org(external_id: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_external_id_and_org(
external_id: &str,
org_uuid: &OrganizationId,
conn: &mut DbConn,
) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
groups::table groups::table
.filter(groups::external_id.eq(external_id)) .filter(groups::external_id.eq(external_id))
@ -212,7 +239,7 @@ impl Group {
}} }}
} }
//Returns all organizations the user has full access to //Returns all organizations the user has full access to
pub async fn gather_user_organizations_full_access(user_uuid: &str, conn: &mut DbConn) -> Vec<String> { pub async fn get_orgs_by_user_with_full_access(user_uuid: &UserId, conn: &mut DbConn) -> Vec<OrganizationId> {
db_run! { conn: { db_run! { conn: {
groups_users::table groups_users::table
.inner_join(users_organizations::table.on( .inner_join(users_organizations::table.on(
@ -225,12 +252,12 @@ impl Group {
.filter(groups::access_all.eq(true)) .filter(groups::access_all.eq(true))
.select(groups::organizations_uuid) .select(groups::organizations_uuid)
.distinct() .distinct()
.load::<String>(conn) .load::<OrganizationId>(conn)
.expect("Error loading organization group full access information for user") .expect("Error loading organization group full access information for user")
}} }}
} }
pub async fn is_in_full_access_group(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> bool { pub async fn is_in_full_access_group(user_uuid: &UserId, org_uuid: &OrganizationId, conn: &mut DbConn) -> bool {
db_run! { conn: { db_run! { conn: {
groups::table groups::table
.inner_join(groups_users::table.on( .inner_join(groups_users::table.on(
@ -259,13 +286,13 @@ impl Group {
}} }}
} }
pub async fn update_revision(uuid: &str, conn: &mut DbConn) { pub async fn update_revision(uuid: &GroupId, conn: &mut DbConn) {
if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await { if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await {
warn!("Failed to update revision for {}: {:#?}", uuid, e); warn!("Failed to update revision for {}: {:#?}", uuid, e);
} }
} }
async fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult { async fn _update_revision(uuid: &GroupId, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult {
db_run! {conn: { db_run! {conn: {
crate::util::retry(|| { crate::util::retry(|| {
diesel::update(groups::table.filter(groups::uuid.eq(uuid))) diesel::update(groups::table.filter(groups::uuid.eq(uuid)))
@ -333,7 +360,7 @@ impl CollectionGroup {
} }
} }
pub async fn find_by_group(group_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
collections_groups::table collections_groups::table
.filter(collections_groups::groups_uuid.eq(group_uuid)) .filter(collections_groups::groups_uuid.eq(group_uuid))
@ -343,7 +370,7 @@ impl CollectionGroup {
}} }}
} }
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
collections_groups::table collections_groups::table
.inner_join(groups_users::table.on( .inner_join(groups_users::table.on(
@ -360,7 +387,7 @@ impl CollectionGroup {
}} }}
} }
pub async fn find_by_collection(collection_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
collections_groups::table collections_groups::table
.filter(collections_groups::collections_uuid.eq(collection_uuid)) .filter(collections_groups::collections_uuid.eq(collection_uuid))
@ -386,7 +413,7 @@ impl CollectionGroup {
}} }}
} }
pub async fn delete_all_by_group(group_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(group_uuid, conn).await; let group_users = GroupUser::find_by_group(group_uuid, conn).await;
for group_user in group_users { for group_user in group_users {
group_user.update_user_revision(conn).await; group_user.update_user_revision(conn).await;
@ -400,7 +427,7 @@ impl CollectionGroup {
}} }}
} }
pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult {
let collection_assigned_to_groups = CollectionGroup::find_by_collection(collection_uuid, conn).await; let collection_assigned_to_groups = CollectionGroup::find_by_collection(collection_uuid, conn).await;
for collection_assigned_to_group in collection_assigned_to_groups { for collection_assigned_to_group in collection_assigned_to_groups {
let group_users = GroupUser::find_by_group(&collection_assigned_to_group.groups_uuid, conn).await; let group_users = GroupUser::find_by_group(&collection_assigned_to_group.groups_uuid, conn).await;
@ -465,7 +492,7 @@ impl GroupUser {
} }
} }
pub async fn find_by_group(group_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
groups_users::table groups_users::table
.filter(groups_users::groups_uuid.eq(group_uuid)) .filter(groups_users::groups_uuid.eq(group_uuid))
@ -475,10 +502,10 @@ impl GroupUser {
}} }}
} }
pub async fn find_by_user(users_organizations_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_member(member_uuid: &MembershipId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
groups_users::table groups_users::table
.filter(groups_users::users_organizations_uuid.eq(users_organizations_uuid)) .filter(groups_users::users_organizations_uuid.eq(member_uuid))
.load::<GroupUserDb>(conn) .load::<GroupUserDb>(conn)
.expect("Error loading groups for user") .expect("Error loading groups for user")
.from_db() .from_db()
@ -486,8 +513,8 @@ impl GroupUser {
} }
pub async fn has_access_to_collection_by_member( pub async fn has_access_to_collection_by_member(
collection_uuid: &str, collection_uuid: &CollectionId,
member_uuid: &str, member_uuid: &MembershipId,
conn: &mut DbConn, conn: &mut DbConn,
) -> bool { ) -> bool {
db_run! { conn: { db_run! { conn: {
@ -503,7 +530,11 @@ impl GroupUser {
}} }}
} }
pub async fn has_full_access_by_member(org_uuid: &str, member_uuid: &str, conn: &mut DbConn) -> bool { pub async fn has_full_access_by_member(
org_uuid: &OrganizationId,
member_uuid: &MembershipId,
conn: &mut DbConn,
) -> bool {
db_run! { conn: { db_run! { conn: {
groups_users::table groups_users::table
.inner_join(groups::table.on( .inner_join(groups::table.on(
@ -519,32 +550,32 @@ impl GroupUser {
} }
pub async fn update_user_revision(&self, conn: &mut DbConn) { pub async fn update_user_revision(&self, conn: &mut DbConn) {
match UserOrganization::find_by_uuid(&self.users_organizations_uuid, conn).await { match Membership::find_by_uuid(&self.users_organizations_uuid, conn).await {
Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await, Some(member) => User::update_uuid_revision(&member.user_uuid, conn).await,
None => warn!("User could not be found!"), None => warn!("Member could not be found!"),
} }
} }
pub async fn delete_by_group_id_and_user_id( pub async fn delete_by_group_and_member(
group_uuid: &str, group_uuid: &GroupId,
users_organizations_uuid: &str, member_uuid: &MembershipId,
conn: &mut DbConn, conn: &mut DbConn,
) -> EmptyResult { ) -> EmptyResult {
match UserOrganization::find_by_uuid(users_organizations_uuid, conn).await { match Membership::find_by_uuid(member_uuid, conn).await {
Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await, Some(member) => User::update_uuid_revision(&member.user_uuid, conn).await,
None => warn!("User could not be found!"), None => warn!("Member could not be found!"),
}; };
db_run! { conn: { db_run! { conn: {
diesel::delete(groups_users::table) diesel::delete(groups_users::table)
.filter(groups_users::groups_uuid.eq(group_uuid)) .filter(groups_users::groups_uuid.eq(group_uuid))
.filter(groups_users::users_organizations_uuid.eq(users_organizations_uuid)) .filter(groups_users::users_organizations_uuid.eq(member_uuid))
.execute(conn) .execute(conn)
.map_res("Error deleting group users") .map_res("Error deleting group users")
}} }}
} }
pub async fn delete_all_by_group(group_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> EmptyResult {
let group_users = GroupUser::find_by_group(group_uuid, conn).await; let group_users = GroupUser::find_by_group(group_uuid, conn).await;
for group_user in group_users { for group_user in group_users {
group_user.update_user_revision(conn).await; group_user.update_user_revision(conn).await;
@ -558,17 +589,35 @@ impl GroupUser {
}} }}
} }
pub async fn delete_all_by_user(users_organizations_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_member(member_uuid: &MembershipId, conn: &mut DbConn) -> EmptyResult {
match UserOrganization::find_by_uuid(users_organizations_uuid, conn).await { match Membership::find_by_uuid(member_uuid, conn).await {
Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await, Some(member) => User::update_uuid_revision(&member.user_uuid, conn).await,
None => warn!("User could not be found!"), None => warn!("Member could not be found!"),
} }
db_run! { conn: { db_run! { conn: {
diesel::delete(groups_users::table) diesel::delete(groups_users::table)
.filter(groups_users::users_organizations_uuid.eq(users_organizations_uuid)) .filter(groups_users::users_organizations_uuid.eq(member_uuid))
.execute(conn) .execute(conn)
.map_res("Error deleting user groups") .map_res("Error deleting user groups")
}} }}
} }
} }
#[derive(
Clone,
Debug,
AsRef,
Deref,
DieselNewType,
Display,
From,
FromForm,
Hash,
PartialEq,
Eq,
Serialize,
Deserialize,
UuidFromParam,
)]
pub struct GroupId(String);
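// Editor's sketch of the ordering used by the GroupUser delete helpers above: affected users
// get their revision bumped *before* the group rows are removed, so their clients re-sync on
// the next request. The revision map and the delete step are stand-ins for the real User
// table update and Diesel delete.
use std::collections::HashMap;

fn delete_group_memberships(
    memberships: &mut Vec<(String, String)>, // (group_uuid, user_uuid) rows
    revisions: &mut HashMap<String, u64>,    // user_uuid -> revision counter
    group_uuid: &str,
) {
    // 1. Bump the revision of every user that is about to lose the membership.
    for (_, user) in memberships.iter().filter(|(g, _)| g == group_uuid) {
        *revisions.entry(user.clone()).or_insert(0) += 1;
    }
    // 2. Then actually delete the rows.
    memberships.retain(|(g, _)| g != group_uuid);
}

fn main() {
    let mut memberships = vec![("g1".to_string(), "alice".to_string())];
    let mut revisions = HashMap::new();
    delete_group_memberships(&mut memberships, &mut revisions, "g1");
    assert!(memberships.is_empty());
    assert_eq!(revisions.get("alice"), Some(&1)); // alice's client will re-sync
}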

30 src/db/models/mod.rs

@ -16,20 +16,26 @@ mod two_factor_duo_context;
mod two_factor_incomplete; mod two_factor_incomplete;
mod user; mod user;
pub use self::attachment::Attachment; pub use self::attachment::{Attachment, AttachmentId};
pub use self::auth_request::AuthRequest; pub use self::auth_request::{AuthRequest, AuthRequestId};
pub use self::cipher::{Cipher, RepromptType}; pub use self::cipher::{Cipher, CipherId, RepromptType};
pub use self::collection::{Collection, CollectionCipher, CollectionUser}; pub use self::collection::{Collection, CollectionCipher, CollectionId, CollectionUser};
pub use self::device::{Device, DeviceType}; pub use self::device::{Device, DeviceId, DeviceType};
pub use self::emergency_access::{EmergencyAccess, EmergencyAccessStatus, EmergencyAccessType}; pub use self::emergency_access::{EmergencyAccess, EmergencyAccessId, EmergencyAccessStatus, EmergencyAccessType};
pub use self::event::{Event, EventType}; pub use self::event::{Event, EventType};
pub use self::favorite::Favorite; pub use self::favorite::Favorite;
pub use self::folder::{Folder, FolderCipher}; pub use self::folder::{Folder, FolderCipher, FolderId};
pub use self::group::{CollectionGroup, Group, GroupUser}; pub use self::group::{CollectionGroup, Group, GroupId, GroupUser};
pub use self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyType}; pub use self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyId, OrgPolicyType};
pub use self::organization::{Organization, OrganizationApiKey, UserOrgStatus, UserOrgType, UserOrganization}; pub use self::organization::{
pub use self::send::{Send, SendType}; Membership, MembershipId, MembershipStatus, MembershipType, OrgApiKeyId, Organization, OrganizationApiKey,
OrganizationId,
};
pub use self::send::{
id::{SendFileId, SendId},
Send, SendType,
};
pub use self::two_factor::{TwoFactor, TwoFactorType}; pub use self::two_factor::{TwoFactor, TwoFactorType};
pub use self::two_factor_duo_context::TwoFactorDuoContext; pub use self::two_factor_duo_context::TwoFactorDuoContext;
pub use self::two_factor_incomplete::TwoFactorIncomplete; pub use self::two_factor_incomplete::TwoFactorIncomplete;
pub use self::user::{Invitation, User, UserKdfType, UserStampException}; pub use self::user::{Invitation, User, UserId, UserKdfType, UserStampException};
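One reason swapping String for these Id newtypes is low-risk for the JSON API: serde serializes a one-field tuple struct transparently, so an Id still renders as a bare string in responses and templates (which is why usages such as "user_id": user_id in src/mail.rs further down keep working). A small self-contained sketch, assuming only serde and serde_json:

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct UserId(String);

fn main() {
    let id = UserId("9d944ff1-7f77-44a4-9d24-cfa3c7d1a1c3".to_string());

    // Newtype structs serialize as their inner value: the JSON is just the string.
    let json = serde_json::to_string(&id).unwrap();
    assert_eq!(json, "\"9d944ff1-7f77-44a4-9d24-cfa3c7d1a1c3\"");

    // Deserialization accepts the same bare string form.
    let back: UserId = serde_json::from_str(&json).unwrap();
    assert_eq!(back, id);
}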

80  src/db/models/org_policy.rs

@ -1,3 +1,4 @@
use derive_more::{AsRef, From};
use serde::Deserialize; use serde::Deserialize;
use serde_json::Value; use serde_json::Value;
@ -5,15 +6,15 @@ use crate::api::EmptyResult;
use crate::db::DbConn; use crate::db::DbConn;
use crate::error::MapResult; use crate::error::MapResult;
use super::{TwoFactor, UserOrgStatus, UserOrgType, UserOrganization}; use super::{Membership, MembershipId, MembershipStatus, MembershipType, OrganizationId, TwoFactor, UserId};
db_object! { db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)] #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = org_policies)] #[diesel(table_name = org_policies)]
#[diesel(primary_key(uuid))] #[diesel(primary_key(uuid))]
pub struct OrgPolicy { pub struct OrgPolicy {
pub uuid: String, pub uuid: OrgPolicyId,
pub org_uuid: String, pub org_uuid: OrganizationId,
pub atype: i32, pub atype: i32,
pub enabled: bool, pub enabled: bool,
pub data: String, pub data: String,
@ -62,9 +63,9 @@ pub enum OrgPolicyErr {
/// Local methods /// Local methods
impl OrgPolicy { impl OrgPolicy {
pub fn new(org_uuid: String, atype: OrgPolicyType, data: String) -> Self { pub fn new(org_uuid: OrganizationId, atype: OrgPolicyType, data: String) -> Self {
Self { Self {
uuid: crate::util::get_uuid(), uuid: OrgPolicyId(crate::util::get_uuid()),
org_uuid, org_uuid,
atype: atype as i32, atype: atype as i32,
enabled: false, enabled: false,
@ -142,17 +143,7 @@ impl OrgPolicy {
}} }}
} }
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
org_policies::table
.filter(org_policies::uuid.eq(uuid))
.first::<OrgPolicyDb>(conn)
.ok()
.from_db()
}}
}
pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
org_policies::table org_policies::table
.filter(org_policies::org_uuid.eq(org_uuid)) .filter(org_policies::org_uuid.eq(org_uuid))
@ -162,7 +153,7 @@ impl OrgPolicy {
}} }}
} }
pub async fn find_confirmed_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_confirmed_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
org_policies::table org_policies::table
.inner_join( .inner_join(
@ -171,7 +162,7 @@ impl OrgPolicy {
.and(users_organizations::user_uuid.eq(user_uuid))) .and(users_organizations::user_uuid.eq(user_uuid)))
) )
.filter( .filter(
users_organizations::status.eq(UserOrgStatus::Confirmed as i32) users_organizations::status.eq(MembershipStatus::Confirmed as i32)
) )
.select(org_policies::all_columns) .select(org_policies::all_columns)
.load::<OrgPolicyDb>(conn) .load::<OrgPolicyDb>(conn)
@ -180,7 +171,11 @@ impl OrgPolicy {
}} }}
} }
pub async fn find_by_org_and_type(org_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_org_and_type(
org_uuid: &OrganizationId,
policy_type: OrgPolicyType,
conn: &mut DbConn,
) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
org_policies::table org_policies::table
.filter(org_policies::org_uuid.eq(org_uuid)) .filter(org_policies::org_uuid.eq(org_uuid))
@ -191,7 +186,7 @@ impl OrgPolicy {
}} }}
} }
pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: { db_run! { conn: {
diesel::delete(org_policies::table.filter(org_policies::org_uuid.eq(org_uuid))) diesel::delete(org_policies::table.filter(org_policies::org_uuid.eq(org_uuid)))
.execute(conn) .execute(conn)
@ -200,7 +195,7 @@ impl OrgPolicy {
} }
pub async fn find_accepted_and_confirmed_by_user_and_active_policy( pub async fn find_accepted_and_confirmed_by_user_and_active_policy(
user_uuid: &str, user_uuid: &UserId,
policy_type: OrgPolicyType, policy_type: OrgPolicyType,
conn: &mut DbConn, conn: &mut DbConn,
) -> Vec<Self> { ) -> Vec<Self> {
@ -212,10 +207,10 @@ impl OrgPolicy {
.and(users_organizations::user_uuid.eq(user_uuid))) .and(users_organizations::user_uuid.eq(user_uuid)))
) )
.filter( .filter(
users_organizations::status.eq(UserOrgStatus::Accepted as i32) users_organizations::status.eq(MembershipStatus::Accepted as i32)
) )
.or_filter( .or_filter(
users_organizations::status.eq(UserOrgStatus::Confirmed as i32) users_organizations::status.eq(MembershipStatus::Confirmed as i32)
) )
.filter(org_policies::atype.eq(policy_type as i32)) .filter(org_policies::atype.eq(policy_type as i32))
.filter(org_policies::enabled.eq(true)) .filter(org_policies::enabled.eq(true))
@ -227,7 +222,7 @@ impl OrgPolicy {
} }
pub async fn find_confirmed_by_user_and_active_policy( pub async fn find_confirmed_by_user_and_active_policy(
user_uuid: &str, user_uuid: &UserId,
policy_type: OrgPolicyType, policy_type: OrgPolicyType,
conn: &mut DbConn, conn: &mut DbConn,
) -> Vec<Self> { ) -> Vec<Self> {
@ -239,7 +234,7 @@ impl OrgPolicy {
.and(users_organizations::user_uuid.eq(user_uuid))) .and(users_organizations::user_uuid.eq(user_uuid)))
) )
.filter( .filter(
users_organizations::status.eq(UserOrgStatus::Confirmed as i32) users_organizations::status.eq(MembershipStatus::Confirmed as i32)
) )
.filter(org_policies::atype.eq(policy_type as i32)) .filter(org_policies::atype.eq(policy_type as i32))
.filter(org_policies::enabled.eq(true)) .filter(org_policies::enabled.eq(true))
@ -254,21 +249,21 @@ impl OrgPolicy {
/// and the user is not an owner or admin of that org. This is only useful for checking /// and the user is not an owner or admin of that org. This is only useful for checking
/// applicability of policy types that have these particular semantics. /// applicability of policy types that have these particular semantics.
pub async fn is_applicable_to_user( pub async fn is_applicable_to_user(
user_uuid: &str, user_uuid: &UserId,
policy_type: OrgPolicyType, policy_type: OrgPolicyType,
exclude_org_uuid: Option<&str>, exclude_org_uuid: Option<&OrganizationId>,
conn: &mut DbConn, conn: &mut DbConn,
) -> bool { ) -> bool {
for policy in for policy in
OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy(user_uuid, policy_type, conn).await OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy(user_uuid, policy_type, conn).await
{ {
// Check if we need to skip this organization. // Check if we need to skip this organization.
if exclude_org_uuid.is_some() && exclude_org_uuid.unwrap() == policy.org_uuid { if exclude_org_uuid.is_some() && *exclude_org_uuid.unwrap() == policy.org_uuid {
continue; continue;
} }
if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await { if let Some(user) = Membership::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
if user.atype < UserOrgType::Admin { if user.atype < MembershipType::Admin {
return true; return true;
} }
} }
@ -277,8 +272,8 @@ impl OrgPolicy {
} }
pub async fn is_user_allowed( pub async fn is_user_allowed(
user_uuid: &str, user_uuid: &UserId,
org_uuid: &str, org_uuid: &OrganizationId,
exclude_current_org: bool, exclude_current_org: bool,
conn: &mut DbConn, conn: &mut DbConn,
) -> OrgPolicyResult { ) -> OrgPolicyResult {
@ -306,7 +301,7 @@ impl OrgPolicy {
Ok(()) Ok(())
} }
pub async fn org_is_reset_password_auto_enroll(org_uuid: &str, conn: &mut DbConn) -> bool { pub async fn org_is_reset_password_auto_enroll(org_uuid: &OrganizationId, conn: &mut DbConn) -> bool {
match OrgPolicy::find_by_org_and_type(org_uuid, OrgPolicyType::ResetPassword, conn).await { match OrgPolicy::find_by_org_and_type(org_uuid, OrgPolicyType::ResetPassword, conn).await {
Some(policy) => match serde_json::from_str::<ResetPasswordDataModel>(&policy.data) { Some(policy) => match serde_json::from_str::<ResetPasswordDataModel>(&policy.data) {
Ok(opts) => { Ok(opts) => {
@ -322,12 +317,12 @@ impl OrgPolicy {
/// Returns true if the user belongs to an org that has enabled the `DisableHideEmail` /// Returns true if the user belongs to an org that has enabled the `DisableHideEmail`
/// option of the `Send Options` policy, and the user is not an owner or admin of that org. /// option of the `Send Options` policy, and the user is not an owner or admin of that org.
pub async fn is_hide_email_disabled(user_uuid: &str, conn: &mut DbConn) -> bool { pub async fn is_hide_email_disabled(user_uuid: &UserId, conn: &mut DbConn) -> bool {
for policy in for policy in
OrgPolicy::find_confirmed_by_user_and_active_policy(user_uuid, OrgPolicyType::SendOptions, conn).await OrgPolicy::find_confirmed_by_user_and_active_policy(user_uuid, OrgPolicyType::SendOptions, conn).await
{ {
if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await { if let Some(user) = Membership::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
if user.atype < UserOrgType::Admin { if user.atype < MembershipType::Admin {
match serde_json::from_str::<SendOptionsPolicyData>(&policy.data) { match serde_json::from_str::<SendOptionsPolicyData>(&policy.data) {
Ok(opts) => { Ok(opts) => {
if opts.disable_hide_email { if opts.disable_hide_email {
@ -342,12 +337,19 @@ impl OrgPolicy {
false false
} }
pub async fn is_enabled_for_member(org_user_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> bool { pub async fn is_enabled_for_member(
if let Some(membership) = UserOrganization::find_by_uuid(org_user_uuid, conn).await { member_uuid: &MembershipId,
if let Some(policy) = OrgPolicy::find_by_org_and_type(&membership.org_uuid, policy_type, conn).await { policy_type: OrgPolicyType,
conn: &mut DbConn,
) -> bool {
if let Some(member) = Membership::find_by_uuid(member_uuid, conn).await {
if let Some(policy) = OrgPolicy::find_by_org_and_type(&member.org_uuid, policy_type, conn).await {
return policy.enabled; return policy.enabled;
} }
} }
false false
} }
} }
#[derive(Clone, Debug, AsRef, DieselNewType, From, FromForm, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct OrgPolicyId(String);
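A side note on the is_applicable_to_user change above: it keeps the original exclude check, only adjusting the deref for the new Option<&OrganizationId> parameter (exclude_org_uuid.is_some() && *exclude_org_uuid.unwrap() == policy.org_uuid). The same comparison can be written without unwrap(); a self-contained sketch with a stand-in newtype, not a change proposed by this diff:

#[derive(Debug, PartialEq, Eq)]
struct OrganizationId(String);

fn main() {
    let policy_org = OrganizationId("org-a".to_string());
    let exclude: Option<&OrganizationId> = Some(&policy_org);

    // The shape used in the diff: explicit is_some() plus unwrap() plus a deref.
    let skip_unwrap_style = exclude.is_some() && *exclude.unwrap() == policy_org;

    // Equivalent comparison of Option<&OrganizationId> values, no unwrap() needed.
    let skip_eq_style = exclude == Some(&policy_org);

    assert_eq!(skip_unwrap_style, skip_eq_style);
    println!("skip this organization: {skip_eq_style}");
}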

476  src/db/models/organization.rs

File diff suppressed because it is too large
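The organizations.rs diff is suppressed here because of its size, but the call sites visible elsewhere in this change set outline the rename: UserOrganization becomes Membership, UserOrgType becomes MembershipType, UserOrgStatus becomes MembershipStatus, and users_organizations_uuid parameters become member_uuid: &MembershipId. A compile-only stub of that inferred surface, with stand-in types and placeholder bodies (reconstructed from usage in this diff, not copied from the suppressed file):

#![allow(dead_code)]

// Stand-ins for the real database handle and ID types.
struct DbConn;
struct UserId(String);
struct OrganizationId(String);
struct MembershipId(String);

enum MembershipType {
    Owner,
    Admin,
    // further variants are not visible in this part of the diff
}

struct Membership {
    user_uuid: UserId,
    org_uuid: OrganizationId,
    atype: i32,
}

impl Membership {
    // Signatures as used at call sites in this diff; bodies are placeholders.
    async fn find_by_uuid(_member_uuid: &MembershipId, _conn: &mut DbConn) -> Option<Self> {
        None
    }
    async fn find_by_user_and_org(_user_uuid: &UserId, _org_uuid: &OrganizationId, _conn: &mut DbConn) -> Option<Self> {
        None
    }
    async fn find_confirmed_by_user(_user_uuid: &UserId, _conn: &mut DbConn) -> Vec<Self> {
        Vec::new()
    }
    async fn count_confirmed_by_org_and_type(_org_uuid: &OrganizationId, _atype: MembershipType, _conn: &mut DbConn) -> i64 {
        0
    }
}

fn main() {}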

87  src/db/models/send.rs

@ -3,7 +3,8 @@ use serde_json::Value;
use crate::util::LowerCase; use crate::util::LowerCase;
use super::User; use super::{OrganizationId, User, UserId};
use id::SendId;
db_object! { db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)] #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@ -11,11 +12,10 @@ db_object! {
#[diesel(treat_none_as_null = true)] #[diesel(treat_none_as_null = true)]
#[diesel(primary_key(uuid))] #[diesel(primary_key(uuid))]
pub struct Send { pub struct Send {
pub uuid: String, pub uuid: SendId,
pub user_uuid: Option<String>,
pub organization_uuid: Option<String>,
pub user_uuid: Option<UserId>,
pub organization_uuid: Option<OrganizationId>,
pub name: String, pub name: String,
pub notes: Option<String>, pub notes: Option<String>,
@ -51,7 +51,7 @@ impl Send {
let now = Utc::now().naive_utc(); let now = Utc::now().naive_utc();
Self { Self {
uuid: crate::util::get_uuid(), uuid: SendId::from(crate::util::get_uuid()),
user_uuid: None, user_uuid: None,
organization_uuid: None, organization_uuid: None,
@ -243,7 +243,7 @@ impl Send {
} }
} }
pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<String> { pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<UserId> {
let mut user_uuids = Vec::new(); let mut user_uuids = Vec::new();
match &self.user_uuid { match &self.user_uuid {
Some(user_uuid) => { Some(user_uuid) => {
@ -257,7 +257,7 @@ impl Send {
user_uuids user_uuids
} }
pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
for send in Self::find_by_user(user_uuid, conn).await { for send in Self::find_by_user(user_uuid, conn).await {
send.delete(conn).await?; send.delete(conn).await?;
} }
@ -268,30 +268,40 @@ impl Send {
use data_encoding::BASE64URL_NOPAD; use data_encoding::BASE64URL_NOPAD;
use uuid::Uuid; use uuid::Uuid;
let uuid_vec = match BASE64URL_NOPAD.decode(access_id.as_bytes()) { let Ok(uuid_vec) = BASE64URL_NOPAD.decode(access_id.as_bytes()) else {
Ok(v) => v, return None;
Err(_) => return None,
}; };
let uuid = match Uuid::from_slice(&uuid_vec) { let uuid = match Uuid::from_slice(&uuid_vec) {
Ok(u) => u.to_string(), Ok(u) => SendId::from(u.to_string()),
Err(_) => return None, Err(_) => return None,
}; };
Self::find_by_uuid(&uuid, conn).await Self::find_by_uuid(&uuid, conn).await
} }
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_uuid(uuid: &SendId, conn: &mut DbConn) -> Option<Self> {
db_run! {conn: {
sends::table
.filter(sends::uuid.eq(uuid))
.first::<SendDb>(conn)
.ok()
.from_db()
}}
}
pub async fn find_by_uuid_and_user(uuid: &SendId, user_uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
db_run! {conn: { db_run! {conn: {
sends::table sends::table
.filter(sends::uuid.eq(uuid)) .filter(sends::uuid.eq(uuid))
.filter(sends::user_uuid.eq(user_uuid))
.first::<SendDb>(conn) .first::<SendDb>(conn)
.ok() .ok()
.from_db() .from_db()
}} }}
} }
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
db_run! {conn: { db_run! {conn: {
sends::table sends::table
.filter(sends::user_uuid.eq(user_uuid)) .filter(sends::user_uuid.eq(user_uuid))
@ -299,7 +309,7 @@ impl Send {
}} }}
} }
pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> Option<i64> { pub async fn size_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Option<i64> {
let sends = Self::find_by_user(user_uuid, conn).await; let sends = Self::find_by_user(user_uuid, conn).await;
#[derive(serde::Deserialize)] #[derive(serde::Deserialize)]
@ -322,7 +332,7 @@ impl Send {
Some(total) Some(total)
} }
pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec<Self> {
db_run! {conn: { db_run! {conn: {
sends::table sends::table
.filter(sends::organization_uuid.eq(org_uuid)) .filter(sends::organization_uuid.eq(org_uuid))
@ -339,3 +349,48 @@ impl Send {
}} }}
} }
} }
// separate namespace to avoid name collision with std::marker::Send
pub mod id {
use derive_more::{AsRef, Deref, Display, From};
use macros::{IdFromParam, UuidFromParam};
use std::marker::Send;
use std::path::Path;
#[derive(
Clone,
Debug,
AsRef,
Deref,
DieselNewType,
Display,
From,
FromForm,
Hash,
PartialEq,
Eq,
Serialize,
Deserialize,
UuidFromParam,
)]
pub struct SendId(String);
impl AsRef<Path> for SendId {
#[inline]
fn as_ref(&self) -> &Path {
Path::new(&self.0)
}
}
#[derive(
Clone, Debug, AsRef, Deref, Display, From, FromForm, Hash, PartialEq, Eq, Serialize, Deserialize, IdFromParam,
)]
pub struct SendFileId(String);
impl AsRef<Path> for SendFileId {
#[inline]
fn as_ref(&self) -> &Path {
Path::new(&self.0)
}
}
}
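Both SendId and SendFileId above implement AsRef<Path>, which lets callers build attachment paths directly from the typed IDs rather than from raw strings. A short sketch of why that is convenient (the directory layout used here is illustrative, not the actual data-folder layout):

use std::path::{Path, PathBuf};

struct SendId(String);
struct SendFileId(String);

impl AsRef<Path> for SendId {
    fn as_ref(&self) -> &Path {
        Path::new(&self.0)
    }
}

impl AsRef<Path> for SendFileId {
    fn as_ref(&self) -> &Path {
        Path::new(&self.0)
    }
}

fn send_file_path(base: &Path, send_id: &SendId, file_id: &SendFileId) -> PathBuf {
    // Path::join accepts anything that is AsRef<Path>, so the typed IDs can be
    // passed directly without converting them to &str first.
    base.join(send_id).join(file_id)
}

fn main() {
    let path = send_file_path(
        Path::new("data/sends"),
        &SendId("3fa85f64-5717-4562-b3fc-2c963f66afa6".into()),
        &SendFileId("attachment-key".into()),
    );
    println!("{}", path.display());
}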

18  src/db/models/two_factor.rs

@ -1,5 +1,6 @@
use serde_json::Value; use serde_json::Value;
use super::UserId;
use crate::{api::EmptyResult, db::DbConn, error::MapResult}; use crate::{api::EmptyResult, db::DbConn, error::MapResult};
db_object! { db_object! {
@ -7,8 +8,8 @@ db_object! {
#[diesel(table_name = twofactor)] #[diesel(table_name = twofactor)]
#[diesel(primary_key(uuid))] #[diesel(primary_key(uuid))]
pub struct TwoFactor { pub struct TwoFactor {
pub uuid: String, pub uuid: TwoFactorId,
pub user_uuid: String, pub user_uuid: UserId,
pub atype: i32, pub atype: i32,
pub enabled: bool, pub enabled: bool,
pub data: String, pub data: String,
@ -41,9 +42,9 @@ pub enum TwoFactorType {
/// Local methods /// Local methods
impl TwoFactor { impl TwoFactor {
pub fn new(user_uuid: String, atype: TwoFactorType, data: String) -> Self { pub fn new(user_uuid: UserId, atype: TwoFactorType, data: String) -> Self {
Self { Self {
uuid: crate::util::get_uuid(), uuid: TwoFactorId(crate::util::get_uuid()),
user_uuid, user_uuid,
atype: atype as i32, atype: atype as i32,
enabled: true, enabled: true,
@ -118,7 +119,7 @@ impl TwoFactor {
}} }}
} }
pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
twofactor::table twofactor::table
.filter(twofactor::user_uuid.eq(user_uuid)) .filter(twofactor::user_uuid.eq(user_uuid))
@ -129,7 +130,7 @@ impl TwoFactor {
}} }}
} }
pub async fn find_by_user_and_type(user_uuid: &str, atype: i32, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_user_and_type(user_uuid: &UserId, atype: i32, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
twofactor::table twofactor::table
.filter(twofactor::user_uuid.eq(user_uuid)) .filter(twofactor::user_uuid.eq(user_uuid))
@ -140,7 +141,7 @@ impl TwoFactor {
}} }}
} }
pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: { db_run! { conn: {
diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid))) diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid)))
.execute(conn) .execute(conn)
@ -217,3 +218,6 @@ impl TwoFactor {
Ok(()) Ok(())
} }
} }
#[derive(Clone, Debug, DieselNewType, FromForm, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct TwoFactorId(String);
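Two construction styles appear for these ID newtypes in the diff: OrgPolicyId(crate::util::get_uuid()) and TwoFactorId(crate::util::get_uuid()) build through the visible tuple field, while SendId::from(crate::util::get_uuid()) goes through the From<String> impl that SendId derives via derive_more (TwoFactorId does not derive From, hence the tuple form). A stand-alone sketch with an illustrative newtype, assuming the derive_more crate:

use derive_more::From;

#[derive(Debug, PartialEq, From)]
struct ExampleId(String);

fn main() {
    // Direct tuple-struct construction, usable wherever the field is visible.
    let a = ExampleId("c0ffee".to_string());

    // Construction through the derived From<String> impl.
    let b = ExampleId::from("c0ffee".to_string());

    // `.into()` also works once the target type is known.
    let c: ExampleId = "c0ffee".to_string().into();

    assert_eq!(a, b);
    assert_eq!(b, c);
    println!("{a:?}");
}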

35  src/db/models/two_factor_incomplete.rs

@ -1,17 +1,26 @@
use chrono::{NaiveDateTime, Utc}; use chrono::{NaiveDateTime, Utc};
use crate::{api::EmptyResult, auth::ClientIp, db::DbConn, error::MapResult, CONFIG}; use crate::{
api::EmptyResult,
auth::ClientIp,
db::{
models::{DeviceId, UserId},
DbConn,
},
error::MapResult,
CONFIG,
};
db_object! { db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)] #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = twofactor_incomplete)] #[diesel(table_name = twofactor_incomplete)]
#[diesel(primary_key(user_uuid, device_uuid))] #[diesel(primary_key(user_uuid, device_uuid))]
pub struct TwoFactorIncomplete { pub struct TwoFactorIncomplete {
pub user_uuid: String, pub user_uuid: UserId,
// This device UUID is simply what's claimed by the device. It doesn't // This device UUID is simply what's claimed by the device. It doesn't
// necessarily correspond to any UUID in the devices table, since a device // necessarily correspond to any UUID in the devices table, since a device
// must complete 2FA login before being added into the devices table. // must complete 2FA login before being added into the devices table.
pub device_uuid: String, pub device_uuid: DeviceId,
pub device_name: String, pub device_name: String,
pub device_type: i32, pub device_type: i32,
pub login_time: NaiveDateTime, pub login_time: NaiveDateTime,
@ -21,8 +30,8 @@ db_object! {
impl TwoFactorIncomplete { impl TwoFactorIncomplete {
pub async fn mark_incomplete( pub async fn mark_incomplete(
user_uuid: &str, user_uuid: &UserId,
device_uuid: &str, device_uuid: &DeviceId,
device_name: &str, device_name: &str,
device_type: i32, device_type: i32,
ip: &ClientIp, ip: &ClientIp,
@ -55,7 +64,7 @@ impl TwoFactorIncomplete {
}} }}
} }
pub async fn mark_complete(user_uuid: &str, device_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn mark_complete(user_uuid: &UserId, device_uuid: &DeviceId, conn: &mut DbConn) -> EmptyResult {
if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() { if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() {
return Ok(()); return Ok(());
} }
@ -63,7 +72,11 @@ impl TwoFactorIncomplete {
Self::delete_by_user_and_device(user_uuid, device_uuid, conn).await Self::delete_by_user_and_device(user_uuid, device_uuid, conn).await
} }
pub async fn find_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_user_and_device(
user_uuid: &UserId,
device_uuid: &DeviceId,
conn: &mut DbConn,
) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
twofactor_incomplete::table twofactor_incomplete::table
.filter(twofactor_incomplete::user_uuid.eq(user_uuid)) .filter(twofactor_incomplete::user_uuid.eq(user_uuid))
@ -88,7 +101,11 @@ impl TwoFactorIncomplete {
Self::delete_by_user_and_device(&self.user_uuid, &self.device_uuid, conn).await Self::delete_by_user_and_device(&self.user_uuid, &self.device_uuid, conn).await
} }
pub async fn delete_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_by_user_and_device(
user_uuid: &UserId,
device_uuid: &DeviceId,
conn: &mut DbConn,
) -> EmptyResult {
db_run! { conn: { db_run! { conn: {
diesel::delete(twofactor_incomplete::table diesel::delete(twofactor_incomplete::table
.filter(twofactor_incomplete::user_uuid.eq(user_uuid)) .filter(twofactor_incomplete::user_uuid.eq(user_uuid))
@ -98,7 +115,7 @@ impl TwoFactorIncomplete {
}} }}
} }
pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: { db_run! { conn: {
diesel::delete(twofactor_incomplete::table.filter(twofactor_incomplete::user_uuid.eq(user_uuid))) diesel::delete(twofactor_incomplete::table.filter(twofactor_incomplete::user_uuid.eq(user_uuid)))
.execute(conn) .execute(conn)

68  src/db/models/user.rs

@ -1,9 +1,19 @@
use crate::util::{format_date, get_uuid, retry};
use chrono::{NaiveDateTime, TimeDelta, Utc}; use chrono::{NaiveDateTime, TimeDelta, Utc};
use derive_more::{AsRef, Deref, Display, From};
use serde_json::Value; use serde_json::Value;
use crate::crypto; use super::{
use crate::CONFIG; Cipher, Device, EmergencyAccess, Favorite, Folder, Membership, MembershipType, TwoFactor, TwoFactorIncomplete,
};
use crate::{
api::EmptyResult,
crypto,
db::DbConn,
error::MapResult,
util::{format_date, get_uuid, retry},
CONFIG,
};
use macros::UuidFromParam;
db_object! { db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)] #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@ -11,7 +21,7 @@ db_object! {
#[diesel(treat_none_as_null = true)] #[diesel(treat_none_as_null = true)]
#[diesel(primary_key(uuid))] #[diesel(primary_key(uuid))]
pub struct User { pub struct User {
pub uuid: String, pub uuid: UserId,
pub enabled: bool, pub enabled: bool,
pub created_at: NaiveDateTime, pub created_at: NaiveDateTime,
pub updated_at: NaiveDateTime, pub updated_at: NaiveDateTime,
@ -91,7 +101,7 @@ impl User {
let email = email.to_lowercase(); let email = email.to_lowercase();
Self { Self {
uuid: get_uuid(), uuid: UserId(get_uuid()),
enabled: true, enabled: true,
created_at: now, created_at: now,
updated_at: now, updated_at: now,
@ -214,20 +224,11 @@ impl User {
} }
} }
use super::{
Cipher, Device, EmergencyAccess, Favorite, Folder, Send, TwoFactor, TwoFactorIncomplete, UserOrgType,
UserOrganization,
};
use crate::db::DbConn;
use crate::api::EmptyResult;
use crate::error::MapResult;
/// Database methods /// Database methods
impl User { impl User {
pub async fn to_json(&self, conn: &mut DbConn) -> Value { pub async fn to_json(&self, conn: &mut DbConn) -> Value {
let mut orgs_json = Vec::new(); let mut orgs_json = Vec::new();
for c in UserOrganization::find_confirmed_by_user(&self.uuid, conn).await { for c in Membership::find_confirmed_by_user(&self.uuid, conn).await {
orgs_json.push(c.to_json(conn).await); orgs_json.push(c.to_json(conn).await);
} }
@ -304,19 +305,18 @@ impl User {
} }
pub async fn delete(self, conn: &mut DbConn) -> EmptyResult { pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
for user_org in UserOrganization::find_confirmed_by_user(&self.uuid, conn).await { for member in Membership::find_confirmed_by_user(&self.uuid, conn).await {
if user_org.atype == UserOrgType::Owner if member.atype == MembershipType::Owner
&& UserOrganization::count_confirmed_by_org_and_type(&user_org.org_uuid, UserOrgType::Owner, conn).await && Membership::count_confirmed_by_org_and_type(&member.org_uuid, MembershipType::Owner, conn).await <= 1
<= 1
{ {
err!("Can't delete last owner") err!("Can't delete last owner")
} }
} }
Send::delete_all_by_user(&self.uuid, conn).await?; super::Send::delete_all_by_user(&self.uuid, conn).await?;
EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?; EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?;
EmergencyAccess::delete_all_by_grantee_email(&self.email, conn).await?; EmergencyAccess::delete_all_by_grantee_email(&self.email, conn).await?;
UserOrganization::delete_all_by_user(&self.uuid, conn).await?; Membership::delete_all_by_user(&self.uuid, conn).await?;
Cipher::delete_all_by_user(&self.uuid, conn).await?; Cipher::delete_all_by_user(&self.uuid, conn).await?;
Favorite::delete_all_by_user(&self.uuid, conn).await?; Favorite::delete_all_by_user(&self.uuid, conn).await?;
Folder::delete_all_by_user(&self.uuid, conn).await?; Folder::delete_all_by_user(&self.uuid, conn).await?;
@ -332,7 +332,7 @@ impl User {
}} }}
} }
pub async fn update_uuid_revision(uuid: &str, conn: &mut DbConn) { pub async fn update_uuid_revision(uuid: &UserId, conn: &mut DbConn) {
if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await { if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await {
warn!("Failed to update revision for {}: {:#?}", uuid, e); warn!("Failed to update revision for {}: {:#?}", uuid, e);
} }
@ -357,7 +357,7 @@ impl User {
Self::_update_revision(&self.uuid, &self.updated_at, conn).await Self::_update_revision(&self.uuid, &self.updated_at, conn).await
} }
async fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult { async fn _update_revision(uuid: &UserId, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult {
db_run! {conn: { db_run! {conn: {
retry(|| { retry(|| {
diesel::update(users::table.filter(users::uuid.eq(uuid))) diesel::update(users::table.filter(users::uuid.eq(uuid)))
@ -379,7 +379,7 @@ impl User {
}} }}
} }
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_uuid(uuid: &UserId, conn: &mut DbConn) -> Option<Self> {
db_run! {conn: { db_run! {conn: {
users::table.filter(users::uuid.eq(uuid)).first::<UserDb>(conn).ok().from_db() users::table.filter(users::uuid.eq(uuid)).first::<UserDb>(conn).ok().from_db()
}} }}
@ -458,3 +458,23 @@ impl Invitation {
} }
} }
} }
#[derive(
Clone,
Debug,
DieselNewType,
FromForm,
PartialEq,
Eq,
Hash,
Serialize,
Deserialize,
AsRef,
Deref,
Display,
From,
UuidFromParam,
)]
#[deref(forward)]
#[from(forward)]
pub struct UserId(String);
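UserId is the only ID here that adds #[deref(forward)] and #[from(forward)]. With derive_more those attributes forward to the inner String's own impls: Deref targets str rather than String, and From accepts anything that String itself can be built from, such as &str. A minimal sketch of the effect, assuming only the derive_more crate:

use derive_more::{Deref, From};

#[derive(Debug, Deref, From)]
#[deref(forward)]
#[from(forward)]
struct UserId(String);

fn print_id(id: &str) {
    println!("user: {id}");
}

fn main() {
    // from(forward): &str converts via String's From<&str>, no explicit to_string() needed.
    let id = UserId::from("3fa85f64-5717-4562-b3fc-2c963f66afa6");

    // deref(forward): the newtype derefs to str, so &UserId coerces where &str is expected.
    print_id(&id);

    println!("{id:?}");
}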

58  src/mail.rs

@ -17,7 +17,7 @@ use crate::{
encode_jwt, generate_delete_claims, generate_emergency_access_invite_claims, generate_invite_claims, encode_jwt, generate_delete_claims, generate_emergency_access_invite_claims, generate_invite_claims,
generate_verify_email_claims, generate_verify_email_claims,
}, },
db::models::{Device, DeviceType, User}, db::models::{Device, DeviceType, EmergencyAccessId, MembershipId, OrganizationId, User, UserId},
error::Error, error::Error,
CONFIG, CONFIG,
}; };
@ -166,8 +166,8 @@ pub async fn send_password_hint(address: &str, hint: Option<String>) -> EmptyRes
send_email(address, &subject, body_html, body_text).await send_email(address, &subject, body_html, body_text).await
} }
pub async fn send_delete_account(address: &str, uuid: &str) -> EmptyResult { pub async fn send_delete_account(address: &str, user_id: &UserId) -> EmptyResult {
let claims = generate_delete_claims(uuid.to_string()); let claims = generate_delete_claims(user_id.to_string());
let delete_token = encode_jwt(&claims); let delete_token = encode_jwt(&claims);
let (subject, body_html, body_text) = get_text( let (subject, body_html, body_text) = get_text(
@ -175,7 +175,7 @@ pub async fn send_delete_account(address: &str, uuid: &str) -> EmptyResult {
json!({ json!({
"url": CONFIG.domain(), "url": CONFIG.domain(),
"img_src": CONFIG._smtp_img_src(), "img_src": CONFIG._smtp_img_src(),
"user_id": uuid, "user_id": user_id,
"email": percent_encode(address.as_bytes(), NON_ALPHANUMERIC).to_string(), "email": percent_encode(address.as_bytes(), NON_ALPHANUMERIC).to_string(),
"token": delete_token, "token": delete_token,
}), }),
@ -184,8 +184,8 @@ pub async fn send_delete_account(address: &str, uuid: &str) -> EmptyResult {
send_email(address, &subject, body_html, body_text).await send_email(address, &subject, body_html, body_text).await
} }
pub async fn send_verify_email(address: &str, uuid: &str) -> EmptyResult { pub async fn send_verify_email(address: &str, user_id: &UserId) -> EmptyResult {
let claims = generate_verify_email_claims(uuid.to_string()); let claims = generate_verify_email_claims(user_id.clone());
let verify_email_token = encode_jwt(&claims); let verify_email_token = encode_jwt(&claims);
let (subject, body_html, body_text) = get_text( let (subject, body_html, body_text) = get_text(
@ -193,7 +193,7 @@ pub async fn send_verify_email(address: &str, uuid: &str) -> EmptyResult {
json!({ json!({
"url": CONFIG.domain(), "url": CONFIG.domain(),
"img_src": CONFIG._smtp_img_src(), "img_src": CONFIG._smtp_img_src(),
"user_id": uuid, "user_id": user_id,
"email": percent_encode(address.as_bytes(), NON_ALPHANUMERIC).to_string(), "email": percent_encode(address.as_bytes(), NON_ALPHANUMERIC).to_string(),
"token": verify_email_token, "token": verify_email_token,
}), }),
@ -214,8 +214,8 @@ pub async fn send_welcome(address: &str) -> EmptyResult {
send_email(address, &subject, body_html, body_text).await send_email(address, &subject, body_html, body_text).await
} }
pub async fn send_welcome_must_verify(address: &str, uuid: &str) -> EmptyResult { pub async fn send_welcome_must_verify(address: &str, user_id: &UserId) -> EmptyResult {
let claims = generate_verify_email_claims(uuid.to_string()); let claims = generate_verify_email_claims(user_id.clone());
let verify_email_token = encode_jwt(&claims); let verify_email_token = encode_jwt(&claims);
let (subject, body_html, body_text) = get_text( let (subject, body_html, body_text) = get_text(
@ -223,7 +223,7 @@ pub async fn send_welcome_must_verify(address: &str, uuid: &str) -> EmptyResult
json!({ json!({
"url": CONFIG.domain(), "url": CONFIG.domain(),
"img_src": CONFIG._smtp_img_src(), "img_src": CONFIG._smtp_img_src(),
"user_id": uuid, "user_id": user_id,
"token": verify_email_token, "token": verify_email_token,
}), }),
)?; )?;
@ -259,8 +259,8 @@ pub async fn send_single_org_removed_from_org(address: &str, org_name: &str) ->
pub async fn send_invite( pub async fn send_invite(
user: &User, user: &User,
org_id: Option<String>, org_id: Option<OrganizationId>,
org_user_id: Option<String>, member_id: Option<MembershipId>,
org_name: &str, org_name: &str,
invited_by_email: Option<String>, invited_by_email: Option<String>,
) -> EmptyResult { ) -> EmptyResult {
@ -268,27 +268,34 @@ pub async fn send_invite(
user.uuid.clone(), user.uuid.clone(),
user.email.clone(), user.email.clone(),
org_id.clone(), org_id.clone(),
org_user_id.clone(), member_id.clone(),
invited_by_email, invited_by_email,
); );
let invite_token = encode_jwt(&claims); let invite_token = encode_jwt(&claims);
let org_id = match org_id {
Some(ref org_id) => org_id.as_ref(),
None => "_",
};
let member_id = match member_id {
Some(ref member_id) => member_id.as_ref(),
None => "_",
};
let mut query = url::Url::parse("https://query.builder").unwrap(); let mut query = url::Url::parse("https://query.builder").unwrap();
{ {
let mut query_params = query.query_pairs_mut(); let mut query_params = query.query_pairs_mut();
query_params query_params
.append_pair("email", &user.email) .append_pair("email", &user.email)
.append_pair("organizationName", org_name) .append_pair("organizationName", org_name)
.append_pair("organizationId", org_id.as_deref().unwrap_or("_")) .append_pair("organizationId", org_id)
.append_pair("organizationUserId", org_user_id.as_deref().unwrap_or("_")) .append_pair("organizationUserId", member_id)
.append_pair("token", &invite_token); .append_pair("token", &invite_token);
if user.private_key.is_some() { if user.private_key.is_some() {
query_params.append_pair("orgUserHasExistingUser", "true"); query_params.append_pair("orgUserHasExistingUser", "true");
} }
} }
let query_string = match query.query() { let Some(query_string) = query.query() else {
None => err!("Failed to build invite URL query parameters"), err!("Failed to build invite URL query parameters")
Some(query) => query,
}; };
let (subject, body_html, body_text) = get_text( let (subject, body_html, body_text) = get_text(
@ -306,15 +313,15 @@ pub async fn send_invite(
pub async fn send_emergency_access_invite( pub async fn send_emergency_access_invite(
address: &str, address: &str,
uuid: &str, user_id: UserId,
emer_id: &str, emer_id: EmergencyAccessId,
grantor_name: &str, grantor_name: &str,
grantor_email: &str, grantor_email: &str,
) -> EmptyResult { ) -> EmptyResult {
let claims = generate_emergency_access_invite_claims( let claims = generate_emergency_access_invite_claims(
String::from(uuid), user_id,
String::from(address), String::from(address),
String::from(emer_id), emer_id.clone(),
String::from(grantor_name), String::from(grantor_name),
String::from(grantor_email), String::from(grantor_email),
); );
@ -324,15 +331,14 @@ pub async fn send_emergency_access_invite(
{ {
let mut query_params = query.query_pairs_mut(); let mut query_params = query.query_pairs_mut();
query_params query_params
.append_pair("id", emer_id) .append_pair("id", &emer_id.to_string())
.append_pair("name", grantor_name) .append_pair("name", grantor_name)
.append_pair("email", address) .append_pair("email", address)
.append_pair("token", &encode_jwt(&claims)); .append_pair("token", &encode_jwt(&claims));
} }
let query_string = match query.query() { let Some(query_string) = query.query() else {
None => err!("Failed to build emergency invite URL query parameters"), err!("Failed to build emergency invite URL query parameters")
Some(query) => query,
}; };
let (subject, body_html, body_text) = get_text( let (subject, body_html, body_text) = get_text(

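In send_invite the old org_id.as_deref().unwrap_or("_") calls are replaced by explicit matches now that the Options hold ID newtypes instead of String, borrowing the inner value or falling back to the "_" placeholder. A stand-alone sketch of that pattern with an illustrative newtype (the AsRef<str> impl below is an assumption made for the example, not copied from the real OrganizationId):

struct OrganizationId(String);

impl AsRef<str> for OrganizationId {
    fn as_ref(&self) -> &str {
        &self.0
    }
}

fn org_id_or_placeholder(org_id: &Option<OrganizationId>) -> &str {
    // Same shape as the match in send_invite: borrow the inner ID as &str,
    // or fall back to the "_" placeholder used in the invite URL.
    match org_id {
        Some(org_id) => org_id.as_ref(),
        None => "_",
    }
}

fn main() {
    let some_id = Some(OrganizationId("3fa85f64-5717-4562-b3fc-2c963f66afa6".to_string()));
    let no_id: Option<OrganizationId> = None;

    assert_eq!(org_id_or_placeholder(&some_id), "3fa85f64-5717-4562-b3fc-2c963f66afa6");
    assert_eq!(org_id_or_placeholder(&no_id), "_");
}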
2  src/main.rs

@ -24,6 +24,8 @@ extern crate log;
extern crate diesel; extern crate diesel;
#[macro_use] #[macro_use]
extern crate diesel_migrations; extern crate diesel_migrations;
#[macro_use]
extern crate diesel_derive_newtype;
use std::{ use std::{
collections::HashMap, collections::HashMap,

4  src/static/scripts/admin.css

@ -38,8 +38,8 @@ img {
max-width: 130px; max-width: 130px;
} }
#users-table .vw-actions, #orgs-table .vw-actions { #users-table .vw-actions, #orgs-table .vw-actions {
min-width: 130px; min-width: 135px;
max-width: 130px; max-width: 140px;
} }
#users-table .vw-org-cell { #users-table .vw-org-cell {
max-height: 120px; max-height: 120px;

212  src/static/scripts/admin_diagnostics.js

@ -7,6 +7,8 @@ var timeCheck = false;
var ntpTimeCheck = false; var ntpTimeCheck = false;
var domainCheck = false; var domainCheck = false;
var httpsCheck = false; var httpsCheck = false;
var websocketCheck = false;
var httpResponseCheck = false;
// ================================ // ================================
// Date & Time Check // Date & Time Check
@ -76,18 +78,15 @@ async function generateSupportString(event, dj) {
event.preventDefault(); event.preventDefault();
event.stopPropagation(); event.stopPropagation();
let supportString = "### Your environment (Generated via diagnostics page)\n"; let supportString = "### Your environment (Generated via diagnostics page)\n\n";
supportString += `* Vaultwarden version: v${dj.current_release}\n`; supportString += `* Vaultwarden version: v${dj.current_release}\n`;
supportString += `* Web-vault version: v${dj.web_vault_version}\n`; supportString += `* Web-vault version: v${dj.web_vault_version}\n`;
supportString += `* OS/Arch: ${dj.host_os}/${dj.host_arch}\n`; supportString += `* OS/Arch: ${dj.host_os}/${dj.host_arch}\n`;
supportString += `* Running within a container: ${dj.running_within_container} (Base: ${dj.container_base_image})\n`; supportString += `* Running within a container: ${dj.running_within_container} (Base: ${dj.container_base_image})\n`;
supportString += "* Environment settings overridden: "; supportString += `* Database type: ${dj.db_type}\n`;
if (dj.overrides != "") { supportString += `* Database version: ${dj.db_version}\n`;
supportString += "true\n"; supportString += `* Environment settings overridden!: ${dj.overrides !== ""}\n`;
} else {
supportString += "false\n";
}
supportString += `* Uses a reverse proxy: ${dj.ip_header_exists}\n`; supportString += `* Uses a reverse proxy: ${dj.ip_header_exists}\n`;
if (dj.ip_header_exists) { if (dj.ip_header_exists) {
supportString += `* IP Header check: ${dj.ip_header_match} (${dj.ip_header_name})\n`; supportString += `* IP Header check: ${dj.ip_header_match} (${dj.ip_header_name})\n`;
@ -99,11 +98,12 @@ async function generateSupportString(event, dj) {
supportString += `* Server/NTP Time Check: ${ntpTimeCheck}\n`; supportString += `* Server/NTP Time Check: ${ntpTimeCheck}\n`;
supportString += `* Domain Configuration Check: ${domainCheck}\n`; supportString += `* Domain Configuration Check: ${domainCheck}\n`;
supportString += `* HTTPS Check: ${httpsCheck}\n`; supportString += `* HTTPS Check: ${httpsCheck}\n`;
supportString += `* Database type: ${dj.db_type}\n`; if (dj.enable_websocket) {
supportString += `* Database version: ${dj.db_version}\n`; supportString += `* Websocket Check: ${websocketCheck}\n`;
supportString += "* Clients used: \n"; } else {
supportString += "* Reverse proxy and version: \n"; supportString += "* Websocket Check: disabled\n";
supportString += "* Other relevant information: \n"; }
supportString += `* HTTP Response Checks: ${httpResponseCheck}\n`;
const jsonResponse = await fetch(`${BASE_URL}/admin/diagnostics/config`, { const jsonResponse = await fetch(`${BASE_URL}/admin/diagnostics/config`, {
"headers": { "Accept": "application/json" } "headers": { "Accept": "application/json" }
@ -113,10 +113,30 @@ async function generateSupportString(event, dj) {
throw new Error(jsonResponse); throw new Error(jsonResponse);
} }
const configJson = await jsonResponse.json(); const configJson = await jsonResponse.json();
supportString += "\n### Config (Generated via diagnostics page)\n<details><summary>Show Running Config</summary>\n";
supportString += `\n**Environment settings which are overridden:** ${dj.overrides}\n`;
supportString += "\n\n```json\n" + JSON.stringify(configJson, undefined, 2) + "\n```\n</details>\n";
// Start Config and Details section within a details block which is collapsed by default
supportString += "\n### Config & Details (Generated via diagnostics page)\n\n";
supportString += "<details><summary>Show Config & Details</summary>\n";
// Add overrides if they exist
if (dj.overrides != "") {
supportString += `\n**Environment settings which are overridden:** ${dj.overrides}\n`;
}
// Add HTTP response check messages if they exist
if (httpResponseCheck === false) {
supportString += "\n**Failed HTTP Checks:**\n";
// We use `innerText` here since that will convert <br> into new-lines
supportString += "\n```yaml\n" + document.getElementById("http-response-errors").innerText.trim() + "\n```\n";
}
// Add the current config in json form
supportString += "\n**Config:**\n";
supportString += "\n```json\n" + JSON.stringify(configJson, undefined, 2) + "\n```\n";
supportString += "\n</details>\n";
// Add the support string to the textbox so it can be viewed and copied
document.getElementById("support-string").textContent = supportString; document.getElementById("support-string").textContent = supportString;
document.getElementById("support-string").classList.remove("d-none"); document.getElementById("support-string").classList.remove("d-none");
document.getElementById("copy-support").classList.remove("d-none"); document.getElementById("copy-support").classList.remove("d-none");
@ -199,6 +219,162 @@ function checkDns(dns_resolved) {
} }
} }
async function fetchCheckUrl(url) {
try {
const response = await fetch(url);
return { headers: response.headers, status: response.status, text: await response.text() };
} catch (error) {
console.error(`Error fetching ${url}: ${error}`);
return { error };
}
}
function checkSecurityHeaders(headers, omit) {
let securityHeaders = {
"x-frame-options": ["SAMEORIGIN"],
"x-content-type-options": ["nosniff"],
"referrer-policy": ["same-origin"],
"x-xss-protection": ["0"],
"x-robots-tag": ["noindex", "nofollow"],
"content-security-policy": [
"default-src 'self'",
"base-uri 'self'",
"form-action 'self'",
"object-src 'self' blob:",
"script-src 'self' 'wasm-unsafe-eval'",
"style-src 'self' 'unsafe-inline'",
"child-src 'self' https://*.duosecurity.com https://*.duofederal.com",
"frame-src 'self' https://*.duosecurity.com https://*.duofederal.com",
"frame-ancestors 'self' chrome-extension://nngceckbapebfimnlniiiahkandclblb chrome-extension://jbkfoedolllekgbhcbcoahefnbanhhlh moz-extension://*",
"img-src 'self' data: https://haveibeenpwned.com",
"connect-src 'self' https://api.pwnedpasswords.com https://api.2fa.directory https://app.simplelogin.io/api/ https://app.addy.io/api/ https://api.fastmail.com/ https://api.forwardemail.net",
]
};
let messages = [];
for (let header in securityHeaders) {
// Skip some headers for specific endpoints if needed
if (typeof omit === "object" && omit.includes(header) === true) {
continue;
}
// If the header exists, check whether its contents match what we expect
let headerValue = headers.get(header);
if (headerValue !== null) {
securityHeaders[header].forEach((expectedValue) => {
if (headerValue.indexOf(expectedValue) === -1) {
messages.push(`'${header}' does not contain '${expectedValue}'`);
}
});
} else {
messages.push(`'${header}' is missing!`);
}
}
return messages;
}
async function checkHttpResponse() {
const [apiConfig, webauthnConnector, notFound, notFoundApi, badRequest, unauthorized, forbidden] = await Promise.all([
fetchCheckUrl(`${BASE_URL}/api/config`),
fetchCheckUrl(`${BASE_URL}/webauthn-connector.html`),
fetchCheckUrl(`${BASE_URL}/admin/does-not-exist`),
fetchCheckUrl(`${BASE_URL}/admin/diagnostics/http?code=404`),
fetchCheckUrl(`${BASE_URL}/admin/diagnostics/http?code=400`),
fetchCheckUrl(`${BASE_URL}/admin/diagnostics/http?code=401`),
fetchCheckUrl(`${BASE_URL}/admin/diagnostics/http?code=403`),
]);
const respErrorElm = document.getElementById("http-response-errors");
// Check and validate the default API header responses
let apiErrors = checkSecurityHeaders(apiConfig.headers);
if (apiErrors.length >= 1) {
respErrorElm.innerHTML += "<b>API calls:</b><br>";
apiErrors.forEach((errMsg) => {
respErrorElm.innerHTML += `<b>Header:</b> ${errMsg}<br>`;
});
}
// Check the special `-connector.html` headers; these responses should have some headers omitted.
const omitConnectorHeaders = ["x-frame-options", "content-security-policy"];
let connectorErrors = checkSecurityHeaders(webauthnConnector.headers, omitConnectorHeaders);
omitConnectorHeaders.forEach((header) => {
if (webauthnConnector.headers.get(header) !== null) {
connectorErrors.push(`'${header}' is present while it should not be`);
}
});
if (connectorErrors.length >= 1) {
respErrorElm.innerHTML += "<b>2FA Connector calls:</b><br>";
connectorErrors.forEach((errMsg) => {
respErrorElm.innerHTML += `<b>Header:</b> ${errMsg}<br>`;
});
}
// Check specific error code responses if they are not re-written by a reverse proxy
let responseErrors = [];
if (notFound.status !== 404 || notFound.text.indexOf("return to the web-vault") === -1) {
responseErrors.push("404 (Not Found) HTML is invalid");
}
if (notFoundApi.status !== 404 || notFoundApi.text.indexOf("\"message\":\"Testing error 404 response\",") === -1) {
responseErrors.push("404 (Not Found) JSON is invalid");
}
if (badRequest.status !== 400 || badRequest.text.indexOf("\"message\":\"Testing error 400 response\",") === -1) {
responseErrors.push("400 (Bad Request) is invalid");
}
if (unauthorized.status !== 401 || unauthorized.text.indexOf("\"message\":\"Testing error 401 response\",") === -1) {
responseErrors.push("401 (Unauthorized) is invalid");
}
if (forbidden.status !== 403 || forbidden.text.indexOf("\"message\":\"Testing error 403 response\",") === -1) {
responseErrors.push("403 (Forbidden) is invalid");
}
if (responseErrors.length >= 1) {
respErrorElm.innerHTML += "<b>HTTP error responses:</b><br>";
responseErrors.forEach((errMsg) => {
respErrorElm.innerHTML += `<b>Response to:</b> ${errMsg}<br>`;
});
}
if (responseErrors.length >= 1 || connectorErrors.length >= 1 || apiErrors.length >= 1) {
document.getElementById("http-response-warning").classList.remove("d-none");
} else {
httpResponseCheck = true;
document.getElementById("http-response-success").classList.remove("d-none");
}
}
async function fetchWsUrl(wsUrl) {
return new Promise((resolve, reject) => {
try {
const ws = new WebSocket(wsUrl);
ws.onopen = () => {
ws.close();
resolve(true);
};
ws.onerror = () => {
reject(false);
};
} catch (_) {
reject(false);
}
});
}
async function checkWebsocketConnection() {
// Test Websocket connections via the anonymous (login with device) connection
const isConnected = await fetchWsUrl(`${BASE_URL}/notifications/anonymous-hub?token=admin-diagnostics`).catch(() => false);
if (isConnected) {
websocketCheck = true;
document.getElementById("websocket-success").classList.remove("d-none");
} else {
document.getElementById("websocket-error").classList.remove("d-none");
}
}
function init(dj) { function init(dj) {
// Time check // Time check
document.getElementById("time-browser-string").textContent = browserUTC; document.getElementById("time-browser-string").textContent = browserUTC;
@ -225,6 +401,12 @@ function init(dj) {
// DNS Check // DNS Check
checkDns(dj.dns_resolved); checkDns(dj.dns_resolved);
checkHttpResponse();
if (dj.enable_websocket) {
checkWebsocketConnection();
}
} }
// onLoad events // onLoad events

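checkHttpResponse and checkSecurityHeaders above run in the browser as part of the admin diagnostics page. The same header validation can be scripted against a running instance; a hedged Rust sketch (assuming the reqwest crate with its blocking feature enabled, and an instance reachable at the URL used below) that checks a few of the headers set in src/util.rs:

use std::collections::HashMap;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Expected header fragments, mirroring a subset of checkSecurityHeaders above.
    let expected = HashMap::from([
        ("x-frame-options", "SAMEORIGIN"),
        ("x-content-type-options", "nosniff"),
        ("referrer-policy", "same-origin"),
        ("x-robots-tag", "noindex"),
    ]);

    // The base URL is an assumption for this sketch; point it at your own instance.
    let response = reqwest::blocking::get("http://localhost:8000/api/config")?;

    for (name, fragment) in expected {
        match response.headers().get(name) {
            Some(value) => {
                let value = value.to_str()?;
                if value.contains(fragment) {
                    println!("ok:   '{name}' contains '{fragment}'");
                } else {
                    println!("warn: '{name}' does not contain '{fragment}' (got: {value})");
                }
            }
            None => println!("warn: '{name}' is missing"),
        }
    }
    Ok(())
}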
2  src/static/scripts/admin_users.js

@ -152,7 +152,7 @@ const ORG_TYPES = {
"name": "User", "name": "User",
"bg": "blue" "bg": "blue"
}, },
"3": { "4": {
"name": "Manager", "name": "Manager",
"bg": "green" "bg": "green"
}, },

42  src/static/scripts/datatables.css

@ -4,10 +4,10 @@
* *
* To rebuild or modify this file with the latest versions of the included * To rebuild or modify this file with the latest versions of the included
* software please visit: * software please visit:
* https://datatables.net/download/#bs5/dt-2.0.8 * https://datatables.net/download/#bs5/dt-2.1.8
* *
* Included libraries: * Included libraries:
* DataTables 2.0.8 * DataTables 2.1.8
*/ */
@charset "UTF-8"; @charset "UTF-8";
@ -45,15 +45,21 @@ table.dataTable tr.dt-hasChild td.dt-control:before {
} }
html.dark table.dataTable td.dt-control:before, html.dark table.dataTable td.dt-control:before,
:root[data-bs-theme=dark] table.dataTable td.dt-control:before { :root[data-bs-theme=dark] table.dataTable td.dt-control:before,
:root[data-theme=dark] table.dataTable td.dt-control:before {
border-left-color: rgba(255, 255, 255, 0.5); border-left-color: rgba(255, 255, 255, 0.5);
} }
html.dark table.dataTable tr.dt-hasChild td.dt-control:before, html.dark table.dataTable tr.dt-hasChild td.dt-control:before,
:root[data-bs-theme=dark] table.dataTable tr.dt-hasChild td.dt-control:before { :root[data-bs-theme=dark] table.dataTable tr.dt-hasChild td.dt-control:before,
:root[data-theme=dark] table.dataTable tr.dt-hasChild td.dt-control:before {
border-top-color: rgba(255, 255, 255, 0.5); border-top-color: rgba(255, 255, 255, 0.5);
border-left-color: transparent; border-left-color: transparent;
} }
div.dt-scroll {
width: 100%;
}
div.dt-scroll-body thead tr, div.dt-scroll-body thead tr,
div.dt-scroll-body tfoot tr { div.dt-scroll-body tfoot tr {
height: 0; height: 0;
@ -377,6 +383,31 @@ table.table.dataTable.table-hover > tbody > tr.selected:hover > * {
box-shadow: inset 0 0 0 9999px rgba(var(--dt-row-selected), 0.975); box-shadow: inset 0 0 0 9999px rgba(var(--dt-row-selected), 0.975);
} }
div.dt-container div.dt-layout-start > *:not(:last-child) {
margin-right: 1em;
}
div.dt-container div.dt-layout-end > *:not(:first-child) {
margin-left: 1em;
}
div.dt-container div.dt-layout-full {
width: 100%;
}
div.dt-container div.dt-layout-full > *:only-child {
margin-left: auto;
margin-right: auto;
}
div.dt-container div.dt-layout-table > div {
display: block !important;
}
@media screen and (max-width: 767px) {
div.dt-container div.dt-layout-start > *:not(:last-child) {
margin-right: 0;
}
div.dt-container div.dt-layout-end > *:not(:first-child) {
margin-left: 0;
}
}
div.dt-container div.dt-length label { div.dt-container div.dt-length label {
font-weight: normal; font-weight: normal;
text-align: left; text-align: left;
@ -400,9 +431,6 @@ div.dt-container div.dt-search input {
display: inline-block; display: inline-block;
width: auto; width: auto;
} }
div.dt-container div.dt-info {
padding-top: 0.85em;
}
div.dt-container div.dt-paging { div.dt-container div.dt-paging {
margin: 0; margin: 0;
} }

1372  src/static/scripts/datatables.js

File diff suppressed because it is too large

23  src/static/templates/admin/diagnostics.hbs

@ -132,6 +132,21 @@
<span class="d-block" title="We have direct internet access, no outgoing proxy configured."><b>No</b></span> <span class="d-block" title="We have direct internet access, no outgoing proxy configured."><b>No</b></span>
{{/unless}} {{/unless}}
</dd> </dd>
<dt class="col-sm-5">Websocket enabled
{{#if page_data.enable_websocket}}
<span class="badge bg-success d-none" id="websocket-success" title="Websocket connection is working.">Ok</span>
<span class="badge bg-danger d-none" id="websocket-error" title="Websocket connection error, validate your reverse proxy configuration!">Error</span>
{{/if}}
</dt>
<dd class="col-sm-7">
{{#if page_data.enable_websocket}}
<span class="d-block" title="Websocket connections are enabled (ENABLE_WEBSOCKET is true)."><b>Yes</b></span>
{{/if}}
{{#unless page_data.enable_websocket}}
<span class="d-block" title="Websocket connections are disabled (ENABLE_WEBSOCKET is false)."><b>No</b></span>
{{/unless}}
</dd>
<dt class="col-sm-5">DNS (github.com) <dt class="col-sm-5">DNS (github.com)
<span class="badge bg-success d-none" id="dns-success" title="DNS Resolving works!">Ok</span> <span class="badge bg-success d-none" id="dns-success" title="DNS Resolving works!">Ok</span>
<span class="badge bg-danger d-none" id="dns-warning" title="DNS Resolving failed. Please fix.">Error</span> <span class="badge bg-danger d-none" id="dns-warning" title="DNS Resolving failed. Please fix.">Error</span>
@ -167,6 +182,14 @@
<span id="domain-server" class="d-block"><b>Server:</b> <span id="domain-server-string">{{page_data.admin_url}}</span></span> <span id="domain-server" class="d-block"><b>Server:</b> <span id="domain-server-string">{{page_data.admin_url}}</span></span>
<span id="domain-browser" class="d-block"><b>Browser:</b> <span id="domain-browser-string"></span></span> <span id="domain-browser" class="d-block"><b>Browser:</b> <span id="domain-browser-string"></span></span>
</dd> </dd>
<dt class="col-sm-5">HTTP Response validation
<span class="badge bg-success d-none" id="http-response-success" title="All headers and HTTP request responses seem to be ok.">Ok</span>
<span class="badge bg-danger d-none" id="http-response-warning" title="Some headers or HTTP request responses return invalid data!">Error</span>
</dt>
<dd class="col-sm-7">
<span id="http-response-errors" class="d-block"></span>
</dd>
</dl> </dl>
</div> </div>
</div> </div>

2  src/static/templates/admin/users.hbs

@ -19,7 +19,7 @@
<tr> <tr>
<td> <td>
<svg width="48" height="48" class="float-start me-2 rounded" data-jdenticon-value="{{email}}"> <svg width="48" height="48" class="float-start me-2 rounded" data-jdenticon-value="{{email}}">
<div class="float-start"> <div>
<strong>{{name}}</strong> <strong>{{name}}</strong>
<span class="d-block">{{email}}</span> <span class="d-block">{{email}}</span>
<span class="d-block"> <span class="d-block">

55  src/static/templates/scss/vaultwarden.scss.hbs

@ -42,12 +42,6 @@ label[for^="ownedBusiness"] {
@extend %vw-hide; @extend %vw-hide;
} }
/* Hide the radio button and label for the `Custom` org user type */
#userTypeCustom,
label[for^="userTypeCustom"] {
@extend %vw-hide;
}
/* Hide Business Name */ /* Hide Business Name */
app-org-account form div bit-form-field.tw-block:nth-child(3) { app-org-account form div bit-form-field.tw-block:nth-child(3) {
@extend %vw-hide; @extend %vw-hide;
@ -58,42 +52,77 @@ app-organization-plans > form > bit-section:nth-child(2) {
@extend %vw-hide; @extend %vw-hide;
} }
/* Hide Collection Management Form */
app-org-account form.ng-untouched:nth-child(6) {
@extend %vw-hide;
}
/* Hide 'Member Access' Report Card from Org Reports */
app-org-reports-home > app-report-list > div.tw-inline-grid > div:nth-child(6) {
@extend %vw-hide;
}
/* Hide Device Verification form at the Two Step Login screen */ /* Hide Device Verification form at the Two Step Login screen */
app-security > app-two-factor-setup > form { app-security > app-two-factor-setup > form {
@extend %vw-hide; @extend %vw-hide;
} }
/* Hide unsupported Custom Role options */
bit-dialog div.tw-ml-4:has(bit-form-control input),
bit-dialog div.tw-col-span-4:has(input[formcontrolname*="access"], input[formcontrolname*="manage"]) {
@extend %vw-hide;
}
/* Hide Log in with passkey */
app-login div.tw-flex:nth-child(4) {
@extend %vw-hide;
}
/* Change collapsed menu icon to Vaultwarden */
bit-nav-logo bit-nav-item a:before {
content: "";
background-image: url("../images/icon-white.svg");
background-repeat: no-repeat;
background-position: center center;
height: 32px;
display: block;
}
bit-nav-logo bit-nav-item .bwi-shield {
@extend %vw-hide;
}
/**** END Static Vaultwarden Changes ****/ /**** END Static Vaultwarden Changes ****/
/**** START Dynamic Vaultwarden Changes ****/ /**** START Dynamic Vaultwarden Changes ****/
{{#if signup_disabled}} {{#if signup_disabled}}
/* Hide the register link on the login screen */ /* Hide the register link on the login screen */
app-frontend-layout > app-login > form > div > div > div > p { app-login form div + div + div + div + hr,
app-login form div + div + div + div + hr + p {
@extend %vw-hide; @extend %vw-hide;
} }
{{/if}} {{/if}}
/* Hide `Email` 2FA if mail is not enabled */
{{#unless mail_enabled}} {{#unless mail_enabled}}
app-two-factor-setup ul.list-group.list-group-2fa li.list-group-item:nth-child(5) { /* Hide `Email` 2FA if mail is not enabled */
app-two-factor-setup ul.list-group.list-group-2fa li.list-group-item:nth-child(1) {
@extend %vw-hide; @extend %vw-hide;
} }
{{/unless}} {{/unless}}
/* Hide `YubiKey OTP security key` 2FA if it is not enabled */
{{#unless yubico_enabled}} {{#unless yubico_enabled}}
app-two-factor-setup ul.list-group.list-group-2fa li.list-group-item:nth-child(2) { /* Hide `YubiKey OTP security key` 2FA if it is not enabled */
app-two-factor-setup ul.list-group.list-group-2fa li.list-group-item:nth-child(4) {
@extend %vw-hide; @extend %vw-hide;
} }
{{/unless}} {{/unless}}
/* Hide Emergency Access if not allowed */
{{#unless emergency_access_allowed}} {{#unless emergency_access_allowed}}
/* Hide Emergency Access if not allowed */
bit-nav-item[route="settings/emergency-access"] { bit-nav-item[route="settings/emergency-access"] {
@extend %vw-hide; @extend %vw-hide;
} }
{{/unless}} {{/unless}}
/* Hide Sends if not allowed */
{{#unless sends_allowed}} {{#unless sends_allowed}}
/* Hide Sends if not allowed */
bit-nav-item[route="sends"] { bit-nav-item[route="sends"] {
@extend %vw-hide; @extend %vw-hide;
} }

46  src/util.rs

@ -1,13 +1,12 @@
//
// Web Headers and caching
//
-use std::{collections::HashMap, io::Cursor, ops::Deref, path::Path};
+use std::{collections::HashMap, io::Cursor, path::Path};
use num_traits::ToPrimitive;
use rocket::{
fairing::{Fairing, Info, Kind},
http::{ContentType, Header, HeaderMap, Method, Status},
-request::FromParam,
response::{self, Responder},
Data, Orbit, Request, Response, Rocket,
};
@@ -51,9 +50,11 @@ impl Fairing for AppHeaders {
}
}
+// NOTE: When modifying or adding security headers be sure to also update the diagnostic checks in `src/static/scripts/admin_diagnostics.js` in `checkSecurityHeaders`
res.set_raw_header("Permissions-Policy", "accelerometer=(), ambient-light-sensor=(), autoplay=(), battery=(), camera=(), display-capture=(), document-domain=(), encrypted-media=(), execution-while-not-rendered=(), execution-while-out-of-viewport=(), fullscreen=(), geolocation=(), gyroscope=(), keyboard-map=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), screen-wake-lock=(), sync-xhr=(), usb=(), web-share=(), xr-spatial-tracking=()");
res.set_raw_header("Referrer-Policy", "same-origin");
res.set_raw_header("X-Content-Type-Options", "nosniff");
+res.set_raw_header("X-Robots-Tag", "noindex, nofollow");
// Obsolete in modern browsers, unsafe (XS-Leak), and largely replaced by CSP
res.set_raw_header("X-XSS-Protection", "0");
@@ -96,10 +97,11 @@ impl Fairing for AppHeaders {
https://app.addy.io/api/ \
https://api.fastmail.com/ \
https://api.forwardemail.net \
-;\
+{allowed_connect_src};\
",
icon_service_csp = CONFIG._icon_service_csp(),
-allowed_iframe_ancestors = CONFIG.allowed_iframe_ancestors()
+allowed_iframe_ancestors = CONFIG.allowed_iframe_ancestors(),
+allowed_connect_src = CONFIG.allowed_connect_src(),
);
res.set_raw_header("Content-Security-Policy", csp);
res.set_raw_header("X-Frame-Options", "SAMEORIGIN");
@@ -220,42 +222,6 @@ impl<'r, R: 'r + Responder<'r, 'static> + Send> Responder<'r, 'static> for Cache
}
}
-pub struct SafeString(String);
-impl fmt::Display for SafeString {
-fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-self.0.fmt(f)
-}
-}
-impl Deref for SafeString {
-type Target = String;
-fn deref(&self) -> &Self::Target {
-&self.0
-}
-}
-impl AsRef<Path> for SafeString {
-#[inline]
-fn as_ref(&self) -> &Path {
-Path::new(&self.0)
-}
-}
-impl<'r> FromParam<'r> for SafeString {
-type Error = ();
-#[inline(always)]
-fn from_param(param: &'r str) -> Result<Self, Self::Error> {
-if param.chars().all(|c| matches!(c, 'a'..='z' | 'A'..='Z' |'0'..='9' | '-')) {
-Ok(SafeString(param.to_string()))
-} else {
-Err(())
-}
-}
-}
// Log all the routes from the main paths list, and the attachments endpoint
// Effectively ignores, any static file route, and the alive endpoint
const LOGGED_ROUTES: [&str; 7] = ["/api", "/admin", "/identity", "/icons", "/attachments", "/events", "/notifications"];
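
For reference, the `FromParam` impl on the removed `SafeString` type accepted a route parameter only if it consisted of ASCII alphanumerics and hyphens. The sketch below restates that check as a standalone function with a few test values; the function name and inputs are illustrative, not part of the codebase.

```rust
// Standalone restatement of the allow-list check the removed SafeString
// FromParam guard performed on route parameters.
fn is_safe_param(param: &str) -> bool {
    param.chars().all(|c| matches!(c, 'a'..='z' | 'A'..='Z' | '0'..='9' | '-'))
}

fn main() {
    assert!(is_safe_param("a1b2c3-d4"));
    assert!(!is_safe_param("../etc/passwd")); // path traversal characters are rejected
    assert!(!is_safe_param("uuid_with_underscore")); // underscores are not in the allow-list
    println!("checks passed");
}
```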