
Merge branch 'dani-garcia:main' into main

pull/5093/head
Calvin Li authored 10 months ago, committed by GitHub
commit c91ea7822e
79 changed files, with the number of changed lines per file:

  1. .env.template (17)
  2. .github/workflows/build.yml (6)
  3. .github/workflows/hadolint.yml (2)
  4. .github/workflows/release.yml (50)
  5. .github/workflows/trivy.yml (6)
  6. Cargo.lock (1532)
  7. Cargo.toml (85)
  8. build.rs (7)
  9. docker/DockerSettings.yaml (12)
  10. docker/Dockerfile.alpine (49)
  11. docker/Dockerfile.debian (57)
  12. docker/Dockerfile.j2 (52)
  13. docker/README.md (5)
  14. docker/docker-bake.hcl (34)
  15. migrations/mysql/2024-02-14-135828_change_time_stamp_data_type/down.sql (0)
  16. migrations/mysql/2024-02-14-135828_change_time_stamp_data_type/up.sql (1)
  17. migrations/postgresql/2024-02-14-135953_change_time_stamp_data_type/down.sql (0)
  18. migrations/postgresql/2024-02-14-135953_change_time_stamp_data_type/up.sql (3)
  19. migrations/sqlite/2024-02-14-140000_change_time_stamp_data_type/down.sql (0)
  20. migrations/sqlite/2024-02-14-140000_change_time_stamp_data_type/up.sql (1)
  21. rust-toolchain.toml (2)
  22. src/api/admin.rs (35)
  23. src/api/core/accounts.rs (558)
  24. src/api/core/ciphers.rs (547)
  25. src/api/core/emergency_access.rs (298)
  26. src/api/core/events.rs (53)
  27. src/api/core/folders.rs (33)
  28. src/api/core/mod.rs (70)
  29. src/api/core/organizations.rs (858)
  30. src/api/core/public.rs (82)
  31. src/api/core/sends.rs (197)
  32. src/api/core/two_factor/authenticator.rs (63)
  33. src/api/core/two_factor/duo.rs (60)
  34. src/api/core/two_factor/email.rs (99)
  35. src/api/core/two_factor/mod.rs (64)
  36. src/api/core/two_factor/protected_actions.rs (25)
  37. src/api/core/two_factor/webauthn.rs (160)
  38. src/api/core/two_factor/yubikey.rs (93)
  39. src/api/icons.rs (244)
  40. src/api/identity.rs (23)
  41. src/api/mod.rs (17)
  42. src/api/notifications.rs (212)
  43. src/api/push.rs (4)
  44. src/api/web.rs (2)
  45. src/auth.rs (124)
  46. src/config.rs (32)
  47. src/db/mod.rs (4)
  48. src/db/models/attachment.rs (18)
  49. src/db/models/auth_request.rs (2)
  50. src/db/models/cipher.rs (232)
  51. src/db/models/collection.rs (106)
  52. src/db/models/device.rs (4)
  53. src/db/models/emergency_access.rs (125)
  54. src/db/models/event.rs (4)
  55. src/db/models/folder.rs (8)
  56. src/db/models/group.rs (74)
  57. src/db/models/org_policy.rs (40)
  58. src/db/models/organization.rs (261)
  59. src/db/models/send.rs (91)
  60. src/db/models/two_factor.rs (16)
  61. src/db/models/user.rs (43)
  62. src/db/schemas/mysql/schema.rs (2)
  63. src/db/schemas/postgresql/schema.rs (2)
  64. src/db/schemas/sqlite/schema.rs (2)
  65. src/error.rs (20)
  66. src/main.rs (51)
  67. src/static/global_domains.json (540)
  68. src/static/scripts/admin_diagnostics.js (6)
  69. src/static/scripts/bootstrap.bundle.js (17)
  70. src/static/scripts/bootstrap.css (53)
  71. src/static/scripts/datatables.css (18)
  72. src/static/scripts/datatables.js (1085)
  73. src/static/scripts/jdenticon-3.3.0.js (61)
  74. src/static/templates/admin/organizations.hbs (12)
  75. src/static/templates/admin/users.hbs (26)
  76. src/static/templates/email/change_email.hbs (2)
  77. src/static/templates/email/change_email.html.hbs (2)
  78. src/util.rs (367)
  79. tools/global_domains.py (6)

17
.env.template

@@ -84,12 +84,8 @@
 ### WebSocket ###
 #################
-## Enables websocket notifications
-# WEBSOCKET_ENABLED=false
-## Controls the WebSocket server address and port
-# WEBSOCKET_ADDRESS=0.0.0.0
-# WEBSOCKET_PORT=3012
+## Enable websocket notifications
+# ENABLE_WEBSOCKET=true
 ##########################
 ### Push notifications ###

@@ -448,6 +444,11 @@
 ##
 ## Maximum attempts before an email token is reset and a new email will need to be sent.
 # EMAIL_ATTEMPTS_LIMIT=3
+##
+## Setup email 2FA regardless of any organization policy
+# EMAIL_2FA_ENFORCE_ON_VERIFIED_INVITE=false
+## Automatically setup email 2FA as fallback provider when needed
+# EMAIL_2FA_AUTO_FALLBACK=false
 ## Other MFA/2FA settings
 ## Disable 2FA remember

@@ -524,9 +525,9 @@
 ## Only use this as a last resort if you are not able to use a valid certificate.
 # SMTP_ACCEPT_INVALID_HOSTNAMES=false
-##########################
+#######################
 ### Rocket settings ###
-##########################
+#######################
 ## Rocket specific settings
 ## See https://rocket.rs/v0.5/guide/configuration/ for more details.
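For context on the renamed flag: WEBSOCKET_ENABLED plus its dedicated address/port settings give way to a single ENABLE_WEBSOCKET toggle, since websocket traffic is now served through Rocket itself (see the tokio-tungstenite removal in Cargo.toml below). A hypothetical sketch of reading such a flag, not vaultwarden's actual config machinery (which lives in src/config.rs):

    // Hypothetical sketch: read ENABLE_WEBSOCKET, defaulting to true as the
    // commented-out template value suggests.
    fn enable_websocket() -> bool {
        std::env::var("ENABLE_WEBSOCKET")
            .map(|v| !v.eq_ignore_ascii_case("false"))
            .unwrap_or(true)
    }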

6
.github/workflows/build.yml

@@ -46,7 +46,7 @@ jobs:
 steps:
 # Checkout the repo
 - name: "Checkout"
-uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
+uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 #v4.1.7
 # End Checkout the repo

@@ -74,7 +74,7 @@ jobs:
 # Only install the clippy and rustfmt components on the default rust-toolchain
 - name: "Install rust-toolchain version"
-uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248 # master @ 2023-12-07 - 10:22 PM GMT+1
+uses: dtolnay/rust-toolchain@21dc36fb71dd22e3317045c0c31a3f4249868b17 # master @ Jun 13, 2024, 6:20 PM GMT+2
 if: ${{ matrix.channel == 'rust-toolchain' }}
 with:
 toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"

@@ -84,7 +84,7 @@ jobs:
 # Install the any other channel to be used for which we do not execute clippy and rustfmt
 - name: "Install MSRV version"
-uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248 # master @ 2023-12-07 - 10:22 PM GMT+1
+uses: dtolnay/rust-toolchain@21dc36fb71dd22e3317045c0c31a3f4249868b17 # master @ Jun 13, 2024, 6:20 PM GMT+2
 if: ${{ matrix.channel != 'rust-toolchain' }}
 with:
 toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"

2
.github/workflows/hadolint.yml

@@ -13,7 +13,7 @@ jobs:
 steps:
 # Checkout the repo
 - name: Checkout
-uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
 # End Checkout the repo

 # Download hadolint - https://github.com/hadolint/hadolint/releases

50
.github/workflows/release.yml

@@ -58,7 +58,7 @@ jobs:
 steps:
 # Checkout the repo
 - name: Checkout
-uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
 with:
 fetch-depth: 0

@@ -69,13 +69,13 @@
 # Start Docker Buildx
 - name: Setup Docker Buildx
-uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
+uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0
 # https://github.com/moby/buildkit/issues/3969
-# Also set max parallelism to 2, the default of 4 breaks GitHub Actions
+# Also set max parallelism to 3, the default of 4 breaks GitHub Actions and causes OOMKills
 with:
-config-inline: |
+buildkitd-config-inline: |
 [worker.oci]
-max-parallelism = 2
+max-parallelism = 3
 driver-opts: |
 network=host

@@ -102,7 +102,7 @@
 # Login to Docker Hub
 - name: Login to Docker Hub
-uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
 with:
 username: ${{ secrets.DOCKERHUB_USERNAME }}
 password: ${{ secrets.DOCKERHUB_TOKEN }}

@@ -116,7 +116,7 @@
 # Login to GitHub Container Registry
 - name: Login to GitHub Container Registry
-uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
 with:
 registry: ghcr.io
 username: ${{ github.repository_owner }}

@@ -129,15 +129,9 @@
 run: |
 echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.GHCR_REPO }}" | tee -a "${GITHUB_ENV}"
-- name: Add registry for ghcr.io
-if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
-shell: bash
-run: |
-echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.GHCR_REPO }}" | tee -a "${GITHUB_ENV}"
 # Login to Quay.io
 - name: Login to Quay.io
-uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
 with:
 registry: quay.io
 username: ${{ secrets.QUAY_USERNAME }}

@@ -157,7 +151,7 @@
 # Check if there is a GitHub Container Registry Login and use it for caching
 if [[ -n "${HAVE_GHCR_LOGIN}" ]]; then
 echo "BAKE_CACHE_FROM=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }}" | tee -a "${GITHUB_ENV}"
-echo "BAKE_CACHE_TO=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }},mode=max" | tee -a "${GITHUB_ENV}"
+echo "BAKE_CACHE_TO=type=registry,ref=${{ vars.GHCR_REPO }}-buildcache:${{ matrix.base_image }},compression=zstd,mode=max" | tee -a "${GITHUB_ENV}"
 else
 echo "BAKE_CACHE_FROM="
 echo "BAKE_CACHE_TO="

@@ -171,7 +165,7 @@
 echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"
 - name: Bake ${{ matrix.base_image }} containers
-uses: docker/bake-action@849707117b03d39aba7924c50a10376a69e88d7d # v4.1.0
+uses: docker/bake-action@1c5f18a523c4c68524cfbc5161494d8bb5b29d20 # v5.0.1
 env:
 BASE_TAGS: "${{ env.BASE_TAGS }}"
 SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"

@@ -204,53 +198,53 @@
 # This is needed because using different platforms doesn't trigger a new pull/download
 # Extract amd64 binary
-docker create --name amd64 --platform=linux/amd64 "vaultwarden/server:${EXTRACT_TAG}-alpine"
+docker create --name amd64 --platform=linux/amd64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
 docker cp amd64:/vaultwarden vaultwarden-amd64
 docker rm --force amd64
-docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
+docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
 # Extract arm64 binary
-docker create --name arm64 --platform=linux/arm64 "vaultwarden/server:${EXTRACT_TAG}-alpine"
+docker create --name arm64 --platform=linux/arm64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
 docker cp arm64:/vaultwarden vaultwarden-arm64
 docker rm --force arm64
-docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
+docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
 # Extract armv7 binary
-docker create --name armv7 --platform=linux/arm/v7 "vaultwarden/server:${EXTRACT_TAG}-alpine"
+docker create --name armv7 --platform=linux/arm/v7 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
 docker cp armv7:/vaultwarden vaultwarden-armv7
 docker rm --force armv7
-docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
+docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
 # Extract armv6 binary
-docker create --name armv6 --platform=linux/arm/v6 "vaultwarden/server:${EXTRACT_TAG}-alpine"
+docker create --name armv6 --platform=linux/arm/v6 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
 docker cp armv6:/vaultwarden vaultwarden-armv6
 docker rm --force armv6
-docker rmi --force "vaultwarden/server:${EXTRACT_TAG}-alpine"
+docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
 # Upload artifacts to Github Actions
 - name: "Upload amd64 artifact"
-uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
+uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
 if: ${{ matrix.base_image == 'alpine' }}
 with:
 name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
 path: vaultwarden-amd64
 - name: "Upload arm64 artifact"
-uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
+uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
 if: ${{ matrix.base_image == 'alpine' }}
 with:
 name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
 path: vaultwarden-arm64
 - name: "Upload armv7 artifact"
-uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
+uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
 if: ${{ matrix.base_image == 'alpine' }}
 with:
 name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
 path: vaultwarden-armv7
 - name: "Upload armv6 artifact"
-uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
+uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
 if: ${{ matrix.base_image == 'alpine' }}
 with:
 name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6

6
.github/workflows/trivy.yml

@@ -25,10 +25,10 @@ jobs:
 actions: read
 steps:
 - name: Checkout code
-uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
+uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 #v4.1.7
 - name: Run Trivy vulnerability scanner
-uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca # v0.16.1
+uses: aquasecurity/trivy-action@7c2007bcb556501da015201bcba5aa14069b74e2 # v0.23.0
 with:
 scan-type: repo
 ignore-unfixed: true

@@ -37,6 +37,6 @@ jobs:
 severity: CRITICAL,HIGH
 - name: Upload Trivy scan results to GitHub Security tab
-uses: github/codeql-action/upload-sarif@b7bf0a3ed3ecfa44160715d7c442788f65f0f923 # v3.23.2
+uses: github/codeql-action/upload-sarif@2bbafcdd7fbf96243689e764c2f15d9735164f33 # v3.25.10
 with:
 sarif_file: 'trivy-results.sarif'

1532
Cargo.lock

File diff suppressed because it is too large

85
Cargo.toml

@@ -3,7 +3,7 @@ name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2021"
-rust-version = "1.74.0"
+rust-version = "1.78.0"
 resolver = "2"
 repository = "https://github.com/dani-garcia/vaultwarden"

@@ -36,11 +36,11 @@ unstable = []
 [target."cfg(not(windows))".dependencies]
 # Logging
-syslog = "6.1.0"
+syslog = "6.1.1"
 [dependencies]
 # Logging
-log = "0.4.20"
+log = "0.4.22"
 fern = { version = "0.6.2", features = ["syslog-6", "reopen-1"] }
 tracing = { version = "0.1.40", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work

@@ -51,57 +51,56 @@ dotenvy = { version = "0.15.7", default-features = false }
 once_cell = "1.19.0"
 # Numerical libraries
-num-traits = "0.2.18"
+num-traits = "0.2.19"
 num-derive = "0.4.2"
-bigdecimal = "0.4.2"
+bigdecimal = "0.4.5"
 # Web framework
-rocket = { version = "0.5.0", features = ["tls", "json"], default-features = false }
-rocket_ws = { version ="0.1.0" }
+rocket = { version = "0.5.1", features = ["tls", "json"], default-features = false }
+rocket_ws = { version ="0.1.1" }
 # WebSockets libraries
-tokio-tungstenite = "0.20.1"
-rmpv = "1.0.1" # MessagePack library
+rmpv = "1.3.0" # MessagePack library
 # Concurrent HashMap used for WebSocket messaging and favicons
-dashmap = "5.5.3"
+dashmap = "6.0.1"
 # Async futures
 futures = "0.3.30"
-tokio = { version = "1.36.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
+tokio = { version = "1.38.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
 # A generic serialization/deserialization framework
-serde = { version = "1.0.197", features = ["derive"] }
-serde_json = "1.0.114"
+serde = { version = "1.0.204", features = ["derive"] }
+serde_json = "1.0.120"
 # A safe, extensible ORM and Query builder
-diesel = { version = "2.1.4", features = ["chrono", "r2d2", "numeric"] }
-diesel_migrations = "2.1.0"
+diesel = { version = "2.2.1", features = ["chrono", "r2d2", "numeric"] }
+diesel_migrations = "2.2.0"
 diesel_logger = { version = "0.3.0", optional = true }
 # Bundled/Static SQLite
-libsqlite3-sys = { version = "0.27.0", features = ["bundled"], optional = true }
+libsqlite3-sys = { version = "0.28.0", features = ["bundled"], optional = true }
 # Crypto-related libraries
 rand = { version = "0.8.5", features = ["small_rng"] }
 ring = "0.17.8"
 # UUID generation
-uuid = { version = "1.7.0", features = ["v4"] }
+uuid = { version = "1.9.1", features = ["v4"] }
 # Date and time libraries
-chrono = { version = "0.4.34", features = ["clock", "serde"], default-features = false }
-chrono-tz = "0.8.6"
-time = "0.3.34"
+chrono = { version = "0.4.38", features = ["clock", "serde"], default-features = false }
+chrono-tz = "0.9.0"
+time = "0.3.36"
 # Job scheduler
-job_scheduler_ng = "2.0.4"
+job_scheduler_ng = "2.0.5"
 # Data encoding library Hex/Base32/Base64
-data-encoding = "2.5.0"
+data-encoding = "2.6.0"
 # JWT library
-jsonwebtoken = "9.2.0"
+jsonwebtoken = "9.3.0"
 # TOTP library
 totp-lite = "2.0.1"

@@ -113,31 +112,32 @@ yubico = { version = "0.11.0", features = ["online-tokio"], default-features = f
 webauthn-rs = "0.3.2"
 # Handling of URL's for WebAuthn and favicons
-url = "2.5.0"
+url = "2.5.2"
 # Email libraries
-lettre = { version = "0.11.4", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
+lettre = { version = "0.11.7", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
 percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
-email_address = "0.2.4"
+email_address = "0.2.5"
 # HTML Template library
-handlebars = { version = "5.1.0", features = ["dir_source"] }
+handlebars = { version = "5.1.2", features = ["dir_source"] }
 # HTTP client (Used for favicons, version check, DUO and HIBP API)
-reqwest = { version = "0.11.24", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns", "native-tls-alpn"] }
+reqwest = { version = "0.12.5", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
+hickory-resolver = "0.24.1"
 # Favicon extraction libraries
 html5gum = "0.5.7"
-regex = { version = "1.10.3", features = ["std", "perf", "unicode-perl"], default-features = false }
+regex = { version = "1.10.5", features = ["std", "perf", "unicode-perl"], default-features = false }
 data-url = "0.3.1"
-bytes = "1.5.0"
+bytes = "1.6.0"
 # Cache function results (Used for version check and favicon fetching)
-cached = { version = "0.48.1", features = ["async"] }
+cached = { version = "0.52.0", features = ["async"] }
 # Used for custom short lived cookie jar during favicon extraction
-cookie = "0.17.0"
-cookie_store = "0.20.0"
+cookie = "0.18.1"
+cookie_store = "0.21.0"
 # Used by U2F, JWT and PostgreSQL
 openssl = "0.10.64"

@@ -146,16 +146,16 @@ openssl = "0.10.64"
 pico-args = "0.5.0"
 # Macro ident concatenation
-paste = "1.0.14"
+paste = "1.0.15"
 governor = "0.6.3"
 # Check client versions for specific features.
-semver = "1.0.22"
+semver = "1.0.23"
 # Allow overriding the default memory allocator
 # Mainly used for the musl builds, since the default musl malloc is very slow
-mimalloc = { version = "0.1.39", features = ["secure"], default-features = false, optional = true }
+mimalloc = { version = "0.1.43", features = ["secure"], default-features = false, optional = true }
-which = "6.0.0"
+which = "6.0.1"
 # Argon2 library with support for the PHC format
 argon2 = "0.5.3"

@@ -163,7 +163,6 @@ argon2 = "0.5.3"
 # Reading a password from the cli for generating the Argon2id ADMIN_TOKEN
 rpassword = "7.3.1"
-# Strip debuginfo from the release builds
 # The symbols are the provide better panic traces
 # Also enable fat LTO and use 1 codegen unit for optimizations

@@ -172,7 +171,6 @@ strip = "debuginfo"
 lto = "fat"
 codegen-units = 1
-# A little bit of a speedup
 [profile.dev]
 split-debuginfo = "unpacked"

@@ -206,14 +204,13 @@ unsafe_code = "forbid"
 non_ascii_idents = "forbid"
 # Deny
-future_incompatible = "deny"
+future_incompatible = { level = "deny", priority = -1 }
 noop_method_call = "deny"
-pointer_structural_match = "deny"
-rust_2018_idioms = "deny"
-rust_2021_compatibility = "deny"
+rust_2018_idioms = { level = "deny", priority = -1 }
+rust_2021_compatibility = { level = "deny", priority = -1 }
 trivial_casts = "deny"
 trivial_numeric_casts = "deny"
-unused = "deny"
+unused = { level = "deny", priority = -1 }
 unused_import_braces = "deny"
 unused_lifetimes = "deny"
 deprecated_in_future = "deny"
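Two of these moves are worth a note: tokio-tungstenite is dropped because rocket_ws now carries the WebSocket handling, and the `{ level = "deny", priority = -1 }` lint-table syntax gives group lints a lower priority so individual lints can override them. For a feel of the rocket_ws side, the crate's documented echo pattern looks roughly like this (illustrative only; vaultwarden's real handler lives in src/api/notifications.rs and is more involved):

    use rocket::get;
    use rocket_ws as ws;

    // Illustrative echo route in the style of the rocket_ws documentation;
    // it streams every incoming message straight back to the client.
    #[get("/echo")]
    fn echo(ws: ws::WebSocket) -> ws::Stream!['static] {
        ws::Stream! { ws =>
            for await message in ws {
                yield message?;
            }
        }
    }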

7
build.rs

@@ -17,6 +17,13 @@ fn main() {
 "You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
 );
+// Use check-cfg to let cargo know which cfg's we define,
+// and avoid warnings when they are used in the code.
+println!("cargo::rustc-check-cfg=cfg(sqlite)");
+println!("cargo::rustc-check-cfg=cfg(mysql)");
+println!("cargo::rustc-check-cfg=cfg(postgresql)");
+println!("cargo::rustc-check-cfg=cfg(query_logger)");
 // Rerun when these paths are changed.
 // Someone could have checked-out a tag or specific commit, but no other files changed.
 println!("cargo:rerun-if-changed=.git");

12
docker/DockerSettings.yaml

@@ -1,12 +1,12 @@
 ---
-vault_version: "v2024.1.2b"
-vault_image_digest: "sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08"
+vault_version: "v2024.5.1b"
+vault_image_digest: "sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375"
-# Cross Compile Docker Helper Scripts v1.3.0
+# Cross Compile Docker Helper Scripts v1.4.0
 # We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
-xx_image_digest: "sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc"
+xx_image_digest: "sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4"
-rust_version: 1.76.0 # Rust version to be used
+rust_version: 1.79.0 # Rust version to be used
 debian_version: bookworm # Debian release name to be used
-alpine_version: 3.19 # Alpine version to be used
+alpine_version: "3.20" # Alpine version to be used
 # For which platforms/architectures will we try to build images
 platforms: ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
 # Determine the build images per OS/Arch

49
docker/Dockerfile.alpine

@@ -18,23 +18,23 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull docker.io/vaultwarden/web-vault:v2024.1.2b
-# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.1.2b
-# [docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08]
+# $ docker pull docker.io/vaultwarden/web-vault:v2024.5.1b
+# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.5.1b
+# [docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08
-# [docker.io/vaultwarden/web-vault:v2024.1.2b]
+# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375
+# [docker.io/vaultwarden/web-vault:v2024.5.1b]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08 as vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375 as vault
 ########################## ALPINE BUILD IMAGES ##########################
 ## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
 ## And for Alpine we define all build images here, they will only be loaded when actually used
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.76.0 as build_amd64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.76.0 as build_arm64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.76.0 as build_armv7
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.76.0 as build_armv6
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.79.0 as build_amd64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.79.0 as build_arm64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.79.0 as build_armv7
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.79.0 as build_armv6
 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006

@@ -58,33 +58,29 @@ ENV DEBIAN_FRONTEND=noninteractive \
 # Create CARGO_HOME folder and don't download rust docs
-RUN mkdir -pv "${CARGO_HOME}" \
-&& rustup set profile minimal
+RUN mkdir -pv "${CARGO_HOME}" && \
+rustup set profile minimal
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app
-# Shared variables across Debian and Alpine
+# Environment variables for Cargo on Alpine based builds
 RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
+# To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
+if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
 # Output the current contents of the file
 cat /env-cargo
-# Enable MiMalloc to improve performance on Alpine builds
-ARG DB=sqlite,mysql,postgresql,enable_mimalloc
 RUN source /env-cargo && \
 rustup target add "${CARGO_TARGET}"
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
 ARG CARGO_PROFILE=release
-ARG VW_VERSION
-# Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain.toml ./rust-toolchain.toml
-COPY ./build.rs ./build.rs
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc
 # Builds your dependencies and removes the
 # dummy project, except the target folder

@@ -97,6 +93,8 @@ RUN source /env-cargo && \
 # To avoid copying unneeded files, use .dockerignore
 COPY . .
+ARG VW_VERSION
 # Builds again, this time it will be the actual source files being build
 RUN source /env-cargo && \
 # Make sure that we actually build the project by updating the src/main.rs timestamp

@@ -127,7 +125,7 @@ RUN source /env-cargo && \
 # To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
 #
 # We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
-FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.19
+FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.20
 ENV ROCKET_PROFILE="release" \
 ROCKET_ADDRESS=0.0.0.0 \

@@ -150,8 +148,7 @@ EXPOSE 3012
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
+COPY docker/healthcheck.sh docker/start.sh /
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/final/vaultwarden .

57
docker/Dockerfile.debian

@@ -18,24 +18,24 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 # click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-# $ docker pull docker.io/vaultwarden/web-vault:v2024.1.2b
-# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.1.2b
-# [docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08]
+# $ docker pull docker.io/vaultwarden/web-vault:v2024.5.1b
+# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.5.1b
+# [docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375]
 #
 # - Conversely, to get the tag name from the digest:
-# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08
-# [docker.io/vaultwarden/web-vault:v2024.1.2b]
+# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375
+# [docker.io/vaultwarden/web-vault:v2024.5.1b]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08 as vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:1a867b4b175e85fc8602314bd83bc263c76c49787031704f16a2915567725375 as vault
 ########################## Cross Compile Docker Helper Scripts ##########################
 ## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
 ## And these bash scripts do not have any significant difference if at all
-FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc AS xx
+FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4 AS xx
 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
-FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.76.0-slim-bookworm as build
+FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.79.0-slim-bookworm as build
 COPY --from=xx / /
 ARG TARGETARCH
 ARG TARGETVARIANT

@@ -64,10 +64,7 @@ RUN apt-get update && \
 "libc6-$(xx-info debian-arch)-cross" \
 "libc6-dev-$(xx-info debian-arch)-cross" \
 "linux-libc-dev-$(xx-info debian-arch)-cross" && \
-# Run xx-cargo early, since it sometimes seems to break when run at a later stage
-echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
-RUN xx-apt-get install -y \
+xx-apt-get install -y \
 --no-install-recommends \
 gcc \
 libmariadb3 \

@@ -78,19 +75,29 @@ RUN xx-apt-get install -y \
 # Force install arch dependend mariadb dev packages
 # Installing them the normal way breaks several other packages (again)
 apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
-dpkg --force-all -i ./libmariadb-dev*.deb
+dpkg --force-all -i ./libmariadb-dev*.deb && \
+# Run xx-cargo early, since it sometimes seems to break when run at a later stage
+echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
 # Create CARGO_HOME folder and don't download rust docs
-RUN mkdir -pv "${CARGO_HOME}" \
-&& rustup set profile minimal
+RUN mkdir -pv "${CARGO_HOME}" && \
+rustup set profile minimal
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app
-# Environment variables for cargo across Debian and Alpine
+# Environment variables for Cargo on Debian based builds
+ARG ARCH_OPENSSL_LIB_DIR \
+ARCH_OPENSSL_INCLUDE_DIR
 RUN source /env-cargo && \
 if xx-info is-cross ; then \
+# Some special variables if needed to override some build paths
+if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
+echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
+echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
+fi && \
 # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
 # Because of this we generate the needed environment variables here which we can load in the needed steps.
 echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \

@@ -103,19 +110,16 @@ RUN source /env-cargo && \
 # Output the current contents of the file
 cat /env-cargo
-# Configure the DB ARG as late as possible to not invalidate the cached layers above
-ARG DB=sqlite,mysql,postgresql
 RUN source /env-cargo && \
 rustup target add "${CARGO_TARGET}"
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
 ARG CARGO_PROFILE=release
-ARG VW_VERSION
-# Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain.toml ./rust-toolchain.toml
-COPY ./build.rs ./build.rs
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+ARG DB=sqlite,mysql,postgresql
 # Builds your dependencies and removes the
 # dummy project, except the target folder

@@ -128,6 +132,8 @@ RUN source /env-cargo && \
 # To avoid copying unneeded files, use .dockerignore
 COPY . .
+ARG VW_VERSION
 # Builds again, this time it will be the actual source files being build
 RUN source /env-cargo && \
 # Make sure that we actually build the project by updating the src/main.rs timestamp

@@ -185,8 +191,7 @@ EXPOSE 3012
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
+COPY docker/healthcheck.sh docker/start.sh /
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/final/vaultwarden .

52
docker/Dockerfile.j2

@@ -82,10 +82,7 @@ RUN apt-get update && \
 "libc6-$(xx-info debian-arch)-cross" \
 "libc6-dev-$(xx-info debian-arch)-cross" \
 "linux-libc-dev-$(xx-info debian-arch)-cross" && \
-# Run xx-cargo early, since it sometimes seems to break when run at a later stage
-echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
-RUN xx-apt-get install -y \
+xx-apt-get install -y \
 --no-install-recommends \
 gcc \
 libmariadb3 \

@@ -96,21 +93,31 @@ RUN xx-apt-get install -y \
 # Force install arch dependend mariadb dev packages
 # Installing them the normal way breaks several other packages (again)
 apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \
-dpkg --force-all -i ./libmariadb-dev*.deb
+dpkg --force-all -i ./libmariadb-dev*.deb && \
+# Run xx-cargo early, since it sometimes seems to break when run at a later stage
+echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo
 {% endif %}
 # Create CARGO_HOME folder and don't download rust docs
-RUN mkdir -pv "${CARGO_HOME}" \
-&& rustup set profile minimal
+RUN mkdir -pv "${CARGO_HOME}" && \
+rustup set profile minimal
 # Creates a dummy project used to grab dependencies
 RUN USER=root cargo new --bin /app
 WORKDIR /app
 {% if base == "debian" %}
-# Environment variables for cargo across Debian and Alpine
+# Environment variables for Cargo on Debian based builds
+ARG ARCH_OPENSSL_LIB_DIR \
+ARCH_OPENSSL_INCLUDE_DIR
 RUN source /env-cargo && \
 if xx-info is-cross ; then \
+# Some special variables if needed to override some build paths
+if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
+echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
+echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
+fi && \
 # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
 # Because of this we generate the needed environment variables here which we can load in the needed steps.
 echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \

@@ -123,30 +130,28 @@ RUN source /env-cargo && \
 # Output the current contents of the file
 cat /env-cargo
-# Configure the DB ARG as late as possible to not invalidate the cached layers above
-ARG DB=sqlite,mysql,postgresql
 {% elif base == "alpine" %}
-# Shared variables across Debian and Alpine
+# Environment variables for Cargo on Alpine based builds
 RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
+# To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
+if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
 # Output the current contents of the file
 cat /env-cargo
-# Enable MiMalloc to improve performance on Alpine builds
-ARG DB=sqlite,mysql,postgresql,enable_mimalloc
 {% endif %}
 RUN source /env-cargo && \
 rustup target add "${CARGO_TARGET}"
+# Copies over *only* your manifests and build files
+COPY ./Cargo.* ./rust-toolchain.toml ./build.rs ./
 ARG CARGO_PROFILE=release
-ARG VW_VERSION
-# Copies over *only* your manifests and build files
-COPY ./Cargo.* ./
-COPY ./rust-toolchain.toml ./rust-toolchain.toml
-COPY ./build.rs ./build.rs
+# Configure the DB ARG as late as possible to not invalidate the cached layers above
+{% if base == "debian" %}
+ARG DB=sqlite,mysql,postgresql
+{% elif base == "alpine" %}
+# Enable MiMalloc to improve performance on Alpine builds
+ARG DB=sqlite,mysql,postgresql,enable_mimalloc
+{% endif %}
 # Builds your dependencies and removes the
 # dummy project, except the target folder

@@ -159,6 +164,8 @@ RUN source /env-cargo && \
 # To avoid copying unneeded files, use .dockerignore
 COPY . .
+ARG VW_VERSION
 # Builds again, this time it will be the actual source files being build
 RUN source /env-cargo && \
 # Make sure that we actually build the project by updating the src/main.rs timestamp

@@ -228,8 +235,7 @@ EXPOSE 3012
 # and the binary from the "build" stage to the current stage
 WORKDIR /
-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
+COPY docker/healthcheck.sh docker/start.sh /
 COPY --from=vault /web-vault ./web-vault
 COPY --from=build /app/target/final/vaultwarden .

5
docker/README.md

@@ -11,6 +11,11 @@ With just these two files we can build both Debian and Alpine images for the fol
 - armv7 (linux/arm/v7)
 - armv6 (linux/arm/v6)
+Some unsupported platforms for Debian based images. These are not built and tested by default and are only provided to make it easier for users to build for these architectures.
+- 386 (linux/386)
+- ppc64le (linux/ppc64le)
+- s390x (linux/s390x)
 To build these containers you need to enable QEMU binfmt support to be able to run/emulate architectures which are different then your host.<br>
 This ensures the container build process can run binaries from other architectures.<br>

34
docker/docker-bake.hcl

@@ -125,6 +125,40 @@ target "debian-armv6" {
 tags = generate_tags("", "-armv6")
 }
+// ==== Start of unsupported Debian architecture targets ===
+// These are provided just to help users build for these rare platforms
+// They will not be built by default
+target "debian-386" {
+inherits = ["debian"]
+platforms = ["linux/386"]
+tags = generate_tags("", "-386")
+args = {
+ARCH_OPENSSL_LIB_DIR = "/usr/lib/i386-linux-gnu"
+ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/i386-linux-gnu"
+}
+}
+target "debian-ppc64le" {
+inherits = ["debian"]
+platforms = ["linux/ppc64le"]
+tags = generate_tags("", "-ppc64le")
+args = {
+ARCH_OPENSSL_LIB_DIR = "/usr/lib/powerpc64le-linux-gnu"
+ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/powerpc64le-linux-gnu"
+}
+}
+target "debian-s390x" {
+inherits = ["debian"]
+platforms = ["linux/s390x"]
+tags = generate_tags("", "-s390x")
+args = {
+ARCH_OPENSSL_LIB_DIR = "/usr/lib/s390x-linux-gnu"
+ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/s390x-linux-gnu"
+}
+}
+// ==== End of unsupported Debian architecture targets ===
 // A Group to build all platforms individually for local testing
 group "debian-all" {
 targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]

0
migrations/mysql/2024-02-14-135828_change_time_stamp_data_type/down.sql

1
migrations/mysql/2024-02-14-135828_change_time_stamp_data_type/up.sql

@@ -0,0 +1 @@
+ALTER TABLE twofactor MODIFY last_used BIGINT NOT NULL;

0
migrations/postgresql/2024-02-14-135953_change_time_stamp_data_type/down.sql

3
migrations/postgresql/2024-02-14-135953_change_time_stamp_data_type/up.sql

@@ -0,0 +1,3 @@
+ALTER TABLE twofactor
+ALTER COLUMN last_used TYPE BIGINT,
+ALTER COLUMN last_used SET NOT NULL;

0
migrations/sqlite/2024-02-14-140000_change_time_stamp_data_type/down.sql

1
migrations/sqlite/2024-02-14-140000_change_time_stamp_data_type/up.sql

@@ -0,0 +1 @@
+-- Integer size in SQLite is already i64, so we don't need to do anything
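All three migrations widen twofactor.last_used to a 64-bit integer (SQLite needs no change because its INTEGER already holds i64). On the Rust side that corresponds to the column mapping to i64, Diesel's BigInt, instead of i32. A minimal sketch of the implied type change; the actual model lives in src/db/models/two_factor.rs and is more elaborate:

    // Minimal sketch of the type change implied by these migrations.
    struct TwoFactor {
        last_used: i64, // was i32; e.g. a Unix timestamp fits comfortably in i64
    }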

2
rust-toolchain.toml

@@ -1,4 +1,4 @@
 [toolchain]
-channel = "1.76.0"
+channel = "1.79.0"
 components = [ "rustfmt", "clippy" ]
 profile = "minimal"

35
src/api/admin.rs

@ -265,8 +265,8 @@ fn admin_page_login() -> ApiResult<Html<String>> {
     render_admin_login(None, None)
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct InviteData {
     email: String,
 }
@@ -326,9 +326,9 @@ async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
     let mut users_json = Vec::with_capacity(users.len());
     for u in users {
         let mut usr = u.to_json(&mut conn).await;
-        usr["UserEnabled"] = json!(u.enabled);
-        usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
-        usr["LastActive"] = match u.last_active(&mut conn).await {
+        usr["userEnabled"] = json!(u.enabled);
+        usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+        usr["lastActive"] = match u.last_active(&mut conn).await {
             Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
             None => json!(None::<String>),
         };
@@ -364,8 +364,8 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
 async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
     if let Some(u) = User::find_by_mail(mail, &mut conn).await {
         let mut usr = u.to_json(&mut conn).await;
-        usr["UserEnabled"] = json!(u.enabled);
-        usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+        usr["userEnabled"] = json!(u.enabled);
+        usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
         Ok(Json(usr))
     } else {
         err_code!("User doesn't exist", Status::NotFound.code);
@@ -376,8 +376,8 @@ async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn)
 async fn get_user_json(uuid: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
     let u = get_user_or_404(uuid, &mut conn).await?;
     let mut usr = u.to_json(&mut conn).await;
-    usr["UserEnabled"] = json!(u.enabled);
-    usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
+    usr["userEnabled"] = json!(u.enabled);
+    usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
     Ok(Json(usr))
 }
@@ -475,7 +475,7 @@ async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) ->
     }
 }

-#[derive(Deserialize, Debug)]
+#[derive(Debug, Deserialize)]
 struct UserOrgTypeData {
     user_type: NumberOrString,
     user_uuid: String,
@@ -510,7 +510,11 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mu
     match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, &user_to_edit.org_uuid, true, &mut conn).await {
         Ok(_) => {}
         Err(OrgPolicyErr::TwoFactorMissing) => {
-            err!("You cannot modify this user to this type because it has no two-step login method activated");
+            if CONFIG.email_2fa_auto_fallback() {
+                two_factor::email::find_and_activate_email_2fa(&user_to_edit.user_uuid, &mut conn).await?;
+            } else {
+                err!("You cannot modify this user to this type because they have not setup 2FA");
+            }
         }
         Err(OrgPolicyErr::SingleOrgEnforced) => {
             err!("You cannot modify this user to this type because it is a member of an organization which forbids it");
@@ -697,10 +701,7 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
     let (latest_release, latest_commit, latest_web_build) =
         get_release_info(has_http_access, running_within_container).await;

-    let ip_header_name = match &ip_header.0 {
-        Some(h) => h,
-        _ => "",
-    };
+    let ip_header_name = &ip_header.0.unwrap_or_default();

     let diagnostics_json = json!({
         "dns_resolved": dns_resolved,
@@ -713,8 +714,8 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
         "running_within_container": running_within_container,
         "container_base_image": if running_within_container { container_base_image() } else { "Not applicable" },
         "has_http_access": has_http_access,
-        "ip_header_exists": &ip_header.0.is_some(),
-        "ip_header_match": ip_header_name == CONFIG.ip_header(),
+        "ip_header_exists": !ip_header_name.is_empty(),
+        "ip_header_match": ip_header_name.eq(&CONFIG.ip_header()),
         "ip_header_name": ip_header_name,
         "ip_header_config": &CONFIG.ip_header(),
         "uses_proxy": uses_proxy,

558 src/api/core/accounts.rs

File diff suppressed because it is too large

547 src/api/core/ciphers.rs

File diff suppressed because it is too large

298 src/api/core/emergency_access.rs

@@ -1,11 +1,11 @@
-use chrono::{Duration, Utc};
+use chrono::{TimeDelta, Utc};
 use rocket::{serde::json::Json, Route};
 use serde_json::Value;

 use crate::{
     api::{
         core::{CipherSyncData, CipherSyncType},
-        EmptyResult, JsonResult, JsonUpcase,
+        EmptyResult, JsonResult,
     },
     auth::{decode_emergency_access_invite, Headers},
     db::{models::*, DbConn, DbPool},
@@ -43,31 +43,33 @@ pub fn routes() -> Vec<Route> {
 async fn get_contacts(headers: Headers, mut conn: DbConn) -> Json<Value> {
     if !CONFIG.emergency_access_allowed() {
         return Json(json!({
-            "Data": [{
-                "Id": "",
-                "Status": 2,
-                "Type": 0,
-                "WaitTimeDays": 0,
-                "GranteeId": "",
-                "Email": "",
-                "Name": "NOTE: Emergency Access is disabled!",
-                "Object": "emergencyAccessGranteeDetails",
+            "data": [{
+                "id": "",
+                "status": 2,
+                "type": 0,
+                "waitTimeDays": 0,
+                "granteeId": "",
+                "email": "",
+                "name": "NOTE: Emergency Access is disabled!",
+                "object": "emergencyAccessGranteeDetails",
             }],
-            "Object": "list",
-            "ContinuationToken": null
+            "object": "list",
+            "continuationToken": null
         }));
     }
     let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
     let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
     for ea in emergency_access_list {
-        emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await);
+        if let Some(grantee) = ea.to_json_grantee_details(&mut conn).await {
+            emergency_access_list_json.push(grantee)
+        }
     }

     Json(json!({
-        "Data": emergency_access_list_json,
-        "Object": "list",
-        "ContinuationToken": null
+        "data": emergency_access_list_json,
+        "object": "list",
+        "continuationToken": null
     }))
 }
@@ -84,18 +86,20 @@
         }
     }

     Json(json!({
-        "Data": emergency_access_list_json,
-        "Object": "list",
-        "ContinuationToken": null
+        "data": emergency_access_list_json,
+        "object": "list",
+        "continuationToken": null
     }))
 }

 #[get("/emergency-access/<emer_id>")]
-async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
+async fn get_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
     check_emergency_access_enabled()?;

-    match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
+    match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
+        Some(emergency_access) => Ok(Json(
+            emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but does not!"),
+        )),
         None => err!("Emergency access not valid."),
     }
 }
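Note the access-control shift above: `find_by_uuid` is replaced by lookups such as `find_by_uuid_and_grantor_uuid`, which fold the ownership check into the query itself, so someone else's record simply comes back as `None`. A rough standalone illustration of the pattern (an in-memory map stands in for the database; names are illustrative):

    use std::collections::HashMap;

    struct EmergencyAccess {
        grantor_uuid: String,
    }

    // The caller's identity is part of the lookup, so there is no separate
    // "does this record belong to you?" check to forget later.
    fn find_by_uuid_and_grantor_uuid<'a>(
        store: &'a HashMap<String, EmergencyAccess>,
        uuid: &str,
        grantor_uuid: &str,
    ) -> Option<&'a EmergencyAccess> {
        store.get(uuid).filter(|ea| ea.grantor_uuid == grantor_uuid)
    }

    fn main() {
        let mut store = HashMap::new();
        store.insert("ea-1".to_string(), EmergencyAccess { grantor_uuid: "alice".to_string() });
        assert!(find_by_uuid_and_grantor_uuid(&store, "ea-1", "alice").is_some());
        // Bob gets None for Alice's record; the query itself rules it out.
        assert!(find_by_uuid_and_grantor_uuid(&store, "ea-1", "bob").is_none());
    }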
@@ -105,42 +109,49 @@
 // region put/post

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EmergencyAccessUpdateData {
-    Type: NumberOrString,
-    WaitTimeDays: i32,
-    KeyEncrypted: Option<String>,
+    r#type: NumberOrString,
+    wait_time_days: i32,
+    key_encrypted: Option<String>,
 }

 #[put("/emergency-access/<emer_id>", data = "<data>")]
-async fn put_emergency_access(emer_id: &str, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult {
-    post_emergency_access(emer_id, data, conn).await
+async fn put_emergency_access(
+    emer_id: &str,
+    data: Json<EmergencyAccessUpdateData>,
+    headers: Headers,
+    conn: DbConn,
+) -> JsonResult {
+    post_emergency_access(emer_id, data, headers, conn).await
 }

 #[post("/emergency-access/<emer_id>", data = "<data>")]
 async fn post_emergency_access(
     emer_id: &str,
-    data: JsonUpcase<EmergencyAccessUpdateData>,
+    data: Json<EmergencyAccessUpdateData>,
+    headers: Headers,
     mut conn: DbConn,
 ) -> JsonResult {
     check_emergency_access_enabled()?;

-    let data: EmergencyAccessUpdateData = data.into_inner().data;
+    let data: EmergencyAccessUpdateData = data.into_inner();

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
         Some(emergency_access) => emergency_access,
         None => err!("Emergency access not valid."),
     };

-    let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
+    let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) {
         Some(new_type) => new_type as i32,
         None => err!("Invalid emergency access type."),
     };

     emergency_access.atype = new_type;
-    emergency_access.wait_time_days = data.WaitTimeDays;
-    if data.KeyEncrypted.is_some() {
-        emergency_access.key_encrypted = data.KeyEncrypted;
+    emergency_access.wait_time_days = data.wait_time_days;
+    if data.key_encrypted.is_some() {
+        emergency_access.key_encrypted = data.key_encrypted;
     }

     emergency_access.save(&mut conn).await?;
@@ -155,17 +166,21 @@ async fn post_emergency_access(
 async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
     check_emergency_access_enabled()?;

-    let grantor_user = headers.user;
-
-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emer) => {
-            if emer.grantor_uuid != grantor_user.uuid && emer.grantee_uuid != Some(grantor_user.uuid) {
-                err!("Emergency access not valid.")
-            }
-            emer
-        }
-        None => err!("Emergency access not valid."),
-    };
+    let emergency_access = match (
+        EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await,
+        EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &headers.user.uuid, &mut conn).await,
+    ) {
+        (Some(grantor_emer), None) => {
+            info!("Grantor deleted emergency access {emer_id}");
+            grantor_emer
+        }
+        (None, Some(grantee_emer)) => {
+            info!("Grantee deleted emergency access {emer_id}");
+            grantee_emer
+        }
+        _ => err!("Emergency access not valid."),
+    };

     emergency_access.delete(&mut conn).await?;
     Ok(())
 }
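The rewritten `delete_emergency_access` runs both scoped lookups and matches on the pair, which keeps the grantor and grantee paths distinct while rejecting everything else. That control flow, reduced to plain `Option`s:

    fn delete(as_grantor: Option<&str>, as_grantee: Option<&str>) -> Result<String, String> {
        // Exactly one of the two lookups may succeed; anything else is rejected.
        match (as_grantor, as_grantee) {
            (Some(id), None) => Ok(format!("Grantor deleted emergency access {id}")),
            (None, Some(id)) => Ok(format!("Grantee deleted emergency access {id}")),
            _ => Err("Emergency access not valid.".to_string()),
        }
    }

    fn main() {
        assert!(delete(Some("ea-1"), None).is_ok());
        assert!(delete(None, Some("ea-1")).is_ok());
        // No relationship to the record at all: rejected.
        assert!(delete(None, None).is_err());
    }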
@@ -180,24 +195,24 @@ async fn post_delete_emergency_access(emer_id: &str, headers: Headers, conn: DbC
 // region invite

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EmergencyAccessInviteData {
-    Email: String,
-    Type: NumberOrString,
-    WaitTimeDays: i32,
+    email: String,
+    r#type: NumberOrString,
+    wait_time_days: i32,
 }

 #[post("/emergency-access/invite", data = "<data>")]
-async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
     check_emergency_access_enabled()?;
-    let data: EmergencyAccessInviteData = data.into_inner().data;
-    let email = data.Email.to_lowercase();
-    let wait_time_days = data.WaitTimeDays;
+    let data: EmergencyAccessInviteData = data.into_inner();
+    let email = data.email.to_lowercase();
+    let wait_time_days = data.wait_time_days;

     let emergency_access_status = EmergencyAccessStatus::Invited as i32;

-    let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
+    let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) {
         Some(new_type) => new_type as i32,
         None => err!("Invalid emergency access type."),
     };
@@ -209,7 +224,7 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
         err!("You can not set yourself as an emergency contact.")
     }

-    let grantee_user = match User::find_by_mail(&email, &mut conn).await {
+    let (grantee_user, new_user) = match User::find_by_mail(&email, &mut conn).await {
         None => {
             if !CONFIG.invitations_allowed() {
                 err!(format!("Grantee user does not exist: {}", &email))
@@ -226,9 +241,10 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
             let mut user = User::new(email.clone());
             user.save(&mut conn).await?;
-            user
+            (user, true)
         }
-        Some(user) => user,
+        Some(user) if user.password_hash.is_empty() => (user, true),
+        Some(user) => (user, false),
     };

     if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(
@@ -256,15 +272,9 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
             &grantor_user.email,
         )
         .await?;
-    } else {
-        // Automatically mark user as accepted if no email invites
-        match User::find_by_mail(&email, &mut conn).await {
-            Some(user) => match accept_invite_process(&user.uuid, &mut new_emergency_access, &email, &mut conn).await {
-                Ok(v) => v,
-                Err(e) => err!(e.to_string()),
-            },
-            None => err!("Grantee user not found."),
-        }
+    } else if !new_user {
+        // if mail is not enabled immediately accept the invitation for existing users
+        new_emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
     }

     Ok(())
@@ -274,15 +284,12 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
 async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
     check_emergency_access_enabled()?;

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };

-    if emergency_access.grantor_uuid != headers.user.uuid {
-        err!("Emergency access not valid.");
-    }
-
     if emergency_access.status != EmergencyAccessStatus::Invited as i32 {
         err!("The grantee user is already accepted or confirmed to the organization");
     }
@@ -308,34 +315,29 @@ async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> Emp
             &grantor_user.email,
         )
         .await?;
-    } else {
-        if Invitation::find_by_mail(&email, &mut conn).await.is_none() {
+    } else if !grantee_user.password_hash.is_empty() {
+        // accept the invitation for existing user
+        emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
+    } else if CONFIG.invitations_allowed() && Invitation::find_by_mail(&email, &mut conn).await.is_none() {
         let invitation = Invitation::new(&email);
         invitation.save(&mut conn).await?;
     }
-
-        // Automatically mark user as accepted if no email invites
-        match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
-            Ok(v) => v,
-            Err(e) => err!(e.to_string()),
-        }
-    }

     Ok(())
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct AcceptData {
-    Token: String,
+    token: String,
 }

 #[post("/emergency-access/<emer_id>/accept", data = "<data>")]
-async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn accept_invite(emer_id: &str, data: Json<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
     check_emergency_access_enabled()?;
-    let data: AcceptData = data.into_inner().data;
-    let token = &data.Token;
+    let data: AcceptData = data.into_inner();
+    let token = &data.token;
     let claims = decode_emergency_access_invite(token)?;

     // This can happen if the user who received the invite used a different email to signup.
@@ -352,7 +354,10 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
         None => err!("Invited user not found"),
     };

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
+    // We need to search for the uuid in combination with the email, since we do not yet store the uuid of the grantee in the database.
+    // The uuid of the grantee gets stored once accepted.
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantee_email(emer_id, &headers.user.email, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -367,10 +372,7 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
         && grantor_user.name == claims.grantor_name
         && grantor_user.email == claims.grantor_email
     {
-        match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
-            Ok(v) => v,
-            Err(e) => err!(e.to_string()),
-        }
+        emergency_access.accept_invite(&grantee_user.uuid, &grantee_user.email, &mut conn).await?;

         if CONFIG.mail_enabled() {
             mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
@@ -382,46 +384,27 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
     }
 }

-async fn accept_invite_process(
-    grantee_uuid: &str,
-    emergency_access: &mut EmergencyAccess,
-    grantee_email: &str,
-    conn: &mut DbConn,
-) -> EmptyResult {
-    if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email {
-        err!("User email does not match invite.");
-    }
-
-    if emergency_access.status == EmergencyAccessStatus::Accepted as i32 {
-        err!("Emergency contact already accepted.");
-    }
-
-    emergency_access.status = EmergencyAccessStatus::Accepted as i32;
-    emergency_access.grantee_uuid = Some(String::from(grantee_uuid));
-    emergency_access.email = None;
-    emergency_access.save(conn).await
-}
-
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct ConfirmData {
-    Key: String,
+    key: String,
 }

 #[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
 async fn confirm_emergency_access(
     emer_id: &str,
-    data: JsonUpcase<ConfirmData>,
+    data: Json<ConfirmData>,
     headers: Headers,
     mut conn: DbConn,
 ) -> JsonResult {
     check_emergency_access_enabled()?;
     let confirming_user = headers.user;
-    let data: ConfirmData = data.into_inner().data;
-    let key = data.Key;
+    let data: ConfirmData = data.into_inner();
+    let key = data.key;

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &confirming_user.uuid, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
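The deleted free function `accept_invite_process` lives on as an `accept_invite` method on the `EmergencyAccess` model (its new home is `src/db/models/emergency_access.rs`, also touched by this commit). A sketch of the validation it performs, reconstructed from the removed lines; the constant and error type here are stand-ins:

    struct EmergencyAccess {
        status: i32,
        grantee_uuid: Option<String>,
        email: Option<String>,
    }

    const ACCEPTED: i32 = 1; // stand-in for EmergencyAccessStatus::Accepted

    impl EmergencyAccess {
        // Same checks as the removed accept_invite_process: the invite email
        // must match, and an invite cannot be accepted twice.
        fn accept_invite(&mut self, grantee_uuid: &str, grantee_email: &str) -> Result<(), String> {
            if self.email.as_deref() != Some(grantee_email) {
                return Err("User email does not match invite.".to_string());
            }
            if self.status == ACCEPTED {
                return Err("Emergency contact already accepted.".to_string());
            }
            self.status = ACCEPTED;
            self.grantee_uuid = Some(grantee_uuid.to_string());
            self.email = None; // from here on the uuid identifies the grantee
            Ok(())
        }
    }

    fn main() {
        let mut ea = EmergencyAccess { status: 0, grantee_uuid: None, email: Some("g@example.com".to_string()) };
        assert!(ea.accept_invite("uuid-1", "g@example.com").is_ok());
        assert!(ea.accept_invite("uuid-1", "g@example.com").is_err()); // second accept fails
    }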
@@ -467,14 +450,13 @@ async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
     check_emergency_access_enabled()?;
     let initiating_user = headers.user;

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &initiating_user.uuid, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };

-    if emergency_access.status != EmergencyAccessStatus::Confirmed as i32
-        || emergency_access.grantee_uuid != Some(initiating_user.uuid)
-    {
+    if emergency_access.status != EmergencyAccessStatus::Confirmed as i32 {
         err!("Emergency access not valid.")
     }
@@ -506,14 +488,13 @@ async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
 async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
     check_emergency_access_enabled()?;

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };

-    if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
-        || emergency_access.grantor_uuid != headers.user.uuid
-    {
+    if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32 {
         err!("Emergency access not valid.")
     }
@@ -544,23 +525,18 @@ async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbC
 async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
     check_emergency_access_enabled()?;

-    let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
+    let mut emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantor_uuid(emer_id, &headers.user.uuid, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };

-    if (emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
-        && emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32)
-        || emergency_access.grantor_uuid != headers.user.uuid
+    if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
+        && emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32
     {
         err!("Emergency access not valid.")
     }

-    let grantor_user = match User::find_by_uuid(&headers.user.uuid, &mut conn).await {
-        Some(user) => user,
-        None => err!("Grantor user not found."),
-    };
-
     if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
         let grantee_user = match User::find_by_uuid(grantee_uuid, &mut conn).await {
             Some(user) => user,
@@ -571,7 +547,7 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
         emergency_access.save(&mut conn).await?;

         if CONFIG.mail_enabled() {
-            mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name).await?;
+            mail::send_emergency_access_recovery_rejected(&grantee_user.email, &headers.user.name).await?;
         }
         Ok(Json(emergency_access.to_json()))
     } else {
@@ -587,7 +563,8 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo
 async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
     check_emergency_access_enabled()?;

-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
+    let emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &headers.user.uuid, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -614,9 +591,9 @@ async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn
     }

     Ok(Json(json!({
-        "Ciphers": ciphers_json,
-        "KeyEncrypted": &emergency_access.key_encrypted,
-        "Object": "emergencyAccessView",
+        "ciphers": ciphers_json,
+        "keyEncrypted": &emergency_access.key_encrypted,
+        "object": "emergencyAccessView",
     })))
 }
@@ -625,7 +602,8 @@ async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
     check_emergency_access_enabled()?;
     let requesting_user = headers.user;

-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
+    let emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -640,39 +618,40 @@ async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
     };

     let result = json!({
-        "Kdf": grantor_user.client_kdf_type,
-        "KdfIterations": grantor_user.client_kdf_iter,
-        "KdfMemory": grantor_user.client_kdf_memory,
-        "KdfParallelism": grantor_user.client_kdf_parallelism,
-        "KeyEncrypted": &emergency_access.key_encrypted,
-        "Object": "emergencyAccessTakeover",
+        "kdf": grantor_user.client_kdf_type,
+        "kdfIterations": grantor_user.client_kdf_iter,
+        "kdfMemory": grantor_user.client_kdf_memory,
+        "kdfParallelism": grantor_user.client_kdf_parallelism,
+        "keyEncrypted": &emergency_access.key_encrypted,
+        "object": "emergencyAccessTakeover",
     });

     Ok(Json(result))
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EmergencyAccessPasswordData {
-    NewMasterPasswordHash: String,
-    Key: String,
+    new_master_password_hash: String,
+    key: String,
 }

 #[post("/emergency-access/<emer_id>/password", data = "<data>")]
 async fn password_emergency_access(
     emer_id: &str,
-    data: JsonUpcase<EmergencyAccessPasswordData>,
+    data: Json<EmergencyAccessPasswordData>,
     headers: Headers,
     mut conn: DbConn,
 ) -> EmptyResult {
     check_emergency_access_enabled()?;
-    let data: EmergencyAccessPasswordData = data.into_inner().data;
-    let new_master_password_hash = &data.NewMasterPasswordHash;
+    let data: EmergencyAccessPasswordData = data.into_inner();
+    let new_master_password_hash = &data.new_master_password_hash;
     //let key = &data.Key;

     let requesting_user = headers.user;

-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
+    let emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -687,7 +666,7 @@ async fn password_emergency_access(
     };

     // change grantor_user password
-    grantor_user.set_password(new_master_password_hash, Some(data.Key), true, None);
+    grantor_user.set_password(new_master_password_hash, Some(data.key), true, None);
     grantor_user.save(&mut conn).await?;

     // Disable TwoFactor providers since they will otherwise block logins
@@ -707,7 +686,8 @@
 #[get("/emergency-access/<emer_id>/policies")]
 async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
     let requesting_user = headers.user;
-    let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
+    let emergency_access =
+        match EmergencyAccess::find_by_uuid_and_grantee_uuid(emer_id, &requesting_user.uuid, &mut conn).await {
         Some(emer) => emer,
         None => err!("Emergency access not valid."),
     };
@@ -725,9 +705,9 @@ async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
     let policies_json: Vec<Value> = policies.await.iter().map(OrgPolicy::to_json).collect();

     Ok(Json(json!({
-        "Data": policies_json,
-        "Object": "list",
-        "ContinuationToken": null
+        "data": policies_json,
+        "object": "list",
+        "continuationToken": null
     })))
 }
@@ -766,7 +746,7 @@ pub async fn emergency_request_timeout_job(pool: DbPool) {
     for mut emer in emergency_access_list {
         // The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
         let recovery_allowed_at =
-            emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days));
+            emer.recovery_initiated_at.unwrap() + TimeDelta::try_days(i64::from(emer.wait_time_days)).unwrap();
         if recovery_allowed_at.le(&now) {
             // Only update the access status
             // Updating the whole record could cause issues when the emergency_notification_reminder_job is also active
@@ -822,10 +802,10 @@ pub async fn emergency_notification_reminder_job(pool: DbPool) {
         // The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
         // Calculate the day before the recovery will become active
         let final_recovery_reminder_at =
-            emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days - 1));
+            emer.recovery_initiated_at.unwrap() + TimeDelta::try_days(i64::from(emer.wait_time_days - 1)).unwrap();
         // Calculate if a day has passed since the previous notification, else no notification has been sent before
         let next_recovery_reminder_at = if let Some(last_notification_at) = emer.last_notification_at {
-            last_notification_at + Duration::days(1)
+            last_notification_at + TimeDelta::try_days(1).unwrap()
        } else {
             now
         };
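`Duration::days(n)` panics on out-of-range input, while the `TimeDelta::try_days(n)` form adopted here returns an `Option` and makes the overflow case explicit; unwrapping immediately is fine for small, bounded values like a wait time in days. A quick sketch:

    use chrono::{TimeDelta, Utc};

    fn main() {
        let wait_time_days: i64 = 7;

        // try_days returns Option<TimeDelta>: None if the day count would overflow.
        let delta = TimeDelta::try_days(wait_time_days).unwrap();
        let recovery_allowed_at = Utc::now() + delta;
        println!("recovery allowed at {recovery_allowed_at}");

        // An absurd value surfaces as None instead of a panic inside chrono.
        assert!(TimeDelta::try_days(i64::MAX).is_none());
    }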

53 src/api/core/events.rs

@@ -5,7 +5,7 @@ use rocket::{form::FromForm, serde::json::Json, Route};
 use serde_json::Value;

 use crate::{
-    api::{EmptyResult, JsonResult, JsonUpcaseVec},
+    api::{EmptyResult, JsonResult},
     auth::{AdminHeaders, Headers},
     db::{
         models::{Cipher, Event, UserOrganization},
@@ -22,7 +22,6 @@
 }

 #[derive(FromForm)]
-#[allow(non_snake_case)]
 struct EventRange {
     start: String,
     end: String,
@@ -53,9 +52,9 @@ async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders,
     };

     Ok(Json(json!({
-        "Data": events_json,
-        "Object": "list",
-        "ContinuationToken": get_continuation_token(&events_json),
+        "data": events_json,
+        "object": "list",
+        "continuationToken": get_continuation_token(&events_json),
     })))
 }
@@ -85,9 +84,9 @@ async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers,
     };

     Ok(Json(json!({
-        "Data": events_json,
-        "Object": "list",
-        "ContinuationToken": get_continuation_token(&events_json),
+        "data": events_json,
+        "object": "list",
+        "continuationToken": get_continuation_token(&events_json),
     })))
 }
@@ -119,9 +118,9 @@ async fn get_user_events(
     };

     Ok(Json(json!({
-        "Data": events_json,
-        "Object": "list",
-        "ContinuationToken": get_continuation_token(&events_json),
+        "data": events_json,
+        "object": "list",
+        "continuationToken": get_continuation_token(&events_json),
     })))
 }
@@ -145,33 +144,33 @@ pub fn main_routes() -> Vec<Route> {
     routes![post_events_collect,]
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct EventCollection {
     // Mandatory
-    Type: i32,
-    Date: String,
+    r#type: i32,
+    date: String,

     // Optional
-    CipherId: Option<String>,
-    OrganizationId: Option<String>,
+    cipher_id: Option<String>,
+    organization_id: Option<String>,
 }

 // Upstream:
 // https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
 // https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
 #[post("/collect", format = "application/json", data = "<data>")]
-async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn post_events_collect(data: Json<Vec<EventCollection>>, headers: Headers, mut conn: DbConn) -> EmptyResult {
     if !CONFIG.org_events_enabled() {
         return Ok(());
     }

-    for event in data.iter().map(|d| &d.data) {
-        let event_date = parse_date(&event.Date);
-        match event.Type {
+    for event in data.iter() {
+        let event_date = parse_date(&event.date);
+        match event.r#type {
             1000..=1099 => {
                 _log_user_event(
-                    event.Type,
+                    event.r#type,
                     &headers.user.uuid,
                     headers.device.atype,
                     Some(event_date),
@@ -181,9 +180,9 @@ async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Head
                 .await;
             }
             1600..=1699 => {
-                if let Some(org_uuid) = &event.OrganizationId {
+                if let Some(org_uuid) = &event.organization_id {
                     _log_event(
-                        event.Type,
+                        event.r#type,
                         org_uuid,
                         org_uuid,
                         &headers.user.uuid,
@@ -196,11 +195,11 @@ async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Head
                 }
             }
             _ => {
-                if let Some(cipher_uuid) = &event.CipherId {
+                if let Some(cipher_uuid) = &event.cipher_id {
                     if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
                         if let Some(org_uuid) = cipher.organization_uuid {
                             _log_event(
-                                event.Type,
+                                event.r#type,
                                 cipher_uuid,
                                 &org_uuid,
                                 &headers.user.uuid,
@@ -289,7 +288,7 @@ async fn _log_event(
     let mut event = Event::new(event_type, event_date);
     match event_type {
         // 1000..=1099 Are user events, they need to be logged via log_user_event()
-        // Collection Events
+        // Cipher Events
         1100..=1199 => {
             event.cipher_uuid = Some(String::from(source_uuid));
         }
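With `JsonUpcaseVec` gone, `/events/collect` deserializes straight into `Json<Vec<EventCollection>>`, and because `type` is a Rust keyword the camelCase key lands on the raw identifier `r#type`. A standalone sketch of that shape (the struct mirrors the diff; the payload is made up):

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct EventCollection {
        r#type: i32,   // serde sees the plain field name "type"
        date: String,
        cipher_id: Option<String>,       // optional keys default to None
        organization_id: Option<String>,
    }

    fn main() {
        let raw = r#"[
            {"type": 1000, "date": "2024-02-14T12:00:00.000Z"},
            {"type": 1600, "date": "2024-02-14T12:00:01.000Z", "organizationId": "org-1"}
        ]"#;
        let events: Vec<EventCollection> = serde_json::from_str(raw).unwrap();
        for event in events.iter() {
            match event.r#type {
                1000..=1099 => println!("user event on {}", event.date),
                1600..=1699 => println!("org event for {:?}", event.organization_id),
                _ => println!("cipher event for {:?}", event.cipher_id),
            }
        }
    }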

33 src/api/core/folders.rs

@@ -2,7 +2,7 @@ use rocket::serde::json::Json;
 use serde_json::Value;

 use crate::{
-    api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
+    api::{EmptyResult, JsonResult, Notify, UpdateType},
     auth::Headers,
     db::{models::*, DbConn},
 };
@@ -17,9 +17,9 @@ async fn get_folders(headers: Headers, mut conn: DbConn) -> Json<Value> {
     let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();

     Json(json!({
-        "Data": folders_json,
-        "Object": "list",
-        "ContinuationToken": null,
+        "data": folders_json,
+        "object": "list",
+        "continuationToken": null,
     }))
 }
@@ -38,16 +38,17 @@ async fn get_folder(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResul
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct FolderData {
-    pub Name: String,
+    pub name: String,
+    pub id: Option<String>,
 }

 #[post("/folders", data = "<data>")]
-async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
-    let data: FolderData = data.into_inner().data;
+async fn post_folders(data: Json<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
+    let data: FolderData = data.into_inner();

-    let mut folder = Folder::new(headers.user.uuid, data.Name);
+    let mut folder = Folder::new(headers.user.uuid, data.name);

     folder.save(&mut conn).await?;
     nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device.uuid, &mut conn).await;
@@ -56,25 +57,19 @@ async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn:
 }

 #[post("/folders/<uuid>", data = "<data>")]
-async fn post_folder(
-    uuid: &str,
-    data: JsonUpcase<FolderData>,
-    headers: Headers,
-    conn: DbConn,
-    nt: Notify<'_>,
-) -> JsonResult {
+async fn post_folder(uuid: &str, data: Json<FolderData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
     put_folder(uuid, data, headers, conn, nt).await
 }

 #[put("/folders/<uuid>", data = "<data>")]
 async fn put_folder(
     uuid: &str,
-    data: JsonUpcase<FolderData>,
+    data: Json<FolderData>,
     headers: Headers,
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> JsonResult {
-    let data: FolderData = data.into_inner().data;
+    let data: FolderData = data.into_inner();

     let mut folder = match Folder::find_by_uuid(uuid, &mut conn).await {
         Some(folder) => folder,
@@ -85,7 +80,7 @@ async fn put_folder(
         err!("Folder belongs to another user")
     }

-    folder.name = data.Name;
+    folder.name = data.name;

     folder.save(&mut conn).await?;
     nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device.uuid, &mut conn).await;
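`FolderData` also gains an optional `id`, apparently so the same payload type can be reused when a key rotation resubmits existing folders (the parallel `SendData` change later in this diff carries an explicit "Used for key rotations" comment). Because the field is an `Option`, ordinary creation payloads that omit it still deserialize cleanly:

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "camelCase")]
    pub struct FolderData {
        pub name: String,
        // Present only when an existing folder is being resubmitted.
        pub id: Option<String>,
    }

    fn main() {
        // Creation: no "id" key, so the field defaults to None.
        let create: FolderData = serde_json::from_str(r#"{"name": "2.abc|def"}"#).unwrap();
        assert!(create.id.is_none());

        // Key rotation: the client names the folder it re-encrypted.
        let rotate: FolderData =
            serde_json::from_str(r#"{"name": "2.new|blob", "id": "11111111-2222-3333-4444-555555555555"}"#).unwrap();
        assert_eq!(rotate.id.as_deref(), Some("11111111-2222-3333-4444-555555555555"));
    }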

70 src/api/core/mod.rs

@@ -49,19 +49,19 @@ pub fn events_routes() -> Vec<Route> {
 use rocket::{serde::json::Json, serde::json::Value, Catcher, Route};

 use crate::{
-    api::{JsonResult, JsonUpcase, Notify, UpdateType},
+    api::{JsonResult, Notify, UpdateType},
     auth::Headers,
     db::DbConn,
     error::Error,
     util::{get_reqwest_client, parse_experimental_client_feature_flags},
 };

-#[derive(Serialize, Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct GlobalDomain {
-    Type: i32,
-    Domains: Vec<String>,
-    Excluded: bool,
+    r#type: i32,
+    domains: Vec<String>,
+    excluded: bool,
 }

 const GLOBAL_DOMAINS: &str = include_str!("../../static/global_domains.json");
@@ -81,38 +81,38 @@ fn _get_eq_domains(headers: Headers, no_excluded: bool) -> Json<Value> {
     let mut globals: Vec<GlobalDomain> = from_str(GLOBAL_DOMAINS).unwrap();

     for global in &mut globals {
-        global.Excluded = excluded_globals.contains(&global.Type);
+        global.excluded = excluded_globals.contains(&global.r#type);
     }

     if no_excluded {
-        globals.retain(|g| !g.Excluded);
+        globals.retain(|g| !g.excluded);
     }

     Json(json!({
-        "EquivalentDomains": equivalent_domains,
-        "GlobalEquivalentDomains": globals,
-        "Object": "domains",
+        "equivalentDomains": equivalent_domains,
+        "globalEquivalentDomains": globals,
+        "object": "domains",
     }))
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct EquivDomainData {
-    ExcludedGlobalEquivalentDomains: Option<Vec<i32>>,
-    EquivalentDomains: Option<Vec<Vec<String>>>,
+    excluded_global_equivalent_domains: Option<Vec<i32>>,
+    equivalent_domains: Option<Vec<Vec<String>>>,
 }

 #[post("/settings/domains", data = "<data>")]
 async fn post_eq_domains(
-    data: JsonUpcase<EquivDomainData>,
+    data: Json<EquivDomainData>,
     headers: Headers,
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> JsonResult {
-    let data: EquivDomainData = data.into_inner().data;
+    let data: EquivDomainData = data.into_inner();

-    let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
-    let equivalent_domains = data.EquivalentDomains.unwrap_or_default();
+    let excluded_globals = data.excluded_global_equivalent_domains.unwrap_or_default();
+    let equivalent_domains = data.equivalent_domains.unwrap_or_default();

     let mut user = headers.user;
     use serde_json::to_string;
@@ -128,12 +128,7 @@ async fn post_eq_domains(
 }

 #[put("/settings/domains", data = "<data>")]
-async fn put_eq_domains(
-    data: JsonUpcase<EquivDomainData>,
-    headers: Headers,
-    conn: DbConn,
-    nt: Notify<'_>,
-) -> JsonResult {
+async fn put_eq_domains(data: Json<EquivDomainData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
     post_eq_domains(data, headers, conn, nt).await
 }
@@ -157,15 +152,15 @@ async fn hibp_breach(username: &str) -> JsonResult {
         Ok(Json(value))
     } else {
         Ok(Json(json!([{
-            "Name": "HaveIBeenPwned",
-            "Title": "Manual HIBP Check",
-            "Domain": "haveibeenpwned.com",
-            "BreachDate": "2019-08-18T00:00:00Z",
-            "AddedDate": "2019-08-18T00:00:00Z",
-            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
-            "LogoPath": "vw_static/hibp.png",
-            "PwnCount": 0,
-            "DataClasses": [
+            "name": "HaveIBeenPwned",
+            "title": "Manual HIBP Check",
+            "domain": "haveibeenpwned.com",
+            "breachDate": "2019-08-18T00:00:00Z",
+            "addedDate": "2019-08-18T00:00:00Z",
+            "description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
+            "logoPath": "vw_static/hibp.png",
+            "pwnCount": 0,
+            "dataClasses": [
                 "Error - No API key set!"
             ]
         }])))
@@ -191,14 +186,17 @@ fn version() -> Json<&'static str> {
 #[get("/config")]
 fn config() -> Json<Value> {
     let domain = crate::CONFIG.domain();
-    let feature_states = parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
+    let mut feature_states =
+        parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
+    // Force the new key rotation feature
+    feature_states.insert("key-rotation-improvements".to_string(), true);
     Json(json!({
         // Note: The clients use this version to handle backwards compatibility concerns
         // This means they expect a version that closely matches the Bitwarden server version
         // We should make sure that we keep this updated when we support the new server features
         // Version history:
         // - Individual cipher key encryption: 2023.9.1
-        "version": "2023.9.1",
+        "version": "2024.2.0",
         "gitHash": option_env!("GIT_REV"),
         "server": {
             "name": "Vaultwarden",

858 src/api/core/organizations.rs

File diff suppressed because it is too large

82 src/api/core/public.rs

@@ -1,13 +1,14 @@
 use chrono::Utc;
 use rocket::{
     request::{self, FromRequest, Outcome},
+    serde::json::Json,
     Request, Route,
 };
 use std::collections::HashSet;

 use crate::{
-    api::{EmptyResult, JsonUpcase},
+    api::EmptyResult,
     auth,
     db::{models::*, DbConn},
     mail, CONFIG,
@@ -18,43 +19,43 @@ pub fn routes() -> Vec<Route> {
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct OrgImportGroupData {
-    Name: String,
-    ExternalId: String,
-    MemberExternalIds: Vec<String>,
+    name: String,
+    external_id: String,
+    member_external_ids: Vec<String>,
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct OrgImportUserData {
-    Email: String,
-    ExternalId: String,
-    Deleted: bool,
+    email: String,
+    external_id: String,
+    deleted: bool,
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct OrgImportData {
-    Groups: Vec<OrgImportGroupData>,
-    Members: Vec<OrgImportUserData>,
-    OverwriteExisting: bool,
-    // LargeImport: bool, // For now this will not be used, upstream uses this to prevent syncs of more then 2000 users or groups without the flag set.
+    groups: Vec<OrgImportGroupData>,
+    members: Vec<OrgImportUserData>,
+    overwrite_existing: bool,
+    // largeImport: bool, // For now this will not be used, upstream uses this to prevent syncs of more then 2000 users or groups without the flag set.
 }

 #[post("/public/organization/import", data = "<data>")]
-async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut conn: DbConn) -> EmptyResult {
+async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: DbConn) -> EmptyResult {
     // Most of the logic for this function can be found here
     // https://github.com/bitwarden/server/blob/fd892b2ff4547648a276734fb2b14a8abae2c6f5/src/Core/Services/Implementations/OrganizationService.cs#L1797
     let org_id = token.0;
-    let data = data.into_inner().data;
+    let data = data.into_inner();

-    for user_data in &data.Members {
-        if user_data.Deleted {
+    for user_data in &data.members {
+        if user_data.deleted {
             // If user is marked for deletion and it exists, revoke it
             if let Some(mut user_org) =
-                UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
+                UserOrganization::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await
             {
                 // Only revoke a user if it is not the last confirmed owner
                 let revoked = if user_org.atype == UserOrgType::Owner
@@ -72,27 +73,27 @@
                     user_org.revoke()
                 };

-                let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
+                let ext_modified = user_org.set_external_id(Some(user_data.external_id.clone()));
                 if revoked || ext_modified {
                     user_org.save(&mut conn).await?;
                 }
             }
         // If user is part of the organization, restore it
         } else if let Some(mut user_org) =
-            UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
+            UserOrganization::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await
         {
             let restored = user_org.restore();
-            let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
+            let ext_modified = user_org.set_external_id(Some(user_data.external_id.clone()));
             if restored || ext_modified {
                 user_org.save(&mut conn).await?;
             }
         } else {
             // If user is not part of the organization
-            let user = match User::find_by_mail(&user_data.Email, &mut conn).await {
+            let user = match User::find_by_mail(&user_data.email, &mut conn).await {
                 Some(user) => user, // exists in vaultwarden
                 None => {
                     // User does not exist yet
-                    let mut new_user = User::new(user_data.Email.clone());
+                    let mut new_user = User::new(user_data.email.clone());
                     new_user.save(&mut conn).await?;

                     if !CONFIG.mail_enabled() {
@@ -109,7 +110,7 @@
             };

             let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
-            new_org_user.set_external_id(Some(user_data.ExternalId.clone()));
+            new_org_user.set_external_id(Some(user_data.external_id.clone()));
             new_org_user.access_all = false;
             new_org_user.atype = UserOrgType::User as i32;
             new_org_user.status = user_org_status;
@@ -123,7 +124,7 @@
             };

             mail::send_invite(
-                &user_data.Email,
+                &user_data.email,
                 &user.uuid,
                 Some(org_id.clone()),
                 Some(new_org_user.uuid),
@@ -136,12 +137,17 @@
     }

     if CONFIG.org_groups_enabled() {
-        for group_data in &data.Groups {
-            let group_uuid = match Group::find_by_external_id(&group_data.ExternalId, &mut conn).await {
+        for group_data in &data.groups {
+            let group_uuid = match Group::find_by_external_id_and_org(&group_data.external_id, &org_id, &mut conn).await
+            {
                 Some(group) => group.uuid,
                 None => {
-                    let mut group =
-                        Group::new(org_id.clone(), group_data.Name.clone(), false, Some(group_data.ExternalId.clone()));
+                    let mut group = Group::new(
+                        org_id.clone(),
+                        group_data.name.clone(),
+                        false,
+                        Some(group_data.external_id.clone()),
+                    );
                     group.save(&mut conn).await?;
                     group.uuid
                 }
@@ -149,7 +155,7 @@
             GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?;

-            for ext_id in &group_data.MemberExternalIds {
+            for ext_id in &group_data.member_external_ids {
                 if let Some(user_org) = UserOrganization::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await
                 {
                     let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
@@ -162,9 +168,9 @@
     }

     // If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true)
-    if data.OverwriteExisting {
+    if data.overwrite_existing {
         // Generate a HashSet to quickly verify if a member is listed or not.
-        let sync_members: HashSet<String> = data.Members.into_iter().map(|m| m.ExternalId).collect();
+        let sync_members: HashSet<String> = data.members.into_iter().map(|m| m.external_id).collect();
         for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await {
             if let Some(ref user_external_id) = user_org.external_id {
                 if !sync_members.contains(user_external_id) {
@@ -209,19 +215,15 @@ impl<'r> FromRequest<'r> for PublicToken {
             Err(_) => err_handler!("Invalid claim"),
         };
         // Check if time is between claims.nbf and claims.exp
-        let time_now = Utc::now().naive_utc().timestamp();
+        let time_now = Utc::now().timestamp();
         if time_now < claims.nbf {
             err_handler!("Token issued in the future");
         }
         if time_now > claims.exp {
             err_handler!("Token expired");
         }
-        // Check if claims.iss is host|claims.scope[0]
-        let host = match auth::Host::from_request(request).await {
-            Outcome::Success(host) => host,
-            _ => err_handler!("Error getting Host"),
-        };
-        let complete_host = format!("{}|{}", host.host, claims.scope[0]);
+        // Check if claims.iss is domain|claims.scope[0]
+        let complete_host = format!("{}|{}", CONFIG.domain_origin(), claims.scope[0]);
         if complete_host != claims.iss {
             err_handler!("Token not issued by this server");
         }
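Two separate fixes hide in that last hunk: `Utc::now().timestamp()` drops a redundant `.naive_utc()` hop (both yield the same Unix seconds), and the expected issuer is now derived from the configured `domain_origin()` rather than the request's Host header, so a spoofed Host can no longer influence token validation. The nbf/exp window check, as a compact standalone sketch:

    use chrono::Utc;

    struct Claims {
        nbf: i64, // not valid before (Unix seconds)
        exp: i64, // not valid after (Unix seconds)
    }

    fn check_time_window(claims: &Claims) -> Result<(), &'static str> {
        let time_now = Utc::now().timestamp();
        if time_now < claims.nbf {
            return Err("Token issued in the future");
        }
        if time_now > claims.exp {
            return Err("Token expired");
        }
        Ok(())
    }

    fn main() {
        let now = Utc::now().timestamp();
        let valid = Claims { nbf: now - 60, exp: now + 3600 };
        let expired = Claims { nbf: now - 7200, exp: now - 3600 };
        assert!(check_time_window(&valid).is_ok());
        assert_eq!(check_time_window(&expired), Err("Token expired"));
    }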

197 src/api/core/sends.rs

@@ -1,6 +1,6 @@
 use std::path::Path;

-use chrono::{DateTime, Duration, Utc};
+use chrono::{DateTime, TimeDelta, Utc};
 use num_traits::ToPrimitive;
 use rocket::form::Form;
 use rocket::fs::NamedFile;
@@ -9,7 +9,7 @@ use rocket::serde::json::Json;
 use serde_json::Value;

 use crate::{
-    api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
+    api::{ApiResult, EmptyResult, JsonResult, Notify, UpdateType},
     auth::{ClientIp, Headers, Host},
     db::{models::*, DbConn, DbPool},
     util::{NumberOrString, SafeString},
@@ -48,23 +48,26 @@ pub async fn purge_sends(pool: DbPool) {
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
-struct SendData {
-    Type: i32,
-    Key: String,
-    Password: Option<String>,
-    MaxAccessCount: Option<NumberOrString>,
-    ExpirationDate: Option<DateTime<Utc>>,
-    DeletionDate: DateTime<Utc>,
-    Disabled: bool,
-    HideEmail: Option<bool>,
+#[serde(rename_all = "camelCase")]
+pub struct SendData {
+    r#type: i32,
+    key: String,
+    password: Option<String>,
+    max_access_count: Option<NumberOrString>,
+    expiration_date: Option<DateTime<Utc>>,
+    deletion_date: DateTime<Utc>,
+    disabled: bool,
+    hide_email: Option<bool>,

     // Data field
-    Name: String,
-    Notes: Option<String>,
-    Text: Option<Value>,
-    File: Option<Value>,
-    FileLength: Option<NumberOrString>,
+    name: String,
+    notes: Option<String>,
+    text: Option<Value>,
+    file: Option<Value>,
+    file_length: Option<NumberOrString>,
+
+    // Used for key rotations
+    pub id: Option<String>,
 }
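The wire-format change above is the heart of this PR: the PascalCase fields (and the JsonUpcase wrapper that produced them) give way to #[serde(rename_all = "camelCase")], with the raw identifier r#type used because type is a Rust keyword. A minimal, self-contained sketch of the same pattern, assuming only serde and serde_json:

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct SendDataSketch {
        r#type: i32,                   // raw identifier: maps the JSON key "type"
        max_access_count: Option<i32>, // maps "maxAccessCount"
    }

    fn main() -> Result<(), serde_json::Error> {
        let s: SendDataSketch = serde_json::from_str(r#"{"type": 0, "maxAccessCount": 3}"#)?;
        assert_eq!(s.r#type, 0);
        assert_eq!(s.max_access_count, Some(3));
        Ok(())
    }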
 /// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
@@ -93,7 +96,7 @@ async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> Em
 /// Ref: https://bitwarden.com/help/article/policies/#send-options
 async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &mut DbConn) -> EmptyResult {
     let user_uuid = &headers.user.uuid;
-    let hide_email = data.HideEmail.unwrap_or(false);
+    let hide_email = data.hide_email.unwrap_or(false);
     if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn).await {
         err!(
             "Due to an Enterprise Policy, you are not allowed to hide your email address \
@@ -104,40 +107,40 @@ async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, c
 }

 fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
-    let data_val = if data.Type == SendType::Text as i32 {
-        data.Text
-    } else if data.Type == SendType::File as i32 {
-        data.File
+    let data_val = if data.r#type == SendType::Text as i32 {
+        data.text
+    } else if data.r#type == SendType::File as i32 {
+        data.file
     } else {
         err!("Invalid Send type")
     };

     let data_str = if let Some(mut d) = data_val {
-        d.as_object_mut().and_then(|o| o.remove("Response"));
+        d.as_object_mut().and_then(|o| o.remove("response"));
         serde_json::to_string(&d)?
     } else {
         err!("Send data not provided");
     };

-    if data.DeletionDate > Utc::now() + Duration::days(31) {
+    if data.deletion_date > Utc::now() + TimeDelta::try_days(31).unwrap() {
         err!(
             "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
         );
     }

-    let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc());
+    let mut send = Send::new(data.r#type, data.name, data_str, data.key, data.deletion_date.naive_utc());
     send.user_uuid = Some(user_uuid);
-    send.notes = data.Notes;
-    send.max_access_count = match data.MaxAccessCount {
+    send.notes = data.notes;
+    send.max_access_count = match data.max_access_count {
         Some(m) => Some(m.into_i32()?),
         _ => None,
     };
-    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
-    send.disabled = data.Disabled;
-    send.hide_email = data.HideEmail;
-    send.atype = data.Type;
+    send.expiration_date = data.expiration_date.map(|d| d.naive_utc());
+    send.disabled = data.disabled;
+    send.hide_email = data.hide_email;
+    send.atype = data.r#type;

-    send.set_password(data.Password.as_deref());
+    send.set_password(data.password.as_deref());

     Ok(send)
 }
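TimeDelta::try_days returns None instead of panicking on out-of-range values, which is why the 31-day cap now goes through the fallible constructor. A small sketch of the check, under the assumption of chrono 0.4.35 or later:

    use chrono::{DateTime, TimeDelta, Utc};

    fn deletion_date_ok(deletion_date: DateTime<Utc>) -> bool {
        // 31 days is far from TimeDelta's limits, so unwrap() cannot fail here.
        deletion_date <= Utc::now() + TimeDelta::try_days(31).unwrap()
    }

    fn main() {
        let in_a_week = Utc::now() + TimeDelta::try_days(7).unwrap();
        assert!(deletion_date_ok(in_a_week));
    }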
@@ -148,9 +151,9 @@ async fn get_sends(headers: Headers, mut conn: DbConn) -> Json<Value> {
     let sends_json: Vec<Value> = sends.await.iter().map(|s| s.to_json()).collect();

     Json(json!({
-        "Data": sends_json,
-        "Object": "list",
-        "ContinuationToken": null
+        "data": sends_json,
+        "object": "list",
+        "continuationToken": null
     }))
 }
@@ -169,13 +172,13 @@ async fn get_send(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult
 }

 #[post("/sends", data = "<data>")]
-async fn post_send(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
+async fn post_send(data: Json<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
     enforce_disable_send_policy(&headers, &mut conn).await?;

-    let data: SendData = data.into_inner().data;
+    let data: SendData = data.into_inner();
     enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

-    if data.Type == SendType::File as i32 {
+    if data.r#type == SendType::File as i32 {
         err!("File sends should use /api/sends/file")
     }
@@ -195,7 +198,7 @@ async fn post_send(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbCon
 #[derive(FromForm)]
 struct UploadData<'f> {
-    model: Json<crate::util::UpCase<SendData>>,
+    model: Json<SendData>,
     data: TempFile<'f>,
 }
@@ -215,7 +218,7 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
         model,
         mut data,
     } = data.into_inner();
-    let model = model.into_inner().data;
+    let model = model.into_inner();

     let Some(size) = data.len().to_i64() else {
         err!("Invalid send size");
@@ -263,9 +266,9 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
     let mut data_value: Value = serde_json::from_str(&send.data)?;
     if let Some(o) = data_value.as_object_mut() {
-        o.insert(String::from("Id"), Value::String(file_id));
-        o.insert(String::from("Size"), Value::Number(size.into()));
-        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size)));
+        o.insert(String::from("id"), Value::String(file_id));
+        o.insert(String::from("size"), Value::Number(size.into()));
+        o.insert(String::from("sizeName"), Value::String(crate::util::get_display_size(size)));
     }
     send.data = serde_json::to_string(&data_value)?;
@@ -285,18 +288,18 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
 // Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L190
 #[post("/sends/file/v2", data = "<data>")]
-async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn post_send_file_v2(data: Json<SendData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     enforce_disable_send_policy(&headers, &mut conn).await?;

-    let data = data.into_inner().data;
+    let data = data.into_inner();

-    if data.Type != SendType::File as i32 {
+    if data.r#type != SendType::File as i32 {
         err!("Send content is not a file");
     }

     enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

-    let file_length = match &data.FileLength {
+    let file_length = match &data.file_length {
         Some(m) => m.into_i64()?,
         _ => err!("Invalid send length"),
     };
@@ -331,9 +334,9 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con
     let mut data_value: Value = serde_json::from_str(&send.data)?;
     if let Some(o) = data_value.as_object_mut() {
-        o.insert(String::from("Id"), Value::String(file_id.clone()));
-        o.insert(String::from("Size"), Value::Number(file_length.into()));
-        o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(file_length)));
+        o.insert(String::from("id"), Value::String(file_id.clone()));
+        o.insert(String::from("size"), Value::Number(file_length.into()));
+        o.insert(String::from("sizeName"), Value::String(crate::util::get_display_size(file_length)));
     }
     send.data = serde_json::to_string(&data_value)?;
     send.save(&mut conn).await?;
@@ -392,15 +395,15 @@ async fn post_send_file_v2_data(
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct SendAccessData {
-    pub Password: Option<String>,
+    pub password: Option<String>,
 }

 #[post("/sends/access/<access_id>", data = "<data>")]
 async fn post_access(
     access_id: &str,
-    data: JsonUpcase<SendAccessData>,
+    data: Json<SendAccessData>,
     mut conn: DbConn,
     ip: ClientIp,
     nt: Notify<'_>,
@@ -431,7 +434,7 @@ async fn post_access(
     }

     if send.password_hash.is_some() {
-        match data.into_inner().data.Password {
+        match data.into_inner().password {
             Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
             Some(_) => err!("Invalid password", format!("IP: {}.", ip.ip)),
             None => err_code!("Password not provided", format!("IP: {}.", ip.ip), 401),
@@ -461,7 +464,7 @@ async fn post_access(
 async fn post_access_file(
     send_id: &str,
     file_id: &str,
-    data: JsonUpcase<SendAccessData>,
+    data: Json<SendAccessData>,
     host: Host,
     mut conn: DbConn,
     nt: Notify<'_>,
@@ -492,7 +495,7 @@ async fn post_access_file(
     }

     if send.password_hash.is_some() {
-        match data.into_inner().data.Password {
+        match data.into_inner().password {
             Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
             Some(_) => err!("Invalid password."),
             None => err_code!("Password not provided", 401),
@@ -515,9 +518,9 @@ async fn post_access_file(
     let token_claims = crate::auth::generate_send_claims(send_id, file_id);
     let token = crate::auth::encode_jwt(&token_claims);
     Ok(Json(json!({
-        "Object": "send-fileDownload",
-        "Id": file_id,
-        "Url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
+        "object": "send-fileDownload",
+        "id": file_id,
+        "url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
     })))
 }
@@ -532,16 +535,10 @@ async fn download_send(send_id: SafeString, file_id: SafeString, t: &str) -> Opt
 }

 #[put("/sends/<id>", data = "<data>")]
-async fn put_send(
-    id: &str,
-    data: JsonUpcase<SendData>,
-    headers: Headers,
-    mut conn: DbConn,
-    nt: Notify<'_>,
-) -> JsonResult {
+async fn put_send(id: &str, data: Json<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
     enforce_disable_send_policy(&headers, &mut conn).await?;

-    let data: SendData = data.into_inner().data;
+    let data: SendData = data.into_inner();
     enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;

     let mut send = match Send::find_by_uuid(id, &mut conn).await {
@@ -549,19 +546,38 @@ async fn put_send(
         None => err!("Send not found"),
     };

+    update_send_from_data(&mut send, data, &headers, &mut conn, &nt, UpdateType::SyncSendUpdate).await?;
+
+    Ok(Json(send.to_json()))
+}
+
+pub async fn update_send_from_data(
+    send: &mut Send,
+    data: SendData,
+    headers: &Headers,
+    conn: &mut DbConn,
+    nt: &Notify<'_>,
+    ut: UpdateType,
+) -> EmptyResult {
     if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
         err!("Send is not owned by user")
     }

-    if send.atype != data.Type {
+    if send.atype != data.r#type {
         err!("Sends can't change type")
     }

+    if data.deletion_date > Utc::now() + TimeDelta::try_days(31).unwrap() {
+        err!(
+            "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
+        );
+    }
+
     // When updating a file Send, we receive nulls in the File field, as it's immutable,
     // so we only need to update the data field in the Text case
-    if data.Type == SendType::Text as i32 {
-        let data_str = if let Some(mut d) = data.Text {
-            d.as_object_mut().and_then(|d| d.remove("Response"));
+    if data.r#type == SendType::Text as i32 {
+        let data_str = if let Some(mut d) = data.text {
+            d.as_object_mut().and_then(|d| d.remove("response"));
             serde_json::to_string(&d)?
         } else {
             err!("Send data not provided");
@@ -569,39 +585,28 @@ async fn put_send(
         send.data = data_str;
     }

-    if data.DeletionDate > Utc::now() + Duration::days(31) {
-        err!(
-            "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
-        );
-    }
-    send.name = data.Name;
-    send.akey = data.Key;
-    send.deletion_date = data.DeletionDate.naive_utc();
-    send.notes = data.Notes;
-    send.max_access_count = match data.MaxAccessCount {
+    send.name = data.name;
+    send.akey = data.key;
+    send.deletion_date = data.deletion_date.naive_utc();
+    send.notes = data.notes;
+    send.max_access_count = match data.max_access_count {
         Some(m) => Some(m.into_i32()?),
         _ => None,
     };
-    send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
-    send.hide_email = data.HideEmail;
-    send.disabled = data.Disabled;
+    send.expiration_date = data.expiration_date.map(|d| d.naive_utc());
+    send.hide_email = data.hide_email;
+    send.disabled = data.disabled;

     // Only change the value if it's present
-    if let Some(password) = data.Password {
+    if let Some(password) = data.password {
         send.set_password(Some(&password));
     }

-    send.save(&mut conn).await?;
-    nt.send_send_update(
-        UpdateType::SyncSendUpdate,
-        &send,
-        &send.update_users_revision(&mut conn).await,
-        &headers.device.uuid,
-        &mut conn,
-    )
-    .await;
-
-    Ok(Json(send.to_json()))
+    send.save(conn).await?;
+    if ut != UpdateType::None {
+        nt.send_send_update(ut, send, &send.update_users_revision(conn).await, &headers.device.uuid, conn).await;
+    }
+    Ok(())
 }
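update_send_from_data is extracted so that other callers (the key-rotation path hinted at by the new SendData::id field) can reuse the update logic without emitting a websocket event, by passing UpdateType::None. A stripped-down sketch of that conditional-notification pattern, with hypothetical stand-ins for vaultwarden's Notify machinery:

    #[derive(PartialEq)]
    enum UpdateType {
        SyncSendUpdate,
        None, // callers that must stay silent, e.g. key rotation
    }

    fn after_save(ut: UpdateType) {
        if ut != UpdateType::None {
            println!("notify clients: send updated");
        }
    }

    fn main() {
        after_save(UpdateType::SyncSendUpdate); // normal PUT /sends/<id> path
        after_save(UpdateType::None);           // silent path
    }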
 #[delete("/sends/<id>")]

63 src/api/core/two_factor/authenticator.rs

@@ -3,10 +3,7 @@ use rocket::serde::json::Json;
 use rocket::Route;

 use crate::{
-    api::{
-        core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase,
-        PasswordOrOtpData,
-    },
+    api::{core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, PasswordOrOtpData},
     auth::{ClientIp, Headers},
     crypto,
     db::{
@@ -23,8 +20,8 @@ pub fn routes() -> Vec<Route> {
 }

 #[post("/two-factor/get-authenticator", data = "<data>")]
-async fn generate_authenticator(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordOrOtpData = data.into_inner().data;
+async fn generate_authenticator(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;
     data.validate(&user, false, &mut conn).await?;
@@ -38,36 +35,32 @@ async fn generate_authenticator(data: JsonUpcase<PasswordOrOtpData>, headers: He
     };

     Ok(Json(json!({
-        "Enabled": enabled,
-        "Key": key,
-        "Object": "twoFactorAuthenticator"
+        "enabled": enabled,
+        "key": key,
+        "object": "twoFactorAuthenticator"
     })))
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct EnableAuthenticatorData {
-    Key: String,
-    Token: NumberOrString,
-    MasterPasswordHash: Option<String>,
-    Otp: Option<String>,
+    key: String,
+    token: NumberOrString,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 #[post("/two-factor/authenticator", data = "<data>")]
-async fn activate_authenticator(
-    data: JsonUpcase<EnableAuthenticatorData>,
-    headers: Headers,
-    mut conn: DbConn,
-) -> JsonResult {
-    let data: EnableAuthenticatorData = data.into_inner().data;
-    let key = data.Key;
-    let token = data.Token.into_string();
+async fn activate_authenticator(data: Json<EnableAuthenticatorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EnableAuthenticatorData = data.into_inner();
+    let key = data.key;
+    let token = data.token.into_string();

     let mut user = headers.user;

     PasswordOrOtpData {
-        MasterPasswordHash: data.MasterPasswordHash,
-        Otp: data.Otp,
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
     }
     .validate(&user, true, &mut conn)
     .await?;
@@ -90,18 +83,14 @@ async fn activate_authenticator(
     log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

     Ok(Json(json!({
-        "Enabled": true,
-        "Key": key,
-        "Object": "twoFactorAuthenticator"
+        "enabled": true,
+        "key": key,
+        "object": "twoFactorAuthenticator"
     })))
 }

 #[put("/two-factor/authenticator", data = "<data>")]
-async fn activate_authenticator_put(
-    data: JsonUpcase<EnableAuthenticatorData>,
-    headers: Headers,
-    conn: DbConn,
-) -> JsonResult {
+async fn activate_authenticator_put(data: Json<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
     activate_authenticator(data, headers, conn).await
 }
@@ -156,8 +145,8 @@ pub async fn validate_totp_code(
         let time = (current_timestamp + step * 30i64) as u64;
         let generated = totp_custom::<Sha1>(30, 6, &decoded_secret, time);

-        // Check the the given code equals the generated and if the time_step is larger then the one last used.
-        if generated == totp_code && time_step > i64::from(twofactor.last_used) {
+        // Check that the given code equals the generated one and that the time_step is larger than the last one used.
+        if generated == totp_code && time_step > twofactor.last_used {
             // If the step is not 0, the time has drifted on either the server or the client side.
             if step != 0 {
                 warn!("TOTP Time drift detected. The step offset is {}", step);
@@ -165,10 +154,10 @@ pub async fn validate_totp_code(
             // Save the last used time step so only totp time steps higher than this one are allowed.
             // This will also save a newly created twofactor if the code is correct.
-            twofactor.last_used = time_step as i32;
+            twofactor.last_used = time_step;
             twofactor.save(conn).await?;
             return Ok(());
-        } else if generated == totp_code && time_step <= i64::from(twofactor.last_used) {
+        } else if generated == totp_code && time_step <= twofactor.last_used {
             warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps);
             err!(
                 format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
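The last_used type change (i32 to i64) removes the i64::from conversions around the step comparison. For context, a self-contained sketch of the step-window validation, assuming the totp-lite crate used above and stubbing out the persisted last_used value:

    use totp_lite::{totp_custom, Sha1};

    fn validate(secret: &[u8], submitted: &str, now_unix: u64, last_used: i64) -> bool {
        let current_step = (now_unix / 30) as i64;
        // Accept one step behind or ahead to tolerate clock drift.
        for step in -1i64..=1 {
            let time_step = current_step + step;
            let generated = totp_custom::<Sha1>(30, 6, secret, (time_step * 30) as u64);
            // Replay protection: each time step may be consumed only once.
            if generated == submitted && time_step > last_used {
                return true;
            }
        }
        false
    }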

60 src/api/core/two_factor/duo.rs

@@ -5,7 +5,7 @@ use rocket::Route;
 use crate::{
     api::{
-        core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase,
+        core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult,
         PasswordOrOtpData,
     },
     auth::Headers,
@@ -92,8 +92,8 @@ impl DuoStatus {
 const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";

 #[post("/two-factor/get-duo", data = "<data>")]
-async fn get_duo(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordOrOtpData = data.into_inner().data;
+async fn get_duo(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;
     data.validate(&user, false, &mut conn).await?;
@@ -109,16 +109,16 @@ async fn get_duo(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn
     let json = if let Some(data) = data {
         json!({
-            "Enabled": enabled,
-            "Host": data.host,
-            "SecretKey": data.sk,
-            "IntegrationKey": data.ik,
-            "Object": "twoFactorDuo"
+            "enabled": enabled,
+            "host": data.host,
+            "secretKey": data.sk,
+            "integrationKey": data.ik,
+            "object": "twoFactorDuo"
         })
     } else {
         json!({
-            "Enabled": enabled,
-            "Object": "twoFactorDuo"
+            "enabled": enabled,
+            "object": "twoFactorDuo"
         })
     };
@@ -126,21 +126,21 @@ async fn get_duo(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case, dead_code)]
+#[serde(rename_all = "camelCase")]
 struct EnableDuoData {
-    Host: String,
-    SecretKey: String,
-    IntegrationKey: String,
-    MasterPasswordHash: Option<String>,
-    Otp: Option<String>,
+    host: String,
+    secret_key: String,
+    integration_key: String,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 impl From<EnableDuoData> for DuoData {
     fn from(d: EnableDuoData) -> Self {
         Self {
-            host: d.Host,
-            ik: d.IntegrationKey,
-            sk: d.SecretKey,
+            host: d.host,
+            ik: d.integration_key,
+            sk: d.secret_key,
         }
     }
 }
@@ -151,17 +151,17 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool {
         st.is_empty() || s == DISABLED_MESSAGE_DEFAULT
     }

-    !empty_or_default(&data.Host) && !empty_or_default(&data.SecretKey) && !empty_or_default(&data.IntegrationKey)
+    !empty_or_default(&data.host) && !empty_or_default(&data.secret_key) && !empty_or_default(&data.integration_key)
 }

 #[post("/two-factor/duo", data = "<data>")]
-async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: EnableDuoData = data.into_inner().data;
+async fn activate_duo(data: Json<EnableDuoData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EnableDuoData = data.into_inner();
     let mut user = headers.user;

     PasswordOrOtpData {
-        MasterPasswordHash: data.MasterPasswordHash.clone(),
-        Otp: data.Otp.clone(),
+        master_password_hash: data.master_password_hash.clone(),
+        otp: data.otp.clone(),
     }
     .validate(&user, true, &mut conn)
     .await?;
@@ -184,16 +184,16 @@ async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut con
     log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

     Ok(Json(json!({
-        "Enabled": true,
-        "Host": data.host,
-        "SecretKey": data.sk,
-        "IntegrationKey": data.ik,
-        "Object": "twoFactorDuo"
+        "enabled": true,
+        "host": data.host,
+        "secretKey": data.sk,
+        "integrationKey": data.ik,
+        "object": "twoFactorDuo"
     })))
 }

 #[put("/two-factor/duo", data = "<data>")]
-async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn activate_duo_put(data: Json<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
     activate_duo(data, headers, conn).await
 }

99 src/api/core/two_factor/email.rs

@@ -1,16 +1,16 @@
-use chrono::{Duration, NaiveDateTime, Utc};
+use chrono::{DateTime, TimeDelta, Utc};
 use rocket::serde::json::Json;
 use rocket::Route;

 use crate::{
     api::{
         core::{log_user_event, two_factor::_generate_recover_code},
-        EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
+        EmptyResult, JsonResult, PasswordOrOtpData,
     },
     auth::Headers,
     crypto,
     db::{
-        models::{EventType, TwoFactor, TwoFactorType},
+        models::{EventType, TwoFactor, TwoFactorType, User},
         DbConn,
     },
     error::{Error, MapResult},
@@ -22,28 +22,28 @@ pub fn routes() -> Vec<Route> {
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct SendEmailLoginData {
-    Email: String,
-    MasterPasswordHash: String,
+    email: String,
+    master_password_hash: String,
 }

 /// User is trying to login and wants to use email 2FA.
 /// Does not require Bearer token
 #[post("/two-factor/send-email-login", data = "<data>")] // JsonResult
-async fn send_email_login(data: JsonUpcase<SendEmailLoginData>, mut conn: DbConn) -> EmptyResult {
-    let data: SendEmailLoginData = data.into_inner().data;
+async fn send_email_login(data: Json<SendEmailLoginData>, mut conn: DbConn) -> EmptyResult {
+    let data: SendEmailLoginData = data.into_inner();

     use crate::db::models::User;

     // Get the user
-    let user = match User::find_by_mail(&data.Email, &mut conn).await {
+    let user = match User::find_by_mail(&data.email, &mut conn).await {
         Some(user) => user,
         None => err!("Username or password is incorrect. Try again."),
     };

     // Check password
-    if !user.check_valid_password(&data.MasterPasswordHash) {
+    if !user.check_valid_password(&data.master_password_hash) {
         err!("Username or password is incorrect. Try again.")
     }
@@ -76,8 +76,8 @@ pub async fn send_token(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
 /// When user clicks on Manage email 2FA show the user the related information
 #[post("/two-factor/get-email", data = "<data>")]
-async fn get_email(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordOrOtpData = data.into_inner().data;
+async fn get_email(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;
     data.validate(&user, false, &mut conn).await?;
@@ -92,30 +92,30 @@ async fn get_email(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut co
     };

     Ok(Json(json!({
-        "Email": mfa_email,
-        "Enabled": enabled,
-        "Object": "twoFactorEmail"
+        "email": mfa_email,
+        "enabled": enabled,
+        "object": "twoFactorEmail"
     })))
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct SendEmailData {
     /// Email where 2FA codes will be sent to; it can differ from the user's account email.
-    Email: String,
-    MasterPasswordHash: Option<String>,
-    Otp: Option<String>,
+    email: String,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 /// Send a verification email to the specified email address to check whether it exists/belongs to user.
 #[post("/two-factor/send-email", data = "<data>")]
-async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    let data: SendEmailData = data.into_inner().data;
+async fn send_email(data: Json<SendEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+    let data: SendEmailData = data.into_inner();
     let user = headers.user;

     PasswordOrOtpData {
-        MasterPasswordHash: data.MasterPasswordHash,
-        Otp: data.Otp,
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
     }
     .validate(&user, false, &mut conn)
     .await?;
@@ -131,7 +131,7 @@ async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn:
     }

     let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
-    let twofactor_data = EmailTokenData::new(data.Email, generated_token);
+    let twofactor_data = EmailTokenData::new(data.email, generated_token);

     // Uses EmailVerificationChallenge as type to show that it's not verified yet.
     let twofactor = TwoFactor::new(user.uuid, TwoFactorType::EmailVerificationChallenge, twofactor_data.to_json());
@@ -143,24 +143,24 @@ async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn:
 }

 #[derive(Deserialize, Serialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EmailData {
-    Email: String,
-    Token: String,
-    MasterPasswordHash: Option<String>,
-    Otp: Option<String>,
+    email: String,
+    token: String,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 /// Verify email belongs to user and can be used for 2FA email codes.
 #[put("/two-factor/email", data = "<data>")]
-async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: EmailData = data.into_inner().data;
+async fn email(data: Json<EmailData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EmailData = data.into_inner();
     let mut user = headers.user;

     // This is the last step in the verification process, delete the otp directly afterwards
     PasswordOrOtpData {
-        MasterPasswordHash: data.MasterPasswordHash,
-        Otp: data.Otp,
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
     }
     .validate(&user, true, &mut conn)
     .await?;
@@ -176,7 +176,7 @@ async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn)
         _ => err!("No token available"),
     };

-    if !crypto::ct_eq(issued_token, data.Token) {
+    if !crypto::ct_eq(issued_token, data.token) {
         err!("Token is invalid")
     }
@@ -190,9 +190,9 @@ async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn)
     log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

     Ok(Json(json!({
-        "Email": email_data.email,
-        "Enabled": "true",
-        "Object": "twoFactorEmail"
+        "email": email_data.email,
+        "enabled": "true",
+        "object": "twoFactorEmail"
     })))
 }
@@ -232,9 +232,9 @@ pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, c
     twofactor.data = email_data.to_json();
     twofactor.save(conn).await?;

-    let date = NaiveDateTime::from_timestamp_opt(email_data.token_sent, 0).expect("Email token timestamp invalid.");
+    let date = DateTime::from_timestamp(email_data.token_sent, 0).expect("Email token timestamp invalid.").naive_utc();
     let max_time = CONFIG.email_expiration_time() as i64;
-    if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
+    if date + TimeDelta::try_seconds(max_time).unwrap() < Utc::now().naive_utc() {
         err!(
             "Token has expired",
             ErrorEvent {
@@ -265,14 +265,14 @@ impl EmailTokenData {
         EmailTokenData {
             email,
             last_token: Some(token),
-            token_sent: Utc::now().naive_utc().timestamp(),
+            token_sent: Utc::now().timestamp(),
             attempts: 0,
         }
     }

     pub fn set_token(&mut self, token: String) {
         self.last_token = Some(token);
-        self.token_sent = Utc::now().naive_utc().timestamp();
+        self.token_sent = Utc::now().timestamp();
     }
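NaiveDateTime::from_timestamp_opt is deprecated in current chrono; DateTime::from_timestamp plus .naive_utc() is the drop-in replacement applied above. A compact sketch of the resulting expiry check, with illustrative values for the token timestamp and window:

    use chrono::{DateTime, TimeDelta, Utc};

    fn token_expired(token_sent: i64, max_secs: i64) -> bool {
        let date = DateTime::from_timestamp(token_sent, 0)
            .expect("token timestamp invalid")
            .naive_utc();
        date + TimeDelta::try_seconds(max_secs).unwrap() < Utc::now().naive_utc()
    }

    fn main() {
        let sent = Utc::now().timestamp() - 1_000;
        assert!(token_expired(sent, 600)); // sent 1000s ago, 600s window: expired
    }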
     pub fn reset_token(&mut self) {
@@ -297,6 +297,15 @@ impl EmailTokenData {
     }
 }

+pub async fn activate_email_2fa(user: &User, conn: &mut DbConn) -> EmptyResult {
+    if user.verified_at.is_none() {
+        err!("Auto-enabling of email 2FA failed because the user's email address has not been verified!");
+    }
+    let twofactor_data = EmailTokenData::new(user.email.clone(), String::new());
+    let twofactor = TwoFactor::new(user.uuid.clone(), TwoFactorType::Email, twofactor_data.to_json());
+    twofactor.save(conn).await
+}
+
 /// Takes an email address and obscures it by replacing it with asterisks except two characters.
 pub fn obscure_email(email: &str) -> String {
     let split: Vec<&str> = email.rsplitn(2, '@').collect();
@@ -318,6 +327,14 @@ pub fn obscure_email(email: &str) -> String {
     format!("{}@{}", new_name, &domain)
 }
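The masking is easiest to see by example. This standalone sketch mimics (but does not replace) the common case of obscure_email for a local part longer than three characters; the exact behaviour is defined by the function above:

    fn mask_local_part(email: &str) -> String {
        // Keep the first two characters of the local part and star the rest;
        // the real implementation fully stars very short local parts.
        let (name, domain) = email.split_once('@').expect("expected an email address");
        let visible: String = name.chars().take(2).collect();
        let stars = "*".repeat(name.chars().count().saturating_sub(2));
        format!("{visible}{stars}@{domain}")
    }

    fn main() {
        assert_eq!(mask_local_part("john.doe@example.com"), "jo******@example.com");
    }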
+pub async fn find_and_activate_email_2fa(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+    if let Some(user) = User::find_by_uuid(user_uuid, conn).await {
+        activate_email_2fa(&user, conn).await
+    } else {
+        err!("User not found!");
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;

64 src/api/core/two_factor/mod.rs

@@ -1,4 +1,4 @@
-use chrono::{Duration, Utc};
+use chrono::{TimeDelta, Utc};
 use data_encoding::BASE32;
 use rocket::serde::json::Json;
 use rocket::Route;
@@ -7,7 +7,7 @@ use serde_json::Value;
 use crate::{
     api::{
         core::{log_event, log_user_event},
-        EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
+        EmptyResult, JsonResult, PasswordOrOtpData,
     },
     auth::{ClientHeaders, Headers},
     crypto,
@@ -50,52 +50,52 @@ async fn get_twofactor(headers: Headers, mut conn: DbConn) -> Json<Value> {
     let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();

     Json(json!({
-        "Data": twofactors_json,
-        "Object": "list",
-        "ContinuationToken": null,
+        "data": twofactors_json,
+        "object": "list",
+        "continuationToken": null,
     }))
 }

 #[post("/two-factor/get-recover", data = "<data>")]
-async fn get_recover(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: PasswordOrOtpData = data.into_inner().data;
+async fn get_recover(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;
     data.validate(&user, true, &mut conn).await?;

     Ok(Json(json!({
-        "Code": user.totp_recover,
-        "Object": "twoFactorRecover"
+        "code": user.totp_recover,
+        "object": "twoFactorRecover"
     })))
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct RecoverTwoFactor {
-    MasterPasswordHash: String,
-    Email: String,
-    RecoveryCode: String,
+    master_password_hash: String,
+    email: String,
+    recovery_code: String,
 }

 #[post("/two-factor/recover", data = "<data>")]
-async fn recover(data: JsonUpcase<RecoverTwoFactor>, client_headers: ClientHeaders, mut conn: DbConn) -> JsonResult {
-    let data: RecoverTwoFactor = data.into_inner().data;
+async fn recover(data: Json<RecoverTwoFactor>, client_headers: ClientHeaders, mut conn: DbConn) -> JsonResult {
+    let data: RecoverTwoFactor = data.into_inner();

     use crate::db::models::User;

     // Get the user
-    let mut user = match User::find_by_mail(&data.Email, &mut conn).await {
+    let mut user = match User::find_by_mail(&data.email, &mut conn).await {
         Some(user) => user,
         None => err!("Username or password is incorrect. Try again."),
     };

     // Check password
-    if !user.check_valid_password(&data.MasterPasswordHash) {
+    if !user.check_valid_password(&data.master_password_hash) {
         err!("Username or password is incorrect. Try again.")
     }

     // Check if recovery code is correct
-    if !user.check_valid_recovery_code(&data.RecoveryCode) {
+    if !user.check_valid_recovery_code(&data.recovery_code) {
         err!("Recovery code is incorrect. Try again.")
     }
@@ -127,27 +127,27 @@ async fn _generate_recover_code(user: &mut User, conn: &mut DbConn) {
 }

 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct DisableTwoFactorData {
-    MasterPasswordHash: Option<String>,
-    Otp: Option<String>,
-    Type: NumberOrString,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
+    r#type: NumberOrString,
 }

 #[post("/two-factor/disable", data = "<data>")]
-async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: DisableTwoFactorData = data.into_inner().data;
+async fn disable_twofactor(data: Json<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: DisableTwoFactorData = data.into_inner();
     let user = headers.user;

     // Delete directly after a valid token has been provided
     PasswordOrOtpData {
-        MasterPasswordHash: data.MasterPasswordHash,
-        Otp: data.Otp,
+        master_password_hash: data.master_password_hash,
+        otp: data.otp,
     }
     .validate(&user, true, &mut conn)
     .await?;

-    let type_ = data.Type.into_i32()?;
+    let type_ = data.r#type.into_i32()?;

     if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
         twofactor.delete(&mut conn).await?;
@@ -160,14 +160,14 @@ async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Head
     }

     Ok(Json(json!({
-        "Enabled": false,
-        "Type": type_,
-        "Object": "twoFactorProvider"
+        "enabled": false,
+        "type": type_,
+        "object": "twoFactorProvider"
     })))
 }

 #[put("/two-factor/disable", data = "<data>")]
-async fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn disable_twofactor_put(data: Json<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
     disable_twofactor(data, headers, conn).await
 }
@@ -259,7 +259,7 @@ pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
     };

     let now = Utc::now().naive_utc();
-    let time_limit = Duration::minutes(CONFIG.incomplete_2fa_time_limit());
+    let time_limit = TimeDelta::try_minutes(CONFIG.incomplete_2fa_time_limit()).unwrap();
     let time_before = now - time_limit;
     let incomplete_logins = TwoFactorIncomplete::find_logins_before(&time_before, &mut conn).await;
     for login in incomplete_logins {
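The cutoff for incomplete-2FA reminders is computed the same way as before, only through the fallible TimeDelta constructor. A sketch with a hypothetical configured limit standing in for CONFIG.incomplete_2fa_time_limit():

    use chrono::{TimeDelta, Utc};

    fn main() {
        let limit_minutes: i64 = 10; // hypothetical configured value
        let now = Utc::now().naive_utc();
        let time_limit = TimeDelta::try_minutes(limit_minutes).unwrap();
        // Logins that started before this instant still lack their second factor.
        let time_before = now - time_limit;
        println!("notify for logins older than {time_before}");
    }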

25 src/api/core/two_factor/protected_actions.rs

@@ -1,8 +1,8 @@
-use chrono::{Duration, NaiveDateTime, Utc};
-use rocket::Route;
+use chrono::{DateTime, TimeDelta, Utc};
+use rocket::{serde::json::Json, Route};

 use crate::{
-    api::{EmptyResult, JsonUpcase},
+    api::EmptyResult,
     auth::Headers,
     crypto,
     db::{
@@ -18,7 +18,7 @@ pub fn routes() -> Vec<Route> {
 }

 /// Data stored in the TwoFactor table in the db
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Serialize, Deserialize)]
 pub struct ProtectedActionData {
     /// Token issued to validate the protected action
     pub token: String,
@@ -32,7 +32,7 @@ impl ProtectedActionData {
     pub fn new(token: String) -> Self {
         Self {
             token,
-            token_sent: Utc::now().naive_utc().timestamp(),
+            token_sent: Utc::now().timestamp(),
             attempts: 0,
         }
     }
@@ -82,23 +82,24 @@ async fn request_otp(headers: Headers, mut conn: DbConn) -> EmptyResult {
 }

 #[derive(Deserialize, Serialize, Debug)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct ProtectedActionVerify {
-    OTP: String,
+    #[serde(rename = "OTP", alias = "otp")]
+    otp: String,
 }
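The rename + alias pair above keeps the endpoint compatible with clients that still send the legacy uppercase key. A minimal sketch showing both spellings deserializing into the same field:

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    struct VerifySketch {
        #[serde(rename = "OTP", alias = "otp")]
        otp: String,
    }

    fn main() -> Result<(), serde_json::Error> {
        let legacy: VerifySketch = serde_json::from_str(r#"{"OTP": "123456"}"#)?;
        let current: VerifySketch = serde_json::from_str(r#"{"otp": "123456"}"#)?;
        assert_eq!(legacy.otp, current.otp);
        Ok(())
    }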
#[post("/accounts/verify-otp", data = "<data>")] #[post("/accounts/verify-otp", data = "<data>")]
async fn verify_otp(data: JsonUpcase<ProtectedActionVerify>, headers: Headers, mut conn: DbConn) -> EmptyResult { async fn verify_otp(data: Json<ProtectedActionVerify>, headers: Headers, mut conn: DbConn) -> EmptyResult {
if !CONFIG.mail_enabled() { if !CONFIG.mail_enabled() {
err!("Email is disabled for this server. Either enable email or login using your master password instead of login via device."); err!("Email is disabled for this server. Either enable email or login using your master password instead of login via device.");
} }
let user = headers.user; let user = headers.user;
let data: ProtectedActionVerify = data.into_inner().data; let data: ProtectedActionVerify = data.into_inner();
// Delete the token after one validation attempt // Delete the token after one validation attempt
// This endpoint only gets called for the vault export, and doesn't need a second attempt // This endpoint only gets called for the vault export, and doesn't need a second attempt
validate_protected_action_otp(&data.OTP, &user.uuid, true, &mut conn).await validate_protected_action_otp(&data.otp, &user.uuid, true, &mut conn).await
} }
pub async fn validate_protected_action_otp( pub async fn validate_protected_action_otp(
@ -122,9 +123,9 @@ pub async fn validate_protected_action_otp(
// Check if the token has expired (Using the email 2fa expiration time) // Check if the token has expired (Using the email 2fa expiration time)
let date = let date =
NaiveDateTime::from_timestamp_opt(pa_data.token_sent, 0).expect("Protected Action token timestamp invalid."); DateTime::from_timestamp(pa_data.token_sent, 0).expect("Protected Action token timestamp invalid.").naive_utc();
let max_time = CONFIG.email_expiration_time() as i64; let max_time = CONFIG.email_expiration_time() as i64;
if date + Duration::seconds(max_time) < Utc::now().naive_utc() { if date + TimeDelta::try_seconds(max_time).unwrap() < Utc::now().naive_utc() {
pa.delete(conn).await?; pa.delete(conn).await?;
err!("Token has expired") err!("Token has expired")
} }

160 src/api/core/two_factor/webauthn.rs

@@ -7,7 +7,7 @@ use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState,
 use crate::{
     api::{
         core::{log_user_event, two_factor::_generate_recover_code},
-        EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
+        EmptyResult, JsonResult, PasswordOrOtpData,
     },
     auth::Headers,
     db::{
@@ -96,20 +96,20 @@ pub struct WebauthnRegistration {
 impl WebauthnRegistration {
     fn to_json(&self) -> Value {
         json!({
-            "Id": self.id,
-            "Name": self.name,
+            "id": self.id,
+            "name": self.name,
             "migrated": self.migrated,
         })
     }
 }

 #[post("/two-factor/get-webauthn", data = "<data>")]
-async fn get_webauthn(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn get_webauthn(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     if !CONFIG.domain_set() {
         err!("`DOMAIN` environment variable is not set. Webauthn disabled")
     }

-    let data: PasswordOrOtpData = data.into_inner().data;
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;
     data.validate(&user, false, &mut conn).await?;
@@ -118,19 +118,15 @@ async fn get_webauthn(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut
     let registrations_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();

     Ok(Json(json!({
-        "Enabled": enabled,
-        "Keys": registrations_json,
-        "Object": "twoFactorWebAuthn"
+        "enabled": enabled,
+        "keys": registrations_json,
+        "object": "twoFactorWebAuthn"
     })))
 }

 #[post("/two-factor/get-webauthn-challenge", data = "<data>")]
-async fn generate_webauthn_challenge(
-    data: JsonUpcase<PasswordOrOtpData>,
-    headers: Headers,
-    mut conn: DbConn,
-) -> JsonResult {
-    let data: PasswordOrOtpData = data.into_inner().data;
+async fn generate_webauthn_challenge(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;
     data.validate(&user, false, &mut conn).await?;
@@ -161,102 +157,94 @@ async fn generate_webauthn_challenge(
 }

 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct EnableWebauthnData {
-    Id: NumberOrString, // 1..5
-    Name: String,
-    DeviceResponse: RegisterPublicKeyCredentialCopy,
-    MasterPasswordHash: Option<String>,
-    Otp: Option<String>,
+    id: NumberOrString, // 1..5
+    name: String,
+    device_response: RegisterPublicKeyCredentialCopy,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

-// This is copied from RegisterPublicKeyCredential to change the Response objects casing
 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct RegisterPublicKeyCredentialCopy {
-    pub Id: String,
-    pub RawId: Base64UrlSafeData,
-    pub Response: AuthenticatorAttestationResponseRawCopy,
-    pub Type: String,
+    pub id: String,
+    pub raw_id: Base64UrlSafeData,
+    pub response: AuthenticatorAttestationResponseRawCopy,
+    pub r#type: String,
 }

 // This is copied from AuthenticatorAttestationResponseRaw to change clientDataJSON to clientDataJson
 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct AuthenticatorAttestationResponseRawCopy {
-    pub AttestationObject: Base64UrlSafeData,
-    pub ClientDataJson: Base64UrlSafeData,
+    #[serde(rename = "AttestationObject", alias = "attestationObject")]
+    pub attestation_object: Base64UrlSafeData,
+    #[serde(rename = "clientDataJson", alias = "clientDataJSON")]
+    pub client_data_json: Base64UrlSafeData,
 }

 impl From<RegisterPublicKeyCredentialCopy> for RegisterPublicKeyCredential {
     fn from(r: RegisterPublicKeyCredentialCopy) -> Self {
         Self {
-            id: r.Id,
-            raw_id: r.RawId,
+            id: r.id,
+            raw_id: r.raw_id,
             response: AuthenticatorAttestationResponseRaw {
-                attestation_object: r.Response.AttestationObject,
-                client_data_json: r.Response.ClientDataJson,
+                attestation_object: r.response.attestation_object,
+                client_data_json: r.response.client_data_json,
             },
-            type_: r.Type,
+            type_: r.r#type,
         }
     }
 }

-// This is copied from PublicKeyCredential to change the Response objects casing
 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct PublicKeyCredentialCopy {
-    pub Id: String,
-    pub RawId: Base64UrlSafeData,
-    pub Response: AuthenticatorAssertionResponseRawCopy,
-    pub Extensions: Option<AuthenticationExtensionsClientOutputsCopy>,
-    pub Type: String,
+    pub id: String,
+    pub raw_id: Base64UrlSafeData,
+    pub response: AuthenticatorAssertionResponseRawCopy,
+    pub extensions: Option<AuthenticationExtensionsClientOutputs>,
+    pub r#type: String,
 }

 // This is copied from AuthenticatorAssertionResponseRaw to change clientDataJSON to clientDataJson
 #[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct AuthenticatorAssertionResponseRawCopy {
-    pub AuthenticatorData: Base64UrlSafeData,
-    pub ClientDataJson: Base64UrlSafeData,
-    pub Signature: Base64UrlSafeData,
-    pub UserHandle: Option<Base64UrlSafeData>,
-}
-
-#[derive(Debug, Deserialize)]
-#[allow(non_snake_case)]
-pub struct AuthenticationExtensionsClientOutputsCopy {
-    #[serde(default)]
-    pub Appid: bool,
+    pub authenticator_data: Base64UrlSafeData,
+    #[serde(rename = "clientDataJson", alias = "clientDataJSON")]
+    pub client_data_json: Base64UrlSafeData,
+    pub signature: Base64UrlSafeData,
+    pub user_handle: Option<Base64UrlSafeData>,
 }

 impl From<PublicKeyCredentialCopy> for PublicKeyCredential {
     fn from(r: PublicKeyCredentialCopy) -> Self {
         Self {
-            id: r.Id,
-            raw_id: r.RawId,
+            id: r.id,
+            raw_id: r.raw_id,
             response: AuthenticatorAssertionResponseRaw {
-                authenticator_data: r.Response.AuthenticatorData,
-                client_data_json: r.Response.ClientDataJson,
-                signature: r.Response.Signature,
-                user_handle: r.Response.UserHandle,
+                authenticator_data: r.response.authenticator_data,
+                client_data_json: r.response.client_data_json,
+                signature: r.response.signature,
+                user_handle: r.response.user_handle,
             },
-            extensions: r.Extensions.map(|e| AuthenticationExtensionsClientOutputs {
-                appid: e.Appid,
-            }),
-            type_: r.Type,
+            extensions: r.extensions,
+            type_: r.r#type,
         }
     }
 }
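The same alias technique carries the WebAuthn structs across the casing change: Bitwarden clients send clientDataJson, while the WebAuthn spec (and most tooling) spells it clientDataJSON, and one field now accepts both. A self-contained sketch with plain Strings in place of Base64UrlSafeData:

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct AssertionResponseSketch {
        authenticator_data: String, // matched as "authenticatorData" via rename_all
        #[serde(rename = "clientDataJson", alias = "clientDataJSON")]
        client_data_json: String,   // field-level rename overrides rename_all
        signature: String,
    }

    fn main() -> Result<(), serde_json::Error> {
        let spec = r#"{"authenticatorData":"aa","clientDataJSON":"bb","signature":"cc"}"#;
        let legacy = r#"{"authenticatorData":"aa","clientDataJson":"bb","signature":"cc"}"#;
        let a: AssertionResponseSketch = serde_json::from_str(spec)?;
        let b: AssertionResponseSketch = serde_json::from_str(legacy)?;
        assert_eq!(a.client_data_json, b.client_data_json);
        Ok(())
    }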
#[post("/two-factor/webauthn", data = "<data>")] #[post("/two-factor/webauthn", data = "<data>")]
async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, mut conn: DbConn) -> JsonResult { async fn activate_webauthn(data: Json<EnableWebauthnData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EnableWebauthnData = data.into_inner().data; let data: EnableWebauthnData = data.into_inner();
let mut user = headers.user; let mut user = headers.user;
PasswordOrOtpData { PasswordOrOtpData {
MasterPasswordHash: data.MasterPasswordHash, master_password_hash: data.master_password_hash,
Otp: data.Otp, otp: data.otp,
} }
.validate(&user, true, &mut conn) .validate(&user, true, &mut conn)
.await?; .await?;
@ -274,13 +262,13 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header
// Verify the credentials with the saved state // Verify the credentials with the saved state
let (credential, _data) = let (credential, _data) =
WebauthnConfig::load().register_credential(&data.DeviceResponse.into(), &state, |_| Ok(false))?; WebauthnConfig::load().register_credential(&data.device_response.into(), &state, |_| Ok(false))?;
let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &mut conn).await?.1; let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &mut conn).await?.1;
// TODO: Check for repeated ID's // TODO: Check for repeated ID's
registrations.push(WebauthnRegistration { registrations.push(WebauthnRegistration {
id: data.Id.into_i32()?, id: data.id.into_i32()?,
name: data.Name, name: data.name,
migrated: false, migrated: false,
credential, credential,
@ -296,28 +284,28 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header
let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect(); let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
Ok(Json(json!({ Ok(Json(json!({
"Enabled": true, "enabled": true,
"Keys": keys_json, "keys": keys_json,
"Object": "twoFactorU2f" "object": "twoFactorU2f"
}))) })))
} }
#[put("/two-factor/webauthn", data = "<data>")] #[put("/two-factor/webauthn", data = "<data>")]
async fn activate_webauthn_put(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult { async fn activate_webauthn_put(data: Json<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_webauthn(data, headers, conn).await activate_webauthn(data, headers, conn).await
} }
#[derive(Deserialize, Debug)] #[derive(Debug, Deserialize)]
#[allow(non_snake_case)] #[serde(rename_all = "camelCase")]
struct DeleteU2FData { struct DeleteU2FData {
Id: NumberOrString, id: NumberOrString,
MasterPasswordHash: String, master_password_hash: String,
} }
#[delete("/two-factor/webauthn", data = "<data>")] #[delete("/two-factor/webauthn", data = "<data>")]
async fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, mut conn: DbConn) -> JsonResult { async fn delete_webauthn(data: Json<DeleteU2FData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let id = data.data.Id.into_i32()?; let id = data.id.into_i32()?;
if !headers.user.check_valid_password(&data.data.MasterPasswordHash) { if !headers.user.check_valid_password(&data.master_password_hash) {
err!("Invalid password"); err!("Invalid password");
} }
@ -358,9 +346,9 @@ async fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, mut
let keys_json: Vec<Value> = data.iter().map(WebauthnRegistration::to_json).collect(); let keys_json: Vec<Value> = data.iter().map(WebauthnRegistration::to_json).collect();
Ok(Json(json!({ Ok(Json(json!({
"Enabled": true, "enabled": true,
"Keys": keys_json, "keys": keys_json,
"Object": "twoFactorU2f" "object": "twoFactorU2f"
}))) })))
} }
@ -413,8 +401,8 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut
), ),
}; };
let rsp: crate::util::UpCase<PublicKeyCredentialCopy> = serde_json::from_str(response)?; let rsp: PublicKeyCredentialCopy = serde_json::from_str(response)?;
let rsp: PublicKeyCredential = rsp.data.into(); let rsp: PublicKeyCredential = rsp.into();
let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1; let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1;
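
Side note on the serde pattern this migration leans on: `#[serde(rename_all = "camelCase")]` makes the JSON wire names camelCase, while `alias` keeps a legacy spelling accepted during the transition. A minimal, self-contained sketch (the `AssertionResponse` struct and its fields are illustrative, not code from this repo):

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct AssertionResponse {
        authenticator_data: String,
        // Clients historically sent "clientDataJSON"; the alias keeps both spellings working.
        #[serde(rename = "clientDataJson", alias = "clientDataJSON")]
        client_data_json: String,
    }

    fn main() {
        // A legacy-style payload still deserializes thanks to the alias.
        let legacy = r#"{"authenticatorData":"...","clientDataJSON":"..."}"#;
        assert!(serde_json::from_str::<AssertionResponse>(legacy).is_ok());
    }

This is what lets the hand-rolled `UpCase`/`JsonUpcase` wrapper and the `AuthenticationExtensionsClientOutputsCopy` mirror struct be deleted: serde's attribute machinery covers the renaming.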

src/api/core/two_factor/yubikey.rs (93 changed lines)

@@ -1,12 +1,12 @@
 use rocket::serde::json::Json;
 use rocket::Route;
 use serde_json::Value;
-use yubico::{config::Config, verify};
+use yubico::{config::Config, verify_async};

 use crate::{
     api::{
         core::{log_user_event, two_factor::_generate_recover_code},
-        EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
+        EmptyResult, JsonResult, PasswordOrOtpData,
     },
     auth::Headers,
     db::{

@@ -21,28 +21,30 @@ pub fn routes() -> Vec<Route> {
     routes![generate_yubikey, activate_yubikey, activate_yubikey_put,]
 }

-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct EnableYubikeyData {
-    Key1: Option<String>,
-    Key2: Option<String>,
-    Key3: Option<String>,
-    Key4: Option<String>,
-    Key5: Option<String>,
-    Nfc: bool,
-    MasterPasswordHash: Option<String>,
-    Otp: Option<String>,
+    key1: Option<String>,
+    key2: Option<String>,
+    key3: Option<String>,
+    key4: Option<String>,
+    key5: Option<String>,
+    nfc: bool,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 #[derive(Deserialize, Serialize, Debug)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 pub struct YubikeyMetadata {
-    Keys: Vec<String>,
-    pub Nfc: bool,
+    #[serde(rename = "keys", alias = "Keys")]
+    keys: Vec<String>,
+    #[serde(rename = "nfc", alias = "Nfc")]
+    pub nfc: bool,
 }

 fn parse_yubikeys(data: &EnableYubikeyData) -> Vec<String> {
-    let data_keys = [&data.Key1, &data.Key2, &data.Key3, &data.Key4, &data.Key5];
+    let data_keys = [&data.key1, &data.key2, &data.key3, &data.key4, &data.key5];

     data_keys.iter().filter_map(|e| e.as_ref().cloned()).collect()
 }

@@ -74,21 +76,18 @@ async fn verify_yubikey_otp(otp: String) -> EmptyResult {
     let config = Config::default().set_client_id(yubico_id).set_key(yubico_secret);

     match CONFIG.yubico_server() {
-        Some(server) => {
-            tokio::task::spawn_blocking(move || verify(otp, config.set_api_hosts(vec![server]))).await.unwrap()
-        }
-        None => tokio::task::spawn_blocking(move || verify(otp, config)).await.unwrap(),
+        Some(server) => verify_async(otp, config.set_api_hosts(vec![server])).await,
+        None => verify_async(otp, config).await,
     }
     .map_res("Failed to verify OTP")
-    .and(Ok(()))
 }

 #[post("/two-factor/get-yubikey", data = "<data>")]
-async fn generate_yubikey(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn generate_yubikey(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
     // Make sure the credentials are set
     get_yubico_credentials()?;

-    let data: PasswordOrOtpData = data.into_inner().data;
+    let data: PasswordOrOtpData = data.into_inner();
     let user = headers.user;

     data.validate(&user, false, &mut conn).await?;

@@ -101,29 +100,29 @@ async fn generate_yubikey(data: JsonUpcase<PasswordOrOtpData>, headers: Headers,
     if let Some(r) = r {
         let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?;

-        let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
+        let mut result = jsonify_yubikeys(yubikey_metadata.keys);

-        result["Enabled"] = Value::Bool(true);
-        result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
-        result["Object"] = Value::String("twoFactorU2f".to_owned());
+        result["enabled"] = Value::Bool(true);
+        result["nfc"] = Value::Bool(yubikey_metadata.nfc);
+        result["object"] = Value::String("twoFactorU2f".to_owned());

         Ok(Json(result))
     } else {
         Ok(Json(json!({
-            "Enabled": false,
-            "Object": "twoFactorU2f",
+            "enabled": false,
+            "object": "twoFactorU2f",
         })))
     }
 }

 #[post("/two-factor/yubikey", data = "<data>")]
-async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, mut conn: DbConn) -> JsonResult {
-    let data: EnableYubikeyData = data.into_inner().data;
+async fn activate_yubikey(data: Json<EnableYubikeyData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+    let data: EnableYubikeyData = data.into_inner();
     let mut user = headers.user;

     PasswordOrOtpData {
-        MasterPasswordHash: data.MasterPasswordHash.clone(),
-        Otp: data.Otp.clone(),
+        master_password_hash: data.master_password_hash.clone(),
+        otp: data.otp.clone(),
     }
     .validate(&user, true, &mut conn)
     .await?;

@@ -139,8 +138,8 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,
     if yubikeys.is_empty() {
         return Ok(Json(json!({
-            "Enabled": false,
-            "Object": "twoFactorU2f",
+            "enabled": false,
+            "object": "twoFactorU2f",
         })));
     }

@@ -157,8 +156,8 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,
     let yubikey_ids: Vec<String> = yubikeys.into_iter().map(|x| (x[..12]).to_owned()).collect();

     let yubikey_metadata = YubikeyMetadata {
-        Keys: yubikey_ids,
-        Nfc: data.Nfc,
+        keys: yubikey_ids,
+        nfc: data.nfc,
     };

     yubikey_data.data = serde_json::to_string(&yubikey_metadata).unwrap();

@@ -168,17 +167,17 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,
     log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;

-    let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
+    let mut result = jsonify_yubikeys(yubikey_metadata.keys);

-    result["Enabled"] = Value::Bool(true);
-    result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
-    result["Object"] = Value::String("twoFactorU2f".to_owned());
+    result["enabled"] = Value::Bool(true);
+    result["nfc"] = Value::Bool(yubikey_metadata.nfc);
+    result["object"] = Value::String("twoFactorU2f".to_owned());

     Ok(Json(result))
 }

 #[put("/two-factor/yubikey", data = "<data>")]
-async fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn activate_yubikey_put(data: Json<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
     activate_yubikey(data, headers, conn).await
 }

@@ -190,14 +189,10 @@ pub async fn validate_yubikey_login(response: &str, twofactor_data: &str) -> Emp
     let yubikey_metadata: YubikeyMetadata = serde_json::from_str(twofactor_data).expect("Can't parse Yubikey Metadata");
     let response_id = &response[..12];

-    if !yubikey_metadata.Keys.contains(&response_id.to_owned()) {
+    if !yubikey_metadata.keys.contains(&response_id.to_owned()) {
         err!("Given Yubikey is not registered");
     }

-    let result = verify_yubikey_otp(response.to_owned()).await;
-
-    match result {
-        Ok(_answer) => Ok(()),
-        Err(_e) => err!("Failed to verify Yubikey against OTP server"),
-    }
+    verify_yubikey_otp(response.to_owned()).await.map_res("Failed to verify Yubikey against OTP server")?;
+    Ok(())
 }
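
The `verify` → `verify_async` switch removes a `spawn_blocking` round-trip per OTP check. A generic sketch of that refactor, under stated assumptions: `check_blocking`/`check_async` are hypothetical stand-ins for the yubico crate's `verify`/`verify_async`, whose exact signatures are not reproduced here.

    // Before: a blocking verification call shuttled onto Tokio's blocking thread pool.
    async fn verify_before(otp: String) -> Result<(), &'static str> {
        tokio::task::spawn_blocking(move || check_blocking(&otp))
            .await
            .expect("blocking task panicked") // the old code unwrapped the JoinHandle too
    }

    // After: the library drives its own I/O on the async runtime; no extra thread, no unwrap.
    async fn verify_after(otp: String) -> Result<(), &'static str> {
        check_async(&otp).await
    }

    fn check_blocking(_otp: &str) -> Result<(), &'static str> { Ok(()) }
    async fn check_async(_otp: &str) -> Result<(), &'static str> { Ok(()) }

    #[tokio::main]
    async fn main() {
        assert!(verify_before("otp".into()).await.is_ok());
        assert!(verify_after("otp".into()).await.is_ok());
    }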

src/api/icons.rs (244 changed lines)

@@ -1,6 +1,6 @@
 use std::{
     net::IpAddr,
-    sync::Arc,
+    sync::{Arc, Mutex},
     time::{Duration, SystemTime},
 };

@@ -16,14 +16,13 @@ use rocket::{http::ContentType, response::Redirect, Route};
 use tokio::{
     fs::{create_dir_all, remove_file, symlink_metadata, File},
     io::{AsyncReadExt, AsyncWriteExt},
-    net::lookup_host,
 };

 use html5gum::{Emitter, HtmlString, InfallibleTokenizer, Readable, StringReader, Tokenizer};

 use crate::{
     error::Error,
-    util::{get_reqwest_client_builder, Cached},
+    util::{get_reqwest_client_builder, Cached, CustomDnsResolver, CustomResolverError},
     CONFIG,
 };

@@ -49,48 +48,32 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
     let icon_download_timeout = Duration::from_secs(CONFIG.icon_download_timeout());
     let pool_idle_timeout = Duration::from_secs(10);
     // Reuse the client between requests
-    let client = get_reqwest_client_builder()
-        .cookie_provider(Arc::clone(&cookie_store))
-        .timeout(icon_download_timeout)
-        .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
-        .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
-        .trust_dns(true)
-        .default_headers(default_headers.clone());
-
-    match client.build() {
-        Ok(client) => client,
-        Err(e) => {
-            error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
-            get_reqwest_client_builder()
-                .cookie_provider(cookie_store)
-                .timeout(icon_download_timeout)
-                .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
-                .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
-                .trust_dns(false)
-                .default_headers(default_headers)
-                .build()
-                .expect("Failed to build client")
-        }
-    }
+    get_reqwest_client_builder()
+        .cookie_provider(Arc::clone(&cookie_store))
+        .timeout(icon_download_timeout)
+        .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
+        .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
+        .dns_resolver(CustomDnsResolver::instance())
+        .default_headers(default_headers.clone())
+        .build()
+        .expect("Failed to build client")
 });

 // Build Regex only once since this takes a lot of time.
 static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());

-// Special HashMap which holds the user defined Regex to speedup matching the regex.
-static ICON_BLACKLIST_REGEX: Lazy<dashmap::DashMap<String, Regex>> = Lazy::new(dashmap::DashMap::new);
-
-async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
+#[get("/<domain>/icon.png")]
+fn icon_external(domain: &str) -> Option<Redirect> {
     if !is_valid_domain(domain) {
         warn!("Invalid domain: {}", domain);
         return None;
     }

-    if check_domain_blacklist_reason(domain).await.is_some() {
+    if is_domain_blacklisted(domain) {
         return None;
     }

-    let url = template.replace("{}", domain);
+    let url = CONFIG._icon_service_url().replace("{}", domain);
     match CONFIG.icon_redirect_code() {
         301 => Some(Redirect::moved(url)), // legacy permanent redirect
         302 => Some(Redirect::found(url)), // legacy temporary redirect

@@ -103,11 +86,6 @@ async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
     }
 }

-#[get("/<domain>/icon.png")]
-async fn icon_external(domain: &str) -> Option<Redirect> {
-    icon_redirect(domain, &CONFIG._icon_service_url()).await
-}
-
 #[get("/<domain>/icon.png")]
 async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
     const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");

@@ -166,153 +144,28 @@ fn is_valid_domain(domain: &str) -> bool {
     true
 }

-/// TODO: This is extracted from IpAddr::is_global, which is unstable:
-/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
-/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
-#[allow(clippy::nonminimal_bool)]
-#[cfg(not(feature = "unstable"))]
-fn is_global(ip: IpAddr) -> bool {
-    match ip {
-        IpAddr::V4(ip) => {
-            // check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two
-            // globally routable addresses in the 192.0.0.0/24 range.
-            if u32::from(ip) == 0xc0000009 || u32::from(ip) == 0xc000000a {
-                return true;
-            }
-            !ip.is_private()
-                && !ip.is_loopback()
-                && !ip.is_link_local()
-                && !ip.is_broadcast()
-                && !ip.is_documentation()
-                && !(ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000))
-                && !(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
-                && !(ip.octets()[0] & 240 == 240 && !ip.is_broadcast())
-                && !(ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18)
-                // Make sure the address is not in 0.0.0.0/8
-                && ip.octets()[0] != 0
-        }
-        IpAddr::V6(ip) => {
-            if ip.is_multicast() && ip.segments()[0] & 0x000f == 14 {
-                true
-            } else {
-                !ip.is_multicast()
-                    && !ip.is_loopback()
-                    && !((ip.segments()[0] & 0xffc0) == 0xfe80)
-                    && !((ip.segments()[0] & 0xfe00) == 0xfc00)
-                    && !ip.is_unspecified()
-                    && !((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8))
-            }
-        }
-    }
-}
-
-#[cfg(feature = "unstable")]
-fn is_global(ip: IpAddr) -> bool {
-    ip.is_global()
-}
-
-/// These are some tests to check that the implementations match
-/// The IPv4 can be all checked in 5 mins or so and they are correct as of nightly 2020-07-11
-/// The IPV6 can't be checked in a reasonable time, so we check about ten billion random ones, so far correct
-/// Note that the is_global implementation is subject to change as new IP RFCs are created
-///
-/// To run while showing progress output:
-/// cargo test --features sqlite,unstable -- --nocapture --ignored
-#[cfg(test)]
-#[cfg(feature = "unstable")]
-mod tests {
-    use super::*;
-
-    #[test]
-    #[ignore]
-    fn test_ipv4_global() {
-        for a in 0..u8::MAX {
-            println!("Iter: {}/255", a);
-            for b in 0..u8::MAX {
-                for c in 0..u8::MAX {
-                    for d in 0..u8::MAX {
-                        let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
-                        assert_eq!(ip.is_global(), is_global(ip))
-                    }
-                }
-            }
-        }
-    }
-
-    #[test]
-    #[ignore]
-    fn test_ipv6_global() {
-        use ring::rand::{SecureRandom, SystemRandom};
-        let mut v = [0u8; 16];
-        let rand = SystemRandom::new();
-        for i in 0..1_000 {
-            println!("Iter: {}/1_000", i);
-            for _ in 0..10_000_000 {
-                rand.fill(&mut v).expect("Error generating random values");
-                let ip = IpAddr::V6(std::net::Ipv6Addr::new(
-                    (v[14] as u16) << 8 | v[15] as u16,
-                    (v[12] as u16) << 8 | v[13] as u16,
-                    (v[10] as u16) << 8 | v[11] as u16,
-                    (v[8] as u16) << 8 | v[9] as u16,
-                    (v[6] as u16) << 8 | v[7] as u16,
-                    (v[4] as u16) << 8 | v[5] as u16,
-                    (v[2] as u16) << 8 | v[3] as u16,
-                    (v[0] as u16) << 8 | v[1] as u16,
-                ));
-                assert_eq!(ip.is_global(), is_global(ip))
-            }
-        }
-    }
-}
-
-#[derive(Clone)]
-enum DomainBlacklistReason {
-    Regex,
-    IP,
-}
-
-use cached::proc_macro::cached;
-#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
-async fn check_domain_blacklist_reason(domain: &str) -> Option<DomainBlacklistReason> {
-    // First check the blacklist regex if there is a match.
-    // This prevents the blocked domain(s) from being leaked via a DNS lookup.
-    if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
-        // Use the pre-generate Regex stored in a Lazy HashMap if there's one, else generate it.
-        let is_match = if let Some(regex) = ICON_BLACKLIST_REGEX.get(&blacklist) {
-            regex.is_match(domain)
-        } else {
-            // Clear the current list if the previous key doesn't exists.
-            // To prevent growing of the HashMap after someone has changed it via the admin interface.
-            if ICON_BLACKLIST_REGEX.len() >= 1 {
-                ICON_BLACKLIST_REGEX.clear();
-            }
-
-            // Generate the regex to store in too the Lazy Static HashMap.
-            let blacklist_regex = Regex::new(&blacklist).unwrap();
-            let is_match = blacklist_regex.is_match(domain);
-            ICON_BLACKLIST_REGEX.insert(blacklist.clone(), blacklist_regex);
-
-            is_match
-        };
-
-        if is_match {
-            debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
-            return Some(DomainBlacklistReason::Regex);
-        }
-    }
-
-    if CONFIG.icon_blacklist_non_global_ips() {
-        if let Ok(s) = lookup_host((domain, 0)).await {
-            for addr in s {
-                if !is_global(addr.ip()) {
-                    debug!("IP {} for domain '{}' is not a global IP!", addr.ip(), domain);
-                    return Some(DomainBlacklistReason::IP);
-                }
-            }
-        }
-    }
-
-    None
-}
+pub fn is_domain_blacklisted(domain: &str) -> bool {
+    let Some(config_blacklist) = CONFIG.icon_blacklist_regex() else {
+        return false;
+    };
+
+    // Compiled domain blacklist
+    static COMPILED_BLACKLIST: Mutex<Option<(String, Regex)>> = Mutex::new(None);
+    let mut guard = COMPILED_BLACKLIST.lock().unwrap();
+
+    // If the stored regex is up to date, use it
+    if let Some((value, regex)) = &*guard {
+        if value == &config_blacklist {
+            return regex.is_match(domain);
+        }
+    }
+
+    // If we don't have a regex stored, or it's not up to date, recreate it
+    let regex = Regex::new(&config_blacklist).unwrap();
+    let is_match = regex.is_match(domain);
+    *guard = Some((config_blacklist, regex));
+
+    is_match
+}

 async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {

@@ -342,6 +195,13 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
             Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
         }
         Err(e) => {
+            // If this error comes from the custom resolver, this means this is a blacklisted domain
+            // or non global IP, don't save the miss file in this case to avoid leaking it
+            if let Some(error) = CustomResolverError::downcast_ref(&e) {
+                warn!("{error}");
+                return None;
+            }
+
             warn!("Unable to download icon: {:?}", e);
             let miss_indicator = path + ".miss";
             save_icon(&miss_indicator, &[]).await;

@@ -491,12 +351,12 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
     let ssldomain = format!("https://{domain}");
     let httpdomain = format!("http://{domain}");

-    // First check the domain as given during the request for both HTTPS and HTTP.
-    let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)).await {
-        Ok(c) => Ok(c),
-        Err(e) => {
-            let mut sub_resp = Err(e);
-
+    // First check the domain as given during the request for HTTPS.
+    let resp = match get_page(&ssldomain).await {
+        Err(e) if CustomResolverError::downcast_ref(&e).is_none() => {
+            // If we get an error that is not caused by the blacklist, we retry with HTTP
+            match get_page(&httpdomain).await {
+                mut sub_resp @ Err(_) => {
                     // When the domain is not an IP, and has more then one dot, remove all subdomains.
                     let is_ip = domain.parse::<IpAddr>();
                     if is_ip.is_err() && domain.matches('.').count() > 1 {

@@ -527,6 +387,12 @@
                     }

                     sub_resp
                 }
+                res => res,
+            }
+        }
+
+        // If we get a result or a blacklist error, just continue
+        res => res,
     };

     // Create the iconlist

@@ -573,21 +439,12 @@
 }

 async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
-    match check_domain_blacklist_reason(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await {
-        Some(DomainBlacklistReason::Regex) => warn!("Favicon '{}' is from a blacklisted domain!", url),
-        Some(DomainBlacklistReason::IP) => warn!("Favicon '{}' is hosted on a non-global IP!", url),
-        None => (),
-    }
-
     let mut client = CLIENT.get(url);
     if !referer.is_empty() {
         client = client.header("Referer", referer)
     }

-    match client.send().await {
-        Ok(c) => c.error_for_status().map_err(Into::into),
-        Err(e) => err_silent!(format!("{e}")),
-    }
+    Ok(client.send().await?.error_for_status()?)
 }

 /// Returns a Integer with the priority of the type of the icon which to prefer.

@@ -670,12 +527,6 @@ fn parse_sizes(sizes: &str) -> (u16, u16) {
 }

 async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
-    match check_domain_blacklist_reason(domain).await {
-        Some(DomainBlacklistReason::Regex) => err_silent!("Domain is blacklisted", domain),
-        Some(DomainBlacklistReason::IP) => err_silent!("Host resolves to a non-global IP", domain),
-        None => (),
-    }
-
     let icon_result = get_icon_url(domain).await?;

     let mut buffer = Bytes::new();

@@ -711,8 +562,8 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
                 _ => debug!("Extracted icon from data:image uri is invalid"),
             };
         } else {
-            match get_page_with_referer(&icon.href, &icon_result.referer).await {
-                Ok(res) => {
-                    buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
+            let res = get_page_with_referer(&icon.href, &icon_result.referer).await?;
+            buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)

             // Check if the icon type is allowed, else try an icon from the list.

@@ -725,9 +576,6 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
             info!("Downloaded icon from {}", icon.href);
             break;
         }
-                Err(e) => debug!("{:?}", e),
-            };
-        }
     }

     if buffer.is_empty() {
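
The new `is_domain_blacklisted` replaces a DashMap of compiled regexes with a single `Mutex<Option<(String, Regex)>>`: the compiled regex is cached next to the exact pattern string it was built from, and is only recompiled when the admin changes the setting. A standalone sketch of the same idea (`matches_cached` is a hypothetical helper, not this file's API):

    use std::sync::Mutex;
    use regex::Regex;

    fn matches_cached(pattern: &str, domain: &str) -> bool {
        static COMPILED: Mutex<Option<(String, Regex)>> = Mutex::new(None);
        let mut guard = COMPILED.lock().unwrap();

        // Fast path: the cached regex was compiled from this exact pattern string.
        if let Some((cached, regex)) = &*guard {
            if cached == pattern {
                return regex.is_match(domain);
            }
        }

        // Slow path: (re)compile and remember for the next call.
        let regex = Regex::new(pattern).expect("invalid pattern");
        let is_match = regex.is_match(domain);
        *guard = Some((pattern.to_owned(), regex));
        is_match
    }

    fn main() {
        assert!(matches_cached(r"example\.com$", "icons.example.com"));
    }

With the non-global-IP check moved into `CustomDnsResolver` (blocked lookups surface as `CustomResolverError` at resolve time), a plain regex test is all that remains here.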

src/api/identity.rs (23 changed lines)

@@ -15,7 +15,7 @@ use crate::{
             two_factor::{authenticator, duo, email, enforce_2fa_policy, webauthn, yubikey},
         },
         push::register_push_device,
-        ApiResult, EmptyResult, JsonResult, JsonUpcase,
+        ApiResult, EmptyResult, JsonResult,
     },
     auth::{generate_organization_api_key_login_claims, ClientHeaders, ClientIp},
     db::{models::*, DbConn},

@@ -295,7 +295,12 @@ async fn _password_login(
         "KdfIterations": user.client_kdf_iter,
         "KdfMemory": user.client_kdf_memory,
         "KdfParallelism": user.client_kdf_parallelism,
-        "ResetMasterPassword": false,// TODO: Same as above
+        "ResetMasterPassword": false, // TODO: Same as above
+        "ForcePasswordReset": false,
+        "MasterPasswordPolicy": {
+            "object": "masterPasswordPolicy",
+        },
+
         "scope": scope,
         "unofficialServer": true,
         "UserDecryptionOptions": {

@@ -559,8 +564,11 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
     let mut result = json!({
         "error" : "invalid_grant",
        "error_description" : "Two factor required.",
-        "TwoFactorProviders" : providers,
-        "TwoFactorProviders2" : {} // { "0" : null }
+        "TwoFactorProviders" : providers.iter().map(ToString::to_string).collect::<Vec<String>>(),
+        "TwoFactorProviders2" : {}, // { "0" : null }
+        "MasterPasswordPolicy": {
+            "Object": "masterPasswordPolicy"
+        }
     });

     for provider in providers {

@@ -597,7 +605,7 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
             let yubikey_metadata: yubikey::YubikeyMetadata = serde_json::from_str(&twofactor.data)?;

             result["TwoFactorProviders2"][provider.to_string()] = json!({
-                "Nfc": yubikey_metadata.Nfc,
+                "Nfc": yubikey_metadata.nfc,
             })
         }

@@ -626,19 +634,18 @@
 }

 #[post("/accounts/prelogin", data = "<data>")]
-async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
+async fn prelogin(data: Json<PreloginData>, conn: DbConn) -> Json<Value> {
     _prelogin(data, conn).await
 }

 #[post("/accounts/register", data = "<data>")]
-async fn identity_register(data: JsonUpcase<RegisterData>, conn: DbConn) -> JsonResult {
+async fn identity_register(data: Json<RegisterData>, conn: DbConn) -> JsonResult {
     _register(data, conn).await
 }

 // https://github.com/bitwarden/jslib/blob/master/common/src/models/request/tokenRequest.ts
 // https://github.com/bitwarden/mobile/blob/master/src/Core/Models/Request/TokenRequest.cs
 #[derive(Debug, Clone, Default, FromForm)]
-#[allow(non_snake_case)]
 struct ConnectData {
     #[field(name = uncased("grant_type"))]
     #[field(name = uncased("granttype"))]
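
The `TwoFactorProviders` tweak serializes the provider IDs as strings rather than numbers, presumably to match what newer clients expect. A tiny illustration of the conversion:

    use serde_json::json;

    fn main() {
        let providers: Vec<i32> = vec![0, 3];
        // ToString::to_string turns each numeric ID into its string form.
        let as_strings: Vec<String> = providers.iter().map(ToString::to_string).collect();
        assert_eq!(json!(as_strings).to_string(), r#"["0","3"]"#);
    }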

src/api/mod.rs (17 changed lines)

@@ -20,10 +20,10 @@ pub use crate::api::{
     core::two_factor::send_incomplete_2fa_notifications,
     core::{emergency_notification_reminder_job, emergency_request_timeout_job},
     core::{event_cleanup_job, events_routes as core_events_routes},
-    icons::routes as icons_routes,
+    icons::{is_domain_blacklisted, routes as icons_routes},
     identity::routes as identity_routes,
     notifications::routes as notifications_routes,
-    notifications::{start_notification_server, AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS},
+    notifications::{AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS},
     push::{
         push_cipher_update, push_folder_update, push_logout, push_send_update, push_user_update, register_push_device,
         unregister_push_device,

@@ -33,23 +33,18 @@ pub use crate::api::{
     web::static_files,
 };
 use crate::db::{models::User, DbConn};
-use crate::util;

 // Type aliases for API methods results
 type ApiResult<T> = Result<T, crate::error::Error>;
 pub type JsonResult = ApiResult<Json<Value>>;
 pub type EmptyResult = ApiResult<()>;

-type JsonUpcase<T> = Json<util::UpCase<T>>;
-type JsonUpcaseVec<T> = Json<Vec<util::UpCase<T>>>;
-type JsonVec<T> = Json<Vec<T>>;
-
 // Common structs representing JSON data received
 #[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[serde(rename_all = "camelCase")]
 struct PasswordOrOtpData {
-    MasterPasswordHash: Option<String>,
-    Otp: Option<String>,
+    master_password_hash: Option<String>,
+    otp: Option<String>,
 }

 impl PasswordOrOtpData {

@@ -59,7 +54,7 @@ impl PasswordOrOtpData {
     pub async fn validate(&self, user: &User, delete_if_valid: bool, conn: &mut DbConn) -> EmptyResult {
         use crate::api::core::two_factor::protected_actions::validate_protected_action_otp;

-        match (self.MasterPasswordHash.as_deref(), self.Otp.as_deref()) {
+        match (self.master_password_hash.as_deref(), self.otp.as_deref()) {
             (Some(pw_hash), None) => {
                 if !user.check_valid_password(pw_hash) {
                     err!("Invalid password");
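
`PasswordOrOtpData::validate` keeps its shape through the rename: a match on the pair of `Option`s so that exactly one credential path is taken. A minimal sketch of that dispatch, with hypothetical `check_password`/`check_otp` helpers standing in for the real user and protected-action validation:

    fn validate(password_hash: Option<&str>, otp: Option<&str>) -> Result<(), &'static str> {
        match (password_hash, otp) {
            (Some(hash), None) => check_password(hash),
            (None, Some(code)) => check_otp(code),
            // Anything else (both present, or neither) is rejected outright.
            _ => Err("provide exactly one of password hash or OTP"),
        }
    }

    fn check_password(hash: &str) -> Result<(), &'static str> {
        if hash.is_empty() { Err("Invalid password") } else { Ok(()) }
    }

    fn check_otp(code: &str) -> Result<(), &'static str> {
        if code.len() == 6 { Ok(()) } else { Err("Invalid OTP") }
    }

    fn main() {
        assert!(validate(Some("abc123"), None).is_ok());
        assert!(validate(None, None).is_err());
    }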

src/api/notifications.rs (212 changed lines)

@@ -1,23 +1,11 @@
-use std::{
-    net::{IpAddr, SocketAddr},
-    sync::Arc,
-    time::Duration,
-};
+use std::{net::IpAddr, sync::Arc, time::Duration};

 use chrono::{NaiveDateTime, Utc};
 use rmpv::Value;
-use rocket::{
-    futures::{SinkExt, StreamExt},
-    Route,
-};
-use tokio::{
-    net::{TcpListener, TcpStream},
-    sync::mpsc::Sender,
-};
-use tokio_tungstenite::{
-    accept_hdr_async,
-    tungstenite::{handshake, Message},
-};
+use rocket::{futures::StreamExt, Route};
+use tokio::sync::mpsc::Sender;
+
+use rocket_ws::{Message, WebSocket};

 use crate::{
     auth::{ClientIp, WsAccessTokenHeader},

@@ -30,7 +18,7 @@ use crate::{
 use once_cell::sync::Lazy;

-static WS_USERS: Lazy<Arc<WebSocketUsers>> = Lazy::new(|| {
+pub static WS_USERS: Lazy<Arc<WebSocketUsers>> = Lazy::new(|| {
     Arc::new(WebSocketUsers {
         map: Arc::new(dashmap::DashMap::new()),
     })

@@ -47,8 +35,15 @@ use super::{
     push_send_update, push_user_update,
 };

+static NOTIFICATIONS_DISABLED: Lazy<bool> = Lazy::new(|| !CONFIG.enable_websocket() && !CONFIG.push_enabled());
+
 pub fn routes() -> Vec<Route> {
-    routes![websockets_hub, anonymous_websockets_hub]
+    if CONFIG.enable_websocket() {
+        routes![websockets_hub, anonymous_websockets_hub]
+    } else {
+        info!("WebSocket are disabled, realtime sync functionality will not work!");
+        routes![]
+    }
 }

 #[derive(FromForm, Debug)]

@@ -108,7 +103,7 @@ impl Drop for WSAnonymousEntryMapGuard {
 #[get("/hub?<data..>")]
 fn websockets_hub<'r>(
-    ws: rocket_ws::WebSocket,
+    ws: WebSocket,
     data: WsAccessToken,
     ip: ClientIp,
     header_token: WsAccessTokenHeader,

@@ -192,11 +187,7 @@
 }

 #[get("/anonymous-hub?<token..>")]
-fn anonymous_websockets_hub<'r>(
-    ws: rocket_ws::WebSocket,
-    token: String,
-    ip: ClientIp,
-) -> Result<rocket_ws::Stream!['r], Error> {
+fn anonymous_websockets_hub<'r>(ws: WebSocket, token: String, ip: ClientIp) -> Result<rocket_ws::Stream!['r], Error> {
     let addr = ip.ip;
     info!("Accepting Anonymous Rocket WS connection from {addr}");

@@ -297,8 +288,8 @@ fn serialize(val: Value) -> Vec<u8> {
 }

 fn serialize_date(date: NaiveDateTime) -> Value {
-    let seconds: i64 = date.timestamp();
-    let nanos: i64 = date.timestamp_subsec_nanos().into();
+    let seconds: i64 = date.and_utc().timestamp();
+    let nanos: i64 = date.and_utc().timestamp_subsec_nanos().into();
     let timestamp = nanos << 34 | seconds;

     let bs = timestamp.to_be_bytes();

@@ -349,13 +340,19 @@ impl WebSocketUsers {
     // NOTE: The last modified date needs to be updated before calling these methods
     pub async fn send_user_update(&self, ut: UpdateType, user: &User) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let data = create_update(
             vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
             ut,
             None,
         );

-        self.send_update(&user.uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            self.send_update(&user.uuid, &data).await;
+        }

         if CONFIG.push_enabled() {
             push_user_update(ut, user);

@@ -363,13 +360,19 @@ impl WebSocketUsers {
     }

     pub async fn send_logout(&self, user: &User, acting_device_uuid: Option<String>) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let data = create_update(
             vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))],
             UpdateType::LogOut,
             acting_device_uuid.clone(),
         );

-        self.send_update(&user.uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            self.send_update(&user.uuid, &data).await;
+        }

         if CONFIG.push_enabled() {
             push_logout(user, acting_device_uuid);

@@ -383,6 +386,10 @@ impl WebSocketUsers {
         acting_device_uuid: &String,
         conn: &mut DbConn,
     ) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let data = create_update(
             vec![
                 ("Id".into(), folder.uuid.clone().into()),

@@ -393,7 +400,9 @@ impl WebSocketUsers {
             Some(acting_device_uuid.into()),
         );

-        self.send_update(&folder.user_uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            self.send_update(&folder.user_uuid, &data).await;
+        }

         if CONFIG.push_enabled() {
             push_folder_update(ut, folder, acting_device_uuid, conn).await;

@@ -409,6 +418,10 @@ impl WebSocketUsers {
         collection_uuids: Option<Vec<String>>,
         conn: &mut DbConn,
     ) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let org_uuid = convert_option(cipher.organization_uuid.clone());
         // Depending if there are collections provided or not, we need to have different values for the following variables.
         // The user_uuid should be `null`, and the revision date should be set to now, else the clients won't sync the collection change.

@@ -434,9 +447,11 @@ impl WebSocketUsers {
             Some(acting_device_uuid.into()),
         );

-        for uuid in user_uuids {
-            self.send_update(uuid, &data).await;
-        }
+        if CONFIG.enable_websocket() {
+            for uuid in user_uuids {
+                self.send_update(uuid, &data).await;
+            }
+        }

         if CONFIG.push_enabled() && user_uuids.len() == 1 {
             push_cipher_update(ut, cipher, acting_device_uuid, conn).await;

@@ -451,6 +466,10 @@ impl WebSocketUsers {
         acting_device_uuid: &String,
         conn: &mut DbConn,
     ) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let user_uuid = convert_option(send.user_uuid.clone());

         let data = create_update(

@@ -463,9 +482,11 @@ impl WebSocketUsers {
             None,
         );

-        for uuid in user_uuids {
-            self.send_update(uuid, &data).await;
-        }
+        if CONFIG.enable_websocket() {
+            for uuid in user_uuids {
+                self.send_update(uuid, &data).await;
+            }
+        }

         if CONFIG.push_enabled() && user_uuids.len() == 1 {
             push_send_update(ut, send, acting_device_uuid, conn).await;
         }

@@ -478,12 +499,18 @@ impl WebSocketUsers {
         acting_device_uuid: &String,
         conn: &mut DbConn,
     ) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let data = create_update(
             vec![("Id".into(), auth_request_uuid.clone().into()), ("UserId".into(), user_uuid.clone().into())],
             UpdateType::AuthRequest,
             Some(acting_device_uuid.to_string()),
         );

-        self.send_update(user_uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            self.send_update(user_uuid, &data).await;
+        }

         if CONFIG.push_enabled() {
             push_auth_request(user_uuid.to_string(), auth_request_uuid.to_string(), conn).await;

@@ -497,12 +524,18 @@ impl WebSocketUsers {
         approving_device_uuid: String,
         conn: &mut DbConn,
     ) {
+        // Skip any processing if both WebSockets and Push are not active
+        if *NOTIFICATIONS_DISABLED {
+            return;
+        }
         let data = create_update(
             vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())],
             UpdateType::AuthRequestResponse,
             approving_device_uuid.clone().into(),
         );

-        self.send_update(auth_response_uuid, &data).await;
+        if CONFIG.enable_websocket() {
+            self.send_update(auth_response_uuid, &data).await;
+        }

         if CONFIG.push_enabled() {
             push_auth_response(user_uuid.to_string(), auth_response_uuid.to_string(), approving_device_uuid, conn)

@@ -526,6 +559,9 @@ impl AnonymousWebSocketSubscriptions {
     }

     pub async fn send_auth_response(&self, user_uuid: &String, auth_response_uuid: &str) {
+        if !CONFIG.enable_websocket() {
+            return;
+        }
         let data = create_anonymous_update(
             vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_uuid.clone().into())],
             UpdateType::AuthRequestResponse,

@@ -620,127 +656,3 @@ pub enum UpdateType {

 pub type Notify<'a> = &'a rocket::State<Arc<WebSocketUsers>>;
 pub type AnonymousNotify<'a> = &'a rocket::State<Arc<AnonymousWebSocketSubscriptions>>;
-
-pub fn start_notification_server() -> Arc<WebSocketUsers> {
-    let users = Arc::clone(&WS_USERS);
-    if CONFIG.websocket_enabled() {
-        let users2 = Arc::<WebSocketUsers>::clone(&users);
-        tokio::spawn(async move {
-            let addr = (CONFIG.websocket_address(), CONFIG.websocket_port());
-            info!("Starting WebSockets server on {}:{}", addr.0, addr.1);
-            let listener = TcpListener::bind(addr).await.expect("Can't listen on websocket port");
-
-            let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel::<()>();
-            CONFIG.set_ws_shutdown_handle(shutdown_tx);
-
-            loop {
-                tokio::select! {
-                    Ok((stream, addr)) = listener.accept() => {
-                        tokio::spawn(handle_connection(stream, Arc::<WebSocketUsers>::clone(&users2), addr));
-                    }
-
-                    _ = &mut shutdown_rx => {
-                        break;
-                    }
-                }
-            }
-
-            info!("Shutting down WebSockets server!")
-        });
-    }
-
-    users
-}
-
-async fn handle_connection(stream: TcpStream, users: Arc<WebSocketUsers>, addr: SocketAddr) -> Result<(), Error> {
-    let mut user_uuid: Option<String> = None;
-
-    info!("Accepting WS connection from {addr}");
-
-    // Accept connection, do initial handshake, validate auth token and get the user ID
-    use handshake::server::{Request, Response};
-    let mut stream = accept_hdr_async(stream, |req: &Request, res: Response| {
-        if let Some(token) = get_request_token(req) {
-            if let Ok(claims) = crate::auth::decode_login(&token) {
-                user_uuid = Some(claims.sub);
-                return Ok(res);
-            }
-        }
-        Err(Response::builder().status(401).body(None).unwrap())
-    })
-    .await?;
-
-    let user_uuid = user_uuid.expect("User UUID should be set after the handshake");
-
-    let (mut rx, guard) = {
-        // Add a channel to send messages to this client to the map
-        let entry_uuid = uuid::Uuid::new_v4();
-        let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100);
-        users.map.entry(user_uuid.clone()).or_default().push((entry_uuid, tx));
-
-        // Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map
-        (rx, WSEntryMapGuard::new(users, user_uuid, entry_uuid, addr.ip()))
-    };
-
-    let _guard = guard;
-    let mut interval = tokio::time::interval(Duration::from_secs(15));
-    loop {
-        tokio::select! {
-            res = stream.next() => {
-                match res {
-                    Some(Ok(message)) => {
-                        match message {
-                            // Respond to any pings
-                            Message::Ping(ping) => stream.send(Message::Pong(ping)).await?,
-                            Message::Pong(_) => {/* Ignored */},
-
-                            // We should receive an initial message with the protocol and version, and we will reply to it
-                            Message::Text(ref message) => {
-                                let msg = message.strip_suffix(RECORD_SEPARATOR as char).unwrap_or(message);
-
-                                if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) {
-                                    stream.send(Message::binary(INITIAL_RESPONSE)).await?;
-                                    continue;
-                                }
-                            }
-
-                            // Just echo anything else the client sends
-                            _ => stream.send(message).await?,
-                        }
-                    }
-                    _ => break,
-                }
-            }
-
-            res = rx.recv() => {
-                match res {
-                    Some(res) => stream.send(res).await?,
-                    None => break,
-                }
-            }
-
-            _ = interval.tick() => stream.send(Message::Ping(create_ping())).await?
-        }
-    }
-
-    Ok(())
-}
-
-fn get_request_token(req: &handshake::server::Request) -> Option<String> {
-    const ACCESS_TOKEN_KEY: &str = "access_token=";
-
-    if let Some(Ok(auth)) = req.headers().get("Authorization").map(|a| a.to_str()) {
-        if let Some(token_part) = auth.strip_prefix("Bearer ") {
-            return Some(token_part.to_owned());
-        }
-    }
-
-    if let Some(params) = req.uri().query() {
-        let params_iter = params.split('&').take(1);
-        for val in params_iter {
-            if let Some(stripped) = val.strip_prefix(ACCESS_TOKEN_KEY) {
-                return Some(stripped.to_owned());
-            }
-        }
-    }
-    None
-}
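
The new `NOTIFICATIONS_DISABLED` flag is a `Lazy<bool>` computed once from config and checked at the top of every sender, so no update payload is built when neither WebSockets nor push can deliver it. A reduced sketch of the pattern (`websocket_enabled`/`push_enabled` are hypothetical stand-ins for the `CONFIG` getters):

    use once_cell::sync::Lazy;

    fn websocket_enabled() -> bool { false }
    fn push_enabled() -> bool { false }

    // Evaluated on first access, then cached for the life of the process.
    static NOTIFICATIONS_DISABLED: Lazy<bool> = Lazy::new(|| !websocket_enabled() && !push_enabled());

    fn send_user_update(user_uuid: &str) {
        // Bail out before doing any serialization or lookup work.
        if *NOTIFICATIONS_DISABLED {
            return;
        }
        println!("would build and dispatch an update for {user_uuid}");
    }

    fn main() {
        send_user_update("some-uuid"); // prints nothing with both channels off
    }

Dropping `start_notification_server`/`handle_connection` removes the separate tokio-tungstenite listener entirely; WebSocket traffic now rides on the main Rocket port via rocket_ws.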

src/api/push.rs (4 changed lines)

@@ -114,11 +114,11 @@ pub async fn register_push_device(device: &mut Device, conn: &mut crate::db::DbC
         .await?
         .error_for_status()
     {
-        err!(format!("An error occured while proceeding registration of a device: {e}"));
+        err!(format!("An error occurred while proceeding registration of a device: {e}"));
     }

     if let Err(e) = device.save(conn).await {
-        err!(format!("An error occured while trying to save the (registered) device push uuid: {e}"));
+        err!(format!("An error occurred while trying to save the (registered) device push uuid: {e}"));
     }

     Ok(())

src/api/web.rs (2 changed lines)

@@ -170,7 +170,7 @@ pub fn static_files(filename: &str) -> Result<(ContentType, &'static [u8]), Erro
         }
         "bootstrap.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))),
         "bootstrap.bundle.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap.bundle.js"))),
-        "jdenticon.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon.js"))),
+        "jdenticon-3.3.0.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon-3.3.0.js"))),
         "datatables.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))),
         "datatables.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))),
         "jquery-3.7.1.slim.js" => {

src/auth.rs (124 changed lines)

@@ -1,10 +1,11 @@
// JWT Handling // JWT Handling
// //
use chrono::{Duration, Utc}; use chrono::{TimeDelta, Utc};
use num_traits::FromPrimitive; use num_traits::FromPrimitive;
use once_cell::sync::Lazy; use once_cell::sync::{Lazy, OnceCell};
use jsonwebtoken::{self, errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header}; use jsonwebtoken::{errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
use openssl::rsa::Rsa;
use serde::de::DeserializeOwned; use serde::de::DeserializeOwned;
use serde::ser::Serialize; use serde::ser::Serialize;
@ -12,7 +13,7 @@ use crate::{error::Error, CONFIG};
const JWT_ALGORITHM: Algorithm = Algorithm::RS256; const JWT_ALGORITHM: Algorithm = Algorithm::RS256;
pub static DEFAULT_VALIDITY: Lazy<Duration> = Lazy::new(|| Duration::hours(2)); pub static DEFAULT_VALIDITY: Lazy<TimeDelta> = Lazy::new(|| TimeDelta::try_hours(2).unwrap());
static JWT_HEADER: Lazy<Header> = Lazy::new(|| Header::new(JWT_ALGORITHM)); static JWT_HEADER: Lazy<Header> = Lazy::new(|| Header::new(JWT_ALGORITHM));
pub static JWT_LOGIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|login", CONFIG.domain_origin())); pub static JWT_LOGIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|login", CONFIG.domain_origin()));
@ -26,23 +27,46 @@ static JWT_SEND_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|send", CONFIG.do
static JWT_ORG_API_KEY_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|api.organization", CONFIG.domain_origin())); static JWT_ORG_API_KEY_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|api.organization", CONFIG.domain_origin()));
static JWT_FILE_DOWNLOAD_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|file_download", CONFIG.domain_origin())); static JWT_FILE_DOWNLOAD_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|file_download", CONFIG.domain_origin()));
static PRIVATE_RSA_KEY: Lazy<EncodingKey> = Lazy::new(|| { static PRIVATE_RSA_KEY: OnceCell<EncodingKey> = OnceCell::new();
let key = static PUBLIC_RSA_KEY: OnceCell<DecodingKey> = OnceCell::new();
std::fs::read(CONFIG.private_rsa_key()).unwrap_or_else(|e| panic!("Error loading private RSA Key. \n{e}"));
EncodingKey::from_rsa_pem(&key).unwrap_or_else(|e| panic!("Error decoding private RSA Key.\n{e}"))
});
static PUBLIC_RSA_KEY: Lazy<DecodingKey> = Lazy::new(|| {
let key = std::fs::read(CONFIG.public_rsa_key()).unwrap_or_else(|e| panic!("Error loading public RSA Key. \n{e}"));
DecodingKey::from_rsa_pem(&key).unwrap_or_else(|e| panic!("Error decoding public RSA Key.\n{e}"))
});
pub fn load_keys() { pub fn initialize_keys() -> Result<(), crate::error::Error> {
Lazy::force(&PRIVATE_RSA_KEY); let mut priv_key_buffer = Vec::with_capacity(2048);
Lazy::force(&PUBLIC_RSA_KEY);
let priv_key = {
let mut priv_key_file =
File::options().create(true).truncate(false).read(true).write(true).open(CONFIG.private_rsa_key())?;
#[allow(clippy::verbose_file_reads)]
let bytes_read = priv_key_file.read_to_end(&mut priv_key_buffer)?;
if bytes_read > 0 {
Rsa::private_key_from_pem(&priv_key_buffer[..bytes_read])?
} else {
// Only create the key if the file doesn't exist or is empty
let rsa_key = openssl::rsa::Rsa::generate(2048)?;
priv_key_buffer = rsa_key.private_key_to_pem()?;
priv_key_file.write_all(&priv_key_buffer)?;
info!("Private key created correctly.");
rsa_key
}
};
let pub_key_buffer = priv_key.public_key_to_pem()?;
let enc = EncodingKey::from_rsa_pem(&priv_key_buffer)?;
let dec: DecodingKey = DecodingKey::from_rsa_pem(&pub_key_buffer)?;
if PRIVATE_RSA_KEY.set(enc).is_err() {
err!("PRIVATE_RSA_KEY must only be initialized once")
}
if PUBLIC_RSA_KEY.set(dec).is_err() {
err!("PUBLIC_RSA_KEY must only be initialized once")
}
Ok(())
} }
pub fn encode_jwt<T: Serialize>(claims: &T) -> String { pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
match jsonwebtoken::encode(&JWT_HEADER, claims, &PRIVATE_RSA_KEY) { match jsonwebtoken::encode(&JWT_HEADER, claims, PRIVATE_RSA_KEY.wait()) {
Ok(token) => token, Ok(token) => token,
Err(e) => panic!("Error encoding jwt {e}"), Err(e) => panic!("Error encoding jwt {e}"),
} }
@ -56,7 +80,7 @@ fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Err
validation.set_issuer(&[issuer]); validation.set_issuer(&[issuer]);
let token = token.replace(char::is_whitespace, ""); let token = token.replace(char::is_whitespace, "");
match jsonwebtoken::decode(&token, &PUBLIC_RSA_KEY, &validation) { match jsonwebtoken::decode(&token, PUBLIC_RSA_KEY.wait(), &validation) {
Ok(d) => Ok(d.claims), Ok(d) => Ok(d.claims),
Err(err) => match *err.kind() { Err(err) => match *err.kind() {
ErrorKind::InvalidToken => err!("Token is invalid"), ErrorKind::InvalidToken => err!("Token is invalid"),
@ -164,11 +188,11 @@ pub fn generate_invite_claims(
user_org_id: Option<String>, user_org_id: Option<String>,
invited_by_email: Option<String>, invited_by_email: Option<String>,
) -> InviteJwtClaims { ) -> InviteJwtClaims {
let time_now = Utc::now().naive_utc(); let time_now = Utc::now();
let expire_hours = i64::from(CONFIG.invitation_expiration_hours()); let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
InviteJwtClaims { InviteJwtClaims {
nbf: time_now.timestamp(), nbf: time_now.timestamp(),
exp: (time_now + Duration::hours(expire_hours)).timestamp(), exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
 iss: JWT_INVITE_ISSUER.to_string(),
 sub: uuid,
 email,
@@ -202,11 +226,11 @@ pub fn generate_emergency_access_invite_claims(
 grantor_name: String,
 grantor_email: String,
 ) -> EmergencyAccessInviteJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
 let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
 EmergencyAccessInviteJwtClaims {
 nbf: time_now.timestamp(),
-    exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+    exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
 iss: JWT_EMERGENCY_ACCESS_INVITE_ISSUER.to_string(),
 sub: uuid,
 email,
@@ -233,10 +257,10 @@ pub struct OrgApiKeyLoginJwtClaims {
 }
 pub fn generate_organization_api_key_login_claims(uuid: String, org_id: String) -> OrgApiKeyLoginJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
 OrgApiKeyLoginJwtClaims {
 nbf: time_now.timestamp(),
-    exp: (time_now + Duration::hours(1)).timestamp(),
+    exp: (time_now + TimeDelta::try_hours(1).unwrap()).timestamp(),
 iss: JWT_ORG_API_KEY_ISSUER.to_string(),
 sub: uuid,
 client_id: format!("organization.{org_id}"),
@@ -260,10 +284,10 @@ pub struct FileDownloadClaims {
 }
 pub fn generate_file_download_claims(uuid: String, file_id: String) -> FileDownloadClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
 FileDownloadClaims {
 nbf: time_now.timestamp(),
-    exp: (time_now + Duration::minutes(5)).timestamp(),
+    exp: (time_now + TimeDelta::try_minutes(5).unwrap()).timestamp(),
 iss: JWT_FILE_DOWNLOAD_ISSUER.to_string(),
 sub: uuid,
 file_id,
@@ -283,42 +307,42 @@ pub struct BasicJwtClaims {
 }
 pub fn generate_delete_claims(uuid: String) -> BasicJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
 let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
 BasicJwtClaims {
 nbf: time_now.timestamp(),
-    exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+    exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
 iss: JWT_DELETE_ISSUER.to_string(),
 sub: uuid,
 }
 }
 pub fn generate_verify_email_claims(uuid: String) -> BasicJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
 let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
 BasicJwtClaims {
 nbf: time_now.timestamp(),
-    exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+    exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
 iss: JWT_VERIFYEMAIL_ISSUER.to_string(),
 sub: uuid,
 }
 }
 pub fn generate_admin_claims() -> BasicJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
 BasicJwtClaims {
 nbf: time_now.timestamp(),
-    exp: (time_now + Duration::minutes(CONFIG.admin_session_lifetime())).timestamp(),
+    exp: (time_now + TimeDelta::try_minutes(CONFIG.admin_session_lifetime()).unwrap()).timestamp(),
 iss: JWT_ADMIN_ISSUER.to_string(),
 sub: "admin_panel".to_string(),
 }
 }
 pub fn generate_send_claims(send_id: &str, file_id: &str) -> BasicJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
 BasicJwtClaims {
 nbf: time_now.timestamp(),
-    exp: (time_now + Duration::minutes(2)).timestamp(),
+    exp: (time_now + TimeDelta::try_minutes(2).unwrap()).timestamp(),
 iss: JWT_SEND_ISSUER.to_string(),
 sub: format!("{send_id}/{file_id}"),
 }
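
All of these claim constructors follow the same migration: chrono's panicking `Duration::hours`/`Duration::minutes` helpers give way to the fallible `TimeDelta::try_*` constructors, and the timezone-aware `Utc::now()` is used directly instead of dropping to a `NaiveDateTime` first. A minimal sketch of the pattern, assuming chrono 0.4.35 or later (the `Claims` struct here is illustrative, not the real one):

    use chrono::{TimeDelta, Utc};

    // Hypothetical claims struct, for illustration; the real ones are above.
    struct Claims {
        nbf: i64,
        exp: i64,
    }

    fn make_claims(valid_hours: i64) -> Claims {
        let now = Utc::now(); // DateTime<Utc>; no naive_utc() needed for timestamps
        Claims {
            nbf: now.timestamp(),
            // try_hours returns None on overflow, making the failure explicit
            exp: (now + TimeDelta::try_hours(valid_hours).unwrap()).timestamp(),
        }
    }

    fn main() {
        let c = make_claims(5);
        assert!(c.exp > c.nbf);
    }
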
@@ -367,10 +391,8 @@ impl<'r> FromRequest<'r> for Host {
 let host = if let Some(host) = headers.get_one("X-Forwarded-Host") {
 host
-    } else if let Some(host) = headers.get_one("Host") {
-        host
 } else {
-        ""
+        headers.get_one("Host").unwrap_or_default()
 };
 format!("{protocol}://{host}")
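
The `Host` fallback chain shrinks because `Option::unwrap_or_default()` already produces `""` for a missing `&str`. A standalone sketch of the same shape, with a `HashMap` standing in for rocket's header map:

    use std::collections::HashMap;

    // Stand-in for rocket's HeaderMap::get_one; the header names match the diff.
    fn host_from(headers: &HashMap<String, String>, protocol: &str) -> String {
        let host = if let Some(host) = headers.get("X-Forwarded-Host") {
            host.as_str()
        } else {
            // unwrap_or_default() yields "" for Option<&str>,
            // replacing the old explicit `} else { "" }` arm.
            headers.get("Host").map(String::as_str).unwrap_or_default()
        };
        format!("{protocol}://{host}")
    }

    fn main() {
        let mut headers = HashMap::new();
        headers.insert("Host".to_string(), "vault.example.com".to_string());
        assert_eq!(host_from(&headers, "https"), "https://vault.example.com");
    }
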
@@ -383,7 +405,6 @@ impl<'r> FromRequest<'r> for Host {
 }
 pub struct ClientHeaders {
-    pub host: String,
 pub device_type: i32,
 pub ip: ClientIp,
 }
@@ -393,7 +414,6 @@ impl<'r> FromRequest<'r> for ClientHeaders {
 type Error = &'static str;
 async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
-    let host = try_outcome!(Host::from_request(request).await).host;
 let ip = match ClientIp::from_request(request).await {
 Outcome::Success(ip) => ip,
 _ => err_handler!("Error getting Client IP"),
@@ -403,7 +423,6 @@ impl<'r> FromRequest<'r> for ClientHeaders {
 request.headers().get_one("device-type").map(|d| d.parse().unwrap_or(14)).unwrap_or_else(|| 14);
 Outcome::Success(ClientHeaders {
-    host,
 device_type,
 ip,
 })
@@ -475,7 +494,7 @@ impl<'r> FromRequest<'r> for Headers {
 // Check if the stamp exception has expired first.
 // Then, check if the current route matches any of the allowed routes.
 // After that check the stamp in exception matches the one in the claims.
-    if Utc::now().naive_utc().timestamp() > stamp_exception.expire {
+    if Utc::now().timestamp() > stamp_exception.expire {
 // If the stamp exception has expired, remove it from the database.
 // This prevents checking this stamp exception for new requests.
 let mut user = user;
@@ -509,7 +528,6 @@ pub struct OrgHeaders {
 pub user: User,
 pub org_user_type: UserOrgType,
 pub org_user: UserOrganization,
-    pub org_id: String,
 pub ip: ClientIp,
 }
@@ -572,7 +590,6 @@ impl<'r> FromRequest<'r> for OrgHeaders {
 }
 },
 org_user,
-    org_id: String::from(org_id),
 ip: headers.ip,
 })
 }
@@ -649,7 +666,6 @@ pub struct ManagerHeaders {
 pub host: String,
 pub device: Device,
 pub user: User,
-    pub org_user_type: UserOrgType,
 pub ip: ClientIp,
 }
@@ -667,7 +683,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
 _ => err_handler!("Error getting DB"),
 };
-    if !can_access_collection(&headers.org_user, &col_id, &mut conn).await {
+    if !Collection::can_access_collection(&headers.org_user, &col_id, &mut conn).await {
 err_handler!("The current user isn't a manager for this collection")
 }
 }
@@ -678,7 +694,6 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
 host: headers.host,
 device: headers.device,
 user: headers.user,
-    org_user_type: headers.org_user_type,
 ip: headers.ip,
 })
 } else {
@@ -705,7 +720,6 @@ pub struct ManagerHeadersLoose {
 pub device: Device,
 pub user: User,
 pub org_user: UserOrganization,
-    pub org_user_type: UserOrgType,
 pub ip: ClientIp,
 }
@@ -721,7 +735,6 @@ impl<'r> FromRequest<'r> for ManagerHeadersLoose {
 device: headers.device,
 user: headers.user,
 org_user: headers.org_user,
-    org_user_type: headers.org_user_type,
 ip: headers.ip,
 })
 } else {
@@ -740,10 +753,6 @@ impl From<ManagerHeadersLoose> for Headers {
 }
 }
 }
- async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
-     org_user.has_full_access()
-         || Collection::has_access_by_collection_and_user_uuid(col_id, &org_user.user_uuid, conn).await
- }
 impl ManagerHeaders {
 pub async fn from_loose(
@@ -755,7 +764,7 @@ impl ManagerHeaders {
 if uuid::Uuid::parse_str(col_id).is_err() {
 err!("Collection Id is malformed!");
 }
-    if !can_access_collection(&h.org_user, col_id, conn).await {
+    if !Collection::can_access_collection(&h.org_user, col_id, conn).await {
 err!("You don't have access to all collections!");
 }
 }
@@ -764,14 +773,12 @@ impl ManagerHeaders {
 host: h.host,
 device: h.device,
 user: h.user,
-    org_user_type: h.org_user_type,
 ip: h.ip,
 })
 }
 }
 pub struct OwnerHeaders {
-    pub host: String,
 pub device: Device,
 pub user: User,
 pub ip: ClientIp,
@@ -785,7 +792,6 @@ impl<'r> FromRequest<'r> for OwnerHeaders {
 let headers = try_outcome!(OrgHeaders::from_request(request).await);
 if headers.org_user_type == UserOrgType::Owner {
 Outcome::Success(Self {
-    host: headers.host,
 device: headers.device,
 user: headers.user,
 ip: headers.ip,
@@ -799,7 +805,11 @@ impl<'r> FromRequest<'r> for OwnerHeaders {
 //
 // Client IP address detection
 //
- use std::net::IpAddr;
+ use std::{
+     fs::File,
+     io::{Read, Write},
+     net::IpAddr,
+ };
 pub struct ClientIp {
 pub ip: IpAddr,

src/config.rs (32 changes)

@@ -39,7 +39,6 @@ macro_rules! make_config {
 struct Inner {
 rocket_shutdown_handle: Option<rocket::Shutdown>,
-    ws_shutdown_handle: Option<tokio::sync::oneshot::Sender<()>>,
 templates: Handlebars<'static>,
 config: ConfigItems,
@@ -371,11 +370,7 @@
 },
 ws {
 /// Enable websocket notifications
-    websocket_enabled: bool, false, def, false;
-    /// Websocket address
-    websocket_address: String, false, def, "0.0.0.0".to_string();
-    /// Websocket port
-    websocket_port: u16, false, def, 3012;
+    enable_websocket: bool, false, def, true;
 },
 push {
 /// Enable push notifications
@@ -691,6 +686,10 @@
 email_expiration_time: u64, true, def, 600;
 /// Maximum attempts |> Maximum attempts before an email token is reset and a new email will need to be sent
 email_attempts_limit: u64, true, def, 3;
+    /// Automatically enforce at login |> Setup email 2FA provider regardless of any organization policy
+    email_2fa_enforce_on_verified_invite: bool, true, def, false;
+    /// Auto-enable 2FA (Know the risks!) |> Automatically setup email 2FA as fallback provider when needed
+    email_2fa_auto_fallback: bool, true, def, false;
 },
 }
@@ -893,6 +892,13 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
 err!("To enable email 2FA, a mail transport must be configured")
 }
+    if !cfg._enable_email_2fa && cfg.email_2fa_enforce_on_verified_invite {
+        err!("To enforce email 2FA on verified invitations, email 2fa has to be enabled!");
+    }
+    if !cfg._enable_email_2fa && cfg.email_2fa_auto_fallback {
+        err!("To use email 2FA as automatic fallback, email 2fa has to be enabled!");
+    }
 // Check if the icon blacklist regex is valid
 if let Some(ref r) = cfg.icon_blacklist_regex {
 let validate_regex = regex::Regex::new(r);
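
Both new toggles are validated against `_enable_email_2fa` at startup, so a half-configured instance fails fast instead of silently ignoring the options. A reduced sketch of that guard, with `err!` swapped for a plain `Result` so it runs standalone:

    struct ConfigItems {
        _enable_email_2fa: bool,
        email_2fa_enforce_on_verified_invite: bool,
        email_2fa_auto_fallback: bool,
    }

    fn validate_config(cfg: &ConfigItems) -> Result<(), String> {
        // Both options depend on the email 2FA provider being available at all.
        if !cfg._enable_email_2fa && cfg.email_2fa_enforce_on_verified_invite {
            return Err("To enforce email 2FA on verified invitations, email 2FA has to be enabled!".into());
        }
        if !cfg._enable_email_2fa && cfg.email_2fa_auto_fallback {
            return Err("To use email 2FA as automatic fallback, email 2FA has to be enabled!".into());
        }
        Ok(())
    }

    fn main() {
        let cfg = ConfigItems {
            _enable_email_2fa: false,
            email_2fa_enforce_on_verified_invite: true,
            email_2fa_auto_fallback: false,
        };
        assert!(validate_config(&cfg).is_err());
    }
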
@@ -1071,7 +1077,6 @@ impl Config {
 Ok(Config {
 inner: RwLock::new(Inner {
 rocket_shutdown_handle: None,
-    ws_shutdown_handle: None,
 templates: load_templates(&config.templates_folder),
 config,
 _env,
@@ -1164,7 +1169,7 @@ impl Config {
 }
 pub fn delete_user_config(&self) -> Result<(), Error> {
-    crate::util::delete_file(&CONFIG_FILE)?;
+    std::fs::remove_file(&*CONFIG_FILE)?;
 // Empty user config
 let usr = ConfigBuilder::default();
@@ -1189,9 +1194,6 @@ impl Config {
 pub fn private_rsa_key(&self) -> String {
 format!("{}.pem", CONFIG.rsa_key_filename())
 }
- pub fn public_rsa_key(&self) -> String {
-     format!("{}.pub.pem", CONFIG.rsa_key_filename())
- }
 pub fn mail_enabled(&self) -> bool {
 let inner = &self.inner.read().unwrap().config;
 inner._enable_smtp && (inner.smtp_host.is_some() || inner.use_sendmail)
@@ -1240,16 +1242,8 @@ impl Config {
 self.inner.write().unwrap().rocket_shutdown_handle = Some(handle);
 }
- pub fn set_ws_shutdown_handle(&self, handle: tokio::sync::oneshot::Sender<()>) {
-     self.inner.write().unwrap().ws_shutdown_handle = Some(handle);
- }
 pub fn shutdown(&self) {
 if let Ok(mut c) = self.inner.write() {
-        if let Some(handle) = c.ws_shutdown_handle.take() {
-            handle.send(()).ok();
-        }
 if let Some(handle) = c.rocket_shutdown_handle.take() {
 handle.notify();
 }

src/db/mod.rs (4 changes)

@@ -389,13 +389,13 @@ pub async fn backup_database(conn: &mut DbConn) -> Result<(), Error> {
 pub async fn get_sql_server_version(conn: &mut DbConn) -> String {
 db_run! {@raw conn:
 postgresql, mysql {
-    sql_function!{
+    define_sql_function!{
 fn version() -> diesel::sql_types::Text;
 }
 diesel::select(version()).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
 }
 sqlite {
-    sql_function!{
+    define_sql_function!{
 fn sqlite_version() -> diesel::sql_types::Text;
 }
 diesel::select(sqlite_version()).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string())
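
Diesel 2.2 renamed the `sql_function!` macro to `define_sql_function!`; the declaration body is unchanged. A self-contained version of the SQLite arm, assuming diesel 2.2 with the `sqlite` feature enabled:

    use diesel::prelude::*;
    use diesel::sqlite::SqliteConnection;

    // Declares a zero-argument SQL function binding, exactly as in the diff.
    diesel::define_sql_function! {
        fn sqlite_version() -> diesel::sql_types::Text;
    }

    fn server_version(conn: &mut SqliteConnection) -> String {
        diesel::select(sqlite_version())
            .get_result::<String>(conn)
            .unwrap_or_else(|_| "Unknown".to_string())
    }

    fn main() {
        let mut conn = SqliteConnection::establish(":memory:").expect("in-memory sqlite");
        println!("SQLite {}", server_version(&mut conn));
    }
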

src/db/models/attachment.rs (18 changes)

@@ -42,13 +42,13 @@ impl Attachment {
 pub fn to_json(&self, host: &str) -> Value {
 json!({
-    "Id": self.id,
-    "Url": self.get_url(host),
-    "FileName": self.file_name,
-    "Size": self.file_size.to_string(),
-    "SizeName": crate::util::get_display_size(self.file_size),
-    "Key": self.akey,
-    "Object": "attachment"
+    "id": self.id,
+    "url": self.get_url(host),
+    "fileName": self.file_name,
+    "size": self.file_size.to_string(),
+    "sizeName": crate::util::get_display_size(self.file_size),
+    "key": self.akey,
+    "object": "attachment"
 })
 }
 }
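
This is the first of many `to_json` bodies below that switch their response keys from PascalCase to camelCase, matching the casing current Bitwarden clients expect. A toy version of the shape of the change (struct and values invented for illustration):

    use serde_json::{json, Value};

    struct Attachment {
        id: String,
        file_name: String,
    }

    impl Attachment {
        fn to_json(&self) -> Value {
            // Keys are emitted in camelCase now; older code used "Id"/"FileName".
            json!({
                "id": self.id,
                "fileName": self.file_name,
                "object": "attachment",
            })
        }
    }

    fn main() {
        let a = Attachment { id: "xyz".into(), file_name: "note.txt".into() };
        assert_eq!(a.to_json()["fileName"], "note.txt");
    }
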
@@ -95,7 +95,7 @@ impl Attachment {
 pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
 db_run! { conn: {
-    crate::util::retry(
+    let _: () = crate::util::retry(
 || diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn),
 10,
 )
@@ -103,7 +103,7 @@ impl Attachment {
 let file_path = &self.get_file_path();
-    match crate::util::delete_file(file_path) {
+    match std::fs::remove_file(file_path) {
 // Ignore "file not found" errors. This can happen when the
 // upstream caller has already cleaned up the file as part of
 // its own error handling.

src/db/models/auth_request.rs (2 changes)

@@ -140,7 +140,7 @@ impl AuthRequest {
 }
 pub async fn purge_expired_auth_requests(conn: &mut DbConn) {
-    let expiry_time = Utc::now().naive_utc() - chrono::Duration::minutes(5); // after 5 minutes, clients reject the request
+    let expiry_time = Utc::now().naive_utc() - chrono::TimeDelta::try_minutes(5).unwrap(); // after 5 minutes, clients reject the request
 for auth_request in Self::find_created_before(&expiry_time, conn).await {
 auth_request.delete(conn).await.ok();
 }

src/db/models/cipher.rs (232 changes)

@@ -1,5 +1,6 @@
+ use crate::util::LowerCase;
 use crate::CONFIG;
- use chrono::{Duration, NaiveDateTime, Utc};
+ use chrono::{NaiveDateTime, TimeDelta, Utc};
 use serde_json::Value;
 use super::{
@@ -81,7 +82,7 @@ impl Cipher {
 pub fn validate_notes(cipher_data: &[CipherData]) -> EmptyResult {
 let mut validation_errors = serde_json::Map::new();
 for (index, cipher) in cipher_data.iter().enumerate() {
-    if let Some(note) = &cipher.Notes {
+    if let Some(note) = &cipher.notes {
 if note.len() > 10_000 {
 validation_errors.insert(
 format!("Ciphers[{index}].Notes"),
@@ -135,10 +136,6 @@
 }
 }
- let fields_json = self.fields.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
- let password_history_json =
-     self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
 // We don't need these values at all for Organizational syncs
 // Skip any other database calls if this is the case and just return false.
 let (read_only, hide_passwords) = if sync_type == CipherSyncType::User {
@@ -153,21 +150,49 @@
 (false, false)
 };
+ let fields_json: Vec<_> = self
+     .fields
+     .as_ref()
+     .and_then(|s| {
+         serde_json::from_str::<Vec<LowerCase<Value>>>(s)
+             .inspect_err(|e| warn!("Error parsing fields {:?}", e))
+             .ok()
+     })
+     .map(|d| d.into_iter().map(|d| d.data).collect())
+     .unwrap_or_default();
+ let password_history_json: Vec<_> = self
+     .password_history
+     .as_ref()
+     .and_then(|s| {
+         serde_json::from_str::<Vec<LowerCase<Value>>>(s)
+             .inspect_err(|e| warn!("Error parsing password history {:?}", e))
+             .ok()
+     })
+     .map(|d| d.into_iter().map(|d| d.data).collect())
+     .unwrap_or_default();
 // Get the type_data or a default to an empty json object '{}'.
 // If not passing an empty object, mobile clients will crash.
- let mut type_data_json: Value =
-     serde_json::from_str(&self.data).unwrap_or_else(|_| Value::Object(serde_json::Map::new()));
+ let mut type_data_json = serde_json::from_str::<LowerCase<Value>>(&self.data)
+     .map(|d| d.data)
+     .unwrap_or_else(|_| Value::Object(serde_json::Map::new()));
 // NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
 // Set the first element of the Uris array as Uri, this is needed by several (mobile) clients.
 if self.atype == 1 {
-    if type_data_json["Uris"].is_array() {
-        let uri = type_data_json["Uris"][0]["Uri"].clone();
-        type_data_json["Uri"] = uri;
+    if type_data_json["uris"].is_array() {
+        let uri = type_data_json["uris"][0]["uri"].clone();
+        type_data_json["uri"] = uri;
 } else {
 // Upstream always has an Uri key/value
-        type_data_json["Uri"] = Value::Null;
+        type_data_json["uri"] = Value::Null;
 }
 }
+ // Fix secure note issues when data is `{}`
+ // This breaks at least the native mobile clients
+ if self.atype == 2 && (self.data.eq("{}") || self.data.to_ascii_lowercase().eq("{\"type\":null}")) {
+     type_data_json = json!({"type": 0});
+ }
 // Clone the type_data and add some default value.
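
The parsing paths above now route stored JSON through `crate::util::LowerCase<Value>` so that records written by older releases with PascalCase keys come back out camelCased. The helper's real definition lives in `src/util.rs`; the function below is a hypothetical stand-in that shows the idea (lowercase the first letter of every key, recursively):

    use serde_json::{json, Value};

    // Hypothetical stand-in for util::LowerCase: lowercase the first letter of
    // every object key so "Uris"/"Uri" become "uris"/"uri".
    fn lower_keys(value: Value) -> Value {
        match value {
            Value::Object(map) => Value::Object(
                map.into_iter()
                    .map(|(k, v)| {
                        let mut c = k.chars();
                        let k = match c.next() {
                            Some(f) => f.to_lowercase().collect::<String>() + c.as_str(),
                            None => k,
                        };
                        (k, lower_keys(v))
                    })
                    .collect(),
            ),
            Value::Array(items) => Value::Array(items.into_iter().map(lower_keys).collect()),
            other => other,
        }
    }

    fn main() {
        let legacy = json!({"Uris": [{"Uri": "https://example.com"}]});
        assert!(lower_keys(legacy)["uris"][0]["uri"].is_string());
    }
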
@@ -175,10 +200,10 @@
 // NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
 // data_json should always contain the following keys with every atype
-    data_json["Fields"] = fields_json.clone();
-    data_json["Name"] = json!(self.name);
-    data_json["Notes"] = json!(self.notes);
-    data_json["PasswordHistory"] = password_history_json.clone();
+    data_json["fields"] = Value::Array(fields_json.clone());
+    data_json["name"] = json!(self.name);
+    data_json["notes"] = json!(self.notes);
+    data_json["passwordHistory"] = Value::Array(password_history_json.clone());
 let collection_ids = if let Some(cipher_sync_data) = cipher_sync_data {
 if let Some(cipher_collections) = cipher_sync_data.cipher_collections.get(&self.uuid) {
@@ -187,7 +212,7 @@
 Cow::from(Vec::with_capacity(0))
 }
 } else {
-    Cow::from(self.get_collections(user_uuid.to_string(), conn).await)
+    Cow::from(self.get_admin_collections(user_uuid.to_string(), conn).await)
 };
 // There are three types of cipher response models in upstream
@@ -198,48 +223,48 @@
 //
 // Ref: https://github.com/bitwarden/server/blob/master/src/Core/Models/Api/Response/CipherResponseModel.cs
 let mut json_object = json!({
-    "Object": "cipherDetails",
-    "Id": self.uuid,
-    "Type": self.atype,
-    "CreationDate": format_date(&self.created_at),
-    "RevisionDate": format_date(&self.updated_at),
-    "DeletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
-    "Reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
-    "OrganizationId": self.organization_uuid,
-    "Key": self.key,
-    "Attachments": attachments_json,
+    "object": "cipherDetails",
+    "id": self.uuid,
+    "type": self.atype,
+    "creationDate": format_date(&self.created_at),
+    "revisionDate": format_date(&self.updated_at),
+    "deletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
+    "reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
+    "organizationId": self.organization_uuid,
+    "key": self.key,
+    "attachments": attachments_json,
 // We have UseTotp set to true by default within the Organization model.
 // This variable together with UsersGetPremium is used to show or hide the TOTP counter.
-    "OrganizationUseTotp": true,
+    "organizationUseTotp": true,
 // This field is specific to the cipherDetails type.
-    "CollectionIds": collection_ids,
-    "Name": self.name,
-    "Notes": self.notes,
-    "Fields": fields_json,
-    "Data": data_json,
-    "PasswordHistory": password_history_json,
+    "collectionIds": collection_ids,
+    "name": self.name,
+    "notes": self.notes,
+    "fields": fields_json,
+    "data": data_json,
+    "passwordHistory": password_history_json,
 // All Cipher types are included by default as null, but only the matching one will be populated
-    "Login": null,
-    "SecureNote": null,
-    "Card": null,
-    "Identity": null,
+    "login": null,
+    "secureNote": null,
+    "card": null,
+    "identity": null,
 });
 // These values are only needed for user/default syncs
 // Not during an organizational sync like `get_org_details`
 // Skip adding these fields in that case
 if sync_type == CipherSyncType::User {
-    json_object["FolderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
+    json_object["folderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
 cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string())
 } else {
 self.get_folder_uuid(user_uuid, conn).await
 });
-    json_object["Favorite"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
+    json_object["favorite"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
 cipher_sync_data.cipher_favorites.contains(&self.uuid)
 } else {
 self.is_favorite(user_uuid, conn).await
@@ -247,15 +272,15 @@
 // These values are true by default, but can be false if the
 // cipher belongs to a collection or group where the org owner has enabled
 // the "Read Only" or "Hide Passwords" restrictions for the user.
-    json_object["Edit"] = json!(!read_only);
-    json_object["ViewPassword"] = json!(!hide_passwords);
+    json_object["edit"] = json!(!read_only);
+    json_object["viewPassword"] = json!(!hide_passwords);
 }
 let key = match self.atype {
-    1 => "Login",
-    2 => "SecureNote",
-    3 => "Card",
-    4 => "Identity",
+    1 => "login",
+    2 => "secureNote",
+    3 => "card",
+    4 => "identity",
 _ => panic!("Wrong type"),
 };
@@ -361,7 +386,7 @@
 pub async fn purge_trash(conn: &mut DbConn) {
 if let Some(auto_delete_days) = CONFIG.trash_auto_delete_days() {
 let now = Utc::now().naive_utc();
-    let dt = now - Duration::days(auto_delete_days);
+    let dt = now - TimeDelta::try_days(auto_delete_days).unwrap();
 for cipher in Self::find_deleted_before(&dt, conn).await {
 cipher.delete(conn).await.ok();
 }
@@ -431,7 +456,7 @@
 }
 if let Some(ref org_uuid) = self.organization_uuid {
 if let Some(cipher_sync_data) = cipher_sync_data {
-    return cipher_sync_data.user_group_full_access_for_organizations.get(org_uuid).is_some();
+    return cipher_sync_data.user_group_full_access_for_organizations.contains(org_uuid);
 } else {
 return Group::is_in_full_access_group(user_uuid, org_uuid, conn).await;
 }
@@ -754,31 +779,124 @@
 }
 pub async fn get_collections(&self, user_id: String, conn: &mut DbConn) -> Vec<String> {
-    db_run! {conn: {
-        ciphers_collections::table
-            .inner_join(collections::table.on(
-                collections::uuid.eq(ciphers_collections::collection_uuid)
-            ))
-            .inner_join(users_organizations::table.on(
-                users_organizations::org_uuid.eq(collections::org_uuid).and(
-                    users_organizations::user_uuid.eq(user_id.clone())
-                )
-            ))
-            .left_join(users_collections::table.on(
-                users_collections::collection_uuid.eq(ciphers_collections::collection_uuid).and(
-                    users_collections::user_uuid.eq(user_id.clone())
-                )
-            ))
-            .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
-            .filter(users_collections::user_uuid.eq(user_id).or( // User has access to collection
-                users_organizations::access_all.eq(true).or( // User has access all
-                    users_organizations::atype.le(UserOrgType::Admin as i32) // User is admin or owner
-                )
-            ))
-            .select(ciphers_collections::collection_uuid)
-            .load::<String>(conn).unwrap_or_default()
-    }}
+    if CONFIG.org_groups_enabled() {
+        db_run! {conn: {
+            ciphers_collections::table
+                .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
+                .inner_join(collections::table.on(
+                    collections::uuid.eq(ciphers_collections::collection_uuid)
+                ))
+                .left_join(users_organizations::table.on(
+                    users_organizations::org_uuid.eq(collections::org_uuid)
+                        .and(users_organizations::user_uuid.eq(user_id.clone()))
+                ))
+                .left_join(users_collections::table.on(
+                    users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
+                        .and(users_collections::user_uuid.eq(user_id.clone()))
+                ))
+                .left_join(groups_users::table.on(
+                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                ))
+                .left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
+                .left_join(collections_groups::table.on(
+                    collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid)
+                        .and(collections_groups::groups_uuid.eq(groups::uuid))
+                ))
+                .filter(users_organizations::access_all.eq(true) // User has access all
+                    .or(users_collections::user_uuid.eq(user_id) // User has access to collection
+                        .and(users_collections::read_only.eq(false)))
+                    .or(groups::access_all.eq(true)) // Access via groups
+                    .or(collections_groups::collections_uuid.is_not_null() // Access via groups
+                        .and(collections_groups::read_only.eq(false)))
+                )
+                .select(ciphers_collections::collection_uuid)
+                .load::<String>(conn).unwrap_or_default()
+        }}
+    } else {
+        db_run! {conn: {
+            ciphers_collections::table
+                .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
+                .inner_join(collections::table.on(
+                    collections::uuid.eq(ciphers_collections::collection_uuid)
+                ))
+                .inner_join(users_organizations::table.on(
+                    users_organizations::org_uuid.eq(collections::org_uuid)
+                        .and(users_organizations::user_uuid.eq(user_id.clone()))
+                ))
+                .left_join(users_collections::table.on(
+                    users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
+                        .and(users_collections::user_uuid.eq(user_id.clone()))
+                ))
+                .filter(users_organizations::access_all.eq(true) // User has access all
+                    .or(users_collections::user_uuid.eq(user_id) // User has access to collection
+                        .and(users_collections::read_only.eq(false)))
+                )
+                .select(ciphers_collections::collection_uuid)
+                .load::<String>(conn).unwrap_or_default()
+        }}
+    }
+ }
+ pub async fn get_admin_collections(&self, user_id: String, conn: &mut DbConn) -> Vec<String> {
+    if CONFIG.org_groups_enabled() {
+        db_run! {conn: {
+            ciphers_collections::table
+                .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
+                .inner_join(collections::table.on(
+                    collections::uuid.eq(ciphers_collections::collection_uuid)
+                ))
+                .left_join(users_organizations::table.on(
+                    users_organizations::org_uuid.eq(collections::org_uuid)
+                        .and(users_organizations::user_uuid.eq(user_id.clone()))
+                ))
+                .left_join(users_collections::table.on(
+                    users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
+                        .and(users_collections::user_uuid.eq(user_id.clone()))
+                ))
+                .left_join(groups_users::table.on(
+                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                ))
+                .left_join(groups::table.on(groups::uuid.eq(groups_users::groups_uuid)))
+                .left_join(collections_groups::table.on(
+                    collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid)
+                        .and(collections_groups::groups_uuid.eq(groups::uuid))
+                ))
+                .filter(users_organizations::access_all.eq(true) // User has access all
+                    .or(users_collections::user_uuid.eq(user_id) // User has access to collection
+                        .and(users_collections::read_only.eq(false)))
+                    .or(groups::access_all.eq(true)) // Access via groups
+                    .or(collections_groups::collections_uuid.is_not_null() // Access via groups
+                        .and(collections_groups::read_only.eq(false)))
+                    .or(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner
+                )
+                .select(ciphers_collections::collection_uuid)
+                .load::<String>(conn).unwrap_or_default()
+        }}
+    } else {
+        db_run! {conn: {
+            ciphers_collections::table
+                .filter(ciphers_collections::cipher_uuid.eq(&self.uuid))
+                .inner_join(collections::table.on(
+                    collections::uuid.eq(ciphers_collections::collection_uuid)
+                ))
+                .inner_join(users_organizations::table.on(
+                    users_organizations::org_uuid.eq(collections::org_uuid)
+                        .and(users_organizations::user_uuid.eq(user_id.clone()))
+                ))
+                .left_join(users_collections::table.on(
+                    users_collections::collection_uuid.eq(ciphers_collections::collection_uuid)
+                        .and(users_collections::user_uuid.eq(user_id.clone()))
+                ))
+                .filter(users_organizations::access_all.eq(true) // User has access all
+                    .or(users_collections::user_uuid.eq(user_id) // User has access to collection
+                        .and(users_collections::read_only.eq(false)))
+                    .or(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner
+                )
+                .select(ciphers_collections::collection_uuid)
+                .load::<String>(conn).unwrap_or_default()
+        }}
+    }
 }
 /// Return a Vec with (cipher_uuid, collection_uuid)
 /// This is used during a full sync so we only need one query for all collections accessible.
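
Both collection getters branch on `CONFIG.org_groups_enabled()` and differ only in whether the admin clause joins the filter. Restating the filter as a plain predicate makes the relationship visible (the field names map to the joins above; this is an illustration, not the diesel code):

    // The filter clauses of the two queries above, restated as one predicate.
    // Each flag is a fact the joins establish for a (user, collection) pair.
    struct CollectionAccess {
        access_all_via_org: bool,   // users_organizations::access_all
        direct_write: bool,         // users_collections row with read_only == false
        access_all_via_group: bool, // groups::access_all
        group_write: bool,          // collections_groups row with read_only == false
        admin_or_owner: bool,       // users_organizations::atype <= Admin
    }

    // get_collections passes admin_variant == false, get_admin_collections == true.
    fn visible(a: &CollectionAccess, admin_variant: bool) -> bool {
        a.access_all_via_org
            || a.direct_write
            || a.access_all_via_group
            || a.group_write
            || (admin_variant && a.admin_or_owner)
    }

    fn main() {
        let owner = CollectionAccess {
            access_all_via_org: false,
            direct_write: false,
            access_all_via_group: false,
            group_write: false,
            admin_or_owner: true,
        };
        assert!(!visible(&owner, false) && visible(&owner, true));
    }
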

src/db/models/collection.rs (106 changes)

@@ -1,6 +1,6 @@
 use serde_json::Value;
- use super::{CollectionGroup, User, UserOrgStatus, UserOrgType, UserOrganization};
+ use super::{CollectionGroup, GroupUser, User, UserOrgStatus, UserOrgType, UserOrganization};
 use crate::CONFIG;
 db_object! {
@@ -49,11 +49,11 @@ impl Collection {
 pub fn to_json(&self) -> Value {
 json!({
-    "ExternalId": self.external_id,
-    "Id": self.uuid,
-    "OrganizationId": self.org_uuid,
-    "Name": self.name,
-    "Object": "collection",
+    "externalId": self.external_id,
+    "id": self.uuid,
+    "organizationId": self.org_uuid,
+    "name": self.name,
+    "object": "collection",
 })
 }
@@ -97,11 +97,20 @@ impl Collection {
 };
 let mut json_object = self.to_json();
-    json_object["Object"] = json!("collectionDetails");
-    json_object["ReadOnly"] = json!(read_only);
-    json_object["HidePasswords"] = json!(hide_passwords);
+    json_object["object"] = json!("collectionDetails");
+    json_object["readOnly"] = json!(read_only);
+    json_object["hidePasswords"] = json!(hide_passwords);
 json_object
 }
+ pub async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
+     org_user.has_status(UserOrgStatus::Confirmed)
+         && (org_user.has_full_access()
+             || CollectionUser::has_access_to_collection_by_user(col_id, &org_user.user_uuid, conn).await
+             || (CONFIG.org_groups_enabled()
+                 && (GroupUser::has_full_access_by_member(&org_user.org_uuid, &org_user.uuid, conn).await
+                     || GroupUser::has_access_to_collection_by_member(col_id, &org_user.uuid, conn).await)))
+ }
 }
 use crate::db::DbConn;
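
The relocated `can_access_collection` now also honors group-granted access, but only when groups are enabled, and it additionally requires the membership to be confirmed. The same logic as a pure function over booleans, each parameter annotated with the call it stands for:

    fn can_access_collection(
        confirmed: bool,         // org_user.has_status(UserOrgStatus::Confirmed)
        full_access: bool,       // org_user.has_full_access()
        direct_access: bool,     // CollectionUser::has_access_to_collection_by_user
        groups_enabled: bool,    // CONFIG.org_groups_enabled()
        group_full_access: bool, // GroupUser::has_full_access_by_member
        group_access: bool,      // GroupUser::has_access_to_collection_by_member
    ) -> bool {
        confirmed
            && (full_access
                || direct_access
                || (groups_enabled && (group_full_access || group_access)))
    }

    fn main() {
        // An invited-but-unconfirmed member is rejected no matter what else holds.
        assert!(!can_access_collection(false, true, true, true, true, true));
    }
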
@@ -252,17 +261,6 @@
 }
 }
- // Check if a user has access to a specific collection
- // FIXME: This needs to be reviewed. The query used by `find_by_user_uuid` could be adjusted to filter when needed.
- // For now this is a good solution without making to much changes.
- pub async fn has_access_by_collection_and_user_uuid(
-     collection_uuid: &str,
-     user_uuid: &str,
-     conn: &mut DbConn,
- ) -> bool {
-     Self::find_by_user_uuid(user_uuid.to_owned(), conn).await.into_iter().any(|c| c.uuid == collection_uuid)
- }
 pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
 Self::find_by_user_uuid(user_uuid.to_owned(), conn)
 .await
@@ -373,17 +371,17 @@
 pub async fn is_writable_by_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
 let user_uuid = user_uuid.to_string();
-    db_run! { conn: {
-        collections::table
-            .left_join(users_collections::table.on(
-                users_collections::collection_uuid.eq(collections::uuid).and(
-                    users_collections::user_uuid.eq(user_uuid.clone())
-                )
-            ))
-            .left_join(users_organizations::table.on(
-                collections::org_uuid.eq(users_organizations::org_uuid).and(
-                    users_organizations::user_uuid.eq(user_uuid)
-                )
-            ))
-            .left_join(groups_users::table.on(
-                groups_users::users_organizations_uuid.eq(users_organizations::uuid)
-            ))
-            .left_join(groups::table.on(
-                groups::uuid.eq(groups_users::groups_uuid)
-            ))
-            .left_join(collections_groups::table.on(
-                collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
-                    collections_groups::collections_uuid.eq(collections::uuid)
-                )
-            ))
-            .filter(collections::uuid.eq(&self.uuid))
-            .filter(
-                users_collections::collection_uuid.eq(&self.uuid).and(users_collections::read_only.eq(false)).or( // Directly accessed collection
-                users_organizations::access_all.eq(true).or( // access_all in Organization
-                    users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
-                )).or(
-                    groups::access_all.eq(true) // access_all in groups
-                ).or( // access via groups
-                    groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
-                        collections_groups::collections_uuid.is_not_null().and(
-                            collections_groups::read_only.eq(false))
-                    )
-                )
-            )
-            .count()
-            .first::<i64>(conn)
-            .ok()
-            .unwrap_or(0) != 0
-    }}
+    if CONFIG.org_groups_enabled() {
+        db_run! { conn: {
+            collections::table
+                .filter(collections::uuid.eq(&self.uuid))
+                .inner_join(users_organizations::table.on(
+                    collections::org_uuid.eq(users_organizations::org_uuid)
+                        .and(users_organizations::user_uuid.eq(user_uuid.clone()))
+                ))
+                .left_join(users_collections::table.on(
+                    users_collections::collection_uuid.eq(collections::uuid)
+                        .and(users_collections::user_uuid.eq(user_uuid))
+                ))
+                .left_join(groups_users::table.on(
+                    groups_users::users_organizations_uuid.eq(users_organizations::uuid)
+                ))
+                .left_join(groups::table.on(
+                    groups::uuid.eq(groups_users::groups_uuid)
+                ))
+                .left_join(collections_groups::table.on(
+                    collections_groups::groups_uuid.eq(groups_users::groups_uuid)
+                        .and(collections_groups::collections_uuid.eq(collections::uuid))
+                ))
+                .filter(users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
+                    .or(users_organizations::access_all.eq(true)) // access_all via membership
+                    .or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection
+                        .and(users_collections::read_only.eq(false)))
+                    .or(groups::access_all.eq(true)) // access_all via group
+                    .or(collections_groups::collections_uuid.is_not_null() // write access given via group
+                        .and(collections_groups::read_only.eq(false)))
+                )
+                .count()
+                .first::<i64>(conn)
+                .ok()
+                .unwrap_or(0) != 0
+        }}
+    } else {
+        db_run! { conn: {
+            collections::table
+                .filter(collections::uuid.eq(&self.uuid))
+                .inner_join(users_organizations::table.on(
+                    collections::org_uuid.eq(users_organizations::org_uuid)
+                        .and(users_organizations::user_uuid.eq(user_uuid.clone()))
+                ))
+                .left_join(users_collections::table.on(
+                    users_collections::collection_uuid.eq(collections::uuid)
+                        .and(users_collections::user_uuid.eq(user_uuid))
+                ))
+                .filter(users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
+                    .or(users_organizations::access_all.eq(true)) // access_all via membership
+                    .or(users_collections::collection_uuid.eq(&self.uuid) // write access given to collection
+                        .and(users_collections::read_only.eq(false)))
+                )
+                .count()
+                .first::<i64>(conn)
+                .ok()
+                .unwrap_or(0) != 0
+        }}
+    }
 }
 pub async fn hide_passwords_for_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
 let user_uuid = user_uuid.to_string();
@@ -634,7 +648,7 @@ impl CollectionUser {
 db_run! { conn: {
 for user in collectionusers {
-    diesel::delete(users_collections::table.filter(
+    let _: () = diesel::delete(users_collections::table.filter(
 users_collections::user_uuid.eq(user_uuid)
 .and(users_collections::collection_uuid.eq(user.collection_uuid))
 ))
@@ -644,6 +658,10 @@
 Ok(())
 }}
 }
+ pub async fn has_access_to_collection_by_user(col_id: &str, user_uuid: &str, conn: &mut DbConn) -> bool {
+     Self::find_by_collection_and_user(col_id, user_uuid, conn).await.is_some()
+ }
 }
 /// Database methods
4
src/db/models/device.rs

@ -67,8 +67,8 @@ impl Device {
} }
// Update the expiration of the device and the last update date // Update the expiration of the device and the last update date
let time_now = Utc::now().naive_utc(); let time_now = Utc::now();
self.updated_at = time_now; self.updated_at = time_now.naive_utc();
// --- // ---
// Disabled these keys to be added to the JWT since they could cause the JWT to get too large // Disabled these keys to be added to the JWT since they could cause the JWT to get too large

125
src/db/models/emergency_access.rs

@ -58,11 +58,11 @@ impl EmergencyAccess {
pub fn to_json(&self) -> Value { pub fn to_json(&self) -> Value {
json!({ json!({
"Id": self.uuid, "id": self.uuid,
"Status": self.status, "status": self.status,
"Type": self.atype, "type": self.atype,
"WaitTimeDays": self.wait_time_days, "waitTimeDays": self.wait_time_days,
"Object": "emergencyAccess", "object": "emergencyAccess",
}) })
} }
@ -70,36 +70,43 @@ impl EmergencyAccess {
let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn).await.expect("Grantor user not found."); let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn).await.expect("Grantor user not found.");
json!({ json!({
"Id": self.uuid, "id": self.uuid,
"Status": self.status, "status": self.status,
"Type": self.atype, "type": self.atype,
"WaitTimeDays": self.wait_time_days, "waitTimeDays": self.wait_time_days,
"GrantorId": grantor_user.uuid, "grantorId": grantor_user.uuid,
"Email": grantor_user.email, "email": grantor_user.email,
"Name": grantor_user.name, "name": grantor_user.name,
"Object": "emergencyAccessGrantorDetails", "object": "emergencyAccessGrantorDetails",
}) })
} }
pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Value { pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Option<Value> {
let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() { let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() {
Some(User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")) User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")
} else if let Some(email) = self.email.as_deref() { } else if let Some(email) = self.email.as_deref() {
Some(User::find_by_mail(email, conn).await.expect("Grantee user not found.")) match User::find_by_mail(email, conn).await {
Some(user) => user,
None => {
// remove outstanding invitations which should not exist
let _ = Self::delete_all_by_grantee_email(email, conn).await;
return None;
}
}
} else { } else {
None return None;
}; };
json!({ Some(json!({
"Id": self.uuid, "id": self.uuid,
"Status": self.status, "status": self.status,
"Type": self.atype, "type": self.atype,
"WaitTimeDays": self.wait_time_days, "waitTimeDays": self.wait_time_days,
"GranteeId": grantee_user.as_ref().map_or("", |u| &u.uuid), "granteeId": grantee_user.uuid,
"Email": grantee_user.as_ref().map_or("", |u| &u.email), "email": grantee_user.email,
"Name": grantee_user.as_ref().map_or("", |u| &u.name), "name": grantee_user.name,
"Object": "emergencyAccessGranteeDetails", "object": "emergencyAccessGranteeDetails",
}) }))
} }
} }
@ -174,7 +181,7 @@ impl EmergencyAccess {
// Update the grantee so that it will refresh it's status. // Update the grantee so that it will refresh it's status.
User::update_uuid_revision(self.grantee_uuid.as_ref().expect("Error getting grantee"), conn).await; User::update_uuid_revision(self.grantee_uuid.as_ref().expect("Error getting grantee"), conn).await;
self.status = status; self.status = status;
self.updated_at = date.to_owned(); date.clone_into(&mut self.updated_at);
db_run! {conn: { db_run! {conn: {
crate::util::retry(|| { crate::util::retry(|| {
@ -192,7 +199,7 @@ impl EmergencyAccess {
conn: &mut DbConn, conn: &mut DbConn,
) -> EmptyResult { ) -> EmptyResult {
self.last_notification_at = Some(date.to_owned()); self.last_notification_at = Some(date.to_owned());
self.updated_at = date.to_owned(); date.clone_into(&mut self.updated_at);
db_run! {conn: { db_run! {conn: {
crate::util::retry(|| { crate::util::retry(|| {
@ -214,6 +221,13 @@ impl EmergencyAccess {
Ok(()) Ok(())
} }
pub async fn delete_all_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
for ea in Self::find_all_invited_by_grantee_email(grantee_email, conn).await {
ea.delete(conn).await?;
}
Ok(())
}
pub async fn delete(self, conn: &mut DbConn) -> EmptyResult { pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
User::update_uuid_revision(&self.grantor_uuid, conn).await; User::update_uuid_revision(&self.grantor_uuid, conn).await;
@ -224,15 +238,6 @@ impl EmergencyAccess {
}} }}
} }
pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::uuid.eq(uuid))
.first::<EmergencyAccessDb>(conn)
.ok().from_db()
}}
}
pub async fn find_by_grantor_uuid_and_grantee_uuid_or_email( pub async fn find_by_grantor_uuid_and_grantee_uuid_or_email(
grantor_uuid: &str, grantor_uuid: &str,
grantee_uuid: &str, grantee_uuid: &str,
@ -267,6 +272,26 @@ impl EmergencyAccess {
}} }}
} }
pub async fn find_by_uuid_and_grantee_uuid(uuid: &str, grantee_uuid: &str, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::uuid.eq(uuid))
.filter(emergency_access::grantee_uuid.eq(grantee_uuid))
.first::<EmergencyAccessDb>(conn)
.ok().from_db()
}}
}
pub async fn find_by_uuid_and_grantee_email(uuid: &str, grantee_email: &str, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::uuid.eq(uuid))
.filter(emergency_access::email.eq(grantee_email))
.first::<EmergencyAccessDb>(conn)
.ok().from_db()
}}
}
pub async fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
emergency_access::table emergency_access::table
@ -285,6 +310,15 @@ impl EmergencyAccess {
}} }}
} }
pub async fn find_all_invited_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::email.eq(grantee_email))
.filter(emergency_access::status.eq(EmergencyAccessStatus::Invited as i32))
.load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
}}
}
pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &mut DbConn) -> Vec<Self> { pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: { db_run! { conn: {
emergency_access::table emergency_access::table
@ -292,6 +326,21 @@ impl EmergencyAccess {
.load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db() .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
}} }}
} }
pub async fn accept_invite(&mut self, grantee_uuid: &str, grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
if self.email.is_none() || self.email.as_ref().unwrap() != grantee_email {
err!("User email does not match invite.");
}
if self.status == EmergencyAccessStatus::Accepted as i32 {
err!("Emergency contact already accepted.");
}
self.status = EmergencyAccessStatus::Accepted as i32;
self.grantee_uuid = Some(String::from(grantee_uuid));
self.email = None;
self.save(conn).await
}
} }
// endregion // endregion
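
`accept_invite` moves the invite through its state transition behind two guard clauses. A runnable restatement over a plain struct (the status constant assumes `EmergencyAccessStatus::Accepted == 1`, as in the upstream enum):

    // Invite acceptance, isolated from the database layer.
    struct EmergencyAccess {
        status: i32,
        email: Option<String>,
        grantee_uuid: Option<String>,
    }

    const ACCEPTED: i32 = 1; // assumed value of EmergencyAccessStatus::Accepted

    fn accept_invite(ea: &mut EmergencyAccess, grantee_uuid: &str, grantee_email: &str) -> Result<(), &'static str> {
        if ea.email.as_deref() != Some(grantee_email) {
            return Err("User email does not match invite.");
        }
        if ea.status == ACCEPTED {
            return Err("Emergency contact already accepted.");
        }
        ea.status = ACCEPTED;
        ea.grantee_uuid = Some(grantee_uuid.to_string());
        ea.email = None; // the invite email is dropped once a real user is linked
        Ok(())
    }

    fn main() {
        let mut ea = EmergencyAccess { status: 0, email: Some("a@b.c".into()), grantee_uuid: None };
        assert!(accept_invite(&mut ea, "uuid-1", "a@b.c").is_ok());
        assert!(accept_invite(&mut ea, "uuid-1", "a@b.c").is_err()); // already accepted
    }
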

src/db/models/event.rs (4 changes)

@@ -3,7 +3,7 @@
 use serde_json::Value;
 use crate::{api::EmptyResult, error::MapResult, CONFIG};
- use chrono::{Duration, NaiveDateTime, Utc};
+ use chrono::{NaiveDateTime, TimeDelta, Utc};
 // https://bitwarden.com/help/event-logs/
@@ -316,7 +316,7 @@ impl Event {
 pub async fn clean_events(conn: &mut DbConn) -> EmptyResult {
 if let Some(days_to_retain) = CONFIG.events_days_retain() {
-    let dt = Utc::now().naive_utc() - Duration::days(days_to_retain);
+    let dt = Utc::now().naive_utc() - TimeDelta::try_days(days_to_retain).unwrap();
 db_run! { conn: {
 diesel::delete(event::table.filter(event::event_date.lt(dt)))
 .execute(conn)
8
src/db/models/folder.rs

@ -43,10 +43,10 @@ impl Folder {
use crate::util::format_date; use crate::util::format_date;
json!({ json!({
"Id": self.uuid, "id": self.uuid,
"RevisionDate": format_date(&self.updated_at), "revisionDate": format_date(&self.updated_at),
"Name": self.name, "name": self.name,
"Object": "folder", "object": "folder",
}) })
} }
} }

74
src/db/models/group.rs

@ -58,14 +58,14 @@ impl Group {
use crate::util::format_date; use crate::util::format_date;
json!({ json!({
"Id": self.uuid, "id": self.uuid,
"OrganizationId": self.organizations_uuid, "organizationId": self.organizations_uuid,
"Name": self.name, "name": self.name,
"AccessAll": self.access_all, "accessAll": self.access_all,
"ExternalId": self.external_id, "externalId": self.external_id,
"CreationDate": format_date(&self.creation_date), "creationDate": format_date(&self.creation_date),
"RevisionDate": format_date(&self.revision_date), "revisionDate": format_date(&self.revision_date),
"Object": "group" "object": "group"
}) })
} }
@ -75,21 +75,21 @@ impl Group {
.iter() .iter()
.map(|entry| { .map(|entry| {
json!({ json!({
"Id": entry.collections_uuid, "id": entry.collections_uuid,
"ReadOnly": entry.read_only, "readOnly": entry.read_only,
"HidePasswords": entry.hide_passwords "hidePasswords": entry.hide_passwords
}) })
}) })
.collect(); .collect();
json!({ json!({
"Id": self.uuid, "id": self.uuid,
"OrganizationId": self.organizations_uuid, "organizationId": self.organizations_uuid,
"Name": self.name, "name": self.name,
"AccessAll": self.access_all, "accessAll": self.access_all,
"ExternalId": self.external_id, "externalId": self.external_id,
"Collections": collections_groups, "collections": collections_groups,
"Object": "groupDetails" "object": "groupDetails"
}) })
} }
@ -203,10 +203,11 @@ impl Group {
}} }}
} }
pub async fn find_by_external_id(id: &str, conn: &mut DbConn) -> Option<Self> { pub async fn find_by_external_id_and_org(external_id: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
db_run! { conn: { db_run! { conn: {
groups::table groups::table
.filter(groups::external_id.eq(id)) .filter(groups::external_id.eq(external_id))
.filter(groups::organizations_uuid.eq(org_uuid))
.first::<GroupDb>(conn) .first::<GroupDb>(conn)
.ok() .ok()
.from_db() .from_db()
@ -486,6 +487,39 @@ impl GroupUser {
}} }}
} }
pub async fn has_access_to_collection_by_member(
collection_uuid: &str,
member_uuid: &str,
conn: &mut DbConn,
) -> bool {
db_run! { conn: {
groups_users::table
.inner_join(collections_groups::table.on(
collections_groups::groups_uuid.eq(groups_users::groups_uuid)
))
.filter(collections_groups::collections_uuid.eq(collection_uuid))
.filter(groups_users::users_organizations_uuid.eq(member_uuid))
.count()
.first::<i64>(conn)
.unwrap_or(0) != 0
}}
}
pub async fn has_full_access_by_member(org_uuid: &str, member_uuid: &str, conn: &mut DbConn) -> bool {
db_run! { conn: {
groups_users::table
.inner_join(groups::table.on(
groups::uuid.eq(groups_users::groups_uuid)
))
.filter(groups::organizations_uuid.eq(org_uuid))
.filter(groups::access_all.eq(true))
.filter(groups_users::users_organizations_uuid.eq(member_uuid))
.count()
.first::<i64>(conn)
.unwrap_or(0) != 0
}}
}
pub async fn update_user_revision(&self, conn: &mut DbConn) { pub async fn update_user_revision(&self, conn: &mut DbConn) {
match UserOrganization::find_by_uuid(&self.users_organizations_uuid, conn).await { match UserOrganization::find_by_uuid(&self.users_organizations_uuid, conn).await {
Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await, Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await,

40
src/db/models/org_policy.rs

@ -4,7 +4,6 @@ use serde_json::Value;
use crate::api::EmptyResult; use crate::api::EmptyResult;
use crate::db::DbConn; use crate::db::DbConn;
use crate::error::MapResult; use crate::error::MapResult;
use crate::util::UpCase;
use super::{TwoFactor, UserOrgStatus, UserOrgType, UserOrganization}; use super::{TwoFactor, UserOrgStatus, UserOrgType, UserOrganization};
@ -39,16 +38,18 @@ pub enum OrgPolicyType {
// https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/SendOptionsPolicyData.cs // https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/SendOptionsPolicyData.cs
#[derive(Deserialize)] #[derive(Deserialize)]
#[allow(non_snake_case)] #[serde(rename_all = "camelCase")]
pub struct SendOptionsPolicyData { pub struct SendOptionsPolicyData {
pub DisableHideEmail: bool, #[serde(rename = "disableHideEmail", alias = "DisableHideEmail")]
pub disable_hide_email: bool,
} }
// https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/ResetPasswordDataModel.cs // https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/ResetPasswordDataModel.cs
#[derive(Deserialize)] #[derive(Deserialize)]
#[allow(non_snake_case)] #[serde(rename_all = "camelCase")]
pub struct ResetPasswordDataModel { pub struct ResetPasswordDataModel {
pub AutoEnrollEnabled: bool, #[serde(rename = "autoEnrollEnabled", alias = "AutoEnrollEnabled")]
pub auto_enroll_enabled: bool,
} }
pub type OrgPolicyResult = Result<(), OrgPolicyErr>; pub type OrgPolicyResult = Result<(), OrgPolicyErr>;
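
The serde attributes do double duty here: `rename_all`/`rename` expose camelCase on the wire, while `alias` keeps policy blobs stored by older versions (PascalCase keys) deserializable, which is what allowed the `UpCase<T>` wrapper to be deleted. A self-contained demonstration:

    use serde::Deserialize;

    // Mirrors the pattern in the diff: snake_case fields in Rust, camelCase
    // on the wire, plus a PascalCase alias for data written by old releases.
    #[derive(Deserialize)]
    #[serde(rename_all = "camelCase")]
    struct ResetPasswordDataModel {
        #[serde(rename = "autoEnrollEnabled", alias = "AutoEnrollEnabled")]
        auto_enroll_enabled: bool,
    }

    fn main() {
        let new_style: ResetPasswordDataModel =
            serde_json::from_str(r#"{"autoEnrollEnabled": true}"#).unwrap();
        let old_style: ResetPasswordDataModel =
            serde_json::from_str(r#"{"AutoEnrollEnabled": true}"#).unwrap();
        assert!(new_style.auto_enroll_enabled && old_style.auto_enroll_enabled);
    }
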
@@ -78,12 +79,12 @@ impl OrgPolicy {
 pub fn to_json(&self) -> Value {
 let data_json: Value = serde_json::from_str(&self.data).unwrap_or(Value::Null);
 json!({
-    "Id": self.uuid,
-    "OrganizationId": self.org_uuid,
-    "Type": self.atype,
-    "Data": data_json,
-    "Enabled": self.enabled,
-    "Object": "policy",
+    "id": self.uuid,
+    "organizationId": self.org_uuid,
+    "type": self.atype,
+    "data": data_json,
+    "enabled": self.enabled,
+    "object": "policy",
 })
 }
 }
@@ -114,7 +115,7 @@ impl OrgPolicy {
 // We need to make sure we're not going to violate the unique constraint on org_uuid and atype.
 // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
 // not support multiple constraints on ON CONFLICT clauses.
-    diesel::delete(
+    let _: () = diesel::delete(
 org_policies::table
 .filter(org_policies::org_uuid.eq(&self.org_uuid))
 .filter(org_policies::atype.eq(&self.atype)),
@@ -307,9 +308,9 @@ impl OrgPolicy {
 pub async fn org_is_reset_password_auto_enroll(org_uuid: &str, conn: &mut DbConn) -> bool {
 match OrgPolicy::find_by_org_and_type(org_uuid, OrgPolicyType::ResetPassword, conn).await {
-    Some(policy) => match serde_json::from_str::<UpCase<ResetPasswordDataModel>>(&policy.data) {
+    Some(policy) => match serde_json::from_str::<ResetPasswordDataModel>(&policy.data) {
 Ok(opts) => {
-    return policy.enabled && opts.data.AutoEnrollEnabled;
+    return policy.enabled && opts.auto_enroll_enabled;
 }
 _ => error!("Failed to deserialize ResetPasswordDataModel: {}", policy.data),
 },
@@ -327,9 +328,9 @@ impl OrgPolicy {
 {
 if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
 if user.atype < UserOrgType::Admin {
-    match serde_json::from_str::<UpCase<SendOptionsPolicyData>>(&policy.data) {
+    match serde_json::from_str::<SendOptionsPolicyData>(&policy.data) {
 Ok(opts) => {
-    if opts.data.DisableHideEmail {
+    if opts.disable_hide_email {
 return true;
 }
 }
@@ -340,4 +341,11 @@ impl OrgPolicy {
 }
 false
 }
+ pub async fn is_enabled_by_org(org_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> bool {
+     if let Some(policy) = OrgPolicy::find_by_org_and_type(org_uuid, policy_type, conn).await {
+         return policy.enabled;
+     }
+     false
+ }
 }
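
`is_enabled_by_org` is a small convenience: a policy counts as enabled only if a row exists and its flag is set. Isolated, the logic is a single `map_or` (the lookup below is a stub standing in for `find_by_org_and_type`):

    struct Policy {
        enabled: bool,
    }

    // Stub lookup; the real code queries org_policies by org and type.
    fn find_policy(_org_uuid: &str) -> Option<Policy> {
        None
    }

    fn is_enabled_by_org(org_uuid: &str) -> bool {
        // Equivalent to the if-let in the diff; map_or folds both arms.
        find_policy(org_uuid).map_or(false, |p| p.enabled)
    }

    fn main() {
        assert!(!is_enabled_by_org("some-org"));
    }
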

src/db/models/organization.rs (261 changes)

@@ -153,39 +153,39 @@
 // https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/Organizations/OrganizationResponseModel.cs
 pub fn to_json(&self) -> Value {
 json!({
-    "Id": self.uuid,
-    "Identifier": null, // not supported by us
-    "Name": self.name,
-    "Seats": 10, // The value doesn't matter, we don't check server-side
-    // "MaxAutoscaleSeats": null, // The value doesn't matter, we don't check server-side
-    "MaxCollections": 10, // The value doesn't matter, we don't check server-side
-    "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
-    "Use2fa": true,
-    "UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
-    "UseEvents": CONFIG.org_events_enabled(),
-    "UseGroups": CONFIG.org_groups_enabled(),
-    "UseTotp": true,
-    "UsePolicies": true,
-    // "UseScim": false, // Not supported (Not AGPLv3 Licensed)
-    "UseSso": false, // Not supported
-    // "UseKeyConnector": false, // Not supported
-    "SelfHost": true,
-    "UseApi": true,
-    "HasPublicAndPrivateKeys": self.private_key.is_some() && self.public_key.is_some(),
-    "UseResetPassword": CONFIG.mail_enabled(),
-    "BusinessName": null,
-    "BusinessAddress1": null,
-    "BusinessAddress2": null,
-    "BusinessAddress3": null,
-    "BusinessCountry": null,
-    "BusinessTaxNumber": null,
-    "BillingEmail": self.billing_email,
-    "Plan": "TeamsAnnually",
-    "PlanType": 5, // TeamsAnnually plan
-    "UsersGetPremium": true,
-    "Object": "organization",
+    "id": self.uuid,
+    "identifier": null, // not supported by us
+    "name": self.name,
+    "seats": 10, // The value doesn't matter, we don't check server-side
+    // "maxAutoscaleSeats": null, // The value doesn't matter, we don't check server-side
+    "maxCollections": 10, // The value doesn't matter, we don't check server-side
+    "maxStorageGb": 10, // The value doesn't matter, we don't check server-side
+    "use2fa": true,
+    "useDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
+    "useEvents": CONFIG.org_events_enabled(),
+    "useGroups": CONFIG.org_groups_enabled(),
+    "useTotp": true,
+    "usePolicies": true,
+    // "useScim": false, // Not supported (Not AGPLv3 Licensed)
+    "useSso": false, // Not supported
+    // "useKeyConnector": false, // Not supported
+    "selfHost": true,
+    "useApi": true,
+    "hasPublicAndPrivateKeys": self.private_key.is_some() && self.public_key.is_some(),
+    "useResetPassword": CONFIG.mail_enabled(),
+    "businessName": null,
+    "businessAddress1": null,
+    "businessAddress2": null,
+    "businessAddress3": null,
+    "businessCountry": null,
+    "businessTaxNumber": null,
+    "billingEmail": self.billing_email,
+    "plan": "TeamsAnnually",
+    "planType": 5, // TeamsAnnually plan
+    "usersGetPremium": true,
+    "object": "organization",
 })
 }
 }
@@ -316,6 +316,7 @@
 UserOrganization::delete_all_by_organization(&self.uuid, conn).await?;
 OrgPolicy::delete_all_by_organization(&self.uuid, conn).await?;
 Group::delete_all_by_organization(&self.uuid, conn).await?;
+ OrganizationApiKey::delete_all_by_organization(&self.uuid, conn).await?;
 db_run! { conn: {
 diesel::delete(organizations::table.filter(organizations::uuid.eq(self.uuid)))
@@ -344,65 +345,81 @@ impl UserOrganization {
 pub async fn to_json(&self, conn: &mut DbConn) -> Value {
 let org = Organization::find_by_uuid(&self.org_uuid, conn).await.unwrap();
- // https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/ProfileOrganizationResponseModel.cs
+ let permissions = json!({
json!({
"Id": self.org_uuid,
"Identifier": null, // Not supported
"Name": org.name,
"Seats": 10, // The value doesn't matter, we don't check server-side
"MaxCollections": 10, // The value doesn't matter, we don't check server-side
"UsersGetPremium": true,
"Use2fa": true,
"UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
"UseEvents": CONFIG.org_events_enabled(),
"UseGroups": CONFIG.org_groups_enabled(),
"UseTotp": true,
// "UseScim": false, // Not supported (Not AGPLv3 Licensed)
"UsePolicies": true,
"UseApi": true,
"SelfHost": true,
"HasPublicAndPrivateKeys": org.private_key.is_some() && org.public_key.is_some(),
"ResetPasswordEnrolled": self.reset_password_key.is_some(),
"UseResetPassword": CONFIG.mail_enabled(),
"SsoBound": false, // Not supported
"UseSso": false, // Not supported
"ProviderId": null,
"ProviderName": null,
// "KeyConnectorEnabled": false,
// "KeyConnectorUrl": null,
// TODO: Add support for Custom User Roles // TODO: Add support for Custom User Roles
// See: https://bitwarden.com/help/article/user-types-access-control/#custom-role // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
// "Permissions": { "accessEventLogs": false,
// "AccessEventLogs": false, "accessImportExport": false,
// "AccessImportExport": false, "accessReports": false,
// "AccessReports": false, "createNewCollections": false,
// "ManageAllCollections": false, "editAnyCollection": false,
// "CreateNewCollections": false, "deleteAnyCollection": false,
// "EditAnyCollection": false, "editAssignedCollections": false,
// "DeleteAnyCollection": false, "deleteAssignedCollections": false,
// "ManageAssignedCollections": false, "manageGroups": false,
// "editAssignedCollections": false, "managePolicies": false,
// "deleteAssignedCollections": false, "manageSso": false, // Not supported
// "ManageCiphers": false, "manageUsers": false,
// "ManageGroups": false, "manageResetPassword": false,
// "ManagePolicies": false, "manageScim": false // Not supported (Not AGPLv3 Licensed)
// "ManageResetPassword": false, });
// "ManageSso": false, // Not supported
// "ManageUsers": false, // https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/ProfileOrganizationResponseModel.cs
// "ManageScim": false, // Not supported (Not AGPLv3 Licensed) json!({
// }, "id": self.org_uuid,
"identifier": null, // Not supported
"MaxStorageGb": 10, // The value doesn't matter, we don't check server-side "name": org.name,
"seats": 10, // The value doesn't matter, we don't check server-side
"maxCollections": 10, // The value doesn't matter, we don't check server-side
"usersGetPremium": true,
"use2fa": true,
"useDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
"useEvents": CONFIG.org_events_enabled(),
"useGroups": CONFIG.org_groups_enabled(),
"useTotp": true,
"useScim": false, // Not supported (Not AGPLv3 Licensed)
"usePolicies": true,
"useApi": true,
"selfHost": true,
"hasPublicAndPrivateKeys": org.private_key.is_some() && org.public_key.is_some(),
"resetPasswordEnrolled": self.reset_password_key.is_some(),
"useResetPassword": CONFIG.mail_enabled(),
"ssoBound": false, // Not supported
"useSso": false, // Not supported
"useKeyConnector": false,
"useSecretsManager": false,
"usePasswordManager": true,
"useCustomPermissions": false,
"useActivateAutofillPolicy": false,
"providerId": null,
"providerName": null,
"providerType": null,
"familySponsorshipFriendlyName": null,
"familySponsorshipAvailable": false,
"planProductType": 0,
"keyConnectorEnabled": false,
"keyConnectorUrl": null,
"familySponsorshipLastSyncDate": null,
"familySponsorshipValidUntil": null,
"familySponsorshipToDelete": null,
"accessSecretsManager": false,
"limitCollectionCreationDeletion": true,
"allowAdminAccessToAllCollectionItems": true,
"flexibleCollections": false,
"permissions": permissions,
"maxStorageGb": 10, // The value doesn't matter, we don't check server-side
// These are per user // These are per user
"UserId": self.user_uuid, "userId": self.user_uuid,
"Key": self.akey, "key": self.akey,
"Status": self.status, "status": self.status,
"Type": self.atype, "type": self.atype,
"Enabled": true, "enabled": true,
"Object": "profileOrganization", "object": "profileOrganization",
}) })
} }
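The rewritten to_json builds the (all-false) permissions object up front and splices it into the response, instead of leaving it as a commented-out block. A standalone sketch of that pattern with only a couple of the keys shown above:

```rust
use serde_json::{json, Value};

// Build the nested "permissions" object first, then embed it in the
// outer json! literal, as the hunk above does.
fn profile_org_json(org_uuid: &str, user_uuid: &str) -> Value {
    let permissions = json!({
        "accessEventLogs": false,
        "manageUsers": false,
    });

    json!({
        "id": org_uuid,
        "userId": user_uuid,
        "permissions": permissions,
        "object": "profileOrganization",
    })
}

fn main() {
    let v = profile_org_json("org-1", "user-1");
    assert_eq!(v["permissions"]["manageUsers"], false);
    println!("{v}");
}
```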
@ -438,9 +455,9 @@ impl UserOrganization {
.iter() .iter()
.map(|cu| { .map(|cu| {
json!({ json!({
"Id": cu.collection_uuid, "id": cu.collection_uuid,
"ReadOnly": cu.read_only, "readOnly": cu.read_only,
"HidePasswords": cu.hide_passwords, "hidePasswords": cu.hide_passwords,
}) })
}) })
.collect() .collect()
@ -449,29 +466,29 @@ impl UserOrganization {
}; };
json!({ json!({
"Id": self.uuid, "id": self.uuid,
"UserId": self.user_uuid, "userId": self.user_uuid,
"Name": user.name, "name": user.name,
"Email": user.email, "email": user.email,
"ExternalId": self.external_id, "externalId": self.external_id,
"Groups": groups, "groups": groups,
"Collections": collections, "collections": collections,
"Status": status, "status": status,
"Type": self.atype, "type": self.atype,
"AccessAll": self.access_all, "accessAll": self.access_all,
"TwoFactorEnabled": twofactor_enabled, "twoFactorEnabled": twofactor_enabled,
"ResetPasswordEnrolled": self.reset_password_key.is_some(), "resetPasswordEnrolled": self.reset_password_key.is_some(),
"Object": "organizationUserUserDetails", "object": "organizationUserUserDetails",
}) })
} }
pub fn to_json_user_access_restrictions(&self, col_user: &CollectionUser) -> Value { pub fn to_json_user_access_restrictions(&self, col_user: &CollectionUser) -> Value {
json!({ json!({
"Id": self.uuid, "id": self.uuid,
"ReadOnly": col_user.read_only, "readOnly": col_user.read_only,
"HidePasswords": col_user.hide_passwords, "hidePasswords": col_user.hide_passwords,
}) })
} }
@ -485,9 +502,9 @@ impl UserOrganization {
.iter() .iter()
.map(|c| { .map(|c| {
json!({ json!({
"Id": c.collection_uuid, "id": c.collection_uuid,
"ReadOnly": c.read_only, "readOnly": c.read_only,
"HidePasswords": c.hide_passwords, "hidePasswords": c.hide_passwords,
}) })
}) })
.collect() .collect()
@ -502,15 +519,15 @@ impl UserOrganization {
}; };
json!({ json!({
"Id": self.uuid, "id": self.uuid,
"UserId": self.user_uuid, "userId": self.user_uuid,
"Status": status, "status": status,
"Type": self.atype, "type": self.atype,
"AccessAll": self.access_all, "accessAll": self.access_all,
"Collections": coll_uuids, "collections": coll_uuids,
"Object": "organizationUserDetails", "object": "organizationUserDetails",
}) })
} }
pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
@ -887,6 +904,14 @@ impl OrganizationApiKey {
.ok().from_db() .ok().from_db()
}} }}
} }
pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: {
diesel::delete(organization_api_key::table.filter(organization_api_key::org_uuid.eq(org_uuid)))
.execute(conn)
.map_res("Error removing organization api key from organization")
}}
}
} }
#[cfg(test)] #[cfg(test)]

91
src/db/models/send.rs

@ -1,6 +1,8 @@
use chrono::{NaiveDateTime, Utc}; use chrono::{NaiveDateTime, Utc};
use serde_json::Value; use serde_json::Value;
use crate::util::LowerCase;
use super::User; use super::User;
db_object! { db_object! {
@ -122,48 +124,58 @@ impl Send {
use data_encoding::BASE64URL_NOPAD; use data_encoding::BASE64URL_NOPAD;
use uuid::Uuid; use uuid::Uuid;
let data: Value = serde_json::from_str(&self.data).unwrap_or_default(); let mut data = serde_json::from_str::<LowerCase<Value>>(&self.data).map(|d| d.data).unwrap_or_default();
// Mobile clients expect size to be a string instead of a number
if let Some(size) = data.get("size").and_then(|v| v.as_i64()) {
data["size"] = Value::String(size.to_string());
}
json!({ json!({
"Id": self.uuid, "id": self.uuid,
"AccessId": BASE64URL_NOPAD.encode(Uuid::parse_str(&self.uuid).unwrap_or_default().as_bytes()), "accessId": BASE64URL_NOPAD.encode(Uuid::parse_str(&self.uuid).unwrap_or_default().as_bytes()),
"Type": self.atype, "type": self.atype,
"Name": self.name, "name": self.name,
"Notes": self.notes, "notes": self.notes,
"Text": if self.atype == SendType::Text as i32 { Some(&data) } else { None }, "text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
"File": if self.atype == SendType::File as i32 { Some(&data) } else { None }, "file": if self.atype == SendType::File as i32 { Some(&data) } else { None },
"Key": self.akey, "key": self.akey,
"MaxAccessCount": self.max_access_count, "maxAccessCount": self.max_access_count,
"AccessCount": self.access_count, "accessCount": self.access_count,
"Password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)), "password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)),
"Disabled": self.disabled, "disabled": self.disabled,
"HideEmail": self.hide_email, "hideEmail": self.hide_email,
"RevisionDate": format_date(&self.revision_date), "revisionDate": format_date(&self.revision_date),
"ExpirationDate": self.expiration_date.as_ref().map(format_date), "expirationDate": self.expiration_date.as_ref().map(format_date),
"DeletionDate": format_date(&self.deletion_date), "deletionDate": format_date(&self.deletion_date),
"Object": "send", "object": "send",
}) })
} }
pub async fn to_json_access(&self, conn: &mut DbConn) -> Value { pub async fn to_json_access(&self, conn: &mut DbConn) -> Value {
use crate::util::format_date; use crate::util::format_date;
let data: Value = serde_json::from_str(&self.data).unwrap_or_default(); let mut data = serde_json::from_str::<LowerCase<Value>>(&self.data).map(|d| d.data).unwrap_or_default();
// Mobile clients expect size to be a string instead of a number
if let Some(size) = data.get("size").and_then(|v| v.as_i64()) {
data["size"] = Value::String(size.to_string());
}
json!({ json!({
"Id": self.uuid, "id": self.uuid,
"Type": self.atype, "type": self.atype,
"Name": self.name, "name": self.name,
"Text": if self.atype == SendType::Text as i32 { Some(&data) } else { None }, "text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
"File": if self.atype == SendType::File as i32 { Some(&data) } else { None }, "file": if self.atype == SendType::File as i32 { Some(&data) } else { None },
"ExpirationDate": self.expiration_date.as_ref().map(format_date), "expirationDate": self.expiration_date.as_ref().map(format_date),
"CreatorIdentifier": self.creator_identifier(conn).await, "creatorIdentifier": self.creator_identifier(conn).await,
"Object": "send-access", "object": "send-access",
}) })
} }
} }
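Both to_json and to_json_access now normalize the stored data blob through LowerCase<Value> and then rewrite size as a string, because mobile clients expect a string rather than a number. A self-contained sketch of just the size fix-up:

```rust
use serde_json::{json, Value};

// Rewrite a numeric "size" in place as a string before serializing,
// mirroring the fix-up in the hunk above.
fn stringify_size(mut data: Value) -> Value {
    if let Some(size) = data.get("size").and_then(|v| v.as_i64()) {
        data["size"] = Value::String(size.to_string());
    }
    data
}

fn main() {
    let fixed = stringify_size(json!({ "fileName": "a.txt", "size": 1024 }));
    assert_eq!(fixed["size"], "1024");
}
```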
@ -290,25 +302,18 @@ impl Send {
pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> Option<i64> { pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> Option<i64> {
let sends = Self::find_by_user(user_uuid, conn).await; let sends = Self::find_by_user(user_uuid, conn).await;
#[allow(non_snake_case)] #[derive(serde::Deserialize)]
#[derive(serde::Deserialize, Default)]
struct FileData { struct FileData {
Size: Option<NumberOrString>, #[serde(rename = "size", alias = "Size")]
size: Option<NumberOrString>, size: NumberOrString,
} }
let mut total: i64 = 0; let mut total: i64 = 0;
for send in sends { for send in sends {
if send.atype == SendType::File as i32 { if send.atype == SendType::File as i32 {
let data: FileData = serde_json::from_str(&send.data).unwrap_or_default(); if let Ok(size) =
serde_json::from_str::<FileData>(&send.data).map_err(Into::into).and_then(|d| d.size.into_i64())
let size = match (data.size, data.Size) { {
(Some(s), _) => s.into_i64(),
(_, Some(s)) => s.into_i64(),
(None, None) => continue,
};
if let Ok(size) = size {
total = total.checked_add(size)?; total = total.checked_add(size)?;
}; };
} }
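size_by_user previously probed both Size and size fields by hand; the new FileData leans on serde's rename/alias instead. vaultwarden's NumberOrString lives in crate::util; it is sketched below as an untagged enum with a fallible into_i64(), which is an assumption about its shape, not a copy of it:

```rust
use serde::Deserialize;

// Assumed stand-in for crate::util::NumberOrString: accepts either a JSON
// number or a numeric string.
#[derive(Deserialize)]
#[serde(untagged)]
enum NumberOrString {
    Number(i64),
    String(String),
}

impl NumberOrString {
    fn into_i64(self) -> Result<i64, std::num::ParseIntError> {
        match self {
            NumberOrString::Number(n) => Ok(n),
            NumberOrString::String(s) => s.parse(),
        }
    }
}

#[derive(Deserialize)]
struct FileData {
    // Accept both the current lowercase key and the legacy PascalCase one.
    #[serde(rename = "size", alias = "Size")]
    size: NumberOrString,
}

fn main() {
    let a: FileData = serde_json::from_str(r#"{"size": 42}"#).unwrap();
    let b: FileData = serde_json::from_str(r#"{"Size": "42"}"#).unwrap();
    assert_eq!(a.size.into_i64().unwrap(), b.size.into_i64().unwrap());
}
```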

16
src/db/models/two_factor.rs

@ -12,7 +12,7 @@ db_object! {
pub atype: i32, pub atype: i32,
pub enabled: bool, pub enabled: bool,
pub data: String, pub data: String,
pub last_used: i32, pub last_used: i64,
} }
} }
@ -54,17 +54,17 @@ impl TwoFactor {
pub fn to_json(&self) -> Value { pub fn to_json(&self) -> Value {
json!({ json!({
"Enabled": self.enabled, "enabled": self.enabled,
"Key": "", // This key and value vary "key": "", // This key and value vary
"Object": "twoFactorAuthenticator" // This value varies "Oobject": "twoFactorAuthenticator" // This value varies
}) })
} }
pub fn to_json_provider(&self) -> Value { pub fn to_json_provider(&self) -> Value {
json!({ json!({
"Enabled": self.enabled, "enabled": self.enabled,
"Type": self.atype, "type": self.atype,
"Object": "twoFactorProvider" "object": "twoFactorProvider"
}) })
} }
} }
@ -95,7 +95,7 @@ impl TwoFactor {
// We need to make sure we're not going to violate the unique constraint on user_uuid and atype. // We need to make sure we're not going to violate the unique constraint on user_uuid and atype.
// This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
// not support multiple constraints on ON CONFLICT clauses. // not support multiple constraints on ON CONFLICT clauses.
diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(&self.user_uuid)).filter(twofactor::atype.eq(&self.atype))) let _: () = diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(&self.user_uuid)).filter(twofactor::atype.eq(&self.atype)))
.execute(conn) .execute(conn)
.map_res("Error deleting twofactor for insert")?; .map_res("Error deleting twofactor for insert")?;

43
src/db/models/user.rs

@ -1,4 +1,4 @@
use chrono::{Duration, NaiveDateTime, Utc}; use chrono::{NaiveDateTime, TimeDelta, Utc};
use serde_json::Value; use serde_json::Value;
use crate::crypto; use crate::crypto;
@ -202,7 +202,7 @@ impl User {
let stamp_exception = UserStampException { let stamp_exception = UserStampException {
routes: route_exception, routes: route_exception,
security_stamp: self.security_stamp.clone(), security_stamp: self.security_stamp.clone(),
expire: (Utc::now().naive_utc() + Duration::minutes(2)).timestamp(), expire: (Utc::now() + TimeDelta::try_minutes(2).unwrap()).timestamp(),
}; };
self.stamp_exception = Some(serde_json::to_string(&stamp_exception).unwrap_or_default()); self.stamp_exception = Some(serde_json::to_string(&stamp_exception).unwrap_or_default());
} }
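chrono deprecated the panicking Duration constructors; TimeDelta::try_minutes returns an Option, making the (practically unreachable) overflow case explicit. A minimal standalone equivalent of the expiry computation above:

```rust
use chrono::{TimeDelta, Utc};

fn main() {
    // try_minutes yields None only on overflow, so expect() is safe here.
    let delta = TimeDelta::try_minutes(2).expect("2 minutes is in range");
    let expire = (Utc::now() + delta).timestamp();
    println!("stamp exception expires at unix time {expire}");
}
```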
@ -240,24 +240,26 @@ impl User {
}; };
json!({ json!({
"_Status": status as i32, "_status": status as i32,
"Id": self.uuid, "id": self.uuid,
"Name": self.name, "name": self.name,
"Email": self.email, "email": self.email,
"EmailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(), "emailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(),
"Premium": true, "premium": true,
"MasterPasswordHint": self.password_hint, "premiumFromOrganization": false,
"Culture": "en-US", "masterPasswordHint": self.password_hint,
"TwoFactorEnabled": twofactor_enabled, "culture": "en-US",
"Key": self.akey, "twoFactorEnabled": twofactor_enabled,
"PrivateKey": self.private_key, "key": self.akey,
"SecurityStamp": self.security_stamp, "privateKey": self.private_key,
"Organizations": orgs_json, "securityStamp": self.security_stamp,
"Providers": [], "organizations": orgs_json,
"ProviderOrganizations": [], "providers": [],
"ForcePasswordReset": false, "providerOrganizations": [],
"AvatarColor": self.avatar_color, "forcePasswordReset": false,
"Object": "profile", "avatarColor": self.avatar_color,
"usesKeyConnector": false,
"object": "profile",
}) })
} }
@ -311,6 +313,7 @@ impl User {
Send::delete_all_by_user(&self.uuid, conn).await?; Send::delete_all_by_user(&self.uuid, conn).await?;
EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?; EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?;
EmergencyAccess::delete_all_by_grantee_email(&self.email, conn).await?;
UserOrganization::delete_all_by_user(&self.uuid, conn).await?; UserOrganization::delete_all_by_user(&self.uuid, conn).await?;
Cipher::delete_all_by_user(&self.uuid, conn).await?; Cipher::delete_all_by_user(&self.uuid, conn).await?;
Favorite::delete_all_by_user(&self.uuid, conn).await?; Favorite::delete_all_by_user(&self.uuid, conn).await?;

2
src/db/schemas/mysql/schema.rs

@ -160,7 +160,7 @@ table! {
atype -> Integer, atype -> Integer,
enabled -> Bool, enabled -> Bool,
data -> Text, data -> Text,
last_used -> Integer, last_used -> BigInt,
} }
} }

2
src/db/schemas/postgresql/schema.rs

@ -160,7 +160,7 @@ table! {
atype -> Integer, atype -> Integer,
enabled -> Bool, enabled -> Bool,
data -> Text, data -> Text,
last_used -> Integer, last_used -> BigInt,
} }
} }

2
src/db/schemas/sqlite/schema.rs

@ -160,7 +160,7 @@ table! {
atype -> Integer, atype -> Integer,
enabled -> Bool, enabled -> Bool,
data -> Text, data -> Text,
last_used -> Integer, last_used -> BigInt,
} }
} }
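The same one-line change lands in all three schema files because diesel maps SQL BigInt to Rust i64: widening last_used (a unix timestamp, previously i32) has to happen in the table! declaration and the model struct together, or the queries stop type-checking. A compile-only sketch with an illustrative table:

```rust
use diesel::prelude::*;

diesel::table! {
    twofactor (uuid) {
        uuid -> Text,
        atype -> Integer,
        enabled -> Bool,
        data -> Text,
        last_used -> BigInt, // was Integer (i32); i64 avoids the 2038 rollover
    }
}

#[derive(Queryable)]
struct TwoFactor {
    uuid: String,
    atype: i32,
    enabled: bool,
    data: String,
    last_used: i64, // must match BigInt above, or diesel rejects the query
}

fn main() {}
```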

20
src/error.rs

@ -52,7 +52,6 @@ use rocket::error::Error as RocketErr;
use serde_json::{Error as SerdeErr, Value}; use serde_json::{Error as SerdeErr, Value};
use std::io::Error as IoErr; use std::io::Error as IoErr;
use std::time::SystemTimeError as TimeErr; use std::time::SystemTimeError as TimeErr;
use tokio_tungstenite::tungstenite::Error as TungstError;
use webauthn_rs::error::WebauthnError as WebauthnErr; use webauthn_rs::error::WebauthnError as WebauthnErr;
use yubico::yubicoerror::YubicoError as YubiErr; use yubico::yubicoerror::YubicoError as YubiErr;
@ -91,7 +90,6 @@ make_error! {
DieselCon(DieselConErr): _has_source, _api_error, DieselCon(DieselConErr): _has_source, _api_error,
Webauthn(WebauthnErr): _has_source, _api_error, Webauthn(WebauthnErr): _has_source, _api_error,
WebSocket(TungstError): _has_source, _api_error,
} }
impl std::fmt::Debug for Error { impl std::fmt::Debug for Error {
@ -181,18 +179,18 @@ fn _serialize(e: &impl serde::Serialize, _msg: &str) -> String {
fn _api_error(_: &impl std::any::Any, msg: &str) -> String { fn _api_error(_: &impl std::any::Any, msg: &str) -> String {
let json = json!({ let json = json!({
"Message": msg, "message": msg,
"error": "", "error": "",
"error_description": "", "error_description": "",
"ValidationErrors": {"": [ msg ]}, "validationErrors": {"": [ msg ]},
"ErrorModel": { "errorModel": {
"Message": msg, "message": msg,
"Object": "error" "object": "error"
}, },
"ExceptionMessage": null, "exceptionMessage": null,
"ExceptionStackTrace": null, "exceptionStackTrace": null,
"InnerExceptionMessage": null, "innerExceptionMessage": null,
"Object": "error" "object": "error"
}); });
_serialize(&json, "") _serialize(&json, "")
} }

51
src/main.rs

@ -3,7 +3,7 @@
// The more key/value pairs there are the more recursion occurs. // The more key/value pairs there are the more recursion occurs.
// We want to keep this as low as possible, but not higher than 128. // We want to keep this as low as possible, but not higher than 128.
// If you go above 128 it will cause rust-analyzer to fail, // If you go above 128 it will cause rust-analyzer to fail,
#![recursion_limit = "103"] #![recursion_limit = "200"]
// When enabled use MiMalloc as malloc instead of the default malloc // When enabled use MiMalloc as malloc instead of the default malloc
#[cfg(feature = "enable_mimalloc")] #[cfg(feature = "enable_mimalloc")]
@ -52,7 +52,7 @@ mod ratelimit;
mod util; mod util;
use crate::api::purge_auth_requests; use crate::api::purge_auth_requests;
use crate::api::WS_ANONYMOUS_SUBSCRIPTIONS; use crate::api::{WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS};
pub use config::CONFIG; pub use config::CONFIG;
pub use error::{Error, MapResult}; pub use error::{Error, MapResult};
use rocket::data::{Limits, ToByteUnit}; use rocket::data::{Limits, ToByteUnit};
@ -65,13 +65,17 @@ async fn main() -> Result<(), Error> {
launch_info(); launch_info();
use log::LevelFilter as LF; use log::LevelFilter as LF;
let level = LF::from_str(&CONFIG.log_level()).expect("Valid log level"); let level = LF::from_str(&CONFIG.log_level()).unwrap_or_else(|_| {
let valid_log_levels = LF::iter().map(|lvl| lvl.as_str().to_lowercase()).collect::<Vec<String>>().join(", ");
println!("Log level must be one of the following: {valid_log_levels}");
exit(1);
});
init_logging(level).ok(); init_logging(level).ok();
let extra_debug = matches!(level, LF::Trace | LF::Debug); let extra_debug = matches!(level, LF::Trace | LF::Debug);
check_data_folder().await; check_data_folder().await;
check_rsa_keys().unwrap_or_else(|_| { auth::initialize_keys().unwrap_or_else(|_| {
error!("Error creating keys, exiting..."); error!("Error creating keys, exiting...");
exit(1); exit(1);
}); });
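Instead of expect()-panicking on a bad LOG_LEVEL, startup now lists the accepted values and exits cleanly. A standalone sketch of that fallback (log's LevelFilter::iter and as_str are the real APIs used in the hunk):

```rust
use log::LevelFilter;
use std::str::FromStr;

// Parse a log level, printing the accepted values and exiting on failure
// rather than panicking.
fn parse_level(raw: &str) -> LevelFilter {
    LevelFilter::from_str(raw).unwrap_or_else(|_| {
        let valid = LevelFilter::iter()
            .map(|lvl| lvl.as_str().to_lowercase())
            .collect::<Vec<String>>()
            .join(", ");
        println!("Log level must be one of the following: {valid}");
        std::process::exit(1);
    })
}

fn main() {
    assert_eq!(parse_level("debug"), LevelFilter::Debug);
}
```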
@ -207,9 +211,9 @@ fn launch_info() {
} }
fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> { fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
// Depending on the main log level we either want to disable or enable logging for trust-dns. // Depending on the main log level we either want to disable or enable logging for hickory.
// Else if there are timeouts it will clutter the logs since trust-dns uses warn for this. // Else if there are timeouts it will clutter the logs since hickory uses warn for this.
let trust_dns_level = if level >= log::LevelFilter::Debug { let hickory_level = if level >= log::LevelFilter::Debug {
level level
} else { } else {
log::LevelFilter::Off log::LevelFilter::Off
@ -262,9 +266,9 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
.level_for("handlebars::render", handlebars_level) .level_for("handlebars::render", handlebars_level)
// Prevent cookie_store logs // Prevent cookie_store logs
.level_for("cookie_store", log::LevelFilter::Off) .level_for("cookie_store", log::LevelFilter::Off)
// Variable level for trust-dns used by reqwest // Variable level for hickory used by reqwest
.level_for("trust_dns_resolver::name_server::name_server", trust_dns_level) .level_for("hickory_resolver::name_server::name_server", hickory_level)
.level_for("trust_dns_proto::xfer", trust_dns_level) .level_for("hickory_proto::xfer", hickory_level)
.level_for("diesel_logger", diesel_logger_level) .level_for("diesel_logger", diesel_logger_level)
.chain(std::io::stdout()); .chain(std::io::stdout());
@ -444,31 +448,6 @@ async fn container_data_folder_is_persistent(data_folder: &str) -> bool {
true true
} }
fn check_rsa_keys() -> Result<(), crate::error::Error> {
// If the RSA keys don't exist, try to create them
let priv_path = CONFIG.private_rsa_key();
let pub_path = CONFIG.public_rsa_key();
if !util::file_exists(&priv_path) {
let rsa_key = openssl::rsa::Rsa::generate(2048)?;
let priv_key = rsa_key.private_key_to_pem()?;
crate::util::write_file(&priv_path, &priv_key)?;
info!("Private key created correctly.");
}
if !util::file_exists(&pub_path) {
let rsa_key = openssl::rsa::Rsa::private_key_from_pem(&std::fs::read(&priv_path)?)?;
let pub_key = rsa_key.public_key_to_pem()?;
crate::util::write_file(&pub_path, &pub_key)?;
info!("Public key created correctly.");
}
auth::load_keys();
Ok(())
}
fn check_web_vault() { fn check_web_vault() {
if !CONFIG.web_vault_enabled() { if !CONFIG.web_vault_enabled() {
return; return;
@ -522,7 +501,7 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error>
.register([basepath, "/api"].concat(), api::core_catchers()) .register([basepath, "/api"].concat(), api::core_catchers())
.register([basepath, "/admin"].concat(), api::admin_catchers()) .register([basepath, "/admin"].concat(), api::admin_catchers())
.manage(pool) .manage(pool)
.manage(api::start_notification_server()) .manage(Arc::clone(&WS_USERS))
.manage(Arc::clone(&WS_ANONYMOUS_SUBSCRIPTIONS)) .manage(Arc::clone(&WS_ANONYMOUS_SUBSCRIPTIONS))
.attach(util::AppHeaders()) .attach(util::AppHeaders())
.attach(util::Cors()) .attach(util::Cors())

540
src/static/global_domains.json

File diff suppressed because it is too large

6
src/static/scripts/admin_diagnostics.js

@ -21,7 +21,11 @@ const browserUTC = `${year}-${month}-${day} ${hour}:${minute}:${seconds} UTC`;
// ================================ // ================================
// Check if the output is a valid IP // Check if the output is a valid IP
const isValidIp = value => (/^(?:(?:^|\.)(?:2(?:5[0-5]|[0-4]\d)|1?\d?\d)){4}$/.test(value) ? true : false); function isValidIp(ip) {
const ipv4Regex = /^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/;
const ipv6Regex = /^(?:[a-fA-F0-9]{1,4}:){7}[a-fA-F0-9]{1,4}|((?:[a-fA-F0-9]{1,4}:){1,7}:|:(:[a-fA-F0-9]{1,4}){1,7}|[a-fA-F0-9]{1,4}:((:[a-fA-F0-9]{1,4}){1,6}))$/;
return ipv4Regex.test(ip) || ipv6Regex.test(ip);
}
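Side note, not part of this PR: the admin page has to validate IPs in JavaScript, but on the Rust side the same check needs no regex at all, since the standard library parses both address families:

```rust
use std::net::IpAddr;

// Accept anything std can parse as an IPv4 or IPv6 address.
fn is_valid_ip(ip: &str) -> bool {
    ip.parse::<IpAddr>().is_ok()
}

fn main() {
    assert!(is_valid_ip("192.168.0.1"));
    assert!(is_valid_ip("::1"));
    assert!(!is_valid_ip("999.1.1.1"));
}
```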
function checkVersions(platform, installed, latest, commit=null) { function checkVersions(platform, installed, latest, commit=null) {
if (installed === "-" || latest === "-") { if (installed === "-" || latest === "-") {

17
src/static/scripts/bootstrap.bundle.js

@ -1,6 +1,6 @@
/*! /*!
* Bootstrap v5.3.2 (https://getbootstrap.com/) * Bootstrap v5.3.3 (https://getbootstrap.com/)
* Copyright 2011-2023 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors) * Copyright 2011-2024 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
*/ */
(function (global, factory) { (function (global, factory) {
@ -210,7 +210,6 @@
const reflow = element => { const reflow = element => {
element.offsetHeight; // eslint-disable-line no-unused-expressions element.offsetHeight; // eslint-disable-line no-unused-expressions
}; };
const getjQuery = () => { const getjQuery = () => {
if (window.jQuery && !document.body.hasAttribute('data-bs-no-jquery')) { if (window.jQuery && !document.body.hasAttribute('data-bs-no-jquery')) {
return window.jQuery; return window.jQuery;
@ -648,7 +647,7 @@
* Constants * Constants
*/ */
const VERSION = '5.3.2'; const VERSION = '5.3.3';
/** /**
* Class definition * Class definition
@ -729,9 +728,9 @@
if (hrefAttribute.includes('#') && !hrefAttribute.startsWith('#')) { if (hrefAttribute.includes('#') && !hrefAttribute.startsWith('#')) {
hrefAttribute = `#${hrefAttribute.split('#')[1]}`; hrefAttribute = `#${hrefAttribute.split('#')[1]}`;
} }
selector = hrefAttribute && hrefAttribute !== '#' ? parseSelector(hrefAttribute.trim()) : null; selector = hrefAttribute && hrefAttribute !== '#' ? hrefAttribute.trim() : null;
} }
return selector; return selector ? selector.split(',').map(sel => parseSelector(sel)).join(',') : null;
}; };
const SelectorEngine = { const SelectorEngine = {
find(selector, element = document.documentElement) { find(selector, element = document.documentElement) {
@ -3916,7 +3915,6 @@
// if false, we use the backdrop helper without adding any element to the dom // if false, we use the backdrop helper without adding any element to the dom
rootElement: 'body' // give the choice to place backdrop under different elements rootElement: 'body' // give the choice to place backdrop under different elements
}; };
const DefaultType$8 = { const DefaultType$8 = {
className: 'string', className: 'string',
clickCallback: '(function|null)', clickCallback: '(function|null)',
@ -4041,7 +4039,6 @@
autofocus: true, autofocus: true,
trapElement: null // The element to trap focus inside of trapElement: null // The element to trap focus inside of
}; };
const DefaultType$7 = { const DefaultType$7 = {
autofocus: 'boolean', autofocus: 'boolean',
trapElement: 'element' trapElement: 'element'
@ -4768,7 +4765,10 @@
br: [], br: [],
col: [], col: [],
code: [], code: [],
dd: [],
div: [], div: [],
dl: [],
dt: [],
em: [], em: [],
hr: [], hr: [],
h1: [], h1: [],
@ -6311,3 +6311,4 @@
return index_umd; return index_umd;
})); }));
//# sourceMappingURL=bootstrap.bundle.js.map

53
src/static/scripts/bootstrap.css

@ -1,7 +1,7 @@
@charset "UTF-8"; @charset "UTF-8";
/*! /*!
* Bootstrap v5.3.2 (https://getbootstrap.com/) * Bootstrap v5.3.3 (https://getbootstrap.com/)
* Copyright 2011-2023 The Bootstrap Authors * Copyright 2011-2024 The Bootstrap Authors
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
*/ */
:root, :root,
@ -3042,6 +3042,9 @@ textarea.form-control-lg {
.btn-check:checked + .btn:focus-visible, :not(.btn-check) + .btn:active:focus-visible, .btn:first-child:active:focus-visible, .btn.active:focus-visible, .btn.show:focus-visible { .btn-check:checked + .btn:focus-visible, :not(.btn-check) + .btn:active:focus-visible, .btn:first-child:active:focus-visible, .btn.active:focus-visible, .btn.show:focus-visible {
box-shadow: var(--bs-btn-focus-box-shadow); box-shadow: var(--bs-btn-focus-box-shadow);
} }
.btn-check:checked:focus-visible + .btn {
box-shadow: var(--bs-btn-focus-box-shadow);
}
.btn:disabled, .btn.disabled, fieldset:disabled .btn { .btn:disabled, .btn.disabled, fieldset:disabled .btn {
color: var(--bs-btn-disabled-color); color: var(--bs-btn-disabled-color);
pointer-events: none; pointer-events: none;
@ -4573,12 +4576,11 @@ textarea.form-control-lg {
--bs-accordion-btn-padding-y: 1rem; --bs-accordion-btn-padding-y: 1rem;
--bs-accordion-btn-color: var(--bs-body-color); --bs-accordion-btn-color: var(--bs-body-color);
--bs-accordion-btn-bg: var(--bs-accordion-bg); --bs-accordion-btn-bg: var(--bs-accordion-bg);
--bs-accordion-btn-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23212529'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e"); --bs-accordion-btn-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='none' stroke='%23212529' stroke-linecap='round' stroke-linejoin='round'%3e%3cpath d='M2 5L8 11L14 5'/%3e%3c/svg%3e");
--bs-accordion-btn-icon-width: 1.25rem; --bs-accordion-btn-icon-width: 1.25rem;
--bs-accordion-btn-icon-transform: rotate(-180deg); --bs-accordion-btn-icon-transform: rotate(-180deg);
--bs-accordion-btn-icon-transition: transform 0.2s ease-in-out; --bs-accordion-btn-icon-transition: transform 0.2s ease-in-out;
--bs-accordion-btn-active-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23052c65'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e"); --bs-accordion-btn-active-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='none' stroke='%23052c65' stroke-linecap='round' stroke-linejoin='round'%3e%3cpath d='M2 5L8 11L14 5'/%3e%3c/svg%3e");
--bs-accordion-btn-focus-border-color: #86b7fe;
--bs-accordion-btn-focus-box-shadow: 0 0 0 0.25rem rgba(13, 110, 253, 0.25); --bs-accordion-btn-focus-box-shadow: 0 0 0 0.25rem rgba(13, 110, 253, 0.25);
--bs-accordion-body-padding-x: 1.25rem; --bs-accordion-body-padding-x: 1.25rem;
--bs-accordion-body-padding-y: 1rem; --bs-accordion-body-padding-y: 1rem;
@ -4636,7 +4638,6 @@ textarea.form-control-lg {
} }
.accordion-button:focus { .accordion-button:focus {
z-index: 3; z-index: 3;
border-color: var(--bs-accordion-btn-focus-border-color);
outline: 0; outline: 0;
box-shadow: var(--bs-accordion-btn-focus-box-shadow); box-shadow: var(--bs-accordion-btn-focus-box-shadow);
} }
@ -4654,7 +4655,7 @@ textarea.form-control-lg {
border-top-left-radius: var(--bs-accordion-border-radius); border-top-left-radius: var(--bs-accordion-border-radius);
border-top-right-radius: var(--bs-accordion-border-radius); border-top-right-radius: var(--bs-accordion-border-radius);
} }
.accordion-item:first-of-type .accordion-button { .accordion-item:first-of-type > .accordion-header .accordion-button {
border-top-left-radius: var(--bs-accordion-inner-border-radius); border-top-left-radius: var(--bs-accordion-inner-border-radius);
border-top-right-radius: var(--bs-accordion-inner-border-radius); border-top-right-radius: var(--bs-accordion-inner-border-radius);
} }
@ -4665,11 +4666,11 @@ textarea.form-control-lg {
border-bottom-right-radius: var(--bs-accordion-border-radius); border-bottom-right-radius: var(--bs-accordion-border-radius);
border-bottom-left-radius: var(--bs-accordion-border-radius); border-bottom-left-radius: var(--bs-accordion-border-radius);
} }
.accordion-item:last-of-type .accordion-button.collapsed { .accordion-item:last-of-type > .accordion-header .accordion-button.collapsed {
border-bottom-right-radius: var(--bs-accordion-inner-border-radius); border-bottom-right-radius: var(--bs-accordion-inner-border-radius);
border-bottom-left-radius: var(--bs-accordion-inner-border-radius); border-bottom-left-radius: var(--bs-accordion-inner-border-radius);
} }
.accordion-item:last-of-type .accordion-collapse { .accordion-item:last-of-type > .accordion-collapse {
border-bottom-right-radius: var(--bs-accordion-border-radius); border-bottom-right-radius: var(--bs-accordion-border-radius);
border-bottom-left-radius: var(--bs-accordion-border-radius); border-bottom-left-radius: var(--bs-accordion-border-radius);
} }
@ -4678,21 +4679,21 @@ textarea.form-control-lg {
padding: var(--bs-accordion-body-padding-y) var(--bs-accordion-body-padding-x); padding: var(--bs-accordion-body-padding-y) var(--bs-accordion-body-padding-x);
} }
.accordion-flush .accordion-collapse { .accordion-flush > .accordion-item {
border-width: 0;
}
.accordion-flush .accordion-item {
border-right: 0; border-right: 0;
border-left: 0; border-left: 0;
border-radius: 0; border-radius: 0;
} }
.accordion-flush .accordion-item:first-child { .accordion-flush > .accordion-item:first-child {
border-top: 0; border-top: 0;
} }
.accordion-flush .accordion-item:last-child { .accordion-flush > .accordion-item:last-child {
border-bottom: 0; border-bottom: 0;
} }
.accordion-flush .accordion-item .accordion-button, .accordion-flush .accordion-item .accordion-button.collapsed { .accordion-flush > .accordion-item > .accordion-header .accordion-button, .accordion-flush > .accordion-item > .accordion-header .accordion-button.collapsed {
border-radius: 0;
}
.accordion-flush > .accordion-item > .accordion-collapse {
border-radius: 0; border-radius: 0;
} }
@ -5578,7 +5579,6 @@ textarea.form-control-lg {
display: flex; display: flex;
flex-shrink: 0; flex-shrink: 0;
align-items: center; align-items: center;
justify-content: space-between;
padding: var(--bs-modal-header-padding); padding: var(--bs-modal-header-padding);
border-bottom: var(--bs-modal-header-border-width) solid var(--bs-modal-header-border-color); border-bottom: var(--bs-modal-header-border-width) solid var(--bs-modal-header-border-color);
border-top-left-radius: var(--bs-modal-inner-border-radius); border-top-left-radius: var(--bs-modal-inner-border-radius);
@ -6144,20 +6144,12 @@ textarea.form-control-lg {
background-size: 100% 100%; background-size: 100% 100%;
} }
/* rtl:options: {
"autoRename": true,
"stringMap":[ {
"name" : "prev-next",
"search" : "prev",
"replace" : "next"
} ]
} */
.carousel-control-prev-icon { .carousel-control-prev-icon {
background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e"); background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e") /*rtl:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e")*/;
} }
.carousel-control-next-icon { .carousel-control-next-icon {
background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e"); background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e") /*rtl:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e")*/;
} }
.carousel-indicators { .carousel-indicators {
@ -6777,14 +6769,11 @@ textarea.form-control-lg {
.offcanvas-header { .offcanvas-header {
display: flex; display: flex;
align-items: center; align-items: center;
justify-content: space-between;
padding: var(--bs-offcanvas-padding-y) var(--bs-offcanvas-padding-x); padding: var(--bs-offcanvas-padding-y) var(--bs-offcanvas-padding-x);
} }
.offcanvas-header .btn-close { .offcanvas-header .btn-close {
padding: calc(var(--bs-offcanvas-padding-y) * 0.5) calc(var(--bs-offcanvas-padding-x) * 0.5); padding: calc(var(--bs-offcanvas-padding-y) * 0.5) calc(var(--bs-offcanvas-padding-x) * 0.5);
margin-top: calc(-0.5 * var(--bs-offcanvas-padding-y)); margin: calc(-0.5 * var(--bs-offcanvas-padding-y)) calc(-0.5 * var(--bs-offcanvas-padding-x)) calc(-0.5 * var(--bs-offcanvas-padding-y)) auto;
margin-right: calc(-0.5 * var(--bs-offcanvas-padding-x));
margin-bottom: calc(-0.5 * var(--bs-offcanvas-padding-y));
} }
.offcanvas-title { .offcanvas-title {
@ -12064,3 +12053,5 @@ textarea.form-control-lg {
display: none !important; display: none !important;
} }
} }
/*# sourceMappingURL=bootstrap.css.map */

18
src/static/scripts/datatables.css

@ -4,10 +4,10 @@
* *
* To rebuild or modify this file with the latest versions of the included * To rebuild or modify this file with the latest versions of the included
* software please visit: * software please visit:
* https://datatables.net/download/#bs5/dt-2.0.0 * https://datatables.net/download/#bs5/dt-2.0.7
* *
* Included libraries: * Included libraries:
* DataTables 2.0.0 * DataTables 2.0.7
*/ */
@charset "UTF-8"; @charset "UTF-8";
@ -347,7 +347,7 @@ table.table.dataTable.table-striped > tbody > tr:nth-of-type(2n+1) > * {
box-shadow: none; box-shadow: none;
} }
table.table.dataTable > :not(caption) > * > * { table.table.dataTable > :not(caption) > * > * {
background-color: transparent; background-color: var(--bs-table-bg);
} }
table.table.dataTable > tbody > tr { table.table.dataTable > tbody > tr {
background-color: transparent; background-color: transparent;
@ -463,10 +463,18 @@ div.dt-scroll-foot > .dt-scroll-footInner > table > tfoot > tr:first-child {
justify-content: center !important; justify-content: center !important;
} }
} }
table.dataTable.table-sm > thead > tr > th:not(.sorting_disabled) { table.dataTable.table-sm > thead > tr th.dt-orderable-asc, table.dataTable.table-sm > thead > tr th.dt-orderable-desc, table.dataTable.table-sm > thead > tr th.dt-ordering-asc, table.dataTable.table-sm > thead > tr th.dt-ordering-desc,
table.dataTable.table-sm > thead > tr td.dt-orderable-asc,
table.dataTable.table-sm > thead > tr td.dt-orderable-desc,
table.dataTable.table-sm > thead > tr td.dt-ordering-asc,
table.dataTable.table-sm > thead > tr td.dt-ordering-desc {
padding-right: 20px; padding-right: 20px;
} }
table.dataTable.table-sm > thead > tr > th:not(.sorting_disabled):before, table.dataTable.table-sm > thead > tr > th:not(.sorting_disabled):after { table.dataTable.table-sm > thead > tr th.dt-orderable-asc span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-orderable-desc span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-ordering-asc span.dt-column-order, table.dataTable.table-sm > thead > tr th.dt-ordering-desc span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-orderable-asc span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-orderable-desc span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-ordering-asc span.dt-column-order,
table.dataTable.table-sm > thead > tr td.dt-ordering-desc span.dt-column-order {
right: 5px; right: 5px;
} }

1085
src/static/scripts/datatables.js

File diff suppressed because it is too large

61
src/static/scripts/jdenticon.js → src/static/scripts/jdenticon-3.3.0.js

@ -1,12 +1,12 @@
/** /**
* Jdenticon 3.2.0 * Jdenticon 3.3.0
* http://jdenticon.com * http://jdenticon.com
* *
* Built: 2022-08-07T11:23:11.640Z * Built: 2024-05-10T09:48:41.921Z
* *
* MIT License * MIT License
* *
* Copyright (c) 2014-2021 Daniel Mester Pirttijärvi * Copyright (c) 2014-2024 Daniel Mester Pirttijärvi
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy * Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal * of this software and associated documentation files (the "Software"), to deal
@ -304,6 +304,8 @@ var ATTRIBUTES = {
o/*VALUE*/: "data-jdenticon-value" o/*VALUE*/: "data-jdenticon-value"
}; };
var IS_RENDERED_PROPERTY = "jdenticonRendered";
var ICON_SELECTOR = "[" + ATTRIBUTES.t/*HASH*/ +"],[" + ATTRIBUTES.o/*VALUE*/ +"]"; var ICON_SELECTOR = "[" + ATTRIBUTES.t/*HASH*/ +"],[" + ATTRIBUTES.o/*VALUE*/ +"]";
var documentQuerySelectorAll = /** @type {!Function} */ ( var documentQuerySelectorAll = /** @type {!Function} */ (
@ -323,6 +325,27 @@ function getIdenticonType(el) {
} }
} }
function whenDocumentIsReady(/** @type {Function} */ callback) {
function loadedHandler() {
document.removeEventListener("DOMContentLoaded", loadedHandler);
window.removeEventListener("load", loadedHandler);
setTimeout(callback, 0); // Give scripts a chance to run
}
if (typeof document !== "undefined" &&
typeof window !== "undefined" &&
typeof setTimeout !== "undefined"
) {
if (document.readyState === "loading") {
document.addEventListener("DOMContentLoaded", loadedHandler);
window.addEventListener("load", loadedHandler);
} else {
// Document already loaded. The load events above likely won't be raised
setTimeout(callback, 0);
}
}
}
function observer(updateCallback) { function observer(updateCallback) {
if (typeof MutationObserver != "undefined") { if (typeof MutationObserver != "undefined") {
var mutationObserver = new MutationObserver(function onmutation(mutations) { var mutationObserver = new MutationObserver(function onmutation(mutations) {
@ -1011,6 +1034,11 @@ function drawIcon(ctx, hashOrValue, size, config) {
iconGenerator(new CanvasRenderer(ctx, size), iconGenerator(new CanvasRenderer(ctx, size),
isValidHash(hashOrValue) || computeHash(hashOrValue), isValidHash(hashOrValue) || computeHash(hashOrValue),
config); config);
var canvas = ctx.canvas;
if (canvas) {
canvas[IS_RENDERED_PROPERTY] = true;
}
} }
/** /**
@ -1313,6 +1341,24 @@ function updateAll() {
} }
} }
/**
* Updates all canvas elements with the `data-jdenticon-hash` or `data-jdenticon-value` attribute that have not already
* been rendered.
*/
function updateAllConditional() {
if (documentQuerySelectorAll) {
/** @type {NodeListOf<HTMLElement>} */
var elements = documentQuerySelectorAll(ICON_SELECTOR);
for (var i = 0; i < elements.length; i++) {
var el = elements[i];
if (!el[IS_RENDERED_PROPERTY]) {
update(el);
}
}
}
}
/** /**
* Updates the identicon in the specified `<canvas>` or `<svg>` elements. * Updates the identicon in the specified `<canvas>` or `<svg>` elements.
* @param {(string|Element)} el - Specifies the container in which the icon is rendered as a DOM element of the type * @param {(string|Element)} el - Specifies the container in which the icon is rendered as a DOM element of the type
@ -1381,6 +1427,7 @@ function renderDomElement(el, hashOrValue, config, rendererFactory) {
if (renderer) { if (renderer) {
// Draw icon // Draw icon
iconGenerator(renderer, hash, config); iconGenerator(renderer, hash, config);
el[IS_RENDERED_PROPERTY] = true;
} }
} }
@ -1419,7 +1466,7 @@ jdenticon["updateSvg"] = update;
* Specifies the version of the Jdenticon package in use. * Specifies the version of the Jdenticon package in use.
* @type {string} * @type {string}
*/ */
jdenticon["version"] = "3.2.0"; jdenticon["version"] = "3.3.0";
/** /**
* Specifies which bundle of Jdenticon that is used. * Specifies which bundle of Jdenticon that is used.
@ -1444,7 +1491,7 @@ function jdenticonStartup() {
)["replaceMode"]; )["replaceMode"];
if (replaceMode != "never") { if (replaceMode != "never") {
updateAll(); updateAllConditional();
if (replaceMode == "observe") { if (replaceMode == "observe") {
observer(update); observer(update);
@ -1453,9 +1500,7 @@ function jdenticonStartup() {
} }
// Schedule to render all identicons on the page once it has been loaded. // Schedule to render all identicons on the page once it has been loaded.
if (typeof setTimeout === "function") { whenDocumentIsReady(jdenticonStartup);
setTimeout(jdenticonStartup, 0);
}
return jdenticon; return jdenticon;

12
src/static/templates/admin/organizations.hbs

@ -17,12 +17,12 @@
{{#each page_data}} {{#each page_data}}
<tr> <tr>
<td> <td>
<svg width="48" height="48" class="float-start me-2 rounded" data-jdenticon-value="{{Id}}"> <svg width="48" height="48" class="float-start me-2 rounded" data-jdenticon-value="{{id}}">
<div class="float-start"> <div class="float-start">
<strong>{{Name}}</strong> <strong>{{name}}</strong>
<span class="me-2">({{BillingEmail}})</span> <span class="me-2">({{billingEmail}})</span>
<span class="d-block"> <span class="d-block">
<span class="badge bg-success font-monospace">{{Id}}</span> <span class="badge bg-success font-monospace">{{id}}</span>
</span> </span>
</div> </div>
</td> </td>
@ -44,7 +44,7 @@
<span class="d-block"><strong>Events:</strong> {{event_count}}</span> <span class="d-block"><strong>Events:</strong> {{event_count}}</span>
</td> </td>
<td class="text-end px-0 small"> <td class="text-end px-0 small">
<button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-delete-organization data-vw-org-uuid="{{jsesc Id no_quote}}" data-vw-org-name="{{jsesc Name no_quote}}" data-vw-billing-email="{{jsesc BillingEmail no_quote}}">Delete Organization</button><br> <button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-delete-organization data-vw-org-uuid="{{jsesc id no_quote}}" data-vw-org-name="{{jsesc name no_quote}}" data-vw-billing-email="{{jsesc billingEmail no_quote}}">Delete Organization</button><br>
</td> </td>
</tr> </tr>
{{/each}} {{/each}}
@ -62,4 +62,4 @@
<script src="{{urlpath}}/vw_static/jquery-3.7.1.slim.js"></script> <script src="{{urlpath}}/vw_static/jquery-3.7.1.slim.js"></script>
<script src="{{urlpath}}/vw_static/datatables.js"></script> <script src="{{urlpath}}/vw_static/datatables.js"></script>
<script src="{{urlpath}}/vw_static/admin_organizations.js"></script> <script src="{{urlpath}}/vw_static/admin_organizations.js"></script>
<script src="{{urlpath}}/vw_static/jdenticon.js"></script> <script src="{{urlpath}}/vw_static/jdenticon-3.3.0.js"></script>

26
src/static/templates/admin/users.hbs

@ -18,21 +18,21 @@
{{#each page_data}} {{#each page_data}}
<tr> <tr>
<td> <td>
<svg width="48" height="48" class="float-start me-2 rounded" data-jdenticon-value="{{Email}}"> <svg width="48" height="48" class="float-start me-2 rounded" data-jdenticon-value="{{email}}">
<div class="float-start"> <div class="float-start">
<strong>{{Name}}</strong> <strong>{{name}}</strong>
<span class="d-block">{{Email}}</span> <span class="d-block">{{email}}</span>
<span class="d-block"> <span class="d-block">
{{#unless user_enabled}} {{#unless user_enabled}}
<span class="badge bg-danger me-2" title="User is disabled">Disabled</span> <span class="badge bg-danger me-2" title="User is disabled">Disabled</span>
{{/unless}} {{/unless}}
{{#if TwoFactorEnabled}} {{#if twoFactorEnabled}}
<span class="badge bg-success me-2" title="2FA is enabled">2FA</span> <span class="badge bg-success me-2" title="2FA is enabled">2FA</span>
{{/if}} {{/if}}
{{#case _Status 1}} {{#case _status 1}}
<span class="badge bg-warning text-dark me-2" title="User is invited">Invited</span> <span class="badge bg-warning text-dark me-2" title="User is invited">Invited</span>
{{/case}} {{/case}}
{{#if EmailVerified}} {{#if emailVerified}}
<span class="badge bg-success me-2" title="Email has been verified">Verified</span> <span class="badge bg-success me-2" title="Email has been verified">Verified</span>
{{/if}} {{/if}}
</span> </span>
@ -54,15 +54,15 @@
{{/if}} {{/if}}
</td> </td>
<td> <td>
<div class="overflow-auto vw-org-cell" data-vw-user-email="{{jsesc Email no_quote}}" data-vw-user-uuid="{{jsesc Id no_quote}}"> <div class="overflow-auto vw-org-cell" data-vw-user-email="{{jsesc email no_quote}}" data-vw-user-uuid="{{jsesc id no_quote}}">
{{#each Organizations}} {{#each organizations}}
<button class="badge" data-bs-toggle="modal" data-bs-target="#userOrgTypeDialog" data-vw-org-type="{{Type}}" data-vw-org-uuid="{{jsesc Id no_quote}}" data-vw-org-name="{{jsesc Name no_quote}}">{{Name}}</button> <button class="badge" data-bs-toggle="modal" data-bs-target="#userOrgTypeDialog" data-vw-org-type="{{type}}" data-vw-org-uuid="{{jsesc id no_quote}}" data-vw-org-name="{{jsesc name no_quote}}">{{name}}</button>
{{/each}} {{/each}}
</div> </div>
</td> </td>
<td class="text-end px-0 small"> <td class="text-end px-0 small">
<span data-vw-user-uuid="{{jsesc Id no_quote}}" data-vw-user-email="{{jsesc Email no_quote}}"> <span data-vw-user-uuid="{{jsesc id no_quote}}" data-vw-user-email="{{jsesc email no_quote}}">
{{#if TwoFactorEnabled}} {{#if twoFactorEnabled}}
<button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-remove2fa>Remove all 2FA</button><br> <button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-remove2fa>Remove all 2FA</button><br>
{{/if}} {{/if}}
<button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-deauth-user>Deauthorize sessions</button><br> <button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-deauth-user>Deauthorize sessions</button><br>
@ -72,7 +72,7 @@
{{else}} {{else}}
<button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-enable-user>Enable User</button><br> <button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-enable-user>Enable User</button><br>
{{/if}} {{/if}}
{{#case _Status 1}} {{#case _status 1}}
<button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-resend-user-invite>Resend invite</button><br> <button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-resend-user-invite>Resend invite</button><br>
{{/case}} {{/case}}
</span> </span>
@ -143,4 +143,4 @@
<script src="{{urlpath}}/vw_static/jquery-3.7.1.slim.js"></script> <script src="{{urlpath}}/vw_static/jquery-3.7.1.slim.js"></script>
<script src="{{urlpath}}/vw_static/datatables.js"></script> <script src="{{urlpath}}/vw_static/datatables.js"></script>
<script src="{{urlpath}}/vw_static/admin_users.js"></script> <script src="{{urlpath}}/vw_static/admin_users.js"></script>
<script src="{{urlpath}}/vw_static/jdenticon.js"></script> <script src="{{urlpath}}/vw_static/jdenticon-3.3.0.js"></script>

2
src/static/templates/email/change_email.hbs

@ -2,5 +2,5 @@ Your Email Change
<!----------------> <!---------------->
To finalize changing your email address enter the following code in web vault: {{token}} To finalize changing your email address enter the following code in web vault: {{token}}
If you did not try to change an email address, you can safely ignore this email. If you did not try to change your email address, contact your administrator.
{{> email/email_footer_text }} {{> email/email_footer_text }}

2
src/static/templates/email/change_email.html.hbs

@@ -9,7 +9,7 @@ Your Email Change
 </tr>
 <tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
 <td class="content-block last" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0; -webkit-text-size-adjust: none; text-align: center;" valign="top" align="center">
-If you did not try to change an email address, you can safely ignore this email.
+If you did not try to change your email address, contact your administrator.
 </td>
 </tr>
 </table>

367
src/util.rs

@@ -1,13 +1,10 @@
 //
 // Web Headers and caching
 //
-use std::{
-    collections::HashMap,
-    io::{Cursor, ErrorKind},
-    ops::Deref,
-};
+use std::{collections::HashMap, io::Cursor, ops::Deref, path::Path};
 use num_traits::ToPrimitive;
+use once_cell::sync::Lazy;
 use rocket::{
     fairing::{Fairing, Info, Kind},
     http::{ContentType, Header, HeaderMap, Method, Status},
@@ -218,7 +215,7 @@ impl<'r, R: 'r + Responder<'r, 'static> + Send> Responder<'r, 'static> for Cache
         res.set_raw_header("Cache-Control", cache_control_header);
         let time_now = chrono::Local::now();
-        let expiry_time = time_now + chrono::Duration::seconds(self.ttl.try_into().unwrap());
+        let expiry_time = time_now + chrono::TimeDelta::try_seconds(self.ttl.try_into().unwrap()).unwrap();
         res.set_raw_header("Expires", format_datetime_http(&expiry_time));
         Ok(res)
     }
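For context on the chrono change above: `Duration::seconds` panics when the value is out of range, while `TimeDelta::try_seconds` returns an `Option`, making the overflow case explicit at the call site. A minimal standalone sketch of the fallible constructor (not vaultwarden code):

use chrono::TimeDelta;

fn main() {
    // Ordinary TTLs produce Some(delta)...
    assert!(TimeDelta::try_seconds(30).is_some());
    // ...while a value that overflows the internal millisecond representation
    // yields None instead of panicking, as chrono::Duration::seconds would.
    assert!(TimeDelta::try_seconds(i64::MAX).is_none());
}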
@@ -334,40 +331,6 @@ impl Fairing for BetterLogging {
     }
 }
-//
-// File handling
-//
-use std::{
-    fs::{self, File},
-    io::Result as IOResult,
-    path::Path,
-};
-pub fn file_exists(path: &str) -> bool {
-    Path::new(path).exists()
-}
-pub fn write_file(path: &str, content: &[u8]) -> Result<(), crate::error::Error> {
-    use std::io::Write;
-    let mut f = match File::create(path) {
-        Ok(file) => file,
-        Err(e) => {
-            if e.kind() == ErrorKind::PermissionDenied {
-                error!("Can't create '{}': Permission denied", path);
-            }
-            return Err(From::from(e));
-        }
-    };
-    f.write_all(content)?;
-    f.flush()?;
-    Ok(())
-}
-pub fn delete_file(path: &str) -> IOResult<()> {
-    fs::remove_file(path)
-}
 pub fn get_display_size(size: i64) -> String {
     const UNITS: [&str; 6] = ["bytes", "KB", "MB", "GB", "TB", "PB"];
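The removed helpers were thin wrappers over `std::fs`; call sites can use the standard library directly. A hedged sketch of the equivalents (not necessarily what the branch does at each call site):

use std::path::Path;

fn write_file(path: &str, content: &[u8]) -> std::io::Result<()> {
    // std::fs::write creates (or truncates) the file and writes in one call.
    std::fs::write(path, content)
}

fn file_exists(path: &str) -> bool {
    Path::new(path).exists()
}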
@@ -444,7 +407,7 @@ pub fn get_env_str_value(key: &str) -> Option<String> {
     match (value_from_env, value_file) {
         (Ok(_), Ok(_)) => panic!("You should not define both {key} and {key_file}!"),
         (Ok(v_env), Err(_)) => Some(v_env),
-        (Err(_), Ok(v_file)) => match fs::read_to_string(v_file) {
+        (Err(_), Ok(v_file)) => match std::fs::read_to_string(v_file) {
             Ok(content) => Some(content.trim().to_string()),
             Err(e) => panic!("Failed to load {key}: {e:?}"),
         },
@@ -558,30 +521,38 @@ pub fn container_base_image() -> &'static str {
 use std::fmt;
 use serde::de::{self, DeserializeOwned, Deserializer, MapAccess, SeqAccess, Visitor};
-use serde_json::{self, Value};
+use serde_json::Value;
 pub type JsonMap = serde_json::Map<String, Value>;
 #[derive(Serialize, Deserialize)]
-pub struct UpCase<T: DeserializeOwned> {
-    #[serde(deserialize_with = "upcase_deserialize")]
+pub struct LowerCase<T: DeserializeOwned> {
+    #[serde(deserialize_with = "lowercase_deserialize")]
     #[serde(flatten)]
     pub data: T,
 }
+impl Default for LowerCase<Value> {
+    fn default() -> Self {
+        Self {
+            data: Value::Null,
+        }
+    }
+}
 // https://github.com/serde-rs/serde/issues/586
-pub fn upcase_deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
+pub fn lowercase_deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
 where
     T: DeserializeOwned,
     D: Deserializer<'de>,
 {
-    let d = deserializer.deserialize_any(UpCaseVisitor)?;
+    let d = deserializer.deserialize_any(LowerCaseVisitor)?;
     T::deserialize(d).map_err(de::Error::custom)
 }
-struct UpCaseVisitor;
-impl<'de> Visitor<'de> for UpCaseVisitor {
+struct LowerCaseVisitor;
+impl<'de> Visitor<'de> for LowerCaseVisitor {
     type Value = Value;
     fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -595,7 +566,7 @@ impl<'de> Visitor<'de> for UpCaseVisitor {
         let mut result_map = JsonMap::new();
         while let Some((key, value)) = map.next_entry()? {
-            result_map.insert(upcase_first(key), upcase_value(value));
+            result_map.insert(_process_key(key), convert_json_key_lcase_first(value));
         }
         Ok(Value::Object(result_map))
@@ -608,45 +579,23 @@ impl<'de> Visitor<'de> for UpCaseVisitor {
         let mut result_seq = Vec::<Value>::new();
         while let Some(value) = seq.next_element()? {
-            result_seq.push(upcase_value(value));
+            result_seq.push(convert_json_key_lcase_first(value));
         }
         Ok(Value::Array(result_seq))
     }
 }
-fn upcase_value(value: Value) -> Value {
-    if let Value::Object(map) = value {
-        let mut new_value = Value::Object(serde_json::Map::new());
-        for (key, val) in map.into_iter() {
-            let processed_key = _process_key(&key);
-            new_value[processed_key] = upcase_value(val);
-        }
-        new_value
-    } else if let Value::Array(array) = value {
-        // Initialize array with null values
-        let mut new_value = Value::Array(vec![Value::Null; array.len()]);
-        for (index, val) in array.into_iter().enumerate() {
-            new_value[index] = upcase_value(val);
-        }
-        new_value
-    } else {
-        value
-    }
-}
 // Inner function to handle a special case for the 'ssn' key.
 // This key is part of the Identity Cipher (Social Security Number)
 fn _process_key(key: &str) -> String {
     match key.to_lowercase().as_ref() {
-        "ssn" => "SSN".into(),
-        _ => self::upcase_first(key),
+        "ssn" => "ssn".into(),
+        _ => self::lcase_first(key),
     }
 }
-#[derive(Deserialize, Debug, Clone)]
+#[derive(Clone, Debug, Deserialize)]
 #[serde(untagged)]
 pub enum NumberOrString {
     Number(i64),
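A stand-in sketch (not the vaultwarden code) of what `_process_key` now does: lowercase the first letter of each key, except "ssn", which maps to "ssn" wholesale so it does not come out as "sSN"-style output. `lcase_first` here is a local reimplementation for illustration:

fn lcase_first(s: &str) -> String {
    let mut chars = s.chars();
    match chars.next() {
        Some(first) => first.to_lowercase().collect::<String>() + chars.as_str(),
        None => String::new(),
    }
}

fn process_key(key: &str) -> String {
    match key.to_lowercase().as_ref() {
        "ssn" => "ssn".into(),
        _ => lcase_first(key),
    }
}

fn main() {
    assert_eq!(process_key("TwoFactorEnabled"), "twoFactorEnabled");
    assert_eq!(process_key("SSN"), "ssn");
}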
@@ -739,14 +688,9 @@ where
 use reqwest::{header, Client, ClientBuilder};
-pub fn get_reqwest_client() -> Client {
-    match get_reqwest_client_builder().build() {
-        Ok(client) => client,
-        Err(e) => {
-            error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
-            get_reqwest_client_builder().trust_dns(false).build().expect("Failed to build client")
-        }
-    }
+pub fn get_reqwest_client() -> &'static Client {
+    static INSTANCE: Lazy<Client> = Lazy::new(|| get_reqwest_client_builder().build().expect("Failed to build client"));
+    &INSTANCE
 }
 pub fn get_reqwest_client_builder() -> ClientBuilder {
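The new version hands out one process-wide `Client` built on first use, so every caller shares its connection pool instead of constructing a fresh client per call. A self-contained sketch of the `once_cell::sync::Lazy` pattern (generic, not the vaultwarden builder):

use once_cell::sync::Lazy;
use reqwest::Client;

fn shared_client() -> &'static Client {
    // Built exactly once, on the first call; later calls return the same instance.
    static INSTANCE: Lazy<Client> = Lazy::new(|| Client::builder().build().expect("Failed to build client"));
    &INSTANCE
}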
@@ -768,25 +712,25 @@ pub fn convert_json_key_lcase_first(src_json: Value) -> Value {
         Value::Object(obj) => {
             let mut json_map = JsonMap::new();
-            for (key, value) in obj.iter() {
+            for (key, value) in obj.into_iter() {
                 match (key, value) {
                     (key, Value::Object(elm)) => {
-                        let inner_value = convert_json_key_lcase_first(Value::Object(elm.clone()));
-                        json_map.insert(lcase_first(key), inner_value);
+                        let inner_value = convert_json_key_lcase_first(Value::Object(elm));
+                        json_map.insert(_process_key(&key), inner_value);
                     }
                     (key, Value::Array(elm)) => {
                         let mut inner_array: Vec<Value> = Vec::with_capacity(elm.len());
                         for inner_obj in elm {
-                            inner_array.push(convert_json_key_lcase_first(inner_obj.clone()));
+                            inner_array.push(convert_json_key_lcase_first(inner_obj));
                        }
-                        json_map.insert(lcase_first(key), Value::Array(inner_array));
+                        json_map.insert(_process_key(&key), Value::Array(inner_array));
                     }
                     (key, value) => {
-                        json_map.insert(lcase_first(key), value.clone());
+                        json_map.insert(_process_key(&key), value);
                     }
                 }
             }
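With `into_iter` the function now consumes its input rather than cloning every key and value. A hedged usage sketch, assuming the function above is in scope:

fn main() {
    let v = serde_json::json!({"Name": "Work", "Object": "folder"});
    let out = convert_json_key_lcase_first(v); // `v` is moved, not cloned
    assert_eq!(out["name"], "Work");
    assert_eq!(out["object"], "folder");
}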
@@ -805,3 +749,248 @@ pub fn parse_experimental_client_feature_flags(experimental_client_feature_flags
     feature_states
 }
mod dns_resolver {
    use std::{
        fmt,
        net::{IpAddr, SocketAddr},
        sync::Arc,
    };

    use hickory_resolver::{system_conf::read_system_conf, TokioAsyncResolver};
    use once_cell::sync::Lazy;
    use reqwest::dns::{Name, Resolve, Resolving};

    use crate::{util::is_global, CONFIG};

    #[derive(Debug, Clone)]
    pub enum CustomResolverError {
        Blacklist {
            domain: String,
        },
        NonGlobalIp {
            domain: String,
            ip: IpAddr,
        },
    }

    impl CustomResolverError {
        pub fn downcast_ref(e: &dyn std::error::Error) -> Option<&Self> {
            let mut source = e.source();
            while let Some(err) = source {
                source = err.source();
                if let Some(err) = err.downcast_ref::<CustomResolverError>() {
                    return Some(err);
                }
            }
            None
        }
    }

    impl fmt::Display for CustomResolverError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            match self {
                Self::Blacklist {
                    domain,
                } => write!(f, "Blacklisted domain: {domain} matched ICON_BLACKLIST_REGEX"),
                Self::NonGlobalIp {
                    domain,
                    ip,
                } => write!(f, "IP {ip} for domain '{domain}' is not a global IP!"),
            }
        }
    }

    impl std::error::Error for CustomResolverError {}

    #[derive(Debug, Clone)]
    pub enum CustomDnsResolver {
        Default(),
        Hickory(Arc<TokioAsyncResolver>),
    }

    type BoxError = Box<dyn std::error::Error + Send + Sync>;

    impl CustomDnsResolver {
        pub fn instance() -> Arc<Self> {
            static INSTANCE: Lazy<Arc<CustomDnsResolver>> = Lazy::new(CustomDnsResolver::new);
            Arc::clone(&*INSTANCE)
        }

        fn new() -> Arc<Self> {
            match read_system_conf() {
                Ok((config, opts)) => {
                    let resolver = TokioAsyncResolver::tokio(config.clone(), opts.clone());
                    Arc::new(Self::Hickory(Arc::new(resolver)))
                }
                Err(e) => {
                    warn!("Error creating Hickory resolver, falling back to default: {e:?}");
                    Arc::new(Self::Default())
                }
            }
        }

        // Note that we get an iterator of addresses, but we only grab the first one for convenience
        async fn resolve_domain(&self, name: &str) -> Result<Option<SocketAddr>, BoxError> {
            pre_resolve(name)?;

            let result = match self {
                Self::Default() => tokio::net::lookup_host(name).await?.next(),
                Self::Hickory(r) => r.lookup_ip(name).await?.iter().next().map(|a| SocketAddr::new(a, 0)),
            };

            if let Some(addr) = &result {
                post_resolve(name, addr.ip())?;
            }

            Ok(result)
        }
    }

    fn pre_resolve(name: &str) -> Result<(), CustomResolverError> {
        if crate::api::is_domain_blacklisted(name) {
            return Err(CustomResolverError::Blacklist {
                domain: name.to_string(),
            });
        }
        Ok(())
    }

    fn post_resolve(name: &str, ip: IpAddr) -> Result<(), CustomResolverError> {
        if CONFIG.icon_blacklist_non_global_ips() && !is_global(ip) {
            Err(CustomResolverError::NonGlobalIp {
                domain: name.to_string(),
                ip,
            })
        } else {
            Ok(())
        }
    }

    impl Resolve for CustomDnsResolver {
        fn resolve(&self, name: Name) -> Resolving {
            let this = self.clone();
            Box::pin(async move {
                let name = name.as_str();
                let result = this.resolve_domain(name).await?;
                Ok::<reqwest::dns::Addrs, _>(Box::new(result.into_iter()))
            })
        }
    }
}

pub use dns_resolver::{CustomDnsResolver, CustomResolverError};
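For orientation: implementors of `reqwest::dns::Resolve` are plugged in via `ClientBuilder::dns_resolver`, which takes an `Arc`. A hedged sketch of the wiring (the actual hookup presumably lives in `get_reqwest_client_builder`, which this hunk does not show):

fn build_client() -> reqwest::Result<reqwest::Client> {
    reqwest::Client::builder()
        // instance() returns Arc<CustomDnsResolver>, satisfying the
        // Arc<impl Resolve> parameter of dns_resolver.
        .dns_resolver(CustomDnsResolver::instance())
        .build()
}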
/// TODO: This is extracted from IpAddr::is_global, which is unstable:
/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
#[allow(clippy::nonminimal_bool)]
#[cfg(any(not(feature = "unstable"), test))]
pub fn is_global_hardcoded(ip: std::net::IpAddr) -> bool {
    match ip {
        std::net::IpAddr::V4(ip) => {
            !(ip.octets()[0] == 0 // "This network"
            || ip.is_private()
            || (ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000)) // ip.is_shared()
            || ip.is_loopback()
            || ip.is_link_local()
            // addresses reserved for future protocols (`192.0.0.0/24`)
            || (ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
            || ip.is_documentation()
            || (ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18) // ip.is_benchmarking()
            || (ip.octets()[0] & 240 == 240 && !ip.is_broadcast()) // ip.is_reserved()
            || ip.is_broadcast())
        }
        std::net::IpAddr::V6(ip) => {
            !(ip.is_unspecified()
            || ip.is_loopback()
            // IPv4-mapped Address (`::ffff:0:0/96`)
            || matches!(ip.segments(), [0, 0, 0, 0, 0, 0xffff, _, _])
            // IPv4-IPv6 Translat. (`64:ff9b:1::/48`)
            || matches!(ip.segments(), [0x64, 0xff9b, 1, _, _, _, _, _])
            // Discard-Only Address Block (`100::/64`)
            || matches!(ip.segments(), [0x100, 0, 0, 0, _, _, _, _])
            // IETF Protocol Assignments (`2001::/23`)
            || (matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200)
                && !(
                    // Port Control Protocol Anycast (`2001:1::1`)
                    u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001
                    // Traversal Using Relays around NAT Anycast (`2001:1::2`)
                    || u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002
                    // AMT (`2001:3::/32`)
                    || matches!(ip.segments(), [0x2001, 3, _, _, _, _, _, _])
                    // AS112-v6 (`2001:4:112::/48`)
                    || matches!(ip.segments(), [0x2001, 4, 0x112, _, _, _, _, _])
                    // ORCHIDv2 (`2001:20::/28`)
                    || matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if (0x20..=0x2F).contains(&b))
                ))
            || ((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8)) // ip.is_documentation()
            || ((ip.segments()[0] & 0xfe00) == 0xfc00) // ip.is_unique_local()
            || ((ip.segments()[0] & 0xffc0) == 0xfe80)) // ip.is_unicast_link_local()
        }
    }
}

#[cfg(not(feature = "unstable"))]
pub use is_global_hardcoded as is_global;

#[cfg(feature = "unstable")]
#[inline(always)]
pub fn is_global(ip: std::net::IpAddr) -> bool {
    ip.is_global()
}
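A quick illustration of what the polyfill classifies, as a hedged check assuming `is_global_hardcoded` above is in scope:

use std::net::IpAddr;

fn main() {
    let cases: [(&str, bool); 4] = [
        ("10.0.0.1", false),  // RFC 1918 private
        ("127.0.0.1", false), // loopback
        ("192.0.2.1", false), // TEST-NET-1 documentation range
        ("1.1.1.1", true),    // publicly routable
    ];
    for (ip, expected) in cases {
        let ip: IpAddr = ip.parse().unwrap();
        assert_eq!(is_global_hardcoded(ip), expected);
    }
}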
/// These are some tests to check that the implementations match
/// The IPv4 addresses can all be checked in 30 seconds or so, and they are correct as of nightly 2023-07-17
/// The IPv6 addresses can't be checked in a reasonable time, so we check over a hundred billion random ones, so far correct
/// Note that the is_global implementation is subject to change as new IP RFCs are created
///
/// To run while showing progress output:
/// cargo +nightly test --release --features sqlite,unstable -- --nocapture --ignored
#[cfg(test)]
#[cfg(feature = "unstable")]
mod tests {
    use super::*;
    use std::net::IpAddr;

    #[test]
    #[ignore]
    fn test_ipv4_global() {
        // Inclusive ranges so that 255 octets (including broadcast) are covered too
        for a in 0..=u8::MAX {
            println!("Iter: {}/255", a);
            for b in 0..=u8::MAX {
                for c in 0..=u8::MAX {
                    for d in 0..=u8::MAX {
                        let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
                        assert_eq!(ip.is_global(), is_global_hardcoded(ip), "IP mismatch: {}", ip)
                    }
                }
            }
        }
    }

    #[test]
    #[ignore]
    fn test_ipv6_global() {
        use rand::Rng;

        std::thread::scope(|s| {
            for t in 0..16 {
                s.spawn(move || {
                    let mut v = [0u8; 16];
                    let mut rng = rand::thread_rng();

                    // 16 threads x 20 iterations x 500M addresses ≈ 160 billion checks
                    for i in 0..20 {
                        println!("Thread {t} Iter: {i}/20");
                        for _ in 0..500_000_000 {
                            rng.fill(&mut v);
                            let ip = IpAddr::V6(std::net::Ipv6Addr::from(v));
                            assert_eq!(ip.is_global(), is_global_hardcoded(ip), "IP mismatch: {ip}");
                        }
                    }
                });
            }
        });
    }
}

6
tools/global_domains.py

@@ -71,9 +71,9 @@ with urllib.request.urlopen(DOMAIN_LISTS_URL) as response:
 global_domains = []
 for name, domain_list in domain_lists.items():
     entry = OrderedDict()
-    entry["Type"] = enums[name]
-    entry["Domains"] = domain_list
-    entry["Excluded"] = False
+    entry["type"] = enums[name]
+    entry["domains"] = domain_list
+    entry["excluded"] = False
     global_domains.append(entry)
 # Write out the global domains JSON file.
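The generated global_domains.json entries now use lowercase keys, matching the key-casing change in src/util.rs. A hedged sketch of a matching consumer-side shape in Rust (illustrative names, not the vaultwarden types):

#[derive(serde::Deserialize)]
struct GlobalDomain {
    // `type` is a Rust keyword, so a raw identifier is needed for this field.
    r#type: i32,
    domains: Vec<String>,
    excluded: bool,
}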
