
Merge remote-tracking branch 'dani-garcia/main' into sso-for-pr

pull/1955/head
Stuart Heap, 4 years ago
parent commit f8e1739408
No known key found for this signature in database. GPG Key ID: C753450AB379AA25
Changed files (lines changed in parentheses):

  1. .dockerignore (2)
  2. .env.template (64)
  3. .github/workflows/build.yml (90)
  4. .github/workflows/hadolint.yml (7)
  5. .github/workflows/release.yml (119)
  6. .pre-commit-config.yaml (1)
  7. Cargo.lock (1130)
  8. Cargo.toml (44)
  9. README.md (2)
  10. build.rs (41)
  11. docker/Dockerfile.j2 (151)
  12. docker/Makefile (6)
  13. docker/amd64/Dockerfile (47)
  14. docker/amd64/Dockerfile.alpine (49)
  15. docker/amd64/Dockerfile.buildx (129)
  16. docker/amd64/Dockerfile.buildx.alpine (121)
  17. docker/arm64/Dockerfile (89)
  18. docker/arm64/Dockerfile.alpine (125)
  19. docker/arm64/Dockerfile.buildx (153)
  20. docker/arm64/Dockerfile.buildx.alpine (125)
  21. docker/armv6/Dockerfile (89)
  22. docker/armv6/Dockerfile.alpine (125)
  23. docker/armv6/Dockerfile.buildx (153)
  24. docker/armv6/Dockerfile.buildx.alpine (125)
  25. docker/armv7/Dockerfile (89)
  26. docker/armv7/Dockerfile.alpine (49)
  27. docker/armv7/Dockerfile.buildx (153)
  28. docker/armv7/Dockerfile.buildx.alpine (128)
  29. hooks/arches.sh (5)
  30. hooks/build (7)
  31. hooks/push (15)
  32. migrations/mysql/2021-08-30-193501_create_emergency_access/down.sql (1)
  33. migrations/mysql/2021-08-30-193501_create_emergency_access/up.sql (14)
  34. migrations/mysql/2021-10-24-164321_add_2fa_incomplete/down.sql (1)
  35. migrations/mysql/2021-10-24-164321_add_2fa_incomplete/up.sql (9)
  36. migrations/mysql/2022-01-17-234911_add_api_key/down.sql (0)
  37. migrations/mysql/2022-01-17-234911_add_api_key/up.sql (2)
  38. migrations/postgresql/2021-08-30-193501_create_emergency_access/down.sql (1)
  39. migrations/postgresql/2021-08-30-193501_create_emergency_access/up.sql (14)
  40. migrations/postgresql/2021-10-24-164321_add_2fa_incomplete/down.sql (1)
  41. migrations/postgresql/2021-10-24-164321_add_2fa_incomplete/up.sql (9)
  42. migrations/postgresql/2022-01-17-234911_add_api_key/down.sql (0)
  43. migrations/postgresql/2022-01-17-234911_add_api_key/up.sql (2)
  44. migrations/sqlite/2021-08-30-193501_create_emergency_access/down.sql (1)
  45. migrations/sqlite/2021-08-30-193501_create_emergency_access/up.sql (14)
  46. migrations/sqlite/2021-10-24-164321_add_2fa_incomplete/down.sql (1)
  47. migrations/sqlite/2021-10-24-164321_add_2fa_incomplete/up.sql (9)
  48. migrations/sqlite/2022-01-17-234911_add_api_key/down.sql (0)
  49. migrations/sqlite/2022-01-17-234911_add_api_key/up.sql (2)
  50. rust-toolchain (2)
  51. src/api/admin.rs (26)
  52. src/api/core/accounts.rs (66)
  53. src/api/core/ciphers.rs (12)
  54. src/api/core/emergency_access.rs (800)
  55. src/api/core/mod.rs (4)
  56. src/api/core/organizations.rs (336)
  57. src/api/core/sends.rs (38)
  58. src/api/core/two_factor/email.rs (18)
  59. src/api/core/two_factor/mod.rs (33)
  60. src/api/core/two_factor/webauthn.rs (14)
  61. src/api/icons.rs (91)
  62. src/api/identity.rs (123)
  63. src/api/mod.rs (2)
  64. src/api/notifications.rs (4)
  65. src/api/web.rs (17)
  66. src/auth.rs (43)
  67. src/config.rs (247)
  68. src/crypto.rs (52)
  69. src/db/models/cipher.rs (45)
  70. src/db/models/device.rs (12)
  71. src/db/models/emergency_access.rs (282)
  72. src/db/models/mod.rs (4)
  73. src/db/models/org_policy.rs (9)
  74. src/db/models/organization.rs (2)
  75. src/db/models/two_factor.rs (5)
  76. src/db/models/two_factor_incomplete.rs (108)
  77. src/db/models/user.rs (27)
  78. src/db/schemas/mysql/schema.rs (30)
  79. src/db/schemas/postgresql/schema.rs (30)
  80. src/db/schemas/sqlite/schema.rs (30)
  81. src/error.rs (13)
  82. src/mail.rs (193)
  83. src/main.rs (56)
  84. src/ratelimit.rs (38)
  85. src/static/global_domains.json (19)
  86. src/static/scripts/bootstrap-native.js (446)
  87. src/static/scripts/bootstrap.css (77)
  88. src/static/scripts/datatables.css (110)
  89. src/static/scripts/datatables.js (1019)
  90. src/static/templates/admin/base.hbs (18)
  91. src/static/templates/admin/diagnostics.hbs (6)
  92. src/static/templates/admin/organizations.hbs (10)
  93. src/static/templates/admin/settings.hbs (22)
  94. src/static/templates/admin/users.hbs (18)
  95. src/static/templates/email/email_footer.hbs (2)
  96. src/static/templates/email/email_header.hbs (2)
  97. src/static/templates/email/emergency_access_invite_accepted.hbs (8)
  98. src/static/templates/email/emergency_access_invite_accepted.html.hbs (21)
  99. src/static/templates/email/emergency_access_invite_confirmed.hbs (6)
  100. src/static/templates/email/emergency_access_invite_confirmed.html.hbs (16)

.dockerignore (2)

@@ -24,4 +24,4 @@ hooks
tools
# Web vault
web-vault
#web-vault

.env.template (64)

@@ -61,6 +61,10 @@
## To control this on a per-org basis instead, use the "Disable Send" org policy.
# SENDS_ALLOWED=true
## Controls whether users can enable emergency access to their accounts.
## This setting applies globally to all users.
# EMERGENCY_ACCESS_ALLOWED=true
## Job scheduler settings
##
## Job schedules use a cron-like syntax (as parsed by https://crates.io/crates/cron),
@@ -77,6 +81,18 @@
## Cron schedule of the job that checks for trashed items to delete permanently.
## Defaults to daily (5 minutes after midnight). Set blank to disable this job.
# TRASH_PURGE_SCHEDULE="0 5 0 * * *"
##
## Cron schedule of the job that checks for incomplete 2FA logins.
## Defaults to once every minute. Set blank to disable this job.
# INCOMPLETE_2FA_SCHEDULE="30 * * * * *"
##
## Cron schedule of the job that sends expiration reminders to emergency access grantors.
## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
# EMERGENCY_NOTIFICATION_REMINDER_SCHEDULE="0 5 * * * *"
##
## Cron schedule of the job that grants emergency access requests that have met the required wait time.
## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
# EMERGENCY_REQUEST_TIMEOUT_SCHEDULE="0 5 * * * *"
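All of the job schedules above use the cron syntax of the `cron` crate linked earlier, which takes a seconds field first (unlike classic five-field crontab). A minimal sketch of how one of these default expressions parses and when it fires, assuming the `cron` and `chrono` crates:

```rust
use std::str::FromStr;

use chrono::Utc;
use cron::Schedule;

fn main() {
    // TRASH_PURGE_SCHEDULE default "0 5 0 * * *": second 0, minute 5, hour 0,
    // i.e. daily at 00:05:00 (fields: sec min hour day-of-month month day-of-week).
    let schedule = Schedule::from_str("0 5 0 * * *").expect("valid cron expression");
    for run in schedule.upcoming(Utc).take(3) {
        println!("next purge run: {}", run);
    }
}
```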
## Enable extended logging, which shows timestamps and targets in the logs
# EXTENDED_LOGGING=true
@@ -113,10 +129,32 @@
## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
# DB_CONNECTION_RETRIES=15
## Icon service
## The predefined icon services are: internal, bitwarden, duckduckgo, google.
## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
##
## `internal` refers to Vaultwarden's built-in icon fetching implementation.
## If an external service is set, an icon request to Vaultwarden will return an HTTP
## redirect to the corresponding icon at the external service. An external service may
## be useful if your Vaultwarden instance has no external network connectivity, or if
## you are concerned that someone may probe your instance to try to detect whether icons
## for certain sites have been cached.
# ICON_SERVICE=internal
## Icon redirect code
## The HTTP status code to use for redirects to an external icon service.
## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
## Temporary redirects are useful while testing different icon services, but once a service
## has been decided on, consider using permanent redirects for cacheability. The legacy codes
## are currently better supported by the Bitwarden clients.
# ICON_REDIRECT_CODE=302
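A small illustrative sketch of how these two settings combine (the helper name is hypothetical, not Vaultwarden's actual implementation): the single `{}` in the template is filled with the requested domain, and only the four documented status codes are accepted.

```rust
/// Hypothetical helper: build the redirect target and status code for an
/// external icon service from ICON_SERVICE and ICON_REDIRECT_CODE.
fn icon_redirect(template: &str, domain: &str, code: u16) -> Option<(u16, String)> {
    // Only 301, 302, 307 and 308 are supported, per the comment above.
    if !matches!(code, 301 | 302 | 307 | 308) {
        return None;
    }
    // Replace exactly one `{}` placeholder with the domain.
    Some((code, template.replacen("{}", domain, 1)))
}

fn main() {
    let (code, location) =
        icon_redirect("https://icon.example.com/domain/{}", "example.org", 302).unwrap();
    assert_eq!(code, 302);
    assert_eq!(location, "https://icon.example.com/domain/example.org");
}
```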
## Disable icon downloading
## Set to true to disable icon downloading, this would still serve icons from $ICON_CACHE_FOLDER,
## but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
## otherwise it will delete them and they won't be downloaded again.
## Set to true to disable icon downloading in the internal icon service.
## This still serves existing icons from $ICON_CACHE_FOLDER, without generating any external
## network requests. $ICON_CACHE_TTL must also be set to 0; otherwise, the existing icons
## will be deleted eventually, but won't be downloaded again.
# DISABLE_ICON_DOWNLOAD=false
## Icon download timeout
@@ -147,7 +185,7 @@
# EMAIL_EXPIRATION_TIME=600
## Email token size
## Number of digits in an email token (min: 6, max: 19).
## Number of digits in an email 2FA token (min: 6, max: 255).
## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
# EMAIL_TOKEN_SIZE=6
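The new maximum of 255 suggests the token length now fits in a u8. A sketch of generating such an n-digit token, assuming the `rand` crate; this is illustrative only, the real implementation is in src/crypto.rs in this diff:

```rust
use rand::Rng;

// Illustrative n-digit numeric token (token_size: u8 matches the max of 255).
fn generate_email_token(token_size: u8) -> String {
    let mut rng = rand::thread_rng();
    (0..token_size).map(|_| char::from(b'0' + rng.gen_range(0..10))).collect()
}

fn main() {
    let token = generate_email_token(6);
    assert_eq!(token.len(), 6);
    println!("2FA email token: {}", token);
}
```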
@@ -208,6 +246,13 @@
## This setting applies globally, so make sure to inform all users of any changes to this setting.
# TRASH_AUTO_DELETE_DAYS=
## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
## resulting in an email notification. An incomplete 2FA login is one where the correct
## master password was provided but the required 2FA step was not completed, which
## potentially indicates a master password compromise. Set to 0 to disable this check.
## This setting applies globally to all users.
# INCOMPLETE_2FA_TIME_LIMIT=3
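A compact sketch of the check this setting drives, under the assumption (consistent with the new src/db/models/two_factor_incomplete.rs in this diff) that each 2FA-gated login attempt is recorded with a timestamp and later scanned by the INCOMPLETE_2FA_SCHEDULE job; the struct here is hypothetical:

```rust
use chrono::{Duration, NaiveDateTime, Utc};

// Hypothetical record: a login that passed the master-password check but
// has not yet completed its 2FA step.
struct IncompleteLogin {
    login_time: NaiveDateTime,
}

// The login counts as incomplete (triggering a notification email) once the
// configured number of minutes has elapsed; 0 disables the check entirely.
fn is_incomplete(rec: &IncompleteLogin, time_limit_minutes: i64) -> bool {
    time_limit_minutes > 0
        && Utc::now().naive_utc() - rec.login_time >= Duration::minutes(time_limit_minutes)
}
```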
## Controls the PBKDF password iterations to apply on the server
## The change only applies when the password is changed
# PASSWORD_ITERATIONS=100000
@@ -231,6 +276,17 @@
## Multiple values must be separated with a whitespace.
# ALLOWED_IFRAME_ANCESTORS=
## Number of seconds, on average, between login requests from the same IP address before rate limiting kicks in.
# LOGIN_RATELIMIT_SECONDS=60
## Allow a burst of requests of up to this size, while maintaining the average indicated by `LOGIN_RATELIMIT_SECONDS`.
## Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2.
# LOGIN_RATELIMIT_MAX_BURST=10
## Number of seconds, on average, between admin requests from the same IP address before rate limiting kicks in.
# ADMIN_RATELIMIT_SECONDS=300
## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
# ADMIN_RATELIMIT_MAX_BURST=3
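These settings are backed by the `governor` crate, which this change adds to Cargo.toml alongside the new src/ratelimit.rs. A sketch of a per-IP login limiter using the defaults above; the keyed-limiter setup is illustrative, not a copy of the new module:

```rust
use std::net::IpAddr;
use std::num::NonZeroU32;
use std::time::Duration;

use governor::{Quota, RateLimiter};

fn main() {
    // On average one login attempt per 60 seconds per IP (LOGIN_RATELIMIT_SECONDS),
    // allowing bursts of up to 10 (LOGIN_RATELIMIT_MAX_BURST).
    let quota = Quota::with_period(Duration::from_secs(60))
        .expect("non-zero period")
        .allow_burst(NonZeroU32::new(10).expect("non-zero burst"));
    let limiter = RateLimiter::keyed(quota);

    let ip: IpAddr = "203.0.113.7".parse().unwrap();
    match limiter.check_key(&ip) {
        Ok(_) => println!("login attempt allowed"),
        Err(_) => println!("rate limited; try again later"),
    }
}
```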
## Yubico (Yubikey) Settings
## Set your Client ID and Secret Key for Yubikey OTP
## You can generate it here: https://upgrade.yubico.com/getapikey/

.github/workflows/build.yml (90)

@@ -2,36 +2,23 @@ name: Build
on:
push:
paths-ignore:
- "*.md"
- "*.txt"
- ".dockerignore"
- ".env.template"
- ".gitattributes"
- ".gitignore"
- "azure-pipelines.yml"
- "docker/**"
- "hooks/**"
- "tools/**"
- ".github/FUNDING.yml"
- ".github/ISSUE_TEMPLATE/**"
- ".github/security-contact.gif"
paths:
- ".github/workflows/build.yml"
- "src/**"
- "migrations/**"
- "Cargo.*"
- "build.rs"
- "diesel.toml"
- "rust-toolchain"
pull_request:
# Ignore when there are only changes done to one of these paths
paths-ignore:
- "*.md"
- "*.txt"
- ".dockerignore"
- ".env.template"
- ".gitattributes"
- ".gitignore"
- "azure-pipelines.yml"
- "docker/**"
- "hooks/**"
- "tools/**"
- ".github/FUNDING.yml"
- ".github/ISSUE_TEMPLATE/**"
- ".github/security-contact.gif"
paths:
- ".github/workflows/build.yml"
- "src/**"
- "migrations/**"
- "Cargo.*"
- "build.rs"
- "diesel.toml"
- "rust-toolchain"
jobs:
build:
@@ -44,30 +31,22 @@ jobs:
matrix:
channel:
- nightly
# - stable
target-triple:
- x86_64-unknown-linux-gnu
# - x86_64-unknown-linux-musl
include:
- target-triple: x86_64-unknown-linux-gnu
host-triple: x86_64-unknown-linux-gnu
features: [sqlite,mysql,postgresql] # Remember to update the `cargo test` to match the amount of features
channel: nightly
os: ubuntu-18.04
os: ubuntu-20.04
ext: ""
# - target-triple: x86_64-unknown-linux-gnu
# host-triple: x86_64-unknown-linux-gnu
# features: "sqlite,mysql,postgresql"
# channel: stable
# os: ubuntu-18.04
# ext: ""
name: Building ${{ matrix.channel }}-${{ matrix.target-triple }}
runs-on: ${{ matrix.os }}
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f # v2.3.4
# End Checkout the repo
@@ -86,13 +65,13 @@ jobs:
# Enable Rust Caching
- uses: Swatinem/rust-cache@v1
- uses: Swatinem/rust-cache@842ef286fff290e445b90b4002cc9807c3669641 # v1.3.0
# End Enable Rust Caching
# Uses the rust-toolchain file to determine version
- name: 'Install ${{ matrix.channel }}-${{ matrix.host-triple }} for target: ${{ matrix.target-triple }}'
uses: actions-rs/toolchain@v1
uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f # v1.0.6
with:
profile: minimal
target: ${{ matrix.target-triple }}
@@ -103,28 +82,28 @@ jobs:
# Run cargo tests (In release mode to speed up future builds)
# First test all features together, afterwards test them separately.
- name: "`cargo test --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@v1
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
with:
command: test
args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}
# Test single features
# 0: sqlite
- name: "`cargo test --release --features ${{ matrix.features[0] }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@v1
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
with:
command: test
args: --release --features ${{ matrix.features[0] }} --target ${{ matrix.target-triple }}
if: ${{ matrix.features[0] != '' }}
# 1: mysql
- name: "`cargo test --release --features ${{ matrix.features[1] }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@v1
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
with:
command: test
args: --release --features ${{ matrix.features[1] }} --target ${{ matrix.target-triple }}
if: ${{ matrix.features[1] != '' }}
# 2: postgresql
- name: "`cargo test --release --features ${{ matrix.features[2] }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@v1
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
with:
command: test
args: --release --features ${{ matrix.features[2] }} --target ${{ matrix.target-triple }}
@@ -134,7 +113,7 @@ jobs:
# Run cargo clippy, and fail on warnings (In release mode to speed up future builds)
- name: "`cargo clippy --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@v1
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
with:
command: clippy
args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }} -- -D warnings
@@ -143,7 +122,7 @@ jobs:
# Run cargo fmt
- name: '`cargo fmt`'
uses: actions-rs/cargo@v1
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
with:
command: fmt
args: --all -- --check
@@ -152,7 +131,7 @@ jobs:
# Build the binary
- name: "`cargo build --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}`"
uses: actions-rs/cargo@v1
uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1
with:
command: build
args: --release --features ${{ join(matrix.features, ',') }} --target ${{ matrix.target-triple }}
@@ -161,21 +140,8 @@ jobs:
# Upload artifact to Github Actions
- name: Upload artifact
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@27121b0bdffd731efa15d66772be8dc71245d074 # v2.2.4
with:
name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }}
path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }}
# End Upload artifact to Github Actions
## This is not used at the moment
## We could start using this when we can build static binaries
# Upload to github actions release
# - name: Release
# uses: Shopify/upload-to-release@1
# if: startsWith(github.ref, 'refs/tags/')
# with:
# name: vaultwarden-${{ matrix.target-triple }}${{ matrix.ext }}
# path: target/${{ matrix.target-triple }}/release/vaultwarden${{ matrix.ext }}
# repo-token: ${{ secrets.GITHUB_TOKEN }}
# End Upload to github actions release

.github/workflows/hadolint.yml (7)

@@ -2,11 +2,10 @@ name: Hadolint
on:
push:
# Ignore when there are only changes done to one of these paths
paths:
- "docker/**"
pull_request:
# Ignore when there are only changes done to one of these paths
paths:
- "docker/**"
@@ -17,7 +16,7 @@ jobs:
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f # v2.3.4
# End Checkout the repo
@@ -28,7 +27,7 @@ jobs:
sudo curl -L https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \
sudo chmod +x /usr/local/bin/hadolint
env:
HADOLINT_VERSION: 2.6.1
HADOLINT_VERSION: 2.7.0
# End Download hadolint
# Test Dockerfiles

.github/workflows/release.yml (119)

@@ -0,0 +1,119 @@
name: Release
on:
push:
paths:
- ".github/workflows/release.yml"
- "src/**"
- "migrations/**"
- "hooks/**"
- "docker/**"
- "Cargo.*"
- "build.rs"
- "diesel.toml"
- "rust-toolchain"
branches: # Only on paths above
- main
tags: # Always, regardless of paths above
- '*'
jobs:
# https://github.com/marketplace/actions/skip-duplicate-actions
# Some checks to determine if we need to continue with building a new docker.
# We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
skip_check:
runs-on: ubuntu-latest
if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- name: Skip Duplicates Actions
id: skip_check
uses: fkirc/skip-duplicate-actions@f75dd6564bb646f95277dc8c3b80612e46a4a1ea # v3.4.1
with:
cancel_others: 'true'
# Only run this when not creating a tag
if: ${{ startsWith(github.ref, 'refs/heads/') }}
docker-build:
runs-on: ubuntu-latest
needs: skip_check
# Start a local docker registry to be used to generate multi-arch images.
services:
registry:
image: registry:2
ports:
- 5000:5000
env:
DOCKER_BUILDKIT: 1 # Disabled for now, but we should look at this because it will speed up building!
# DOCKER_REPO/secrets.DOCKERHUB_REPO needs to be 'index.docker.io/<user>/<repo>'
DOCKER_REPO: ${{ secrets.DOCKERHUB_REPO }}
SOURCE_COMMIT: ${{ github.sha }}
SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
strategy:
matrix:
base_image: ["debian","alpine"]
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f # v2.3.4
with:
fetch-depth: 0
# Login to Docker Hub
- name: Login to Docker Hub
uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9 # v1.10.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
# Determine Docker Tag
- name: Init Variables
id: vars
shell: bash
run: |
# Check which main tag we are going to build determined by github.ref
if [[ "${{ github.ref }}" == refs/tags/* ]]; then
echo "set-output name=DOCKER_TAG::${GITHUB_REF#refs/*/}"
echo "::set-output name=DOCKER_TAG::${GITHUB_REF#refs/*/}"
elif [[ "${{ github.ref }}" == refs/heads/* ]]; then
echo "set-output name=DOCKER_TAG::testing"
echo "::set-output name=DOCKER_TAG::testing"
fi
# End Determine Docker Tag
- name: Build Debian based images
shell: bash
env:
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
run: |
./hooks/build
if: ${{ matrix.base_image == 'debian' }}
- name: Push Debian based images
shell: bash
env:
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
run: |
./hooks/push
if: ${{ matrix.base_image == 'debian' }}
- name: Build Alpine based images
shell: bash
env:
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
run: |
./hooks/build
if: ${{ matrix.base_image == 'alpine' }}
- name: Push Alpine based images
shell: bash
env:
DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
run: |
./hooks/push
if: ${{ matrix.base_image == 'alpine' }}

.pre-commit-config.yaml (1)

@@ -7,6 +7,7 @@ repos:
- id: check-json
- id: check-toml
- id: end-of-file-fixer
exclude: "(.*js$|.*css$)"
- id: check-case-conflict
- id: check-merge-conflict
- id: detect-private-key

Cargo.lock (1130)

File diff suppressed because it is too large

Cargo.toml (44)

@@ -2,7 +2,9 @@
name = "vaultwarden"
version = "1.0.0"
authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
edition = "2018"
edition = "2021"
rust-version = "1.60"
resolver = "2"
repository = "https://github.com/dani-garcia/vaultwarden"
readme = "README.md"
@@ -32,19 +34,19 @@ rocket = { version = "=0.5.0-dev", features = ["tls"], default-features = false
rocket_contrib = "=0.5.0-dev"
# HTTP client
reqwest = { version = "0.11.4", features = ["blocking", "json", "gzip", "brotli", "socks", "cookies"] }
reqwest = { version = "0.11.9", features = ["blocking", "json", "gzip", "brotli", "socks", "cookies", "trust-dns"] }
# Used for custom short lived cookie jar
cookie = "0.15.1"
cookie_store = "0.15.0"
bytes = "1.0.1"
cookie_store = "0.15.1"
bytes = "1.1.0"
url = "2.2.2"
# multipart/form-data support
multipart = { version = "0.18.0", features = ["server"], default-features = false }
# WebSockets library
ws = { version = "0.11.0", package = "parity-ws" }
ws = { version = "0.11.1", package = "parity-ws" }
# MessagePack library
rmpv = "1.0.0"
@@ -53,15 +55,15 @@ rmpv = "1.0.0"
chashmap = "2.2.2"
# A generic serialization/deserialization framework
serde = { version = "1.0.128", features = ["derive"] }
serde_json = "1.0.66"
serde = { version = "1.0.136", features = ["derive"] }
serde_json = "1.0.78"
# Logging
log = "0.4.14"
fern = { version = "0.6.0", features = ["syslog-4"] }
# A safe, extensible ORM and Query builder
diesel = { version = "1.4.7", features = [ "chrono", "r2d2"] }
diesel = { version = "1.4.8", features = [ "chrono", "r2d2"] }
diesel_migrations = "1.4.0"
# Bundled SQLite
@@ -76,7 +78,7 @@ uuid = { version = "0.8.2", features = ["v4"] }
# Date and time libraries
chrono = { version = "0.4.19", features = ["serde"] }
chrono-tz = "0.5.3"
chrono-tz = "0.6.1"
time = "0.2.27"
# Job scheduler
@@ -93,7 +95,7 @@ jsonwebtoken = "7.2.0"
# U2F library
u2f = "0.2.0"
webauthn-rs = "=0.3.0-alpha.10"
webauthn-rs = "0.3.2"
# Yubico Library
yubico = { version = "0.10.0", features = ["online-tokio"], default-features = false }
@@ -102,27 +104,27 @@ yubico = { version = "0.10.0", features = ["online-tokio"], default-features = f
dotenv = { version = "0.15.0", default-features = false }
# Lazy initialization
once_cell = "1.8.0"
once_cell = "1.9.0"
# Numerical libraries
num-traits = "0.2.14"
num-derive = "0.3.3"
# Email libraries
tracing = { version = "0.1.26", features = ["log"] } # Needed to have lettre trace logging used when SMTP_DEBUG is enabled.
lettre = { version = "0.10.0-rc.3", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
tracing = { version = "0.1.29", features = ["log"] } # Needed to have lettre trace logging used when SMTP_DEBUG is enabled.
lettre = { version = "0.10.0-rc.4", features = ["smtp-transport", "builder", "serde", "native-tls", "hostname", "tracing"], default-features = false }
# Template library
handlebars = { version = "4.1.2", features = ["dir_source"] }
handlebars = { version = "4.2.1", features = ["dir_source"] }
# For favicon extraction from main website
html5ever = "0.25.1"
markup5ever_rcdom = "0.1.0"
regex = { version = "1.5.4", features = ["std", "perf"], default-features = false }
data-url = "0.1.0"
regex = { version = "1.5.4", features = ["std", "perf", "unicode-perl"], default-features = false }
data-url = "0.1.1"
# Used by U2F, JWT and Postgres
openssl = "0.10.36"
openssl = "0.10.38"
# URL encoding library
percent-encoding = "2.1.0"
@@ -133,20 +135,18 @@ idna = "0.2.3"
pico-args = "0.4.2"
# Logging panics to logfile instead stderr only
backtrace = "0.3.61"
backtrace = "0.3.64"
# Macro ident concatenation
paste = "1.0.5"
openidconnect = "2.0.1"
paste = "1.0.6"
governor = "0.4.1"
[patch.crates-io]
# Use newest ring
rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
rocket_contrib = { git = 'https://github.com/SergioBenitez/Rocket', rev = '263e39b5b429de1913ce7e3036575a7b4d88b6d7' }
# For favicon extraction from main website
data-url = { git = 'https://github.com/servo/rust-url', package="data-url", rev = 'eb7330b5296c0d43816d1346211b74182bb4ae37' }
# The maintainer of the `job_scheduler` crate doesn't seem to have responded
# to any issues or PRs for almost a year (as of April 2021). This hopefully
# temporary fork updates Cargo.toml to use more up-to-date dependencies.

README.md (2)

@@ -1,4 +1,4 @@
### Alternative implementation of the Bitwarden server API written in Rust and compatible with [upstream Bitwarden clients](https://bitwarden.com/#download)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
### Alternative implementation of the Bitwarden server API written in Rust and compatible with [upstream Bitwarden clients](https://bitwarden.com/download/)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal.
📢 Note: This project was known as Bitwarden_RS and has been renamed to separate itself from the official Bitwarden server in the hopes of avoiding confusion and trademark/branding issues. Please see [#1642](https://github.com/dani-garcia/vaultwarden/discussions/1642) for more explanation.

build.rs (41)

@@ -15,11 +15,14 @@ fn main() {
"You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
);
if let Ok(version) = env::var("BWRS_VERSION") {
println!("cargo:rustc-env=BWRS_VERSION={}", version);
// Support $BWRS_VERSION for legacy compatibility, but default to $VW_VERSION.
// If neither exist, read from git.
let maybe_vaultwarden_version =
env::var("VW_VERSION").or_else(|_| env::var("BWRS_VERSION")).or_else(|_| version_from_git_info());
if let Ok(version) = maybe_vaultwarden_version {
println!("cargo:rustc-env=VW_VERSION={}", version);
println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
} else {
read_git_info().ok();
}
}
@@ -33,7 +36,13 @@ fn run(args: &[&str]) -> Result<String, std::io::Error> {
}
/// This method reads info from Git, namely tags, branch, and revision
fn read_git_info() -> Result<(), std::io::Error> {
/// To access these values, use:
/// - env!("GIT_EXACT_TAG")
/// - env!("GIT_LAST_TAG")
/// - env!("GIT_BRANCH")
/// - env!("GIT_REV")
/// - env!("VW_VERSION")
fn version_from_git_info() -> Result<String, std::io::Error> {
// The exact tag for the current commit, can be empty when
// the current commit doesn't have an associated tag
let exact_tag = run(&["git", "describe", "--abbrev=0", "--tags", "--exact-match"]).ok();
@@ -56,23 +65,11 @@ fn read_git_info() -> Result<(), std::io::Error> {
println!("cargo:rustc-env=GIT_REV={}", rev_short);
// Combined version
let version = if let Some(exact) = exact_tag {
exact
if let Some(exact) = exact_tag {
Ok(exact)
} else if &branch != "main" && &branch != "master" {
format!("{}-{} ({})", last_tag, rev_short, branch)
Ok(format!("{}-{} ({})", last_tag, rev_short, branch))
} else {
format!("{}-{}", last_tag, rev_short)
};
println!("cargo:rustc-env=BWRS_VERSION={}", version);
println!("cargo:rustc-env=CARGO_PKG_VERSION={}", version);
// To access these values, use:
// env!("GIT_EXACT_TAG")
// env!("GIT_LAST_TAG")
// env!("GIT_BRANCH")
// env!("GIT_REV")
// env!("BWRS_VERSION")
Ok(())
Ok(format!("{}-{}", last_tag, rev_short))
}
}

docker/Dockerfile.j2 (151)

@@ -1,16 +1,26 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
{% set build_stage_base_image = "rust:1.54" %}
{% set build_stage_base_image = "rust:1.58-buster" %}
{% if "alpine" in target_file %}
{% if "amd64" in target_file %}
{% set build_stage_base_image = "clux/muslrust:nightly-2021-08-22" %}
{% set runtime_stage_base_image = "alpine:3.14" %}
{% set build_stage_base_image = "blackdex/rust-musl:x86_64-musl-nightly-2022-01-23" %}
{% set runtime_stage_base_image = "alpine:3.15" %}
{% set package_arch_target = "x86_64-unknown-linux-musl" %}
{% elif "armv7" in target_file %}
{% set build_stage_base_image = "messense/rust-musl-cross:armv7-musleabihf" %}
{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.14" %}
{% set build_stage_base_image = "blackdex/rust-musl:armv7-musleabihf-nightly-2022-01-23" %}
{% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.15" %}
{% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
{% elif "armv6" in target_file %}
{% set build_stage_base_image = "blackdex/rust-musl:arm-musleabi-nightly-2022-01-23" %}
{% set runtime_stage_base_image = "balenalib/rpi-alpine:3.15" %}
{% set package_arch_target = "arm-unknown-linux-musleabi" %}
{% elif "arm64" in target_file %}
{% set build_stage_base_image = "blackdex/rust-musl:aarch64-musl-nightly-2022-01-23" %}
{% set runtime_stage_base_image = "balenalib/aarch64-alpine:3.15" %}
{% set package_arch_target = "aarch64-unknown-linux-musl" %}
{% endif %}
{% elif "amd64" in target_file %}
{% set runtime_stage_base_image = "debian:buster-slim" %}
@@ -40,12 +50,17 @@
{% else %}
{% set package_arch_target_param = "" %}
{% endif %}
{% if "buildx" in target_file %}
{% set mount_rust_cache = "--mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry " %}
{% else %}
{% set mount_rust_cache = "" %}
{% endif %}
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
{% set vault_version = "2.21.1" %}
{% set vault_image_digest = "sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5" %}
{% set vault_version = "2.25.1b" %}
{% set vault_image_digest = "sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba" %}
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
@@ -68,66 +83,69 @@ FROM vaultwarden/web-vault@{{ vault_image_digest }} as vault
########################## BUILD IMAGE ##########################
FROM {{ build_stage_base_image }} as build
{% if "alpine" in target_file %}
{% if "amd64" in target_file %}
# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
ARG DB=sqlite,postgresql
{% set features = "sqlite,postgresql" %}
{% else %}
# Alpine-based ARM (musl) only supports sqlite during compile time.
# We now also need to add vendored_openssl, because the current base image we use to build has OpenSSL removed.
ARG DB=sqlite,vendored_openssl
{% set features = "sqlite" %}
{% endif %}
{% else %}
# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
{% set features = "sqlite,mysql,postgresql" %}
{% endif %}
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
{# {% if "alpine" not in target_file and "buildx" in target_file %}
# Debian based Buildx builds can use some special apt caching to speedup building.
# By default Debian based images have some rules to keep docker builds clean, we need to remove this.
# See: https://hub.docker.com/r/docker/dockerfile
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
{% endif %} #}
# Don't download rust docs
RUN rustup set profile minimal
# Create CARGO_HOME folder and don't download rust docs
RUN {{ mount_rust_cache -}} mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
{% if "alpine" in target_file %}
ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'
{% if "armv7" in target_file %}
{#- https://gcc.gnu.org/onlinedocs/gcc/ARM-Options.html -#}
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
{% endif %}
{% elif "arm" in target_file %}
#
# Install required build libs for {{ package_arch_name }} architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture {{ package_arch_name }} \
# hadolint ignore=DL3059
RUN dpkg --add-architecture {{ package_arch_name }} \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev{{ package_arch_prefix }} \
libc6-dev{{ package_arch_prefix }} \
libpq5{{ package_arch_prefix }} \
libpq-dev \
libpq-dev{{ package_arch_prefix }} \
libmariadb3{{ package_arch_prefix }} \
libmariadb-dev{{ package_arch_prefix }} \
libmariadb-dev-compat{{ package_arch_prefix }} \
gcc-{{ package_cross_compiler }} \
&& mkdir -p ~/.cargo \
&& echo '[target.{{ package_arch_target }}]' >> ~/.cargo/config \
&& echo 'linker = "{{ package_cross_compiler }}-gcc"' >> ~/.cargo/config \
&& echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> ~/.cargo/config
#
# Make sure cargo has the right target config
&& echo '[target.{{ package_arch_target }}]' >> "${CARGO_HOME}/config" \
&& echo 'linker = "{{ package_cross_compiler }}-gcc"' >> "${CARGO_HOME}/config" \
&& echo 'rustflags = ["-L/usr/lib/{{ package_cross_compiler }}"]' >> "${CARGO_HOME}/config"
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
{% endif -%}
# Set arm specific environment values
ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc" \
CROSS_COMPILE="1" \
OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}" \
OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
{% if "amd64" in target_file and "alpine" not in target_file %}
{% elif "amd64" in target_file %}
# Install DB packages
RUN apt-get update && apt-get install -y \
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
libmariadb-dev{{ package_arch_prefix }} \
libpq-dev{{ package_arch_prefix }} \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
{% endif %}
@@ -140,37 +158,17 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
{% if "alpine" not in target_file %}
{% if "arm" in target_file %}
# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the {{ package_arch_prefix }} version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 \
&& apt-get download libmariadb-dev-compat:amd64 \
&& dpkg --force-all -i ./libmariadb-dev-compat*.deb \
&& rm -rvf ./libmariadb-dev-compat*.deb \
# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5{{ package_arch_prefix }} package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
&& ln -sfnr /usr/lib/{{ package_cross_compiler }}/libpq.so.5 /usr/lib/{{ package_cross_compiler }}/libpq.so
ENV CC_{{ package_arch_target | replace("-", "_") }}="/usr/bin/{{ package_cross_compiler }}-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/{{ package_cross_compiler }}"
ENV OPENSSL_LIB_DIR="/usr/lib/{{ package_cross_compiler }}"
{% endif -%}
{% endif %}
{% if package_arch_target is defined %}
RUN rustup target add {{ package_arch_target }}
RUN {{ mount_rust_cache -}} rustup target add {{ package_arch_target }}
{% endif %}
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release{{ package_arch_target_param }} \
RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }} \
&& find . -not -path "./target*" -delete
# Copies the complete project
@@ -182,7 +180,8 @@ RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
# hadolint ignore=DL3059
RUN {{ mount_rust_cache -}} cargo build --features ${DB} --release{{ package_arch_target_param }}
{% if "alpine" in target_file %}
{% if "armv7" in target_file %}
# hadolint ignore=DL3059
@@ -195,13 +194,14 @@ RUN musl-strip target/{{ package_arch_target }}/release/vaultwarden
# because we already have a binary built
FROM {{ runtime_stage_base_image }}
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
{% if "alpine" in runtime_stage_base_image %}
ENV SSL_CERT_DIR=/etc/ssl/certs
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
{%- if "alpine" in runtime_stage_base_image %} \
SSL_CERT_DIR=/etc/ssl/certs
{% endif %}
{% if "amd64" not in target_file %}
# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -215,12 +215,6 @@ RUN mkdir /data \
tzdata \
curl \
dumb-init \
{% if "mysql" in features %}
mariadb-connector-c \
{% endif %}
{% if "postgresql" in features %}
postgresql-libs \
{% endif %}
ca-certificates
{% else %}
&& apt-get update && apt-get install -y \
@@ -231,6 +225,7 @@ RUN mkdir /data \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
{% endif %}

docker/Makefile (6)

@@ -7,3 +7,9 @@ all: $(OBJECTS)
%/Dockerfile.alpine: Dockerfile.j2 render_template
./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
%/Dockerfile.buildx: Dockerfile.j2 render_template
./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"
%/Dockerfile.buildx.alpine: Dockerfile.j2 render_template
./render_template "$<" "{\"target_file\":\"$@\"}" > "$@"

docker/amd64/Dockerfile (47)

@@ -1,3 +1,5 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
@@ -14,33 +16,41 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.21.1
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
# [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
# [vaultwarden/web-vault:v2.21.1]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
########################## BUILD IMAGE ##########################
FROM rust:1.54 as build
FROM rust:1.58-buster as build
# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
# Don't download rust docs
RUN rustup set profile minimal
# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
# Install DB packages
RUN apt-get update && apt-get install -y \
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
libmariadb-dev \
libpq-dev \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Creates a dummy project used to grab dependencies
@@ -53,6 +63,9 @@ COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -68,6 +81,7 @@ RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
@@ -75,9 +89,9 @@ RUN cargo build --features ${DB} --release
# because we already have a binary built
FROM debian:buster-slim
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
# Create data folder and Install needed libraries
@@ -90,6 +104,7 @@ RUN mkdir /data \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

docker/amd64/Dockerfile.alpine (49)

@@ -1,3 +1,5 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
@@ -14,29 +16,34 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.21.1
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
# [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
# [vaultwarden/web-vault:v2.21.1]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
########################## BUILD IMAGE ##########################
FROM clux/muslrust:nightly-2021-08-22 as build
FROM blackdex/rust-musl:x86_64-musl-nightly-2022-01-23 as build
# Alpine-based AMD64 (musl) does not support mysql/mariadb during compile time.
ARG DB=sqlite,postgresql
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
# Don't download rust docs
RUN rustup set profile minimal
# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'
# Creates a dummy project used to grab dependencies
@@ -50,6 +57,9 @@ COPY ./build.rs ./build.rs
RUN rustup target add x86_64-unknown-linux-musl
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -65,17 +75,19 @@ RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.14
FROM alpine:3.15
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10 \
SSL_CERT_DIR=/etc/ssl/certs
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs
# Create data folder and Install needed libraries
@@ -85,7 +97,6 @@ RUN mkdir /data \
tzdata \
curl \
dumb-init \
postgresql-libs \
ca-certificates

docker/amd64/Dockerfile.buildx (129)

@@ -0,0 +1,129 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
########################## BUILD IMAGE ##########################
FROM rust:1.58-buster as build
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
# Install DB packages
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
libmariadb-dev \
libpq-dev \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release \
&& find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
# Create data folder and Install needed libraries
RUN mkdir /data \
&& apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/release/vaultwarden .
COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

docker/amd64/Dockerfile.buildx.alpine (121)

@@ -0,0 +1,121 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:x86_64-musl-nightly-2022-01-23 as build
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
ENV RUSTFLAGS='-C link-arg=-s'
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add x86_64-unknown-linux-musl
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
&& find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.15
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10 \
SSL_CERT_DIR=/etc/ssl/certs
# Create data folder and Install needed libraries
RUN mkdir /data \
&& apk add --no-cache \
openssl \
tzdata \
curl \
dumb-init \
ca-certificates
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .
COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

docker/arm64/Dockerfile (89)

@@ -1,3 +1,5 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
@@ -14,50 +16,61 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.21.1
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
# [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
# [vaultwarden/web-vault:v2.21.1]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
########################## BUILD IMAGE ##########################
FROM rust:1.54 as build
FROM rust:1.58-buster as build
# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
# Don't download rust docs
RUN rustup set profile minimal
# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
#
# Install required build libs for arm64 architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture arm64 \
# hadolint ignore=DL3059
RUN dpkg --add-architecture arm64 \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:arm64 \
libc6-dev:arm64 \
libpq5:arm64 \
libpq-dev \
libpq-dev:arm64 \
libmariadb3:arm64 \
libmariadb-dev:arm64 \
libmariadb-dev-compat:arm64 \
gcc-aarch64-linux-gnu \
&& mkdir -p ~/.cargo \
&& echo '[target.aarch64-unknown-linux-gnu]' >> ~/.cargo/config \
&& echo 'linker = "aarch64-linux-gnu-gcc"' >> ~/.cargo/config \
&& echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> ~/.cargo/config
#
# Make sure cargo has the right target config
&& echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
&& echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
&& echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"
# Set arm specific environment values
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
CROSS_COMPILE="1" \
OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -68,27 +81,11 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :arm64 version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 \
&& apt-get download libmariadb-dev-compat:amd64 \
&& dpkg --force-all -i ./libmariadb-dev-compat*.deb \
&& rm -rvf ./libmariadb-dev-compat*.deb \
# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:arm64 package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
&& ln -sfnr /usr/lib/aarch64-linux-gnu/libpq.so.5 /usr/lib/aarch64-linux-gnu/libpq.so
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu"
ENV OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
RUN rustup target add aarch64-unknown-linux-gnu
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -104,6 +101,7 @@ RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
######################## RUNTIME IMAGE ########################
@@ -111,9 +109,9 @@ RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
# because we already have a binary built
FROM balenalib/aarch64-debian:buster
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -128,6 +126,7 @@ RUN mkdir /data \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# hadolint ignore=DL3059

125
docker/arm64/Dockerfile.alpine

@@ -0,0 +1,125 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:aarch64-musl-nightly-2022-01-23 as build
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
ENV RUSTFLAGS='-C link-arg=-s'
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
RUN rustup target add aarch64-unknown-linux-musl
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
&& find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-alpine:3.15
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10 \
SSL_CERT_DIR=/etc/ssl/certs
# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
# Create data folder and install needed libraries
RUN mkdir /data \
&& apk add --no-cache \
openssl \
tzdata \
curl \
dumb-init \
ca-certificates
# hadolint ignore=DL3059
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .
COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

153
docker/arm64/Dockerfile.buildx

@@ -0,0 +1,153 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
########################## BUILD IMAGE ##########################
FROM rust:1.58-buster as build
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
#
# Install required build libs for arm64 architecture.
# hadolint ignore=DL3059
RUN dpkg --add-architecture arm64 \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:arm64 \
libc6-dev:arm64 \
libpq5:arm64 \
libpq-dev:arm64 \
libmariadb3:arm64 \
libmariadb-dev:arm64 \
libmariadb-dev-compat:arm64 \
gcc-aarch64-linux-gnu \
#
# Make sure cargo has the right target config
&& echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
&& echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
&& echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"
# Set arm specific environment values
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
CROSS_COMPILE="1" \
OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-gnu
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
&& find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:buster
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
# Create data folder and install needed libraries
RUN mkdir /data \
&& apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# hadolint ignore=DL3059
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .
COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

125
docker/arm64/Dockerfile.buildx.alpine

@@ -0,0 +1,125 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:aarch64-musl-nightly-2022-01-23 as build
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
ENV RUSTFLAGS='-C link-arg=-s'
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-musl
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
&& find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-alpine:3.15
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10 \
SSL_CERT_DIR=/etc/ssl/certs
# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
# Create data folder and install needed libraries
RUN mkdir /data \
&& apk add --no-cache \
openssl \
tzdata \
curl \
dumb-init \
ca-certificates
# hadolint ignore=DL3059
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .
COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

89
docker/armv6/Dockerfile

@@ -1,3 +1,5 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
@@ -14,50 +16,61 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.21.1
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
# [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
# [vaultwarden/web-vault:v2.21.1]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
########################## BUILD IMAGE ##########################
FROM rust:1.54 as build
FROM rust:1.58-buster as build
# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
# Don't download rust docs
RUN rustup set profile minimal
# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
#
# Install required build libs for armel architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armel \
# hadolint ignore=DL3059
RUN dpkg --add-architecture armel \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armel \
libc6-dev:armel \
libpq5:armel \
libpq-dev \
libpq-dev:armel \
libmariadb3:armel \
libmariadb-dev:armel \
libmariadb-dev-compat:armel \
gcc-arm-linux-gnueabi \
&& mkdir -p ~/.cargo \
&& echo '[target.arm-unknown-linux-gnueabi]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> ~/.cargo/config \
&& echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> ~/.cargo/config
#
# Make sure cargo has the right target config
&& echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
&& echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"
# Set arm specific environment values
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
CROSS_COMPILE="1" \
OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -68,27 +81,11 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armel version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 \
&& apt-get download libmariadb-dev-compat:amd64 \
&& dpkg --force-all -i ./libmariadb-dev-compat*.deb \
&& rm -rvf ./libmariadb-dev-compat*.deb \
# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:armel package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
&& ln -sfnr /usr/lib/arm-linux-gnueabi/libpq.so.5 /usr/lib/arm-linux-gnueabi/libpq.so
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
RUN rustup target add arm-unknown-linux-gnueabi
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -104,6 +101,7 @@ RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
######################## RUNTIME IMAGE ########################
@@ -111,9 +109,9 @@ RUN cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
# because we already have a binary built
FROM balenalib/rpi-debian:buster
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -128,6 +126,7 @@ RUN mkdir /data \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# hadolint ignore=DL3059

125
docker/armv6/Dockerfile.alpine

@@ -0,0 +1,125 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:arm-musleabi-nightly-2022-01-23 as build
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
ENV RUSTFLAGS='-C link-arg=-s'
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
RUN rustup target add arm-unknown-linux-musleabi
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
&& find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-alpine:3.15
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10 \
SSL_CERT_DIR=/etc/ssl/certs
# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
# Create data folder and install needed libraries
RUN mkdir /data \
&& apk add --no-cache \
openssl \
tzdata \
curl \
dumb-init \
ca-certificates
# hadolint ignore=DL3059
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .
COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

153
docker/armv6/Dockerfile.buildx

@@ -0,0 +1,153 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
########################## BUILD IMAGE ##########################
FROM rust:1.58-buster as build
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
#
# Install required build libs for armel architecture.
# hadolint ignore=DL3059
RUN dpkg --add-architecture armel \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armel \
libc6-dev:armel \
libpq5:armel \
libpq-dev:armel \
libmariadb3:armel \
libmariadb-dev:armel \
libmariadb-dev-compat:armel \
gcc-arm-linux-gnueabi \
#
# Make sure cargo has the right target config
&& echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \
&& echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config"
# Set arm specific environment values
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \
CROSS_COMPILE="1" \
OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \
OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi"
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-gnueabi
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \
&& find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-debian:buster
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
# Create data folder and install needed libraries
RUN mkdir /data \
&& apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# hadolint ignore=DL3059
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden .
COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

125
docker/armv6/Dockerfile.buildx.alpine

@@ -0,0 +1,125 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:arm-musleabi-nightly-2022-01-23 as build
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
ENV RUSTFLAGS='-C link-arg=-s'
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-musleabi
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \
&& find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/rpi-alpine:3.15
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10 \
SSL_CERT_DIR=/etc/ssl/certs
# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
# Create data folder and install needed libraries
RUN mkdir /data \
&& apk add --no-cache \
openssl \
tzdata \
curl \
dumb-init \
ca-certificates
# hadolint ignore=DL3059
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden .
COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

89
docker/armv7/Dockerfile

@@ -1,3 +1,5 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
@@ -14,50 +16,61 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.21.1
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
# [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
# [vaultwarden/web-vault:v2.21.1]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
########################## BUILD IMAGE ##########################
FROM rust:1.54 as build
FROM rust:1.58-buster as build
# Debian-based builds support multidb
ARG DB=sqlite,mysql,postgresql
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
# Don't download rust docs
RUN rustup set profile minimal
# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
#
# Install required build libs for armhf architecture.
# To compile both mysql and postgresql we need some extra packages for both host arch and target arch
RUN sed 's/^deb/deb-src/' /etc/apt/sources.list > \
/etc/apt/sources.list.d/deb-src.list \
&& dpkg --add-architecture armhf \
# hadolint ignore=DL3059
RUN dpkg --add-architecture armhf \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armhf \
libc6-dev:armhf \
libpq5:armhf \
libpq-dev \
libpq-dev:armhf \
libmariadb3:armhf \
libmariadb-dev:armhf \
libmariadb-dev-compat:armhf \
gcc-arm-linux-gnueabihf \
&& mkdir -p ~/.cargo \
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> ~/.cargo/config \
&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> ~/.cargo/config \
&& echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> ~/.cargo/config
#
# Make sure cargo has the right target config
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
&& echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"
# Set arm specific environment values
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
CROSS_COMPILE="1" \
OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
ENV CARGO_HOME "/root/.cargo"
ENV USER "root"
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
@@ -68,27 +81,11 @@ COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
# NOTE: This should be the last apt-get/dpkg for this stage, since after this it will fail because of broken dependencies.
# For Diesel-RS migrations_macros to compile with MySQL/MariaDB we need to do some magic.
# We at least need libmariadb3:amd64 installed for the x86_64 version of libmariadb.so (client)
# We also need the libmariadb-dev-compat:amd64 but it can not be installed together with the :armhf version.
# What we can do is a force install, because nothing important is overlapping each other.
RUN apt-get install -y --no-install-recommends libmariadb3:amd64 \
&& apt-get download libmariadb-dev-compat:amd64 \
&& dpkg --force-all -i ./libmariadb-dev-compat*.deb \
&& rm -rvf ./libmariadb-dev-compat*.deb \
# For Diesel-RS migrations_macros to compile with PostgreSQL we need to do some magic.
# The libpq5:armhf package seems to not provide a symlink to libpq.so.5 with the name libpq.so.
# This is only provided by the libpq-dev package which can't be installed for both arch at the same time.
# Without this specific file the ld command will fail and compilation fails with it.
&& ln -sfnr /usr/lib/arm-linux-gnueabihf/libpq.so.5 /usr/lib/arm-linux-gnueabihf/libpq.so
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc"
ENV CROSS_COMPILE="1"
ENV OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
RUN rustup target add armv7-unknown-linux-gnueabihf
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -104,6 +101,7 @@ RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
######################## RUNTIME IMAGE ########################
@@ -111,9 +109,9 @@ RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
@@ -128,6 +126,7 @@ RUN mkdir /data \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# hadolint ignore=DL3059

49
docker/armv7/Dockerfile.alpine

@@ -1,3 +1,5 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
@@ -14,30 +16,34 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.21.1
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.21.1
# [vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5]
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5
# [vaultwarden/web-vault:v2.21.1]
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:29a4fa7bf3790fff9d908b02ac5a154913491f4bf30c95b87b06d8cf1c5516b5 as vault
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
########################## BUILD IMAGE ##########################
FROM messense/rust-musl-cross:armv7-musleabihf as build
FROM blackdex/rust-musl:armv7-musleabihf-nightly-2022-01-23 as build
# Alpine-based ARM (musl) only supports sqlite during compile time.
# We now also need to add vendored_openssl, because the current base image we use to build has OpenSSL removed.
ARG DB=sqlite,vendored_openssl
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive LANG=C.UTF-8 TZ=UTC TERM=xterm-256color
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
# Don't download rust docs
RUN rustup set profile minimal
# Create CARGO_HOME folder and don't download rust docs
RUN mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
ENV USER "root"
ENV RUSTFLAGS='-C link-arg=-s'
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
@@ -52,6 +58,9 @@ COPY ./build.rs ./build.rs
RUN rustup target add armv7-unknown-linux-musleabihf
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
@@ -67,6 +76,7 @@ RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
# hadolint ignore=DL3059
RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden
@@ -74,12 +84,13 @@ RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.14
FROM balenalib/armv7hf-alpine:3.15
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10 \
SSL_CERT_DIR=/etc/ssl/certs
ENV ROCKET_ENV "staging"
ENV ROCKET_PORT=80
ENV ROCKET_WORKERS=10
ENV SSL_CERT_DIR=/etc/ssl/certs
# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

153
docker/armv7/Dockerfile.buildx

@@ -0,0 +1,153 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
########################## BUILD IMAGE ##########################
FROM rust:1.58-buster as build
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
#
# Install required build libs for armhf architecture.
# hadolint ignore=DL3059
RUN dpkg --add-architecture armhf \
&& apt-get update \
&& apt-get install -y \
--no-install-recommends \
libssl-dev:armhf \
libc6-dev:armhf \
libpq5:armhf \
libpq-dev:armhf \
libmariadb3:armhf \
libmariadb-dev:armhf \
libmariadb-dev-compat:armhf \
gcc-arm-linux-gnueabihf \
#
# Make sure cargo has the right target config
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \
&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \
&& echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config"
# Set arm specific environment values
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \
CROSS_COMPILE="1" \
OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \
OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-gnueabihf
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \
&& find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-debian:buster
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10
# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
# Create data folder and install needed libraries
RUN mkdir /data \
&& apt-get update && apt-get install -y \
--no-install-recommends \
openssl \
ca-certificates \
curl \
dumb-init \
libmariadb-dev-compat \
libpq5 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# hadolint ignore=DL3059
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden .
COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

128
docker/armv7/Dockerfile.buildx.alpine

@@ -0,0 +1,128 @@
# syntax=docker/dockerfile:1
# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.
# Using multistage build:
# https://docs.docker.com/develop/develop-images/multistage-build/
# https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull vaultwarden/web-vault:v2.25.1b
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
# [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault
########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:armv7-musleabihf-nightly-2022-01-23 as build
# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
LANG=C.UTF-8 \
TZ=UTC \
TERM=xterm-256color \
CARGO_HOME="/root/.cargo" \
USER="root"
# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
&& rustup set profile minimal
ENV RUSTFLAGS='-C link-arg=-s'
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16"
# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app
# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-musleabihf
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \
&& find . -not -path "./target*" -delete
# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .
# Make sure that we actually build the project
RUN touch src/main.rs
# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf
# hadolint ignore=DL3059
RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden
######################## RUNTIME IMAGE ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/armv7hf-alpine:3.15
ENV ROCKET_ENV="staging" \
ROCKET_PORT=80 \
ROCKET_WORKERS=10 \
SSL_CERT_DIR=/etc/ssl/certs
# hadolint ignore=DL3059
RUN [ "cross-build-start" ]
# Create data folder and install needed libraries
RUN mkdir /data \
&& apk add --no-cache \
openssl \
tzdata \
curl \
dumb-init \
ca-certificates
# hadolint ignore=DL3059
RUN [ "cross-build-end" ]
VOLUME /data
EXPOSE 80
EXPOSE 3012
# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden .
COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]

5
hooks/arches.sh

@@ -7,10 +7,5 @@ arches=(
)
if [[ "${DOCKER_TAG}" == *alpine ]]; then
# The Alpine image build currently only works for certain arches.
distro_suffix=.alpine
arches=(
amd64
armv7
)
fi

7
hooks/build

@ -34,12 +34,17 @@ for label in "${LABELS[@]}"; do
LABEL_ARGS+=(--label "${label}")
done
# Check if DOCKER_BUILDKIT is set; if so, use Dockerfile.buildx as the template
if [[ -n "${DOCKER_BUILDKIT}" ]]; then
buildx_suffix=.buildx
fi
set -ex
for arch in "${arches[@]}"; do
docker build \
"${LABEL_ARGS[@]}" \
-t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
-f docker/${arch}/Dockerfile${distro_suffix} \
-f docker/${arch}/Dockerfile${buildx_suffix}${distro_suffix} \
.
done

15
hooks/push

@ -10,7 +10,7 @@ join() { local IFS="$1"; shift; echo "$*"; }
set -ex
echo ">>> Starting local Docker registry..."
echo ">>> Starting local Docker registry when needed..."
# Docker Buildx's `docker-container` driver is needed for multi-platform
# builds, but it can't access existing images on the Docker host (like the
@ -25,7 +25,13 @@ echo ">>> Starting local Docker registry..."
# Use host networking so the buildx container can access the registry via
# localhost.
#
docker run -d --name registry --network host registry:2 # defaults to port 5000
# First check whether a registry container is already running; if so, skip starting a new one.
# This situation only arises locally or when running via GitHub Actions
#
if ! timeout 5 bash -c 'cat < /dev/null > /dev/tcp/localhost/5000'; then
# defaults to port 5000
docker run -d --name registry --network host registry:2
fi
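
The bash /dev/tcp probe above simply checks whether anything is listening on localhost:5000 within five seconds. For readers unfamiliar with the idiom, a minimal Rust equivalent of the same check (illustrative only, not part of the commit):

use std::net::{SocketAddr, TcpStream};
use std::time::Duration;

// Returns true if something accepts a TCP connection on localhost:5000,
// mirroring `timeout 5 bash -c 'cat < /dev/tcp/localhost/5000'`.
fn registry_running() -> bool {
    let addr: SocketAddr = "127.0.0.1:5000".parse().expect("valid socket address");
    TcpStream::connect_timeout(&addr, Duration::from_secs(5)).is_ok()
}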
# Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
# Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
@ -49,7 +55,12 @@ echo ">>> Setting up Docker Buildx..."
#
# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
#
# Check whether a builder already exists; if so, skip creation and reuse it.
# This situation only arises locally or when running via GitHub Actions
#
if ! docker buildx inspect builder > /dev/null 2>&1 ; then
docker buildx create --name builder --use --driver-opt network=host
fi
echo ">>> Running Docker Buildx..."

1
migrations/mysql/2021-08-30-193501_create_emergency_access/down.sql

@ -0,0 +1 @@
DROP TABLE emergency_access;

14
migrations/mysql/2021-08-30-193501_create_emergency_access/up.sql

@ -0,0 +1,14 @@
CREATE TABLE emergency_access (
uuid CHAR(36) NOT NULL PRIMARY KEY,
grantor_uuid CHAR(36) REFERENCES users (uuid),
grantee_uuid CHAR(36) REFERENCES users (uuid),
email VARCHAR(255),
key_encrypted TEXT,
atype INTEGER NOT NULL,
status INTEGER NOT NULL,
wait_time_days INTEGER NOT NULL,
recovery_initiated_at DATETIME,
last_notification_at DATETIME,
updated_at DATETIME NOT NULL,
created_at DATETIME NOT NULL
);

1
migrations/mysql/2021-10-24-164321_add_2fa_incomplete/down.sql

@ -0,0 +1 @@
DROP TABLE twofactor_incomplete;

9
migrations/mysql/2021-10-24-164321_add_2fa_incomplete/up.sql

@ -0,0 +1,9 @@
CREATE TABLE twofactor_incomplete (
user_uuid CHAR(36) NOT NULL REFERENCES users(uuid),
device_uuid CHAR(36) NOT NULL,
device_name TEXT NOT NULL,
login_time DATETIME NOT NULL,
ip_address TEXT NOT NULL,
PRIMARY KEY (user_uuid, device_uuid)
);

0
migrations/mysql/2022-01-17-234911_add_api_key/down.sql

2
migrations/mysql/2022-01-17-234911_add_api_key/up.sql

@ -0,0 +1,2 @@
ALTER TABLE users
ADD COLUMN api_key VARCHAR(255);

1
migrations/postgresql/2021-08-30-193501_create_emergency_access/down.sql

@ -0,0 +1 @@
DROP TABLE emergency_access;

14
migrations/postgresql/2021-08-30-193501_create_emergency_access/up.sql

@ -0,0 +1,14 @@
CREATE TABLE emergency_access (
uuid CHAR(36) NOT NULL PRIMARY KEY,
grantor_uuid CHAR(36) REFERENCES users (uuid),
grantee_uuid CHAR(36) REFERENCES users (uuid),
email VARCHAR(255),
key_encrypted TEXT,
atype INTEGER NOT NULL,
status INTEGER NOT NULL,
wait_time_days INTEGER NOT NULL,
recovery_initiated_at TIMESTAMP,
last_notification_at TIMESTAMP,
updated_at TIMESTAMP NOT NULL,
created_at TIMESTAMP NOT NULL
);

1
migrations/postgresql/2021-10-24-164321_add_2fa_incomplete/down.sql

@ -0,0 +1 @@
DROP TABLE twofactor_incomplete;

9
migrations/postgresql/2021-10-24-164321_add_2fa_incomplete/up.sql

@ -0,0 +1,9 @@
CREATE TABLE twofactor_incomplete (
user_uuid VARCHAR(40) NOT NULL REFERENCES users(uuid),
device_uuid VARCHAR(40) NOT NULL,
device_name TEXT NOT NULL,
login_time TIMESTAMP NOT NULL,
ip_address TEXT NOT NULL,
PRIMARY KEY (user_uuid, device_uuid)
);

0
migrations/postgresql/2022-01-17-234911_add_api_key/down.sql

2
migrations/postgresql/2022-01-17-234911_add_api_key/up.sql

@ -0,0 +1,2 @@
ALTER TABLE users
ADD COLUMN api_key TEXT;

1
migrations/sqlite/2021-08-30-193501_create_emergency_access/down.sql

@ -0,0 +1 @@
DROP TABLE emergency_access;

14
migrations/sqlite/2021-08-30-193501_create_emergency_access/up.sql

@ -0,0 +1,14 @@
CREATE TABLE emergency_access (
uuid TEXT NOT NULL PRIMARY KEY,
grantor_uuid TEXT REFERENCES users (uuid),
grantee_uuid TEXT REFERENCES users (uuid),
email TEXT,
key_encrypted TEXT,
atype INTEGER NOT NULL,
status INTEGER NOT NULL,
wait_time_days INTEGER NOT NULL,
recovery_initiated_at DATETIME,
last_notification_at DATETIME,
updated_at DATETIME NOT NULL,
created_at DATETIME NOT NULL
);

1
migrations/sqlite/2021-10-24-164321_add_2fa_incomplete/down.sql

@ -0,0 +1 @@
DROP TABLE twofactor_incomplete;

9
migrations/sqlite/2021-10-24-164321_add_2fa_incomplete/up.sql

@ -0,0 +1,9 @@
CREATE TABLE twofactor_incomplete (
user_uuid TEXT NOT NULL REFERENCES users(uuid),
device_uuid TEXT NOT NULL,
device_name TEXT NOT NULL,
login_time DATETIME NOT NULL,
ip_address TEXT NOT NULL,
PRIMARY KEY (user_uuid, device_uuid)
);

0
migrations/sqlite/2022-01-17-234911_add_api_key/down.sql

2
migrations/sqlite/2022-01-17-234911_add_api_key/up.sql

@ -0,0 +1,2 @@
ALTER TABLE users
ADD COLUMN api_key TEXT;
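
All three database variants add the same nullable api_key column. For context, the matching Diesel schema entry would look roughly like the hypothetical excerpt below; the commit's actual schema change lives in src/db/schemas and is not shown in these hunks:

// Hypothetical excerpt of Diesel's table! macro after this migration;
// existing columns are elided, only the new column is spelled out.
table! {
    users (uuid) {
        uuid -> Text,
        // ...existing columns...
        api_key -> Nullable<Text>,
    }
}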

2
rust-toolchain

@ -1 +1 @@
nightly-2021-08-22
nightly-2022-01-23

26
src/api/admin.rs

@ -1,7 +1,7 @@
use once_cell::sync::Lazy;
use serde::de::DeserializeOwned;
use serde_json::Value;
use std::{env, time::Duration};
use std::env;
use rocket::{
http::{Cookie, Cookies, SameSite, Status},
@ -18,8 +18,10 @@ use crate::{
db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
error::{Error, MapResult},
mail,
util::{format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker},
CONFIG,
util::{
docker_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker,
},
CONFIG, VERSION,
};
pub fn routes() -> Vec<Route> {
@ -72,11 +74,10 @@ fn admin_disabled() -> &'static str {
"The admin panel is disabled, please configure the 'ADMIN_TOKEN' variable to enable it"
}
const COOKIE_NAME: &str = "BWRS_ADMIN";
const COOKIE_NAME: &str = "VW_ADMIN";
const ADMIN_PATH: &str = "/admin";
const BASE_TEMPLATE: &str = "admin/base";
const VERSION: Option<&str> = option_env!("BWRS_VERSION");
fn admin_path() -> String {
format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
@ -164,6 +165,10 @@ fn post_admin_login(
) -> Result<Redirect, Flash<Redirect>> {
let data = data.into_inner();
if crate::ratelimit::check_limit_admin(&ip.ip).is_err() {
return Err(Flash::error(Redirect::to(admin_url(referer)), "Too many requests, try again later."));
}
// If the token is invalid, redirect to login page
if !_validate_token(&data.token) {
error!("Invalid admin token. IP: {}", ip.ip);
@ -234,7 +239,7 @@ impl AdminTemplateData {
}
#[get("/", rank = 1)]
fn admin_page(_token: AdminToken, _conn: DbConn) -> ApiResult<Html<String>> {
fn admin_page(_token: AdminToken) -> ApiResult<Html<String>> {
let text = AdminTemplateData::new().render()?;
Ok(Html(text))
}
@ -269,7 +274,7 @@ fn invite_user(data: Json<InviteData>, _token: AdminToken, conn: DbConn) -> Json
if CONFIG.mail_enabled() {
mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None)?;
} else {
let invitation = Invitation::new(data.email);
let invitation = Invitation::new(user.email.clone());
invitation.save(&conn)?;
}
@ -460,13 +465,13 @@ struct GitCommit {
fn get_github_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
let github_api = get_reqwest_client();
Ok(github_api.get(url).timeout(Duration::from_secs(10)).send()?.error_for_status()?.json::<T>()?)
Ok(github_api.get(url).send()?.error_for_status()?.json::<T>()?)
}
fn has_http_access() -> bool {
let http_access = get_reqwest_client();
match http_access.head("https://github.com/dani-garcia/vaultwarden").timeout(Duration::from_secs(10)).send() {
match http_access.head("https://github.com/dani-garcia/vaultwarden").send() {
Ok(r) => r.status().is_success(),
_ => false,
}
@ -480,7 +485,7 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
// Get current running versions
let web_vault_version: WebVaultVersion =
match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "bwrs-version.json")) {
match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "vw-version.json")) {
Ok(s) => serde_json::from_str(&s)?,
_ => match read_file_string(&format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
Ok(s) => serde_json::from_str(&s)?,
@ -549,6 +554,7 @@ fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResu
"web_vault_version": web_vault_version.version,
"latest_web_build": latest_web_build,
"running_within_docker": running_within_docker,
"docker_base_image": docker_base_image(),
"has_http_access": has_http_access,
"ip_header_exists": &ip_header.0.is_some(),
"ip_header_match": ip_header_name == CONFIG.ip_header(),

66
src/api/core/accounts.rs

@ -34,6 +34,8 @@ pub fn routes() -> Vec<rocket::Route> {
password_hint,
prelogin,
verify_password,
api_key,
rotate_api_key,
]
}
@ -49,6 +51,7 @@ struct RegisterData {
MasterPasswordHint: Option<String>,
Name: Option<String>,
Token: Option<String>,
#[allow(dead_code)]
OrganizationUserId: Option<String>,
}
@ -62,11 +65,12 @@ struct KeysData {
#[post("/accounts/register", data = "<data>")]
fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
let data: RegisterData = data.into_inner().data;
let email = data.Email.to_lowercase();
let mut user = match User::find_by_mail(&data.Email, &conn) {
let mut user = match User::find_by_mail(&email, &conn) {
Some(user) => {
if !user.password_hash.is_empty() {
if CONFIG.is_signup_allowed(&data.Email) {
if CONFIG.is_signup_allowed(&email) {
err!("User already exists")
} else {
err!("Registration not allowed or user already exists")
@ -75,19 +79,20 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
if let Some(token) = data.Token {
let claims = decode_invite(&token)?;
if claims.email == data.Email {
if claims.email == email {
user
} else {
err!("Registration email does not match invite email")
}
} else if Invitation::take(&data.Email, &conn) {
} else if Invitation::take(&email, &conn) {
for mut user_org in UserOrganization::find_invited_by_user(&user.uuid, &conn).iter_mut() {
user_org.status = UserOrgStatus::Accepted as i32;
user_org.save(&conn)?;
}
user
} else if CONFIG.is_signup_allowed(&data.Email) {
} else if EmergencyAccess::find_invited_by_grantee_email(&email, &conn).is_some() {
user
} else if CONFIG.is_signup_allowed(&email) {
err!("Account with this email already exists")
} else {
err!("Registration not allowed or user already exists")
@ -97,8 +102,8 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
// Order is important here; the invitation check must come first
// because the vaultwarden admin can invite anyone, regardless
// of other signup restrictions.
if Invitation::take(&data.Email, &conn) || CONFIG.is_signup_allowed(&data.Email) {
User::new(data.Email.clone())
if Invitation::take(&email, &conn) || CONFIG.is_signup_allowed(&email) {
User::new(email.clone())
} else {
err!("Registration not allowed or user already exists")
}
@ -106,7 +111,7 @@ fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> EmptyResult {
};
// Make sure we don't leave a lingering invitation.
Invitation::take(&data.Email, &conn);
Invitation::take(&email, &conn);
if let Some(client_kdf_iter) = data.KdfIterations {
user.client_kdf_iter = client_kdf_iter;
@ -233,7 +238,7 @@ fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, conn: DbCon
user.set_password(
&data.NewMasterPasswordHash,
Some(vec![String::from("post_rotatekey"), String::from("get_contacts")]),
Some(vec![String::from("post_rotatekey"), String::from("get_contacts"), String::from("get_public_keys")]),
);
user.akey = data.Key;
user.save(&conn)
@ -376,7 +381,7 @@ fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, conn: Db
err!("Email domain not allowed");
}
let token = crypto::generate_token(6)?;
let token = crypto::generate_email_token(6);
if CONFIG.mail_enabled() {
if let Err(e) = mail::send_change_email(&data.NewEmail, &token) {
@ -448,7 +453,7 @@ fn post_email(data: JsonUpcase<ChangeEmailData>, headers: Headers, conn: DbConn)
}
#[post("/accounts/verify-email")]
fn post_verify_email(headers: Headers, _conn: DbConn) -> EmptyResult {
fn post_verify_email(headers: Headers) -> EmptyResult {
let user = headers.user;
if !CONFIG.mail_enabled() {
@ -641,15 +646,17 @@ fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
"KdfIterations": kdf_iter
}))
}
// https://github.com/bitwarden/server/blob/master/src/Api/Models/Request/Accounts/SecretVerificationRequestModel.cs
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct VerifyPasswordData {
struct SecretVerificationRequest {
MasterPasswordHash: String,
}
#[post("/accounts/verify-password", data = "<data>")]
fn verify_password(data: JsonUpcase<VerifyPasswordData>, headers: Headers, _conn: DbConn) -> EmptyResult {
let data: VerifyPasswordData = data.into_inner().data;
fn verify_password(data: JsonUpcase<SecretVerificationRequest>, headers: Headers) -> EmptyResult {
let data: SecretVerificationRequest = data.into_inner().data;
let user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
@ -658,3 +665,32 @@ fn verify_password(data: JsonUpcase<VerifyPasswordData>, headers: Headers, _conn
Ok(())
}
fn _api_key(data: JsonUpcase<SecretVerificationRequest>, rotate: bool, headers: Headers, conn: DbConn) -> JsonResult {
let data: SecretVerificationRequest = data.into_inner().data;
let mut user = headers.user;
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Invalid password")
}
if rotate || user.api_key.is_none() {
user.api_key = Some(crypto::generate_api_key());
user.save(&conn).expect("Error saving API key");
}
Ok(Json(json!({
"ApiKey": user.api_key,
"Object": "apiKey",
})))
}
#[post("/accounts/api-key", data = "<data>")]
fn api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
_api_key(data, false, headers, conn)
}
#[post("/accounts/rotate-api-key", data = "<data>")]
fn rotate_api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
_api_key(data, true, headers, conn)
}
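
The two handlers above delegate key creation to crypto::generate_api_key(), which is defined in src/crypto.rs (changed in this commit but not shown here). A minimal sketch of what such a generator could look like, assuming the rand crate and a 30-character alphanumeric key; both are assumptions, not the verified implementation:

use rand::Rng;

// Hypothetical sketch: a random 30-character alphanumeric API key.
// The real generate_api_key() lives in src/crypto.rs and may differ.
pub fn generate_api_key() -> String {
    const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
    let mut rng = rand::thread_rng();
    (0..30).map(|_| CHARSET[rng.gen_range(0..CHARSET.len())] as char).collect()
}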

12
src/api/core/ciphers.rs

@ -105,7 +105,7 @@ fn sync(data: Form<SyncData>, headers: Headers, conn: DbConn) -> Json<Value> {
let collections_json: Vec<Value> =
collections.iter().map(|c| c.to_json_details(&headers.user.uuid, &conn)).collect();
let policies = OrgPolicy::find_by_user(&headers.user.uuid, &conn);
let policies = OrgPolicy::find_confirmed_by_user(&headers.user.uuid, &conn);
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn);
@ -248,7 +248,7 @@ fn post_ciphers_create(data: JsonUpcase<ShareCipherData>, headers: Headers, conn
// This check is usually only needed in update_cipher_from_data(), but we
// need it here as well to avoid creating an empty cipher in the call to
// cipher.save() below.
enforce_personal_ownership_policy(&data.Cipher, &headers, &conn)?;
enforce_personal_ownership_policy(Some(&data.Cipher), &headers, &conn)?;
let mut cipher = Cipher::new(data.Cipher.Type, data.Cipher.Name.clone());
cipher.user_uuid = Some(headers.user.uuid.clone());
@ -289,8 +289,8 @@ fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, conn: DbConn, nt
/// allowed to delete or share such ciphers to an org, however.
///
/// Ref: https://bitwarden.com/help/article/policies/#personal-ownership
fn enforce_personal_ownership_policy(data: &CipherData, headers: &Headers, conn: &DbConn) -> EmptyResult {
if data.OrganizationId.is_none() {
fn enforce_personal_ownership_policy(data: Option<&CipherData>, headers: &Headers, conn: &DbConn) -> EmptyResult {
if data.is_none() || data.unwrap().OrganizationId.is_none() {
let user_uuid = &headers.user.uuid;
let policy_type = OrgPolicyType::PersonalOwnership;
if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, conn) {
@ -309,7 +309,7 @@ pub fn update_cipher_from_data(
nt: &Notify,
ut: UpdateType,
) -> EmptyResult {
enforce_personal_ownership_policy(&data, headers, conn)?;
enforce_personal_ownership_policy(Some(&data), headers, conn)?;
// Check that the client isn't updating an existing cipher with stale data.
if let Some(dt) = data.LastKnownRevisionDate {
@ -458,6 +458,8 @@ struct RelationsData {
#[post("/ciphers/import", data = "<data>")]
fn post_ciphers_import(data: JsonUpcase<ImportData>, headers: Headers, conn: DbConn, nt: Notify) -> EmptyResult {
enforce_personal_ownership_policy(None, &headers, &conn)?;
let data: ImportData = data.into_inner().data;
// Read and create the folders

800
src/api/core/emergency_access.rs

@ -1,24 +1,804 @@
use chrono::{Duration, Utc};
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;
use std::borrow::Borrow;
use crate::{api::JsonResult, auth::Headers, db::DbConn};
use crate::{
api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString},
auth::{decode_emergency_access_invite, Headers},
db::{models::*, DbConn, DbPool},
mail, CONFIG,
};
pub fn routes() -> Vec<Route> {
routes![get_contacts,]
routes![
get_contacts,
get_grantees,
get_emergency_access,
put_emergency_access,
delete_emergency_access,
post_delete_emergency_access,
send_invite,
resend_invite,
accept_invite,
confirm_emergency_access,
initiate_emergency_access,
approve_emergency_access,
reject_emergency_access,
takeover_emergency_access,
password_emergency_access,
view_emergency_access,
policies_emergency_access,
]
}
/// This endpoint is expected to return at least something.
/// If we return an error message that will trigger error toasts for the user.
/// To prevent this we just return an empty json result with no Data.
/// When this feature is going to be implemented it also needs to return this empty Data
/// instead of throwing an error/4XX unless it really is an error.
// region get
#[get("/emergency-access/trusted")]
fn get_contacts(_headers: Headers, _conn: DbConn) -> JsonResult {
debug!("Emergency access is not supported.");
fn get_contacts(headers: Headers, conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;
let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &conn);
let emergency_access_list_json: Vec<Value> =
emergency_access_list.iter().map(|e| e.to_json_grantee_details(&conn)).collect();
Ok(Json(json!({
"Data": emergency_access_list_json,
"Object": "list",
"ContinuationToken": null
})))
}
#[get("/emergency-access/granted")]
fn get_grantees(headers: Headers, conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;
let emergency_access_list = EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &conn);
let emergency_access_list_json: Vec<Value> =
emergency_access_list.iter().map(|e| e.to_json_grantor_details(&conn)).collect();
Ok(Json(json!({
"Data": emergency_access_list_json,
"Object": "list",
"ContinuationToken": null
})))
}
#[get("/emergency-access/<emer_id>")]
fn get_emergency_access(emer_id: String, conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;
match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&conn))),
None => err!("Emergency access not valid."),
}
}
// endregion
// region put/post
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EmergencyAccessUpdateData {
Type: NumberOrString,
WaitTimeDays: i32,
KeyEncrypted: Option<String>,
}
#[put("/emergency-access/<emer_id>", data = "<data>")]
fn put_emergency_access(emer_id: String, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult {
post_emergency_access(emer_id, data, conn)
}
#[post("/emergency-access/<emer_id>", data = "<data>")]
fn post_emergency_access(emer_id: String, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;
let data: EmergencyAccessUpdateData = data.into_inner().data;
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
Some(emergency_access) => emergency_access,
None => err!("Emergency access not valid."),
};
let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
Some(new_type) => new_type as i32,
None => err!("Invalid emergency access type."),
};
emergency_access.atype = new_type;
emergency_access.wait_time_days = data.WaitTimeDays;
emergency_access.key_encrypted = data.KeyEncrypted;
emergency_access.save(&conn)?;
Ok(Json(emergency_access.to_json()))
}
// endregion
// region delete
#[delete("/emergency-access/<emer_id>")]
fn delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
check_emergency_access_allowed()?;
let grantor_user = headers.user;
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
Some(emer) => {
if emer.grantor_uuid != grantor_user.uuid && emer.grantee_uuid != Some(grantor_user.uuid) {
err!("Emergency access not valid.")
}
emer
}
None => err!("Emergency access not valid."),
};
emergency_access.delete(&conn)?;
Ok(())
}
#[post("/emergency-access/<emer_id>/delete")]
fn post_delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
delete_emergency_access(emer_id, headers, conn)
}
// endregion
// region invite
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EmergencyAccessInviteData {
Email: String,
Type: NumberOrString,
WaitTimeDays: i32,
}
#[post("/emergency-access/invite", data = "<data>")]
fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, conn: DbConn) -> EmptyResult {
check_emergency_access_allowed()?;
let data: EmergencyAccessInviteData = data.into_inner().data;
let email = data.Email.to_lowercase();
let wait_time_days = data.WaitTimeDays;
let emergency_access_status = EmergencyAccessStatus::Invited as i32;
let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
Some(new_type) => new_type as i32,
None => err!("Invalid emergency access type."),
};
let grantor_user = headers.user;
// avoid setting yourself as emergency contact
if email == grantor_user.email {
err!("You cannot set yourself as an emergency contact.")
}
let grantee_user = match User::find_by_mail(&email, &conn) {
None => {
if !CONFIG.invitations_allowed() {
err!(format!("Grantee user does not exist: {}", email))
}
if !CONFIG.is_email_domain_allowed(&email) {
err!("Email domain not eligible for invitations")
}
if !CONFIG.mail_enabled() {
let invitation = Invitation::new(email.clone());
invitation.save(&conn)?;
}
let mut user = User::new(email.clone());
user.save(&conn)?;
user
}
Some(user) => user,
};
if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(
&grantor_user.uuid,
&grantee_user.uuid,
&grantee_user.email,
&conn,
)
.is_some()
{
err!(format!("Grantee user already invited: {}", email))
}
let mut new_emergency_access = EmergencyAccess::new(
grantor_user.uuid.clone(),
Some(grantee_user.email.clone()),
emergency_access_status,
new_type,
wait_time_days,
);
new_emergency_access.save(&conn)?;
if CONFIG.mail_enabled() {
mail::send_emergency_access_invite(
&grantee_user.email,
&grantee_user.uuid,
Some(new_emergency_access.uuid),
Some(grantor_user.name.clone()),
Some(grantor_user.email),
)?;
} else {
// Automatically mark user as accepted if no email invites
match User::find_by_mail(&email, &conn) {
Some(user) => {
match accept_invite_process(user.uuid, new_emergency_access.uuid, Some(email), conn.borrow()) {
Ok(v) => (v),
Err(e) => err!(e.to_string()),
}
}
None => err!("Grantee user not found."),
}
}
Ok(())
}
#[post("/emergency-access/<emer_id>/reinvite")]
fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult {
check_emergency_access_allowed()?;
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
if emergency_access.grantor_uuid != headers.user.uuid {
err!("Emergency access not valid.");
}
if emergency_access.status != EmergencyAccessStatus::Invited as i32 {
err!("The grantee user is already accepted or confirmed to the organization");
}
let email = match emergency_access.email.clone() {
Some(email) => email,
None => err!("Email not valid."),
};
let grantee_user = match User::find_by_mail(&email, &conn) {
Some(user) => user,
None => err!("Grantee user not found."),
};
let grantor_user = headers.user;
if CONFIG.mail_enabled() {
mail::send_emergency_access_invite(
&email,
&grantor_user.uuid,
Some(emergency_access.uuid),
Some(grantor_user.name.clone()),
Some(grantor_user.email),
)?;
} else {
if Invitation::find_by_mail(&email, &conn).is_none() {
let invitation = Invitation::new(email);
invitation.save(&conn)?;
}
// Automatically mark user as accepted if no email invites
match accept_invite_process(grantee_user.uuid, emergency_access.uuid, emergency_access.email, conn.borrow()) {
Ok(v) => (v),
Err(e) => err!(e.to_string()),
}
}
Ok(())
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct AcceptData {
Token: String,
}
#[post("/emergency-access/<emer_id>/accept", data = "<data>")]
fn accept_invite(emer_id: String, data: JsonUpcase<AcceptData>, conn: DbConn) -> EmptyResult {
check_emergency_access_allowed()?;
let data: AcceptData = data.into_inner().data;
let token = &data.Token;
let claims = decode_emergency_access_invite(token)?;
let grantee_user = match User::find_by_mail(&claims.email, &conn) {
Some(user) => {
Invitation::take(&claims.email, &conn);
user
}
None => err!("Invited user not found"),
};
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
// get grantor user to send Accepted email
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) {
Some(user) => user,
None => err!("Grantor user not found."),
};
if (claims.emer_id.is_some() && emer_id == claims.emer_id.unwrap())
&& (claims.grantor_name.is_some() && grantor_user.name == claims.grantor_name.unwrap())
&& (claims.grantor_email.is_some() && grantor_user.email == claims.grantor_email.unwrap())
{
match accept_invite_process(grantee_user.uuid.clone(), emer_id, Some(grantee_user.email.clone()), &conn) {
Ok(v) => (v),
Err(e) => err!(e.to_string()),
}
if CONFIG.mail_enabled() {
mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email)?;
}
Ok(())
} else {
err!("Emergency access invitation error.")
}
}
fn accept_invite_process(grantee_uuid: String, emer_id: String, email: Option<String>, conn: &DbConn) -> EmptyResult {
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, conn) {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
let emer_email = emergency_access.email;
if emer_email.is_none() || emer_email != email {
err!("User email does not match invite.");
}
if emergency_access.status == EmergencyAccessStatus::Accepted as i32 {
err!("Emergency contact already accepted.");
}
emergency_access.status = EmergencyAccessStatus::Accepted as i32;
emergency_access.grantee_uuid = Some(grantee_uuid);
emergency_access.email = None;
emergency_access.save(conn)
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct ConfirmData {
Key: String,
}
#[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
fn confirm_emergency_access(
emer_id: String,
data: JsonUpcase<ConfirmData>,
headers: Headers,
conn: DbConn,
) -> JsonResult {
check_emergency_access_allowed()?;
let confirming_user = headers.user;
let data: ConfirmData = data.into_inner().data;
let key = data.Key;
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
if emergency_access.status != EmergencyAccessStatus::Accepted as i32
|| emergency_access.grantor_uuid != confirming_user.uuid
{
err!("Emergency access not valid.")
}
let grantor_user = match User::find_by_uuid(&confirming_user.uuid, &conn) {
Some(user) => user,
None => err!("Grantor user not found."),
};
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
let grantee_user = match User::find_by_uuid(grantee_uuid, &conn) {
Some(user) => user,
None => err!("Grantee user not found."),
};
emergency_access.status = EmergencyAccessStatus::Confirmed as i32;
emergency_access.key_encrypted = Some(key);
emergency_access.email = None;
emergency_access.save(&conn)?;
if CONFIG.mail_enabled() {
mail::send_emergency_access_invite_confirmed(&grantee_user.email, &grantor_user.name)?;
}
Ok(Json(emergency_access.to_json()))
} else {
err!("Grantee user not found.")
}
}
// endregion
// region access emergency access
#[post("/emergency-access/<emer_id>/initiate")]
fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;
let initiating_user = headers.user;
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
if emergency_access.status != EmergencyAccessStatus::Confirmed as i32
|| emergency_access.grantee_uuid != Some(initiating_user.uuid.clone())
{
err!("Emergency access not valid.")
}
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) {
Some(user) => user,
None => err!("Grantor user not found."),
};
let now = Utc::now().naive_utc();
emergency_access.status = EmergencyAccessStatus::RecoveryInitiated as i32;
emergency_access.updated_at = now;
emergency_access.recovery_initiated_at = Some(now);
emergency_access.last_notification_at = Some(now);
emergency_access.save(&conn)?;
if CONFIG.mail_enabled() {
mail::send_emergency_access_recovery_initiated(
&grantor_user.email,
&initiating_user.name,
emergency_access.get_type_as_str(),
&emergency_access.wait_time_days.clone().to_string(),
)?;
}
Ok(Json(emergency_access.to_json()))
}
#[post("/emergency-access/<emer_id>/approve")]
fn approve_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;
let approving_user = headers.user;
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
|| emergency_access.grantor_uuid != approving_user.uuid
{
err!("Emergency access not valid.")
}
let grantor_user = match User::find_by_uuid(&approving_user.uuid, &conn) {
Some(user) => user,
None => err!("Grantor user not found."),
};
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
let grantee_user = match User::find_by_uuid(grantee_uuid, &conn) {
Some(user) => user,
None => err!("Grantee user not found."),
};
emergency_access.status = EmergencyAccessStatus::RecoveryApproved as i32;
emergency_access.save(&conn)?;
if CONFIG.mail_enabled() {
mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name)?;
}
Ok(Json(emergency_access.to_json()))
} else {
err!("Grantee user not found.")
}
}
#[post("/emergency-access/<emer_id>/reject")]
fn reject_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;
let rejecting_user = headers.user;
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
if (emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32
&& emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32)
|| emergency_access.grantor_uuid != rejecting_user.uuid
{
err!("Emergency access not valid.")
}
let grantor_user = match User::find_by_uuid(&rejecting_user.uuid, &conn) {
Some(user) => user,
None => err!("Grantor user not found."),
};
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() {
let grantee_user = match User::find_by_uuid(grantee_uuid, &conn) {
Some(user) => user,
None => err!("Grantee user not found."),
};
emergency_access.status = EmergencyAccessStatus::Confirmed as i32;
emergency_access.save(&conn)?;
if CONFIG.mail_enabled() {
mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name)?;
}
Ok(Json(emergency_access.to_json()))
} else {
err!("Grantee user not found.")
}
}
// endregion
// region action
#[post("/emergency-access/<emer_id>/view")]
fn view_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;
let requesting_user = headers.user;
let host = headers.host;
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::View) {
err!("Emergency access not valid.")
}
let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &conn);
let ciphers_json: Vec<Value> =
ciphers.iter().map(|c| c.to_json(&host, &emergency_access.grantor_uuid, &conn)).collect();
Ok(Json(json!({
"Ciphers": ciphers_json,
"KeyEncrypted": &emergency_access.key_encrypted,
"Object": "emergencyAccessView",
})))
}
#[post("/emergency-access/<emer_id>/takeover")]
fn takeover_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
check_emergency_access_allowed()?;
let requesting_user = headers.user;
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) {
err!("Emergency access not valid.")
}
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) {
Some(user) => user,
None => err!("Grantor user not found."),
};
Ok(Json(json!({
"Data": [],
"Kdf": grantor_user.client_kdf_type,
"KdfIterations": grantor_user.client_kdf_iter,
"KeyEncrypted": &emergency_access.key_encrypted,
"Object": "emergencyAccessTakeover",
})))
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EmergencyAccessPasswordData {
NewMasterPasswordHash: String,
Key: String,
}
#[post("/emergency-access/<emer_id>/password", data = "<data>")]
fn password_emergency_access(
emer_id: String,
data: JsonUpcase<EmergencyAccessPasswordData>,
headers: Headers,
conn: DbConn,
) -> EmptyResult {
check_emergency_access_allowed()?;
let data: EmergencyAccessPasswordData = data.into_inner().data;
let new_master_password_hash = &data.NewMasterPasswordHash;
let key = data.Key;
let requesting_user = headers.user;
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) {
err!("Emergency access not valid.")
}
let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) {
Some(user) => user,
None => err!("Grantor user not found."),
};
// change grantor_user password
grantor_user.set_password(new_master_password_hash, None);
grantor_user.akey = key;
grantor_user.save(&conn)?;
// Disable TwoFactor providers since they will otherwise block logins
TwoFactor::delete_all_by_user(&grantor_user.uuid, &conn)?;
let user_org_grantor = UserOrganization::find_any_state_by_user(&grantor_user.uuid, &conn);
// Remove grantor from all organisations unless Owner
for user_org in user_org_grantor {
if user_org.atype != UserOrgType::Owner as i32 {
user_org.delete(&conn)?;
}
}
Ok(())
}
// endregion
#[get("/emergency-access/<emer_id>/policies")]
fn policies_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult {
let requesting_user = headers.user;
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) {
Some(emer) => emer,
None => err!("Emergency access not valid."),
};
if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) {
err!("Emergency access not valid.")
}
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) {
Some(user) => user,
None => err!("Grantor user not found."),
};
let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &conn);
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
Ok(Json(json!({
"Data": policies_json,
"Object": "list",
"ContinuationToken": null
})))
}
fn is_valid_request(
emergency_access: &EmergencyAccess,
requesting_user_uuid: String,
requested_access_type: EmergencyAccessType,
) -> bool {
emergency_access.grantee_uuid == Some(requesting_user_uuid)
&& emergency_access.status == EmergencyAccessStatus::RecoveryApproved as i32
&& emergency_access.atype == requested_access_type as i32
}
fn check_emergency_access_allowed() -> EmptyResult {
if !CONFIG.emergency_access_allowed() {
err!("Emergency access is not allowed.")
}
Ok(())
}
pub fn emergency_request_timeout_job(pool: DbPool) {
debug!("Start emergency_request_timeout_job");
if !CONFIG.emergency_access_allowed() {
return;
}
if let Ok(conn) = pool.get() {
let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn);
if emergency_access_list.is_empty() {
debug!("No emergency request timeout to approve");
}
for mut emer in emergency_access_list {
if emer.recovery_initiated_at.is_some()
&& Utc::now().naive_utc()
>= emer.recovery_initiated_at.unwrap() + Duration::days(emer.wait_time_days as i64)
{
emer.status = EmergencyAccessStatus::RecoveryApproved as i32;
emer.save(&conn).expect("Cannot save emergency access on job");
if CONFIG.mail_enabled() {
// get grantor user to send the recovery timed-out email
let grantor_user = User::find_by_uuid(&emer.grantor_uuid, &conn).expect("Grantor user not found.");
// get grantee user to send the recovery approved email
let grantee_user =
User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid."), &conn)
.expect("Grantee user not found.");
mail::send_emergency_access_recovery_timed_out(
&grantor_user.email,
&grantee_user.name.clone(),
emer.get_type_as_str(),
)
.expect("Error on sending email");
mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name.clone())
.expect("Error on sending email");
}
}
}
} else {
error!("Failed to get DB connection while searching for timed-out emergency requests")
}
}
pub fn emergency_notification_reminder_job(pool: DbPool) {
debug!("Start emergency_notification_reminder_job");
if !CONFIG.emergency_access_allowed() {
return;
}
if let Ok(conn) = pool.get() {
let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn);
if emergency_access_list.is_empty() {
debug!("No emergency request reminder notification to send");
}
for mut emer in emergency_access_list {
if (emer.recovery_initiated_at.is_some()
&& Utc::now().naive_utc()
>= emer.recovery_initiated_at.unwrap() + Duration::days((emer.wait_time_days as i64) - 1))
&& (emer.last_notification_at.is_none()
|| (emer.last_notification_at.is_some()
&& Utc::now().naive_utc() >= emer.last_notification_at.unwrap() + Duration::days(1)))
{
emer.save(&conn).expect("Cannot save emergency access on job");
if CONFIG.mail_enabled() {
// get grantor user to send the recovery reminder email
let grantor_user = User::find_by_uuid(&emer.grantor_uuid, &conn).expect("Grantor user not found.");
// get grantee user for the name used in the reminder email
let grantee_user =
User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid."), &conn)
.expect("Grantee user not found.");
mail::send_emergency_access_recovery_reminder(
&grantor_user.email,
&grantee_user.name.clone(),
emer.get_type_as_str(),
&emer.wait_time_days.to_string(), // TODO(jjlin): This should be the number of days left.
)
.expect("Error on sending email");
}
}
}
} else {
error!("Failed to get DB connection while searching for emergency notification reminders")
}
}
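
Both jobs hinge on the same date arithmetic: a recovery is acted on once wait_time_days (minus one for the reminder) have elapsed since recovery_initiated_at. Distilled into a self-contained sketch, with field names mirroring the ones above:

use chrono::{Duration, NaiveDateTime, Utc};

// A recovery request times out (and is auto-approved) once the
// configured number of waiting days has fully elapsed.
fn recovery_wait_over(recovery_initiated_at: NaiveDateTime, wait_time_days: i32) -> bool {
    Utc::now().naive_utc() >= recovery_initiated_at + Duration::days(wait_time_days as i64)
}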

4
src/api/core/mod.rs

@ -7,7 +7,9 @@ mod sends;
pub mod two_factor;
pub use ciphers::purge_trashed_ciphers;
pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
pub use sends::purge_sends;
pub use two_factor::send_incomplete_2fa_notifications;
pub fn routes() -> Vec<Route> {
let mut mod_routes =
@ -168,7 +170,7 @@ fn hibp_breach(username: String) -> JsonResult {
"BreachDate": "2019-08-18T00:00:00Z",
"AddedDate": "2019-08-18T00:00:00Z",
"Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{account}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{account}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>", account=username),
"LogoPath": "bwrs_static/hibp.png",
"LogoPath": "vw_static/hibp.png",
"PwnCount": 0,
"DataClasses": [
"Error - No API key set!"

336
src/api/core/organizations.rs

@ -37,12 +37,15 @@ pub fn routes() -> Vec<Route> {
get_org_users,
send_invite,
reinvite_user,
bulk_reinvite_user,
confirm_invite,
bulk_confirm_invite,
accept_invite,
get_user,
edit_user,
put_organization_user,
delete_user,
bulk_delete_user,
post_delete_user,
post_org_import,
list_policies,
@ -54,6 +57,7 @@ pub fn routes() -> Vec<Route> {
get_plans_tax_rates,
import,
post_org_keys,
bulk_public_keys,
]
}
@ -101,11 +105,22 @@ struct OrgKeyData {
PublicKey: String,
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgBulkIds {
Ids: Vec<String>,
}
#[post("/organizations", data = "<data>")]
fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, conn: DbConn) -> JsonResult {
if !CONFIG.is_org_creation_allowed(&headers.user.email) {
err!("User not allowed to create organizations")
}
if OrgPolicy::is_applicable_to_user(&headers.user.uuid, OrgPolicyType::SingleOrg, &conn) {
err!(
"You may not create an organization. You belong to an organization which has a policy that prohibits you from being a member of any other organization."
)
}
let data: OrgData = data.into_inner().data;
let (private_key, public_key) = if data.Keys.is_some() {
@ -275,7 +290,7 @@ fn get_user_collections(headers: Headers, conn: DbConn) -> Json<Value> {
}
#[get("/organizations/<org_id>/collections")]
fn get_org_collections(org_id: String, _headers: AdminHeaders, conn: DbConn) -> Json<Value> {
fn get_org_collections(org_id: String, _headers: ManagerHeadersLoose, conn: DbConn) -> Json<Value> {
Json(json!({
"Data":
Collection::find_by_organization(&org_id, &conn)
@ -422,7 +437,7 @@ fn delete_organization_collection(
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
#[allow(non_snake_case, dead_code)]
struct DeleteCollectionData {
Id: String,
OrgId: String,
@ -595,18 +610,19 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
}
for email in data.Emails.iter() {
let email = email.to_lowercase();
let mut user_org_status = if CONFIG.mail_enabled() {
UserOrgStatus::Invited as i32
} else {
UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites
};
let user = match User::find_by_mail(email, &conn) {
let user = match User::find_by_mail(&email, &conn) {
None => {
if !CONFIG.invitations_allowed() {
err!(format!("User does not exist: {}", email))
}
if !CONFIG.is_email_domain_allowed(email) {
if !CONFIG.is_email_domain_allowed(&email) {
err!("Email domain not eligible for invitations")
}
@ -656,7 +672,7 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
};
mail::send_invite(
email,
&email,
&user.uuid,
Some(org_id.clone()),
Some(new_user.uuid),
@ -669,8 +685,44 @@ fn send_invite(org_id: String, data: JsonUpcase<InviteData>, headers: AdminHeade
Ok(())
}
#[post("/organizations/<org_id>/users/reinvite", data = "<data>")]
fn bulk_reinvite_user(
org_id: String,
data: JsonUpcase<OrgBulkIds>,
headers: AdminHeaders,
conn: DbConn,
) -> Json<Value> {
let data: OrgBulkIds = data.into_inner().data;
let mut bulk_response = Vec::new();
for org_user_id in data.Ids {
let err_msg = match _reinvite_user(&org_id, &org_user_id, &headers.user.email, &conn) {
Ok(_) => String::from(""),
Err(e) => format!("{:?}", e),
};
bulk_response.push(json!(
{
"Object": "OrganizationBulkConfirmResponseModel",
"Id": org_user_id,
"Error": err_msg
}
))
}
Json(json!({
"Data": bulk_response,
"Object": "list",
"ContinuationToken": null
}))
}
#[post("/organizations/<org_id>/users/<user_org>/reinvite")]
fn reinvite_user(org_id: String, user_org: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
_reinvite_user(&org_id, &user_org, &headers.user.email, &conn)
}
fn _reinvite_user(org_id: &str, user_org: &str, invited_by_email: &str, conn: &DbConn) -> EmptyResult {
if !CONFIG.invitations_allowed() {
err!("Invitations are not allowed.")
}
@ -679,7 +731,7 @@ fn reinvite_user(org_id: String, user_org: String, headers: AdminHeaders, conn:
err!("SMTP is not configured.")
}
let user_org = match UserOrganization::find_by_uuid(&user_org, &conn) {
let user_org = match UserOrganization::find_by_uuid(user_org, conn) {
Some(user_org) => user_org,
None => err!("The user hasn't been invited to the organization."),
};
@ -688,12 +740,12 @@ fn reinvite_user(org_id: String, user_org: String, headers: AdminHeaders, conn:
err!("The user is already accepted or confirmed to the organization")
}
let user = match User::find_by_uuid(&user_org.user_uuid, &conn) {
let user = match User::find_by_uuid(&user_org.user_uuid, conn) {
Some(user) => user,
None => err!("User not found."),
};
let org_name = match Organization::find_by_uuid(&org_id, &conn) {
let org_name = match Organization::find_by_uuid(org_id, conn) {
Some(org) => org.name,
None => err!("Error looking up organization."),
};
@ -702,14 +754,14 @@ fn reinvite_user(org_id: String, user_org: String, headers: AdminHeaders, conn:
mail::send_invite(
&user.email,
&user.uuid,
Some(org_id),
Some(org_id.to_string()),
Some(user_org.uuid),
&org_name,
Some(headers.user.email),
Some(invited_by_email.to_string()),
)?;
} else {
let invitation = Invitation::new(user.email);
invitation.save(&conn)?;
invitation.save(conn)?;
}
Ok(())
@ -755,6 +807,30 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase<AcceptD
err!("You cannot join this organization until you enable two-step login on your user account.")
}
// Enforce the Single Organization policy of the organization the user is trying to join
let single_org_policy_enabled =
match OrgPolicy::find_by_org_and_type(&user_org.org_uuid, OrgPolicyType::SingleOrg as i32, &conn) {
Some(p) => p.enabled,
None => false,
};
if single_org_policy_enabled && user_org.atype < UserOrgType::Admin {
let is_member_of_another_org = UserOrganization::find_any_state_by_user(&user_org.user_uuid, &conn)
.into_iter()
.filter(|uo| uo.org_uuid != user_org.org_uuid)
.count()
> 1;
if is_member_of_another_org {
err!("You may not join this organization until you leave or remove all other organizations.")
}
}
// Enforce the Single Organization policy of the other organizations the user is a member of
if OrgPolicy::is_applicable_to_user(&user_org.user_uuid, OrgPolicyType::SingleOrg, &conn) {
err!(
"You cannot join this organization because you are a member of an organization which forbids it"
)
}
user_org.status = UserOrgStatus::Accepted as i32;
user_org.save(&conn)?;
}
@ -782,6 +858,40 @@ fn accept_invite(_org_id: String, _org_user_id: String, data: JsonUpcase<AcceptD
Ok(())
}
#[post("/organizations/<org_id>/users/confirm", data = "<data>")]
fn bulk_confirm_invite(org_id: String, data: JsonUpcase<Value>, headers: AdminHeaders, conn: DbConn) -> Json<Value> {
let data = data.into_inner().data;
let mut bulk_response = Vec::new();
match data["Keys"].as_array() {
Some(keys) => {
for invite in keys {
let org_user_id = invite["Id"].as_str().unwrap_or_default();
let user_key = invite["Key"].as_str().unwrap_or_default();
let err_msg = match _confirm_invite(&org_id, org_user_id, user_key, &headers, &conn) {
Ok(_) => String::from(""),
Err(e) => format!("{:?}", e),
};
bulk_response.push(json!(
{
"Object": "OrganizationBulkConfirmResponseModel",
"Id": org_user_id,
"Error": err_msg
}
));
}
}
None => error!("No keys to confirm"),
}
Json(json!({
"Data": bulk_response,
"Object": "list",
"ContinuationToken": null
}))
}
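
As the parsing above shows, bulk_confirm_invite expects a body whose Keys array pairs each UserOrganization id with the org key encrypted for that user. A sketch of that shape with placeholder values (the real request is produced by the web-vault):

use serde_json::json;

fn main() {
    // Placeholder ids; the web-vault fills these in for real requests.
    let body = json!({
        "Keys": [
            { "Id": "example-user-org-uuid", "Key": "example-encrypted-org-key" }
        ]
    });
    println!("{}", body);
}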
#[post("/organizations/<org_id>/users/<org_user_id>/confirm", data = "<data>")]
fn confirm_invite(
org_id: String,
@ -791,8 +901,16 @@ fn confirm_invite(
conn: DbConn,
) -> EmptyResult {
let data = data.into_inner().data;
let user_key = data["Key"].as_str().unwrap_or_default();
_confirm_invite(&org_id, &org_user_id, user_key, &headers, &conn)
}
fn _confirm_invite(org_id: &str, org_user_id: &str, key: &str, headers: &AdminHeaders, conn: &DbConn) -> EmptyResult {
if key.is_empty() || org_user_id.is_empty() {
err!("Key or UserId is not set, unable to process request");
}
let mut user_to_confirm = match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn) {
let mut user_to_confirm = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn) {
Some(user) => user,
None => err!("The specified user isn't a member of the organization"),
};
@ -806,24 +924,21 @@ fn confirm_invite(
}
user_to_confirm.status = UserOrgStatus::Confirmed as i32;
user_to_confirm.akey = match data["Key"].as_str() {
Some(key) => key.to_string(),
None => err!("Invalid key provided"),
};
user_to_confirm.akey = key.to_string();
if CONFIG.mail_enabled() {
let org_name = match Organization::find_by_uuid(&org_id, &conn) {
let org_name = match Organization::find_by_uuid(org_id, conn) {
Some(org) => org.name,
None => err!("Error looking up organization."),
};
let address = match User::find_by_uuid(&user_to_confirm.user_uuid, &conn) {
let address = match User::find_by_uuid(&user_to_confirm.user_uuid, conn) {
Some(user) => user.email,
None => err!("Error looking up user."),
};
mail::send_invite_confirmed(&address, &org_name)?;
}
user_to_confirm.save(&conn)
user_to_confirm.save(conn)
}
#[get("/organizations/<org_id>/users/<org_user_id>")]
@ -924,9 +1039,40 @@ fn edit_user(
user_to_edit.save(&conn)
}
#[delete("/organizations/<org_id>/users", data = "<data>")]
fn bulk_delete_user(org_id: String, data: JsonUpcase<OrgBulkIds>, headers: AdminHeaders, conn: DbConn) -> Json<Value> {
let data: OrgBulkIds = data.into_inner().data;
let mut bulk_response = Vec::new();
for org_user_id in data.Ids {
let err_msg = match _delete_user(&org_id, &org_user_id, &headers, &conn) {
Ok(_) => String::from(""),
Err(e) => format!("{:?}", e),
};
bulk_response.push(json!(
{
"Object": "OrganizationBulkConfirmResponseModel",
"Id": org_user_id,
"Error": err_msg
}
))
}
Json(json!({
"Data": bulk_response,
"Object": "list",
"ContinuationToken": null
}))
}
#[delete("/organizations/<org_id>/users/<org_user_id>")]
fn delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
let user_to_delete = match UserOrganization::find_by_uuid_and_org(&org_user_id, &org_id, &conn) {
_delete_user(&org_id, &org_user_id, &headers, &conn)
}
fn _delete_user(org_id: &str, org_user_id: &str, headers: &AdminHeaders, conn: &DbConn) -> EmptyResult {
let user_to_delete = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn) {
Some(user) => user,
None => err!("User to delete isn't member of the organization"),
};
@ -937,14 +1083,14 @@ fn delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, conn:
if user_to_delete.atype == UserOrgType::Owner {
// Removing an owner; check that at least one other owner remains
let num_owners = UserOrganization::find_by_org_and_type(&org_id, UserOrgType::Owner as i32, &conn).len();
let num_owners = UserOrganization::find_by_org_and_type(org_id, UserOrgType::Owner as i32, conn).len();
if num_owners <= 1 {
err!("Can't delete the last owner")
}
}
user_to_delete.delete(&conn)
user_to_delete.delete(conn)
}
#[post("/organizations/<org_id>/users/<org_user_id>/delete")]
@ -952,6 +1098,38 @@ fn post_delete_user(org_id: String, org_user_id: String, headers: AdminHeaders,
delete_user(org_id, org_user_id, headers, conn)
}
#[post("/organizations/<org_id>/users/public-keys", data = "<data>")]
fn bulk_public_keys(org_id: String, data: JsonUpcase<OrgBulkIds>, _headers: AdminHeaders, conn: DbConn) -> Json<Value> {
let data: OrgBulkIds = data.into_inner().data;
let mut bulk_response = Vec::new();
// Check all received UserOrg UUIDs and find the matching User to retrieve the public key.
// If the user does not exist, ignore it and do not return any information regarding that UserOrg UUID.
// The web-vault will then ignore that user for the following steps.
for user_org_id in data.Ids {
match UserOrganization::find_by_uuid_and_org(&user_org_id, &org_id, &conn) {
Some(user_org) => match User::find_by_uuid(&user_org.user_uuid, &conn) {
Some(user) => bulk_response.push(json!(
{
"Object": "organizationUserPublicKeyResponseModel",
"Id": user_org_id,
"UserId": user.uuid,
"Key": user.public_key
}
)),
None => debug!("User doesn't exist"),
},
None => debug!("UserOrg doesn't exist"),
}
}
Json(json!({
"Data": bulk_response,
"Object": "list",
"ContinuationToken": null
}))
}
use super::ciphers::update_cipher_from_data;
use super::ciphers::CipherData;
@ -1089,7 +1267,7 @@ struct PolicyData {
enabled: bool,
#[serde(rename = "type")]
_type: i32,
data: Value,
data: Option<Value>,
}
#[put("/organizations/<org_id>/policies/<pol_type>", data = "<data>")]
@ -1107,20 +1285,52 @@ fn put_policy(
None => err!("Invalid policy type"),
};
// If enabling the TwoFactorAuthentication policy, remove this org's members that do not have 2FA enabled
if pol_type_enum == OrgPolicyType::TwoFactorAuthentication && data.enabled {
let org_list = UserOrganization::find_by_org(&org_id, &conn);
let org_members = UserOrganization::find_by_org(&org_id, &conn);
for user_org in org_list.into_iter() {
let user_twofactor_disabled = TwoFactor::find_by_user(&user_org.user_uuid, &conn).is_empty();
for member in org_members.into_iter() {
let user_twofactor_disabled = TwoFactor::find_by_user(&member.user_uuid, &conn).is_empty();
if user_twofactor_disabled && user_org.atype < UserOrgType::Admin {
// Policy only applies to non-Owner/non-Admin members who have accepted joining the org
if user_twofactor_disabled
&& member.atype < UserOrgType::Admin
&& member.status != UserOrgStatus::Invited as i32
{
if CONFIG.mail_enabled() {
let org = Organization::find_by_uuid(&user_org.org_uuid, &conn).unwrap();
let user = User::find_by_uuid(&user_org.user_uuid, &conn).unwrap();
let org = Organization::find_by_uuid(&member.org_uuid, &conn).unwrap();
let user = User::find_by_uuid(&member.user_uuid, &conn).unwrap();
mail::send_2fa_removed_from_org(&user.email, &org.name)?;
}
user_org.delete(&conn)?;
member.delete(&conn)?;
}
}
}
// If enabling the SingleOrg policy, remove this org's members that are members of other orgs
if pol_type_enum == OrgPolicyType::SingleOrg && data.enabled {
let org_members = UserOrganization::find_by_org(&org_id, &conn);
for member in org_members.into_iter() {
// Policy only applies to non-Owner/non-Admin members who have accepted joining the org
if member.atype < UserOrgType::Admin && member.status != UserOrgStatus::Invited as i32 {
let is_member_of_another_org = UserOrganization::find_any_state_by_user(&member.user_uuid, &conn)
.into_iter()
// Other UserOrganizations the user has accepted membership of
.filter(|uo| uo.uuid != member.uuid && uo.status != UserOrgStatus::Invited as i32)
.count()
> 1;
if is_member_of_another_org {
if CONFIG.mail_enabled() {
let org = Organization::find_by_uuid(&member.org_uuid, &conn).unwrap();
let user = User::find_by_uuid(&member.user_uuid, &conn).unwrap();
mail::send_single_org_removed_from_org(&user.email, &org.name)?;
}
member.delete(&conn)?;
}
}
}
}
@ -1139,75 +1349,47 @@ fn put_policy(
#[allow(unused_variables)]
#[get("/organizations/<org_id>/tax")]
fn get_organization_tax(org_id: String, _headers: Headers, _conn: DbConn) -> EmptyResult {
fn get_organization_tax(org_id: String, _headers: Headers) -> Json<Value> {
// Prevent a 404 error, which also causes Javascript errors.
err!("Only allowed when not self hosted.")
// Upstream sends "Only allowed when not self hosted." as an error message.
// If we did the same, it would also be written to the log, which is overkill.
// An empty list/data response also works fine.
Json(_empty_data_json())
}
#[get("/plans")]
fn get_plans(_headers: Headers, _conn: DbConn) -> Json<Value> {
fn get_plans(_headers: Headers) -> Json<Value> {
// Respond with a minimal JSON payload, just enough to allow the creation of a new organization.
Json(json!({
"Object": "list",
"Data": [
{
"Data": [{
"Object": "plan",
"Type": 0,
"Product": 0,
"Name": "Free",
"IsAnnual": false,
"NameLocalizationKey": "planNameFree",
"DescriptionLocalizationKey": "planDescFree",
"CanBeUsedByBusiness": false,
"BaseSeats": 2,
"BaseStorageGb": null,
"MaxCollections": 2,
"MaxUsers": 2,
"HasAdditionalSeatsOption": false,
"MaxAdditionalSeats": null,
"HasAdditionalStorageOption": false,
"MaxAdditionalStorage": null,
"HasPremiumAccessOption": false,
"TrialPeriodDays": null,
"HasSelfHost": false,
"HasPolicies": false,
"HasGroups": false,
"HasDirectory": false,
"HasEvents": false,
"HasTotp": false,
"Has2fa": false,
"HasApi": false,
"HasSso": false,
"UsersGetPremium": false,
"UpgradeSortOrder": -1,
"DisplaySortOrder": -1,
"LegacyYear": null,
"Disabled": false,
"StripePlanId": null,
"StripeSeatPlanId": null,
"StripeStoragePlanId": null,
"StripePremiumAccessPlanId": null,
"BasePrice": 0.0,
"SeatPrice": 0.0,
"AdditionalStoragePricePerGb": 0.0,
"PremiumAccessOptionPrice": 0.0
}
],
"DescriptionLocalizationKey": "planDescFree"
}],
"ContinuationToken": null
}))
}
#[get("/plans/sales-tax-rates")]
fn get_plans_tax_rates(_headers: Headers, _conn: DbConn) -> Json<Value> {
fn get_plans_tax_rates(_headers: Headers) -> Json<Value> {
// Prevent a 404 error, which also causes JavaScript errors.
Json(json!({
Json(_empty_data_json())
}
fn _empty_data_json() -> Value {
json!({
"Object": "list",
"Data": [],
"ContinuationToken": null
}))
})
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
#[allow(non_snake_case, dead_code)]
struct OrgImportGroupData {
Name: String, // "GroupName"
ExternalId: String, // "cn=GroupName,ou=Groups,dc=example,dc=com"
@ -1218,6 +1400,7 @@ struct OrgImportGroupData {
#[allow(non_snake_case)]
struct OrgImportUserData {
Email: String, // "user@maildomain.net"
#[allow(dead_code)]
ExternalId: String, // "uid=user,ou=People,dc=example,dc=com"
Deleted: bool,
}
@ -1225,6 +1408,7 @@ struct OrgImportUserData {
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct OrgImportData {
#[allow(dead_code)]
Groups: Vec<OrgImportGroupData>,
OverwriteExisting: bool,
Users: Vec<OrgImportUserData>,

38
src/api/core/sends.rs

@ -7,7 +7,7 @@ use rocket_contrib::json::Json;
use serde_json::Value;
use crate::{
api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, UpdateType},
auth::{Headers, Host},
db::{models::*, DbConn, DbPool},
util::SafeString,
@ -42,21 +42,21 @@ pub fn purge_sends(pool: DbPool) {
#[derive(Deserialize)]
#[allow(non_snake_case)]
pub struct SendData {
pub Type: i32,
pub Key: String,
pub Password: Option<String>,
pub MaxAccessCount: Option<i32>,
pub ExpirationDate: Option<DateTime<Utc>>,
pub DeletionDate: DateTime<Utc>,
pub Disabled: bool,
pub HideEmail: Option<bool>,
struct SendData {
Type: i32,
Key: String,
Password: Option<String>,
MaxAccessCount: Option<NumberOrString>,
ExpirationDate: Option<DateTime<Utc>>,
DeletionDate: DateTime<Utc>,
Disabled: bool,
HideEmail: Option<bool>,
// Data field
pub Name: String,
pub Notes: Option<String>,
pub Text: Option<Value>,
pub File: Option<Value>,
Name: String,
Notes: Option<String>,
Text: Option<Value>,
File: Option<Value>,
}
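The MaxAccessCount change above relies on the NumberOrString helper imported at the top of the file from crate::api. As a hedged sketch (the real type lives in src/api and its exact definition may differ), it is an untagged serde enum along these lines:

// Illustrative sketch of a NumberOrString-style helper; names and the
// error type are assumptions, not the exact upstream definitions.
#[derive(Deserialize)]
#[serde(untagged)]
enum NumberOrString {
    Number(i32),
    String(String),
}

impl NumberOrString {
    // Accept either a JSON number or a numeric string, since web clients
    // are inconsistent about which one they send.
    fn into_i32(self) -> Result<i32, std::num::ParseIntError> {
        match self {
            NumberOrString::Number(n) => Ok(n),
            NumberOrString::String(s) => s.parse::<i32>(),
        }
    }
}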
/// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
@ -119,7 +119,10 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc());
send.user_uuid = Some(user_uuid);
send.notes = data.Notes;
send.max_access_count = data.MaxAccessCount;
send.max_access_count = match data.MaxAccessCount {
Some(m) => Some(m.into_i32()?),
_ => None,
};
send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
send.disabled = data.Disabled;
send.hide_email = data.HideEmail;
@ -414,7 +417,10 @@ fn put_send(id: String, data: JsonUpcase<SendData>, headers: Headers, conn: DbCo
send.akey = data.Key;
send.deletion_date = data.DeletionDate.naive_utc();
send.notes = data.Notes;
send.max_access_count = data.MaxAccessCount;
send.max_access_count = match data.MaxAccessCount {
Some(m) => Some(m.into_i32()?),
_ => None,
};
send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
send.hide_email = data.HideEmail;
send.disabled = data.Disabled;

18
src/api/core/two_factor/email.rs

@ -58,7 +58,7 @@ pub fn send_token(user_uuid: &str, conn: &DbConn) -> EmptyResult {
let type_ = TwoFactorType::Email as i32;
let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, type_, conn).map_res("Two factor not found")?;
let generated_token = crypto::generate_token(CONFIG.email_token_size())?;
let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
let mut twofactor_data = EmailTokenData::from_json(&twofactor.data)?;
twofactor_data.set_token(generated_token);
@ -123,7 +123,7 @@ fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, conn: DbConn) -
tf.delete(&conn)?;
}
let generated_token = crypto::generate_token(CONFIG.email_token_size())?;
let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
let twofactor_data = EmailTokenData::new(data.Email, generated_token);
// Uses EmailVerificationChallenge as type to show that it's not verified yet.
@ -309,18 +309,4 @@ mod tests {
// If it's smaller than 3 characters it should only show asterisks.
assert_eq!(result, "***@example.ext");
}
#[test]
fn test_token() {
let result = crypto::generate_token(19).unwrap();
assert_eq!(result.chars().count(), 19);
}
#[test]
fn test_token_too_large() {
let result = crypto::generate_token(20);
assert!(result.is_err(), "too large token should give an error");
}
}

33
src/api/core/two_factor/mod.rs

@ -1,3 +1,4 @@
use chrono::{Duration, Utc};
use data_encoding::BASE32;
use rocket::Route;
use rocket_contrib::json::Json;
@ -7,7 +8,7 @@ use crate::{
api::{JsonResult, JsonUpcase, NumberOrString, PasswordData},
auth::Headers,
crypto,
db::{models::*, DbConn},
db::{models::*, DbConn, DbPool},
mail, CONFIG,
};
@ -156,3 +157,33 @@ fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, c
fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
disable_twofactor(data, headers, conn)
}
pub fn send_incomplete_2fa_notifications(pool: DbPool) {
debug!("Sending notifications for incomplete 2FA logins");
if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() {
return;
}
let conn = match pool.get() {
Ok(conn) => conn,
_ => {
error!("Failed to get DB connection in send_incomplete_2fa_notifications()");
return;
}
};
let now = Utc::now().naive_utc();
let time_limit = Duration::minutes(CONFIG.incomplete_2fa_time_limit());
let incomplete_logins = TwoFactorIncomplete::find_logins_before(&(now - time_limit), &conn);
for login in incomplete_logins {
let user = User::find_by_uuid(&login.user_uuid, &conn).expect("User not found");
info!(
"User {} did not complete a 2FA login within the configured time limit. IP: {}",
user.email, login.ip_address
);
mail::send_incomplete_2fa_login(&user.email, &login.ip_address, &login.login_time, &login.device_name)
.expect("Error sending incomplete 2FA email");
login.delete(&conn).expect("Error deleting incomplete 2FA record");
}
}
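This function is presumably invoked from the job scheduler alongside purge_sends, driven by the incomplete_2fa_schedule config added further down. A hedged sketch of that wiring, assuming the job_scheduler crate and a cloneable DbPool (the actual setup lives in main.rs and may differ):

// Hypothetical wiring; illustrative only.
use job_scheduler::{Job, JobScheduler};

fn schedule_incomplete_2fa_job(sched: &mut JobScheduler, pool: DbPool) {
    // CONFIG.incomplete_2fa_schedule() defaults to "30 * * * * *",
    // i.e. once per minute at second 30.
    sched.add(Job::new(CONFIG.incomplete_2fa_schedule().parse().unwrap(), move || {
        send_incomplete_2fa_notifications(pool.clone());
    }));
}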

14
src/api/core/two_factor/webauthn.rs

@ -1,6 +1,7 @@
use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value;
use url::Url;
use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState, RegistrationState, Webauthn};
use crate::{
@ -22,19 +23,18 @@ pub fn routes() -> Vec<Route> {
struct WebauthnConfig {
url: String,
origin: Url,
rpid: String,
}
impl WebauthnConfig {
fn load() -> Webauthn<Self> {
let domain = CONFIG.domain();
let domain_origin = CONFIG.domain_origin();
Webauthn::new(Self {
rpid: reqwest::Url::parse(&domain)
.map(|u| u.domain().map(str::to_owned))
.ok()
.flatten()
.unwrap_or_default(),
rpid: Url::parse(&domain).map(|u| u.domain().map(str::to_owned)).ok().flatten().unwrap_or_default(),
url: domain,
origin: Url::parse(&domain_origin).unwrap(),
})
}
}
@ -44,8 +44,8 @@ impl webauthn_rs::WebauthnConfig for WebauthnConfig {
&self.url
}
fn get_origin(&self) -> &str {
&self.url
fn get_origin(&self) -> &Url {
&self.origin
}
fn get_relying_party_id(&self) -> &str {

91
src/api/icons.rs

@ -10,7 +10,11 @@ use std::{
use once_cell::sync::Lazy;
use regex::Regex;
use reqwest::{blocking::Client, blocking::Response, header};
use rocket::{http::ContentType, response::Content, Route};
use rocket::{
http::ContentType,
response::{Content, Redirect},
Route,
};
use crate::{
error::Error,
@ -19,7 +23,13 @@ use crate::{
};
pub fn routes() -> Vec<Route> {
routes![icon]
match CONFIG.icon_service().as_str() {
"internal" => routes![icon_internal],
"bitwarden" => routes![icon_bitwarden],
"duckduckgo" => routes![icon_duckduckgo],
"google" => routes![icon_google],
_ => routes![icon_custom],
}
}
static CLIENT: Lazy<Client> = Lazy::new(|| {
@ -50,8 +60,51 @@ static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+
// Special HashMap which holds the user-defined Regex, to speed up matching.
static ICON_BLACKLIST_REGEX: Lazy<RwLock<HashMap<String, Regex>>> = Lazy::new(|| RwLock::new(HashMap::new()));
fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
if !is_valid_domain(domain) {
warn!("Invalid domain: {}", domain);
return None;
}
if is_domain_blacklisted(domain) {
return None;
}
let url = template.replace("{}", domain);
match CONFIG.icon_redirect_code() {
301 => Some(Redirect::moved(url)), // legacy permanent redirect
302 => Some(Redirect::found(url)), // legacy temporary redirect
307 => Some(Redirect::temporary(url)),
308 => Some(Redirect::permanent(url)),
_ => {
error!("Unexpected redirect code {}", CONFIG.icon_redirect_code());
None
}
}
}
#[get("/<domain>/icon.png")]
fn icon(domain: String) -> Cached<Content<Vec<u8>>> {
fn icon_custom(domain: String) -> Option<Redirect> {
icon_redirect(&domain, &CONFIG.icon_service())
}
#[get("/<domain>/icon.png")]
fn icon_bitwarden(domain: String) -> Option<Redirect> {
icon_redirect(&domain, "https://icons.bitwarden.net/{}/icon.png")
}
#[get("/<domain>/icon.png")]
fn icon_duckduckgo(domain: String) -> Option<Redirect> {
icon_redirect(&domain, "https://icons.duckduckgo.com/ip3/{}.ico")
}
#[get("/<domain>/icon.png")]
fn icon_google(domain: String) -> Option<Redirect> {
icon_redirect(&domain, "https://www.google.com/s2/favicons?domain={}&sz=32")
}
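The `{}` placeholder handling in icon_redirect is plain string substitution; a quick illustrative check:

#[test]
fn icon_template_substitution() {
    // Illustrative: what icon_redirect builds for a given domain.
    let template = "https://icons.duckduckgo.com/ip3/{}.ico";
    assert_eq!(
        template.replace("{}", "example.com"),
        "https://icons.duckduckgo.com/ip3/example.com.ico"
    );
}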
#[get("/<domain>/icon.png")]
fn icon_internal(domain: String) -> Cached<Content<Vec<u8>>> {
const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");
if !is_valid_domain(&domain) {
@ -59,14 +112,19 @@ fn icon(domain: String) -> Cached<Content<Vec<u8>>> {
return Cached::ttl(
Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
CONFIG.icon_cache_negttl(),
true,
);
}
match get_icon(&domain) {
Some((icon, icon_type)) => {
Cached::ttl(Content(ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl())
Cached::ttl(Content(ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl(), true)
}
_ => Cached::ttl(Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), CONFIG.icon_cache_negttl()),
_ => Cached::ttl(
Content(ContentType::new("image", "png"), FALLBACK_ICON.to_vec()),
CONFIG.icon_cache_negttl(),
true,
),
}
}
@ -250,7 +308,7 @@ fn is_domain_blacklisted(domain: &str) -> bool {
// Use the pre-generated Regex stored in a Lazy HashMap.
if regex.is_match(domain) {
warn!("Blacklisted domain: {:#?} matched {:#?}", domain, blacklist);
debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
is_blacklisted = true;
}
}
@ -286,7 +344,7 @@ fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
Some((icon, icon_type.unwrap_or("x-icon").to_string()))
}
Err(e) => {
error!("Error downloading icon: {:?}", e);
warn!("Unable to download icon: {:?}", e);
let miss_indicator = path + ".miss";
save_icon(&miss_indicator, &[]);
None
@ -555,7 +613,7 @@ fn get_page(url: &str) -> Result<Response, Error> {
fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
if is_domain_blacklisted(url::Url::parse(url).unwrap().host_str().unwrap_or_default()) {
err!("Favicon rel linked to a blacklisted domain!");
warn!("Favicon '{}' resolves to a blacklisted domain or IP!", url);
}
let mut client = CLIENT.get(url);
@ -563,7 +621,10 @@ fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
client = client.header("Referer", referer)
}
client.send()?.error_for_status().map_err(Into::into)
match client.send() {
Ok(c) => c.error_for_status().map_err(Into::into),
Err(e) => err_silent!(format!("{}", e)),
}
}
/// Returns an Integer with the priority of the icon type to prefer.
@ -647,7 +708,7 @@ fn parse_sizes(sizes: Option<&str>) -> (u16, u16) {
fn download_icon(domain: &str) -> Result<(Vec<u8>, Option<&str>), Error> {
if is_domain_blacklisted(domain) {
err!("Domain is blacklisted", domain)
err_silent!("Domain is blacklisted", domain)
}
let icon_result = get_icon_url(domain)?;
@ -676,7 +737,7 @@ fn download_icon(domain: &str) -> Result<(Vec<u8>, Option<&str>), Error> {
break;
}
}
_ => warn!("Extracted icon from data:image uri is invalid"),
_ => debug!("Extracted icon from data:image uri is invalid"),
};
} else {
match get_page_with_referer(&icon.href, &icon_result.referer) {
@ -692,13 +753,13 @@ fn download_icon(domain: &str) -> Result<(Vec<u8>, Option<&str>), Error> {
info!("Downloaded icon from {}", icon.href);
break;
}
_ => warn!("Download failed for {}", icon.href),
Err(e) => debug!("{:?}", e),
};
}
}
if buffer.is_empty() {
err!("Empty response downloading icon")
err_silent!("Empty response or unable find a valid icon", domain);
}
Ok((buffer, icon_type))
@ -710,10 +771,10 @@ fn save_icon(path: &str, icon: &[u8]) {
f.write_all(icon).expect("Error writing icon file");
}
Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
create_dir_all(&CONFIG.icon_cache_folder()).expect("Error creating icon cache");
create_dir_all(&CONFIG.icon_cache_folder()).expect("Error creating icon cache folder");
}
Err(e) => {
warn!("Icon save error: {:?}", e);
warn!("Unable to save icon: {:?}", e);
}
}
}

123
src/api/identity.rs

@ -1,4 +1,4 @@
use chrono::Local;
use chrono::Utc;
use num_traits::FromPrimitive;
use rocket::{
http::Status,
@ -53,6 +53,13 @@ fn login(data: Form<ConnectData>, conn: DbConn, ip: ClientIp) -> JsonResult {
_authorization_login(data, conn, &ip)
}
"client_credentials" => {
_check_is_some(&data.client_id, "client_id cannot be blank")?;
_check_is_some(&data.client_secret, "client_secret cannot be blank")?;
_check_is_some(&data.scope, "scope cannot be blank")?;
_api_key_login(data, conn, &ip)
}
t => err!("Invalid type", t),
}
}
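For reference, the new client_credentials branch expects the same form fields the Bitwarden CLI sends for API-key logins. Sketched below as the ConnectData the handler must end up with; all values are placeholders, shapes only:

// Placeholder values, illustrative only. client_id carries the "user."
// prefix that _api_key_login strips off, and scope must be exactly "api".
let data = ConnectData {
    grant_type: "client_credentials".into(),
    client_id: Some("user.00000000-0000-0000-0000-000000000000".into()),
    client_secret: Some("<api key>".into()),
    scope: Some("api".into()),
    ..Default::default()
};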
@ -64,13 +71,15 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
// Get device by refresh token
let mut device = Device::find_by_refresh_token(&token, &conn).map_res("Invalid refresh token")?;
// COMMON
let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();
let orgs = UserOrganization::find_by_user(&user.uuid, &conn);
let (access_token, expires_in) = device.refresh_tokens(&user, orgs);
let scope = "api offline_access";
let scope_vec = vec!["api".into(), "offline_access".into()];
// Common
let user = User::find_by_uuid(&device.user_uuid, &conn).unwrap();
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn);
let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
device.save(&conn)?;
Ok(Json(json!({
"access_token": access_token,
"expires_in": expires_in,
@ -82,7 +91,7 @@ fn _refresh_login(data: ConnectData, conn: DbConn) -> JsonResult {
"Kdf": user.client_kdf_type,
"KdfIterations": user.client_kdf_iter,
"ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
"scope": "api offline_access",
"scope": scope,
"unofficialServer": true,
})))
}
@ -114,7 +123,7 @@ fn _authorization_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonR
Ok(_) => {
let expiry = token.exp;
let user_email = token.email;
let now = Local::now();
let now = Utc::now().naive_utc();
// COMMON
let user = User::find_by_mail(&user_email, &conn).unwrap();
@ -138,7 +147,7 @@ fn _authorization_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonR
let mut result = json!({
"access_token": access_token,
"expires_in": expiry - now.naive_utc().timestamp(),
"expires_in": expiry - now.timestamp(),
"token_type": "Bearer",
"refresh_token": refresh_token,
"Key": user.akey,
@ -172,6 +181,10 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
if scope != "api offline_access" {
err!("Scope not supported")
}
let scope_vec = vec!["api".into(), "offline_access".into()];
// Ratelimit the login
crate::ratelimit::check_limit_login(&ip.ip)?;
// Get the user
let username = data.username.as_ref().unwrap();
@ -200,10 +213,9 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
err!("Organization policy requires SSO sign in");
}
let now = Local::now();
let now = Utc::now().naive_utc();
if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() {
let now = now.naive_utc();
if user.last_verifying_at.is_none()
|| now.signed_duration_since(user.last_verifying_at.unwrap()).num_seconds()
> CONFIG.signups_verify_resend_time() as i64
@ -245,9 +257,8 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
}
// Common
let orgs = UserOrganization::find_by_user(&user.uuid, &conn);
let (access_token, expires_in) = device.refresh_tokens(&user, orgs);
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn);
let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
device.save(&conn)?;
let mut result = json!({
@ -262,7 +273,7 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
"Kdf": user.client_kdf_type,
"KdfIterations": user.client_kdf_iter,
"ResetMasterPassword": false,// TODO: Same as above
"scope": "api offline_access",
"scope": scope,
"unofficialServer": true,
});
@ -274,6 +285,76 @@ fn _password_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult
Ok(Json(result))
}
fn _api_key_login(data: ConnectData, conn: DbConn, ip: &ClientIp) -> JsonResult {
// Validate scope
let scope = data.scope.as_ref().unwrap();
if scope != "api" {
err!("Scope not supported")
}
let scope_vec = vec!["api".into()];
// Ratelimit the login
crate::ratelimit::check_limit_login(&ip.ip)?;
// Get the user via the client_id
let client_id = data.client_id.as_ref().unwrap();
let user_uuid = match client_id.strip_prefix("user.") {
Some(uuid) => uuid,
None => err!("Malformed client_id", format!("IP: {}.", ip.ip)),
};
let user = match User::find_by_uuid(user_uuid, &conn) {
Some(user) => user,
None => err!("Invalid client_id", format!("IP: {}.", ip.ip)),
};
// Check if the user is disabled
if !user.enabled {
err!("This user has been disabled (API key login)", format!("IP: {}. Username: {}.", ip.ip, user.email))
}
// Check API key. Note that API key logins bypass 2FA.
let client_secret = data.client_secret.as_ref().unwrap();
if !user.check_valid_api_key(client_secret) {
err!("Incorrect client_secret", format!("IP: {}. Username: {}.", ip.ip, user.email))
}
let (mut device, new_device) = get_device(&data, &conn, &user);
if CONFIG.mail_enabled() && new_device {
let now = Utc::now().naive_utc();
if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name) {
error!("Error sending new device email: {:#?}", e);
if CONFIG.require_device_email() {
err!("Could not send login notification email. Please contact your administrator.")
}
}
}
// Common
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &conn);
let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
device.save(&conn)?;
info!("User {} logged in successfully via API key. IP: {}", user.email, ip.ip);
// Note: No refresh_token is returned. The CLI just repeats the
// client_credentials login flow when the existing token expires.
Ok(Json(json!({
"access_token": access_token,
"expires_in": expires_in,
"token_type": "Bearer",
"Key": user.akey,
"PrivateKey": user.private_key,
"Kdf": user.client_kdf_type,
"KdfIterations": user.client_kdf_iter,
"ResetMasterPassword": false, // TODO: Same as above
"scope": scope,
"unofficialServer": true,
})))
}
/// Retrieves an existing device or creates a new device from ConnectData and the User
fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool) {
// On iOS, device_type sends "iOS", on others it sends a number
@ -317,6 +398,8 @@ fn twofactor_auth(
return Ok(None);
}
TwoFactorIncomplete::mark_incomplete(user_uuid, &device.uuid, &device.name, ip, conn)?;
let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one
@ -360,6 +443,8 @@ fn twofactor_auth(
_ => err!("Invalid two factor provider"),
}
TwoFactorIncomplete::mark_complete(user_uuid, &device.uuid, conn)?;
if !CONFIG.disable_2fa_remember() && remember == 1 {
Ok(Some(device.refresh_twofactor_remember()))
} else {
@ -466,17 +551,20 @@ fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &DbConn) -> Api
Ok(result)
}
// https://github.com/bitwarden/jslib/blob/master/common/src/models/request/tokenRequest.ts
// https://github.com/bitwarden/mobile/blob/master/src/Core/Models/Request/TokenRequest.cs
#[derive(Debug, Clone, Default)]
#[allow(non_snake_case)]
struct ConnectData {
grant_type: String, // refresh_token, password
// refresh_token, password, client_credentials (API key)
grant_type: String,
// Needed for grant_type="refresh_token"
refresh_token: Option<String>,
// Needed for grant_type="password"
// Needed for grant_type = "password" | "client_credentials"
client_id: Option<String>, // web, cli, desktop, browser, mobile
client_secret: Option<String>, // API key login (cli only)
password: Option<String>,
scope: Option<String>,
username: Option<String>,
@ -510,6 +598,7 @@ impl<'f> FromForm<'f> for ConnectData {
"granttype" => form.grant_type = value,
"refreshtoken" => form.refresh_token = Some(value),
"clientid" => form.client_id = Some(value),
"clientsecret" => form.client_secret = Some(value),
"password" => form.password = Some(value),
"scope" => form.scope = Some(value),
"username" => form.username = Some(value),

2
src/api/mod.rs

@ -13,6 +13,8 @@ pub use crate::api::{
core::purge_sends,
core::purge_trashed_ciphers,
core::routes as core_routes,
core::two_factor::send_incomplete_2fa_notifications,
core::{emergency_notification_reminder_job, emergency_request_timeout_job},
icons::routes as icons_routes,
identity::routes as identity_routes,
notifications::routes as notifications_routes,

4
src/api/notifications.rs

@ -4,7 +4,7 @@ use rocket::Route;
use rocket_contrib::json::Json;
use serde_json::Value as JsonValue;
use crate::{api::EmptyResult, auth::Headers, db::DbConn, Error, CONFIG};
use crate::{api::EmptyResult, auth::Headers, Error, CONFIG};
pub fn routes() -> Vec<Route> {
routes![negotiate, websockets_err]
@ -30,7 +30,7 @@ fn websockets_err() -> EmptyResult {
}
#[post("/hub/negotiate")]
fn negotiate(_headers: Headers, _conn: DbConn) -> Json<JsonValue> {
fn negotiate(_headers: Headers) -> Json<JsonValue> {
use crate::crypto;
use data_encoding::BASE64URL;

17
src/api/web.rs

@ -22,14 +22,15 @@ pub fn routes() -> Vec<Route> {
#[get("/")]
fn web_index() -> Cached<Option<NamedFile>> {
Cached::short(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join("index.html")).ok())
Cached::short(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join("index.html")).ok(), false)
}
#[get("/app-id.json")]
fn app_id() -> Cached<Content<Json<Value>>> {
let content_type = ContentType::new("application", "fido.trusted-apps+json");
Cached::long(Content(
Cached::long(
Content(
content_type,
Json(json!({
"trustedFacets": [
@ -51,12 +52,14 @@ fn app_id() -> Cached<Content<Json<Value>>> {
"android:apk-key-hash:dUGFzUzf3lmHSLBDBIv+WaFyZMI" ]
}]
})),
))
),
true,
)
}
#[get("/<p..>", rank = 10)] // Only match this if the other routes don't match
fn web_files(p: PathBuf) -> Cached<Option<NamedFile>> {
Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).ok())
Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).ok(), true)
}
#[get("/attachments/<uuid>/<file_id>")]
@ -64,15 +67,17 @@ fn attachments(uuid: SafeString, file_id: SafeString) -> Option<NamedFile> {
NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file_id)).ok()
}
// We use DbConn here to let the alive healthcheck also verify the database connection.
use crate::db::DbConn;
#[get("/alive")]
fn alive() -> Json<String> {
fn alive(_conn: DbConn) -> Json<String> {
use crate::util::format_date;
use chrono::Utc;
Json(format_date(&Utc::now().naive_utc()))
}
#[get("/bwrs_static/<filename>")]
#[get("/vw_static/<filename>")]
fn static_files(filename: String) -> Result<Content<&'static [u8]>, Error> {
match filename.as_ref() {
"mail-github.png" => Ok(Content(ContentType::PNG, include_bytes!("../static/images/mail-github.png"))),

43
src/auth.rs

@ -22,6 +22,8 @@ static JWT_HEADER: Lazy<Header> = Lazy::new(|| Header::new(JWT_ALGORITHM));
pub static JWT_LOGIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|login", CONFIG.domain_origin()));
static JWT_INVITE_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|invite", CONFIG.domain_origin()));
static JWT_EMERGENCY_ACCESS_INVITE_ISSUER: Lazy<String> =
Lazy::new(|| format!("{}|emergencyaccessinvite", CONFIG.domain_origin()));
static JWT_DELETE_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|delete", CONFIG.domain_origin()));
static JWT_VERIFYEMAIL_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|verifyemail", CONFIG.domain_origin()));
static JWT_ADMIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|admin", CONFIG.domain_origin()));
@ -75,6 +77,10 @@ pub fn decode_invite(token: &str) -> Result<InviteJwtClaims, Error> {
decode_jwt(token, JWT_INVITE_ISSUER.to_string())
}
pub fn decode_emergency_access_invite(token: &str) -> Result<EmergencyAccessInviteJwtClaims, Error> {
decode_jwt(token, JWT_EMERGENCY_ACCESS_INVITE_ISSUER.to_string())
}
pub fn decode_delete(token: &str) -> Result<BasicJwtClaims, Error> {
decode_jwt(token, JWT_DELETE_ISSUER.to_string())
}
@ -159,6 +165,43 @@ pub fn generate_invite_claims(
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct EmergencyAccessInviteJwtClaims {
// Not before
pub nbf: i64,
// Expiration time
pub exp: i64,
// Issuer
pub iss: String,
// Subject
pub sub: String,
pub email: String,
pub emer_id: Option<String>,
pub grantor_name: Option<String>,
pub grantor_email: Option<String>,
}
pub fn generate_emergency_access_invite_claims(
uuid: String,
email: String,
emer_id: Option<String>,
grantor_name: Option<String>,
grantor_email: Option<String>,
) -> EmergencyAccessInviteJwtClaims {
let time_now = Utc::now().naive_utc();
EmergencyAccessInviteJwtClaims {
nbf: time_now.timestamp(),
exp: (time_now + Duration::days(5)).timestamp(),
iss: JWT_EMERGENCY_ACCESS_INVITE_ISSUER.to_string(),
sub: uuid,
email,
emer_id,
grantor_name,
grantor_email,
}
}
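A hedged usage sketch for these claims, assuming the crate's existing encode_jwt helper (not shown in this diff) that the other claim types use:

// Hypothetical round trip; encode_jwt is assumed from src/auth.rs.
let claims = generate_emergency_access_invite_claims(
    "grantee-uuid".into(),
    "grantee@example.com".into(),
    Some("emer-uuid".into()),
    Some("Alice".into()),
    Some("alice@example.com".into()),
);
let token = encode_jwt(&claims);
// Valid immediately (nbf = now) and for 5 days (exp = now + 5 days).
let decoded = decode_emergency_access_invite(&token).expect("valid token");
assert_eq!(decoded.email, "grantee@example.com");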
#[derive(Debug, Serialize, Deserialize)]
pub struct BasicJwtClaims {
// Not before

247
src/config.rs

@ -2,7 +2,6 @@ use std::process::exit;
use std::sync::RwLock;
use once_cell::sync::Lazy;
use regex::Regex;
use reqwest::Url;
use crate::{
@ -23,21 +22,6 @@ pub static CONFIG: Lazy<Config> = Lazy::new(|| {
})
});
static PRIVACY_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"[\w]").unwrap());
const PRIVACY_CONFIG: &[&str] = &[
"allowed_iframe_ancestors",
"database_url",
"domain_origin",
"domain_path",
"domain",
"helo_name",
"org_creation_users",
"signups_domains_whitelist",
"smtp_from",
"smtp_host",
"smtp_username",
];
pub type Pass = String;
macro_rules! make_config {
@ -61,7 +45,7 @@ macro_rules! make_config {
_overrides: Vec<String>,
}
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
#[derive(Clone, Default, Deserialize, Serialize)]
pub struct ConfigBuilder {
$($(
#[serde(skip_serializing_if = "Option::is_none")]
@ -133,19 +117,6 @@ macro_rules! make_config {
builder
}
/// Returns a new builder with all the elements from self,
/// except those that are equal in both sides
fn _remove(&self, other: &Self) -> Self {
let mut builder = ConfigBuilder::default();
$($(
if &self.$name != &other.$name {
builder.$name = self.$name.clone();
}
)+)+
builder
}
fn build(&self) -> ConfigItems {
let mut config = ConfigItems::default();
let _domain_set = self.domain.is_some();
@ -161,12 +132,13 @@ macro_rules! make_config {
}
}
#[derive(Debug, Clone, Default)]
pub struct ConfigItems { $($(pub $name: make_config!{@type $ty, $none_action}, )+)+ }
#[derive(Clone, Default)]
struct ConfigItems { $($( $name: make_config!{@type $ty, $none_action}, )+)+ }
#[allow(unused)]
impl Config {
$($(
$(#[doc = $doc])+
pub fn $name(&self) -> make_config!{@type $ty, $none_action} {
self.inner.read().unwrap().config.$name.clone()
}
@ -189,38 +161,91 @@ macro_rules! make_config {
fn _get_doc(doc: &str) -> serde_json::Value {
let mut split = doc.split("|>").map(str::trim);
json!({
"name": split.next(),
"description": split.next()
// We do not use the json!() macro here since that causes a lot of macro recursion.
// This slows down compile time and it also causes issues with rust-analyzer
serde_json::Value::Object({
let mut doc_json = serde_json::Map::new();
doc_json.insert("name".into(), serde_json::to_value(split.next()).unwrap());
doc_json.insert("description".into(), serde_json::to_value(split.next()).unwrap());
doc_json
})
}
json!([ $({
"group": stringify!($group),
"grouptoggle": stringify!($($group_enabled)?),
"groupdoc": make_config!{ @show $($groupdoc)? },
"elements": [
$( {
"editable": $editable,
"name": stringify!($name),
"value": cfg.$name,
"default": def.$name,
"type": _get_form_type(stringify!($ty)),
"doc": _get_doc(concat!($($doc),+)),
"overridden": overriden.contains(&stringify!($name).to_uppercase()),
}, )+
]}, )+ ])
// We do not use the json!() macro here since that causes a lot of macro recursion.
// This slows down compile time and it also causes issues with rust-analyzer
serde_json::Value::Array(<[_]>::into_vec(Box::new([
$(
serde_json::Value::Object({
let mut group = serde_json::Map::new();
group.insert("group".into(), (stringify!($group)).into());
group.insert("grouptoggle".into(), (stringify!($($group_enabled)?)).into());
group.insert("groupdoc".into(), (make_config!{ @show $($groupdoc)? }).into());
group.insert("elements".into(), serde_json::Value::Array(<[_]>::into_vec(Box::new([
$(
serde_json::Value::Object({
let mut element = serde_json::Map::new();
element.insert("editable".into(), ($editable).into());
element.insert("name".into(), (stringify!($name)).into());
element.insert("value".into(), serde_json::to_value(cfg.$name).unwrap());
element.insert("default".into(), serde_json::to_value(def.$name).unwrap());
element.insert("type".into(), (_get_form_type(stringify!($ty))).into());
element.insert("doc".into(), (_get_doc(concat!($($doc),+))).into());
element.insert("overridden".into(), (overriden.contains(&stringify!($name).to_uppercase())).into());
element
}),
)+
]))));
group
}),
)+
])))
}
pub fn get_support_json(&self) -> serde_json::Value {
// Define which config keys need to be masked.
// Pass types will always be masked and no need to put them in the list.
// Besides Pass, only String types will be masked via _privacy_mask.
const PRIVACY_CONFIG: &[&str] = &[
"allowed_iframe_ancestors",
"database_url",
"domain_origin",
"domain_path",
"domain",
"helo_name",
"org_creation_users",
"signups_domains_whitelist",
"smtp_from",
"smtp_host",
"smtp_username",
];
let cfg = {
let inner = &self.inner.read().unwrap();
inner.config.clone()
};
json!({ $($(
stringify!($name): make_config!{ @supportstr $name, cfg.$name, $ty, $none_action },
)+)+ })
/// We map over the string and mask all alphanumeric, '_' and '-' characters with '*'.
/// This is the fastest way (microseconds) compared to using a regex (which takes milliseconds).
fn _privacy_mask(value: &str) -> String {
value.chars().map(|c|
match c {
c if c.is_alphanumeric() => '*',
'_' => '*',
'-' => '*',
_ => c
}
).collect::<String>()
}
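// Illustrative: _privacy_mask("smtp.example.com") yields "****.*******.***";
// separators survive while alphanumerics, '_' and '-' are masked.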
serde_json::Value::Object({
let mut json = serde_json::Map::new();
$($(
json.insert(stringify!($name).into(), make_config!{ @supportstr $name, cfg.$name, $ty, $none_action });
)+)+;
json
})
}
pub fn get_overrides(&self) -> Vec<String> {
@ -228,29 +253,30 @@ macro_rules! make_config {
let inner = &self.inner.read().unwrap();
inner._overrides.clone()
};
overrides
}
}
};
// Support string print
( @supportstr $name:ident, $value:expr, Pass, option ) => { $value.as_ref().map(|_| String::from("***")) }; // Optional pass, we map to an Option<String> with "***"
( @supportstr $name:ident, $value:expr, Pass, $none_action:ident ) => { String::from("***") }; // Required pass, we return "***"
( @supportstr $name:ident, $value:expr, $ty:ty, option ) => { // Optional other value, we return as is or convert to string to apply the privacy config
( @supportstr $name:ident, $value:expr, Pass, option ) => { serde_json::to_value($value.as_ref().map(|_| String::from("***"))).unwrap() }; // Optional pass, we map to an Option<String> with "***"
( @supportstr $name:ident, $value:expr, Pass, $none_action:ident ) => { "***".into() }; // Required pass, we return "***"
( @supportstr $name:ident, $value:expr, String, option ) => { // Optional other value, we return as is or convert to string to apply the privacy config
if PRIVACY_CONFIG.contains(&stringify!($name)) {
json!($value.as_ref().map(|x| PRIVACY_REGEX.replace_all(&x.to_string(), "${1}*").to_string()))
serde_json::to_value($value.as_ref().map(|x| _privacy_mask(x) )).unwrap()
} else {
json!($value)
serde_json::to_value($value).unwrap()
}
};
( @supportstr $name:ident, $value:expr, $ty:ty, $none_action:ident ) => { // Required other value, we return as is or convert to string to apply the privacy config
( @supportstr $name:ident, $value:expr, String, $none_action:ident ) => { // Required other value, we return as is or convert to string to apply the privacy config
if PRIVACY_CONFIG.contains(&stringify!($name)) {
json!(PRIVACY_REGEX.replace_all(&$value.to_string(), "${1}*").to_string())
_privacy_mask(&$value).into()
} else {
json!($value)
($value).into()
}
};
( @supportstr $name:ident, $value:expr, $ty:ty, option ) => { serde_json::to_value($value).unwrap() }; // Optional other value, returned as-is (privacy masking only applies to String and Pass types)
( @supportstr $name:ident, $value:expr, $ty:ty, $none_action:ident ) => { ($value).into() }; // Required other value, returned as-is (privacy masking only applies to String and Pass types)
// Group or empty string
( @show ) => { "" };
@ -300,8 +326,6 @@ make_config! {
data_folder: String, false, def, "data".to_string();
/// Database URL
database_url: String, false, auto, |c| format!("{}/{}", c.data_folder, "db.sqlite3");
/// Database connection pool size
database_max_conns: u32, false, def, 10;
/// Icon cache folder
icon_cache_folder: String, false, auto, |c| format!("{}/{}", c.data_folder, "icon_cache");
/// Attachments folder
@ -333,6 +357,15 @@ make_config! {
/// Trash purge schedule |> Cron schedule of the job that checks for trashed items to delete permanently.
/// Defaults to daily. Set blank to disable this job.
trash_purge_schedule: String, false, def, "0 5 0 * * *".to_string();
/// Incomplete 2FA login schedule |> Cron schedule of the job that checks for incomplete 2FA logins.
/// Defaults to once every minute. Set blank to disable this job.
incomplete_2fa_schedule: String, false, def, "30 * * * * *".to_string();
/// Emergency notification reminder schedule |> Cron schedule of the job that sends expiration reminders to emergency access grantors.
/// Defaults to hourly. Set blank to disable this job.
emergency_notification_reminder_schedule: String, false, def, "0 5 * * * *".to_string();
/// Emergency request timeout schedule |> Cron schedule of the job that grants emergency access requests that have met the required wait time.
/// Defaults to hourly. Set blank to disable this job.
emergency_request_timeout_schedule: String, false, def, "0 5 * * * *".to_string();
},
/// General settings
@ -366,9 +399,17 @@ make_config! {
/// sure to inform all users of any changes to this setting.
trash_auto_delete_days: i64, true, option;
/// Disable icon downloads |> Set to true to disable icon downloading, this would still serve icons from
/// $ICON_CACHE_FOLDER, but it won't produce any external network request. Needs to set $ICON_CACHE_TTL to 0,
/// otherwise it will delete them and they won't be downloaded again.
/// Incomplete 2FA time limit |> Number of minutes to wait before a 2FA-enabled login is
/// considered incomplete, resulting in an email notification. An incomplete 2FA login is one
/// where the correct master password was provided but the required 2FA step was not completed,
/// which potentially indicates a master password compromise. Set to 0 to disable this check.
/// This setting applies globally to all users.
incomplete_2fa_time_limit: i64, true, def, 3;
/// Disable icon downloads |> Set to true to disable icon downloading in the internal icon service.
/// This still serves existing icons from $ICON_CACHE_FOLDER, without generating any external
/// network requests. $ICON_CACHE_TTL must also be set to 0; otherwise, the existing icons
/// will be deleted eventually, but won't be downloaded again.
disable_icon_download: bool, true, def, false;
/// Allow new signups |> Controls whether new users can register. Users can be invited by the vaultwarden admin even if this is disabled
signups_allowed: bool, true, def, true;
@ -385,6 +426,8 @@ make_config! {
org_creation_users: String, true, def, "".to_string();
/// Allow invitations |> Controls whether users can be invited by organization admins, even when signups are otherwise disabled
invitations_allowed: bool, true, def, true;
/// Allow emergency access |> Controls whether users can enable emergency access to their accounts. This setting applies globally to all users.
emergency_access_allowed: bool, true, def, true;
/// Password iterations |> Number of server-side passwords hashing iterations.
/// The changes only apply when a user changes their password. Not recommended to lower the value
password_iterations: i32, true, def, 100_000;
@ -407,6 +450,19 @@ make_config! {
ip_header: String, true, def, "X-Real-IP".to_string();
/// Internal IP header property, used to avoid recomputing each time
_ip_header_enabled: bool, false, gen, |c| &c.ip_header.trim().to_lowercase() != "none";
/// Icon service |> The predefined icon services are: internal, bitwarden, duckduckgo, google.
/// To specify a custom icon service, set a URL template with exactly one instance of `{}`,
/// which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
/// `internal` refers to Vaultwarden's built-in icon fetching implementation. If an external
/// service is set, an icon request to Vaultwarden will return an HTTP redirect to the
/// corresponding icon at the external service.
icon_service: String, false, def, "internal".to_string();
/// Icon redirect code |> The HTTP status code to use for redirects to an external icon service.
/// The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
/// Temporary redirects are useful while testing different icon services, but once a service
/// has been decided on, consider using permanent redirects for cacheability. The legacy codes
/// are currently better supported by the Bitwarden clients.
icon_redirect_code: u32, true, def, 302;
/// Positive icon cache expiry |> Number of seconds to consider that an already cached icon is fresh. After this period, the icon will be redownloaded
icon_cache_ttl: u64, true, def, 2_592_000;
/// Negative icon cache expiry |> Number of seconds to wait before retrying the download of an icon that previously failed.
@ -453,11 +509,24 @@ make_config! {
/// Max database connection retries |> Number of times to retry the database connection during startup, with 1 second between each retry, set to 0 to retry indefinitely
db_connection_retries: u32, false, def, 15;
/// Database connection pool size
database_max_conns: u32, false, def, 10;
/// Bypass admin page security (Know the risks!) |> Disables the Admin Token for the admin page so you may use your own auth in-front
disable_admin_token: bool, true, def, false;
/// Allowed iframe ancestors (Know the risks!) |> Allows other domains to embed the web vault into an iframe, useful for embedding into secure intranets
allowed_iframe_ancestors: String, true, def, String::new();
/// Seconds between login requests |> Number of seconds, on average, between login and 2FA requests from the same IP address before rate limiting kicks in
login_ratelimit_seconds: u64, false, def, 60;
/// Max burst size for login requests |> Allow a burst of requests of up to this size, while maintaining the average indicated by `login_ratelimit_seconds`. Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2
login_ratelimit_max_burst: u32, false, def, 10;
/// Seconds between admin requests |> Number of seconds, on average, between admin requests from the same IP address before rate limiting kicks in
admin_ratelimit_seconds: u64, false, def, 300;
/// Max burst size for admin requests |> Allow a burst of requests of up to this size, while maintaining the average indicated by `admin_ratelimit_seconds`
admin_ratelimit_max_burst: u32, false, def, 3;
},
/// Yubikey settings
@ -524,8 +593,8 @@ make_config! {
email_2fa: _enable_email_2fa {
/// Enabled |> Disabling will prevent users from setting up new email 2FA and from using any email 2FA they have already configured
_enable_email_2fa: bool, true, auto, |c| c._enable_smtp && c.smtp_host.is_some();
/// Email token size |> Number of digits in an email token (min: 6, max: 19). Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting.
email_token_size: u32, true, def, 6;
/// Email token size |> Number of digits in an email 2FA token (min: 6, max: 255). Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting.
email_token_size: u8, true, def, 6;
/// Token expiration time |> Maximum time in seconds a token is valid. The time the user has to open their email client and copy the token.
email_expiration_time: u64, true, def, 600;
/// Maximum attempts |> Maximum attempts before an email token is reset and a new email will need to be sent
@ -599,21 +668,39 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
if cfg._enable_email_2fa && cfg.email_token_size < 6 {
err!("`EMAIL_TOKEN_SIZE` has a minimum size of 6")
}
if cfg._enable_email_2fa && cfg.email_token_size > 19 {
err!("`EMAIL_TOKEN_SIZE` has a maximum size of 19")
}
}
// Check if the icon blacklist regex is valid
if let Some(ref r) = cfg.icon_blacklist_regex {
let validate_regex = Regex::new(r);
let validate_regex = regex::Regex::new(r);
match validate_regex {
Ok(_) => (),
Err(e) => err!(format!("`ICON_BLACKLIST_REGEX` is invalid: {:#?}", e)),
}
}
// Check if the icon service is valid
let icon_service = cfg.icon_service.as_str();
match icon_service {
"internal" | "bitwarden" | "duckduckgo" | "google" => (),
_ => {
if !icon_service.starts_with("http") {
err!(format!("Icon service URL `{}` must start with \"http\"", icon_service))
}
match icon_service.matches("{}").count() {
1 => (), // nominal
0 => err!(format!("Icon service URL `{}` has no placeholder \"{{}}\"", icon_service)),
_ => err!(format!("Icon service URL `{}` has more than one placeholder \"{{}}\"", icon_service)),
}
}
}
// Check if the icon redirect code is valid
match cfg.icon_redirect_code {
301 | 302 | 307 | 308 => (),
_ => err!("Only HTTP 301/302 and 307/308 redirects are supported"),
}
Ok(())
}
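The placeholder check above is a simple substring count; illustrative inputs for the custom icon service validation:

#[test]
fn icon_service_placeholder_count() {
    // Accepted: exactly one "{}" placeholder.
    assert_eq!("https://icon.example.com/domain/{}".matches("{}").count(), 1);
    // Rejected: no placeholder at all.
    assert_eq!("https://icon.example.com/domain".matches("{}").count(), 0);
}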
@ -699,7 +786,7 @@ impl Config {
Ok(())
}
pub fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
let builder = {
let usr = &self.inner.read().unwrap()._usr;
let mut _overrides = Vec::new();
@ -853,13 +940,23 @@ where
reg!("email/change_email", ".html");
reg!("email/delete_account", ".html");
reg!("email/emergency_access_invite_accepted", ".html");
reg!("email/emergency_access_invite_confirmed", ".html");
reg!("email/emergency_access_recovery_approved", ".html");
reg!("email/emergency_access_recovery_initiated", ".html");
reg!("email/emergency_access_recovery_rejected", ".html");
reg!("email/emergency_access_recovery_reminder", ".html");
reg!("email/emergency_access_recovery_timed_out", ".html");
reg!("email/incomplete_2fa_login", ".html");
reg!("email/invite_accepted", ".html");
reg!("email/invite_confirmed", ".html");
reg!("email/new_device_logged_in", ".html");
reg!("email/pw_hint_none", ".html");
reg!("email/pw_hint_some", ".html");
reg!("email/send_2fa_removed_from_org", ".html");
reg!("email/send_single_org_removed_from_org", ".html");
reg!("email/send_org_invite", ".html");
reg!("email/send_emergency_access_invite", ".html");
reg!("email/twofactor_email", ".html");
reg!("email/verify_email", ".html");
reg!("email/welcome", ".html");

52
src/crypto.rs

@ -6,8 +6,6 @@ use std::num::NonZeroU32;
use data_encoding::HEXLOWER;
use ring::{digest, hmac, pbkdf2};
use crate::error::Error;
static DIGEST_ALG: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256;
const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;
@ -51,6 +49,34 @@ pub fn get_random(mut array: Vec<u8>) -> Vec<u8> {
array
}
/// Generates a random string over a specified alphabet.
pub fn get_random_string(alphabet: &[u8], num_chars: usize) -> String {
// Ref: https://rust-lang-nursery.github.io/rust-cookbook/algorithms/randomness.html
use rand::Rng;
let mut rng = rand::thread_rng();
(0..num_chars)
.map(|_| {
let i = rng.gen_range(0..alphabet.len());
alphabet[i] as char
})
.collect()
}
/// Generates a random numeric string.
pub fn get_random_string_numeric(num_chars: usize) -> String {
const ALPHABET: &[u8] = b"0123456789";
get_random_string(ALPHABET, num_chars)
}
/// Generates a random alphanumeric string.
pub fn get_random_string_alphanum(num_chars: usize) -> String {
const ALPHABET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
abcdefghijklmnopqrstuvwxyz\
0123456789";
get_random_string(ALPHABET, num_chars)
}
pub fn generate_id(num_bytes: usize) -> String {
HEXLOWER.encode(&get_random(vec![0; num_bytes]))
}
@ -65,23 +91,15 @@ pub fn generate_attachment_id() -> String {
generate_id(10) // 80 bits
}
pub fn generate_token(token_size: u32) -> Result<String, Error> {
// A u64 can represent all whole numbers up to 19 digits long.
if token_size > 19 {
err!("Token size is limited to 19 digits")
/// Generates a numeric token for email-based verifications.
pub fn generate_email_token(token_size: u8) -> String {
get_random_string_numeric(token_size as usize)
}
let low: u64 = 0;
let high: u64 = 10u64.pow(token_size);
// Generate a random number in the range [low, high), then format it as a
// token of fixed width, left-padding with 0 as needed.
use rand::{thread_rng, Rng};
let mut rng = thread_rng();
let number: u64 = rng.gen_range(low..high);
let token = format!("{:0size$}", number, size = token_size as usize);
Ok(token)
/// Generates a personal API key.
/// Upstream uses 30 chars, which is ~178 bits of entropy.
pub fn generate_api_key() -> String {
get_random_string_alphanum(30)
}
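Since the old generate_token tests were dropped above, here is a hedged sketch of equivalent checks for the new helpers; these tests are not part of the diff, illustrative only:

#[cfg(test)]
mod token_tests {
    use super::*;

    #[test]
    fn email_token_has_requested_length_and_is_numeric() {
        let token = generate_email_token(19);
        assert_eq!(token.chars().count(), 19);
        assert!(token.chars().all(|c| c.is_ascii_digit()));
    }

    #[test]
    fn api_key_is_30_alphanumeric_chars() {
        let key = generate_api_key();
        assert_eq!(key.chars().count(), 30);
        assert!(key.chars().all(|c| c.is_ascii_alphanumeric()));
    }
}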
//

45
src/db/models/cipher.rs

@ -343,36 +343,39 @@ impl Cipher {
db_run! {conn: {
// Check whether this cipher is in any collections accessible to the
// user. If so, retrieve the access flags for each collection.
let query = ciphers::table
let rows = ciphers::table
.filter(ciphers::uuid.eq(&self.uuid))
.inner_join(ciphers_collections::table.on(
ciphers::uuid.eq(ciphers_collections::cipher_uuid)))
.inner_join(users_collections::table.on(
ciphers_collections::collection_uuid.eq(users_collections::collection_uuid)
.and(users_collections::user_uuid.eq(user_uuid))))
.select((users_collections::read_only, users_collections::hide_passwords));
// There's an edge case where a cipher can be in multiple collections
// with inconsistent access flags. For example, a cipher could be in
// one collection where the user has read-only access, but also in
// another collection where the user has read/write access. To handle
// this, we do a boolean OR of all values in each of the `read_only`
// and `hide_passwords` columns. This could ideally be done as part
// of the query, but Diesel doesn't support a max() or bool_or()
// function on booleans and this behavior isn't portable anyway.
if let Ok(vec) = query.load::<(bool, bool)>(conn) {
let mut read_only = false;
let mut hide_passwords = false;
for (ro, hp) in vec.iter() {
read_only |= ro;
hide_passwords |= hp;
}
.select((users_collections::read_only, users_collections::hide_passwords))
.load::<(bool, bool)>(conn)
.expect("Error getting access restrictions");
Some((read_only, hide_passwords))
} else {
if rows.is_empty() {
// This cipher isn't in any collections accessible to the user.
None
return None;
}
// A cipher can be in multiple collections with inconsistent access flags.
// For example, a cipher could be in one collection where the user has
// read-only access, but also in another collection where the user has
// read/write access. For a flag to be in effect for a cipher, upstream
// requires all collections the cipher is in to have that flag set.
// Therefore, we do a boolean AND of all values in each of the `read_only`
// and `hide_passwords` columns. This could ideally be done as part of the
// query, but Diesel doesn't support a min() or bool_and() function on
// booleans and this behavior isn't portable anyway.
let mut read_only = true;
let mut hide_passwords = true;
for (ro, hp) in rows.iter() {
read_only &= ro;
hide_passwords &= hp;
}
Some((read_only, hide_passwords))
}}
}
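The AND-fold over the access flags could equivalently be written with an iterator fold; a small illustrative variant of the loop above:

// Equivalent formulation of the loop above (illustrative).
let (read_only, hide_passwords) = rows
    .iter()
    .fold((true, true), |(ro_acc, hp_acc), &(ro, hp)| {
        (ro_acc && ro, hp_acc && hp)
    });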

12
src/db/models/device.rs

@ -17,8 +17,7 @@ db_object! {
pub user_uuid: String,
pub name: String,
// https://github.com/bitwarden/core/tree/master/src/Core/Enums
pub atype: i32,
pub atype: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs
pub push_token: Option<String>,
pub refresh_token: String,
@ -61,7 +60,12 @@ impl Device {
self.twofactor_remember = None;
}
pub fn refresh_tokens(&mut self, user: &super::User, orgs: Vec<super::UserOrganization>) -> (String, i64) {
pub fn refresh_tokens(
&mut self,
user: &super::User,
orgs: Vec<super::UserOrganization>,
scope: Vec<String>,
) -> (String, i64) {
// If there is no refresh token, we create one
if self.refresh_token.is_empty() {
use crate::crypto;
@ -99,7 +103,7 @@ impl Device {
sstamp: user.security_stamp.to_string(),
device: self.uuid.to_string(),
scope: vec!["api".into(), "offline_access".into()],
scope,
amr: vec!["Application".into()],
};

282
src/db/models/emergency_access.rs

@ -0,0 +1,282 @@
use chrono::{NaiveDateTime, Utc};
use serde_json::Value;
use super::User;
db_object! {
#[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)]
#[table_name = "emergency_access"]
#[changeset_options(treat_none_as_null="true")]
#[belongs_to(User, foreign_key = "grantor_uuid")]
#[primary_key(uuid)]
pub struct EmergencyAccess {
pub uuid: String,
pub grantor_uuid: String,
pub grantee_uuid: Option<String>,
pub email: Option<String>,
pub key_encrypted: Option<String>,
pub atype: i32, //EmergencyAccessType
pub status: i32, //EmergencyAccessStatus
pub wait_time_days: i32,
pub recovery_initiated_at: Option<NaiveDateTime>,
pub last_notification_at: Option<NaiveDateTime>,
pub updated_at: NaiveDateTime,
pub created_at: NaiveDateTime,
}
}
/// Local methods
impl EmergencyAccess {
pub fn new(grantor_uuid: String, email: Option<String>, status: i32, atype: i32, wait_time_days: i32) -> Self {
let now = Utc::now().naive_utc();
Self {
uuid: crate::util::get_uuid(),
grantor_uuid,
grantee_uuid: None,
email,
status,
atype,
wait_time_days,
recovery_initiated_at: None,
created_at: now,
updated_at: now,
key_encrypted: None,
last_notification_at: None,
}
}
pub fn get_type_as_str(&self) -> &'static str {
if self.atype == EmergencyAccessType::View as i32 {
"View"
} else {
"Takeover"
}
}
pub fn has_type(&self, access_type: EmergencyAccessType) -> bool {
self.atype == access_type as i32
}
pub fn has_status(&self, status: EmergencyAccessStatus) -> bool {
self.status == status as i32
}
pub fn to_json(&self) -> Value {
json!({
"Id": self.uuid,
"Status": self.status,
"Type": self.atype,
"WaitTimeDays": self.wait_time_days,
"Object": "emergencyAccess",
})
}
pub fn to_json_grantor_details(&self, conn: &DbConn) -> Value {
let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn).expect("Grantor user not found.");
json!({
"Id": self.uuid,
"Status": self.status,
"Type": self.atype,
"WaitTimeDays": self.wait_time_days,
"GrantorId": grantor_user.uuid,
"Email": grantor_user.email,
"Name": grantor_user.name,
"Object": "emergencyAccessGrantorDetails",
})
}
#[allow(clippy::manual_map)]
pub fn to_json_grantee_details(&self, conn: &DbConn) -> Value {
let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() {
Some(User::find_by_uuid(grantee_uuid, conn).expect("Grantee user not found."))
} else if let Some(email) = self.email.as_deref() {
Some(User::find_by_mail(email, conn).expect("Grantee user not found."))
} else {
None
};
json!({
"Id": self.uuid,
"Status": self.status,
"Type": self.atype,
"WaitTimeDays": self.wait_time_days,
"GranteeId": grantee_user.as_ref().map_or("", |u| &u.uuid),
"Email": grantee_user.as_ref().map_or("", |u| &u.email),
"Name": grantee_user.as_ref().map_or("", |u| &u.name),
"Object": "emergencyAccessGranteeDetails",
})
}
}
#[derive(Copy, Clone, PartialEq, Eq, num_derive::FromPrimitive)]
pub enum EmergencyAccessType {
View = 0,
Takeover = 1,
}
impl EmergencyAccessType {
pub fn from_str(s: &str) -> Option<Self> {
match s {
"0" | "View" => Some(EmergencyAccessType::View),
"1" | "Takeover" => Some(EmergencyAccessType::Takeover),
_ => None,
}
}
}
impl PartialEq<i32> for EmergencyAccessType {
fn eq(&self, other: &i32) -> bool {
*other == *self as i32
}
}
impl PartialEq<EmergencyAccessType> for i32 {
fn eq(&self, other: &EmergencyAccessType) -> bool {
*self == *other as i32
}
}
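These PartialEq impls exist so the i32 database columns can be compared against the enum directly, in either direction:

// Illustrative: both directions compile thanks to the impls above.
fn _example(access: &EmergencyAccess) -> bool {
    access.atype == EmergencyAccessType::View
        || EmergencyAccessType::Takeover == access.atype
}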
pub enum EmergencyAccessStatus {
Invited = 0,
Accepted = 1,
Confirmed = 2,
RecoveryInitiated = 3,
RecoveryApproved = 4,
}
// region Database methods
use crate::db::DbConn;
use crate::api::EmptyResult;
use crate::error::MapResult;
impl EmergencyAccess {
pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
User::update_uuid_revision(&self.grantor_uuid, conn);
self.updated_at = Utc::now().naive_utc();
db_run! { conn:
sqlite, mysql {
match diesel::replace_into(emergency_access::table)
.values(EmergencyAccessDb::to_db(self))
.execute(conn)
{
Ok(_) => Ok(()),
// Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
diesel::update(emergency_access::table)
.filter(emergency_access::uuid.eq(&self.uuid))
.set(EmergencyAccessDb::to_db(self))
.execute(conn)
.map_res("Error updating emergency access")
}
Err(e) => Err(e.into()),
}.map_res("Error saving emergency access")
}
postgresql {
let value = EmergencyAccessDb::to_db(self);
diesel::insert_into(emergency_access::table)
.values(&value)
.on_conflict(emergency_access::uuid)
.do_update()
.set(&value)
.execute(conn)
.map_res("Error saving emergency access")
}
}
}
pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
for ea in Self::find_all_by_grantor_uuid(user_uuid, conn) {
ea.delete(conn)?;
}
for ea in Self::find_all_by_grantee_uuid(user_uuid, conn) {
ea.delete(conn)?;
}
Ok(())
}
pub fn delete(self, conn: &DbConn) -> EmptyResult {
User::update_uuid_revision(&self.grantor_uuid, conn);
db_run! { conn: {
diesel::delete(emergency_access::table.filter(emergency_access::uuid.eq(self.uuid)))
.execute(conn)
.map_res("Error removing user from emergency access")
}}
}
pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::uuid.eq(uuid))
.first::<EmergencyAccessDb>(conn)
.ok().from_db()
}}
}
pub fn find_by_grantor_uuid_and_grantee_uuid_or_email(
grantor_uuid: &str,
grantee_uuid: &str,
email: &str,
conn: &DbConn,
) -> Option<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::grantor_uuid.eq(grantor_uuid))
.filter(emergency_access::grantee_uuid.eq(grantee_uuid).or(emergency_access::email.eq(email)))
.first::<EmergencyAccessDb>(conn)
.ok().from_db()
}}
}
pub fn find_all_recoveries(conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::status.eq(EmergencyAccessStatus::RecoveryInitiated as i32))
.load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
}}
}
pub fn find_by_uuid_and_grantor_uuid(uuid: &str, grantor_uuid: &str, conn: &DbConn) -> Option<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::uuid.eq(uuid))
.filter(emergency_access::grantor_uuid.eq(grantor_uuid))
.first::<EmergencyAccessDb>(conn)
.ok().from_db()
}}
}
pub fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::grantee_uuid.eq(grantee_uuid))
.load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
}}
}
pub fn find_invited_by_grantee_email(grantee_email: &str, conn: &DbConn) -> Option<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::email.eq(grantee_email))
.filter(emergency_access::status.eq(EmergencyAccessStatus::Invited as i32))
.first::<EmergencyAccessDb>(conn)
.ok().from_db()
}}
}
pub fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::grantor_uuid.eq(grantor_uuid))
.load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
}}
}
}
// endregion

4
src/db/models/mod.rs

@ -2,12 +2,14 @@ mod attachment;
mod cipher;
mod collection;
mod device;
mod emergency_access;
mod favorite;
mod folder;
mod org_policy;
mod organization;
mod send;
mod two_factor;
mod two_factor_incomplete;
mod user;
mod sso_nonce;
mod sso_config;
@ -16,12 +18,14 @@ pub use self::attachment::Attachment;
pub use self::cipher::Cipher;
pub use self::collection::{Collection, CollectionCipher, CollectionUser};
pub use self::device::Device;
pub use self::emergency_access::{EmergencyAccess, EmergencyAccessStatus, EmergencyAccessType};
pub use self::favorite::Favorite;
pub use self::folder::{Folder, FolderCipher};
pub use self::org_policy::{OrgPolicy, OrgPolicyType};
pub use self::organization::{Organization, UserOrgStatus, UserOrgType, UserOrganization};
pub use self::send::{Send, SendType};
pub use self::two_factor::{TwoFactor, TwoFactorType};
pub use self::two_factor_incomplete::TwoFactorIncomplete;
pub use self::user::{Invitation, User, UserStampException};
pub use self::sso_nonce::SsoNonce;
pub use self::sso_config::SsoConfig;

9
src/db/models/org_policy.rs

@ -143,7 +143,7 @@ impl OrgPolicy {
}}
}
pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
pub fn find_confirmed_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
org_policies::table
.inner_join(
@ -184,8 +184,8 @@ impl OrgPolicy {
/// and the user is not an owner or admin of that org. This is only useful for checking
/// applicability of policy types that have these particular semantics.
pub fn is_applicable_to_user(user_uuid: &str, policy_type: OrgPolicyType, conn: &DbConn) -> bool {
// Returns confirmed users only.
for policy in OrgPolicy::find_by_user(user_uuid, conn) {
// TODO: Should check confirmed and accepted users
for policy in OrgPolicy::find_confirmed_by_user(user_uuid, conn) {
if policy.enabled && policy.has_type(policy_type) {
let org_uuid = &policy.org_uuid;
if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn) {
@ -201,8 +201,7 @@ impl OrgPolicy {
/// Returns true if the user belongs to an org that has enabled the `DisableHideEmail`
/// option of the `Send Options` policy, and the user is not an owner or admin of that org.
pub fn is_hide_email_disabled(user_uuid: &str, conn: &DbConn) -> bool {
// Returns confirmed users only.
for policy in OrgPolicy::find_by_user(user_uuid, conn) {
for policy in OrgPolicy::find_confirmed_by_user(user_uuid, conn) {
if policy.enabled && policy.has_type(OrgPolicyType::SendOptions) {
let org_uuid = &policy.org_uuid;
if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn) {

2
src/db/models/organization.rs

@ -487,7 +487,7 @@ impl UserOrganization {
}}
}
pub fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
pub fn find_confirmed_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
users_organizations::table
.filter(users_organizations::user_uuid.eq(user_uuid))

5
src/db/models/two_factor.rs

@ -1,8 +1,6 @@
use serde_json::Value;
use crate::api::EmptyResult;
use crate::db::DbConn;
use crate::error::MapResult;
use crate::{api::EmptyResult, db::DbConn, error::MapResult};
use super::User;
@ -161,7 +159,6 @@ impl TwoFactor {
use crate::api::core::two_factor::u2f::U2FRegistration;
use crate::api::core::two_factor::webauthn::{get_webauthn_registrations, WebauthnRegistration};
use std::convert::TryInto;
use webauthn_rs::proto::*;
for mut u2f in u2f_factors {

108
src/db/models/two_factor_incomplete.rs

@ -0,0 +1,108 @@
use chrono::{NaiveDateTime, Utc};
use crate::{api::EmptyResult, auth::ClientIp, db::DbConn, error::MapResult, CONFIG};
use super::User;
db_object! {
#[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
#[table_name = "twofactor_incomplete"]
#[belongs_to(User, foreign_key = "user_uuid")]
#[primary_key(user_uuid, device_uuid)]
pub struct TwoFactorIncomplete {
pub user_uuid: String,
// This device UUID is simply what's claimed by the device. It doesn't
// necessarily correspond to any UUID in the devices table, since a device
// must complete 2FA login before being added into the devices table.
pub device_uuid: String,
pub device_name: String,
pub login_time: NaiveDateTime,
pub ip_address: String,
}
}
impl TwoFactorIncomplete {
pub fn mark_incomplete(
user_uuid: &str,
device_uuid: &str,
device_name: &str,
ip: &ClientIp,
conn: &DbConn,
) -> EmptyResult {
if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() {
return Ok(());
}
// Don't update the data for an existing user/device pair, since that
// would allow an attacker to arbitrarily delay notifications by
// sending repeated 2FA attempts to reset the timer.
let existing = Self::find_by_user_and_device(user_uuid, device_uuid, conn);
if existing.is_some() {
return Ok(());
}
db_run! { conn: {
diesel::insert_into(twofactor_incomplete::table)
.values((
twofactor_incomplete::user_uuid.eq(user_uuid),
twofactor_incomplete::device_uuid.eq(device_uuid),
twofactor_incomplete::device_name.eq(device_name),
twofactor_incomplete::login_time.eq(Utc::now().naive_utc()),
twofactor_incomplete::ip_address.eq(ip.ip.to_string()),
))
.execute(conn)
.map_res("Error adding twofactor_incomplete record")
}}
}
pub fn mark_complete(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> EmptyResult {
if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() {
return Ok(());
}
Self::delete_by_user_and_device(user_uuid, device_uuid, conn)
}
pub fn find_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> Option<Self> {
db_run! { conn: {
twofactor_incomplete::table
.filter(twofactor_incomplete::user_uuid.eq(user_uuid))
.filter(twofactor_incomplete::device_uuid.eq(device_uuid))
.first::<TwoFactorIncompleteDb>(conn)
.ok()
.from_db()
}}
}
pub fn find_logins_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec<Self> {
db_run! {conn: {
twofactor_incomplete::table
.filter(twofactor_incomplete::login_time.lt(dt))
.load::<TwoFactorIncompleteDb>(conn)
.expect("Error loading twofactor_incomplete")
.from_db()
}}
}
pub fn delete(self, conn: &DbConn) -> EmptyResult {
Self::delete_by_user_and_device(&self.user_uuid, &self.device_uuid, conn)
}
pub fn delete_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> EmptyResult {
db_run! { conn: {
diesel::delete(twofactor_incomplete::table
.filter(twofactor_incomplete::user_uuid.eq(user_uuid))
.filter(twofactor_incomplete::device_uuid.eq(device_uuid)))
.execute(conn)
.map_res("Error in twofactor_incomplete::delete_by_user_and_device()")
}}
}
pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
db_run! { conn: {
diesel::delete(twofactor_incomplete::table.filter(twofactor_incomplete::user_uuid.eq(user_uuid)))
.execute(conn)
.map_res("Error in twofactor_incomplete::delete_all_by_user()")
}}
}
}
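A hedged sketch of how a login path might drive this table; the surrounding handler is an assumption, and only the two mark_* calls come from the code above:
// Hypothetical login-flow wrapper (shape assumed):
fn run_2fa_login(user: &User, device_uuid: &str, device_name: &str, ip: &ClientIp, conn: &DbConn) -> EmptyResult {
    // Record the attempt before the second factor is verified; if it is never
    // completed, the scheduled notification job will eventually mail the user.
    TwoFactorIncomplete::mark_incomplete(&user.uuid, device_uuid, device_name, ip, conn)?;
    // ... verify the TOTP/WebAuthn/email code here ...
    // On success, drop the pending record so no notification goes out.
    TwoFactorIncomplete::mark_complete(&user.uuid, device_uuid, conn)
}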

27
src/db/models/user.rs

@ -44,8 +44,9 @@ db_object! {
pub client_kdf_type: i32,
pub client_kdf_iter: i32,
}
pub api_key: Option<String>,
}
#[derive(Identifiable, Queryable, Insertable)]
#[table_name = "invitations"]
@ -73,9 +74,9 @@ impl User {
pub const CLIENT_KDF_TYPE_DEFAULT: i32 = 0; // PBKDF2: 0
pub const CLIENT_KDF_ITER_DEFAULT: i32 = 100_000;
pub fn new(mail: String) -> Self {
pub fn new(email: String) -> Self {
let now = Utc::now().naive_utc();
let email = mail.to_lowercase();
let email = email.to_lowercase();
Self {
uuid: crate::util::get_uuid(),
@ -110,6 +111,8 @@ impl User {
client_kdf_type: Self::CLIENT_KDF_TYPE_DEFAULT,
client_kdf_iter: Self::CLIENT_KDF_ITER_DEFAULT,
api_key: None,
}
}
@ -130,6 +133,10 @@ impl User {
}
}
pub fn check_valid_api_key(&self, key: &str) -> bool {
matches!(self.api_key, Some(ref api_key) if crate::crypto::ct_eq(api_key, key))
}
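The comparison runs through crate::crypto::ct_eq, a constant-time equality, so response timing cannot reveal how many leading bytes of a guessed key are correct. A hedged call-site sketch (the guard shape and variable are assumptions):
// Hypothetical request-guard check:
if !user.check_valid_api_key(supplied_key) {
    err!("Invalid API key");
}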
/// Sets the generated password hash
/// and resets the security_stamp. Depending on allow_next_route, the security_stamp will differ.
///
@ -176,7 +183,10 @@ impl User {
}
}
use super::{Cipher, Device, Favorite, Folder, Send, TwoFactor, UserOrgType, UserOrganization};
use super::{
Cipher, Device, EmergencyAccess, Favorite, Folder, Send, TwoFactor, TwoFactorIncomplete, UserOrgType,
UserOrganization,
};
use crate::db::DbConn;
use crate::api::EmptyResult;
@ -185,7 +195,7 @@ use crate::error::MapResult;
/// Database methods
impl User {
pub fn to_json(&self, conn: &DbConn) -> Value {
let orgs = UserOrganization::find_by_user(&self.uuid, conn);
let orgs = UserOrganization::find_confirmed_by_user(&self.uuid, conn);
let orgs_json: Vec<Value> = orgs.iter().map(|c| c.to_json(conn)).collect();
let twofactor_enabled = !TwoFactor::find_by_user(&self.uuid, conn).is_empty();
@ -256,7 +266,7 @@ impl User {
}
pub fn delete(self, conn: &DbConn) -> EmptyResult {
for user_org in UserOrganization::find_by_user(&self.uuid, conn) {
for user_org in UserOrganization::find_confirmed_by_user(&self.uuid, conn) {
if user_org.atype == UserOrgType::Owner {
let owner_type = UserOrgType::Owner as i32;
if UserOrganization::find_by_org_and_type(&user_org.org_uuid, owner_type, conn).len() <= 1 {
@ -266,12 +276,14 @@ impl User {
}
Send::delete_all_by_user(&self.uuid, conn)?;
EmergencyAccess::delete_all_by_user(&self.uuid, conn)?;
UserOrganization::delete_all_by_user(&self.uuid, conn)?;
Cipher::delete_all_by_user(&self.uuid, conn)?;
Favorite::delete_all_by_user(&self.uuid, conn)?;
Folder::delete_all_by_user(&self.uuid, conn)?;
Device::delete_all_by_user(&self.uuid, conn)?;
TwoFactor::delete_all_by_user(&self.uuid, conn)?;
TwoFactorIncomplete::delete_all_by_user(&self.uuid, conn)?;
Invitation::take(&self.email, conn); // Delete invitation if any
db_run! {conn: {
@ -349,7 +361,8 @@ impl User {
}
impl Invitation {
pub const fn new(email: String) -> Self {
pub fn new(email: String) -> Self {
let email = email.to_lowercase();
Self {
email,
}

30
src/db/schemas/mysql/schema.rs

@ -154,6 +154,16 @@ table! {
}
}
table! {
twofactor_incomplete (user_uuid, device_uuid) {
user_uuid -> Text,
device_uuid -> Text,
device_name -> Text,
login_time -> Timestamp,
ip_address -> Text,
}
}
table! {
users (uuid) {
uuid -> Text,
@ -182,6 +192,7 @@ table! {
excluded_globals -> Text,
client_kdf_type -> Integer,
client_kdf_iter -> Integer,
api_key -> Nullable<Text>,
}
}
@ -214,6 +225,23 @@ table! {
}
}
table! {
emergency_access (uuid) {
uuid -> Text,
grantor_uuid -> Text,
grantee_uuid -> Nullable<Text>,
email -> Nullable<Text>,
key_encrypted -> Nullable<Text>,
atype -> Integer,
status -> Integer,
wait_time_days -> Integer,
recovery_initiated_at -> Nullable<Timestamp>,
last_notification_at -> Nullable<Timestamp>,
updated_at -> Timestamp,
created_at -> Timestamp,
}
}
joinable!(attachments -> ciphers (cipher_uuid));
joinable!(ciphers -> organizations (organization_uuid));
joinable!(ciphers -> users (user_uuid));
@ -233,6 +261,7 @@ joinable!(users_collections -> users (user_uuid));
joinable!(users_organizations -> organizations (org_uuid));
joinable!(users_organizations -> users (user_uuid));
joinable!(sso_nonce -> organizations (org_uuid));
joinable!(emergency_access -> users (grantor_uuid));
allow_tables_to_appear_in_same_query!(
attachments,
@ -250,4 +279,5 @@ allow_tables_to_appear_in_same_query!(
users,
users_collections,
users_organizations,
emergency_access,
);

30
src/db/schemas/postgresql/schema.rs

@ -154,6 +154,16 @@ table! {
}
}
table! {
twofactor_incomplete (user_uuid, device_uuid) {
user_uuid -> Text,
device_uuid -> Text,
device_name -> Text,
login_time -> Timestamp,
ip_address -> Text,
}
}
table! {
users (uuid) {
uuid -> Text,
@ -182,6 +192,7 @@ table! {
excluded_globals -> Text,
client_kdf_type -> Integer,
client_kdf_iter -> Integer,
api_key -> Nullable<Text>,
}
}
@ -214,6 +225,23 @@ table! {
}
}
table! {
emergency_access (uuid) {
uuid -> Text,
grantor_uuid -> Text,
grantee_uuid -> Nullable<Text>,
email -> Nullable<Text>,
key_encrypted -> Nullable<Text>,
atype -> Integer,
status -> Integer,
wait_time_days -> Integer,
recovery_initiated_at -> Nullable<Timestamp>,
last_notification_at -> Nullable<Timestamp>,
updated_at -> Timestamp,
created_at -> Timestamp,
}
}
joinable!(attachments -> ciphers (cipher_uuid));
joinable!(ciphers -> organizations (organization_uuid));
joinable!(ciphers -> users (user_uuid));
@ -233,6 +261,7 @@ joinable!(users_collections -> users (user_uuid));
joinable!(users_organizations -> organizations (org_uuid));
joinable!(users_organizations -> users (user_uuid));
joinable!(sso_nonce -> organizations (org_uuid));
joinable!(emergency_access -> users (grantor_uuid));
allow_tables_to_appear_in_same_query!(
attachments,
@ -250,4 +279,5 @@ allow_tables_to_appear_in_same_query!(
users,
users_collections,
users_organizations,
emergency_access,
);

30
src/db/schemas/sqlite/schema.rs

@ -154,6 +154,16 @@ table! {
}
}
table! {
twofactor_incomplete (user_uuid, device_uuid) {
user_uuid -> Text,
device_uuid -> Text,
device_name -> Text,
login_time -> Timestamp,
ip_address -> Text,
}
}
table! {
users (uuid) {
uuid -> Text,
@ -182,6 +192,7 @@ table! {
excluded_globals -> Text,
client_kdf_type -> Integer,
client_kdf_iter -> Integer,
api_key -> Nullable<Text>,
}
}
@ -214,6 +225,23 @@ table! {
}
}
table! {
emergency_access (uuid) {
uuid -> Text,
grantor_uuid -> Text,
grantee_uuid -> Nullable<Text>,
email -> Nullable<Text>,
key_encrypted -> Nullable<Text>,
atype -> Integer,
status -> Integer,
wait_time_days -> Integer,
recovery_initiated_at -> Nullable<Timestamp>,
last_notification_at -> Nullable<Timestamp>,
updated_at -> Timestamp,
created_at -> Timestamp,
}
}
joinable!(attachments -> ciphers (cipher_uuid));
joinable!(ciphers -> organizations (organization_uuid));
joinable!(ciphers -> users (user_uuid));
@ -233,6 +261,7 @@ joinable!(users_collections -> users (user_uuid));
joinable!(users_organizations -> organizations (org_uuid));
joinable!(users_organizations -> users (user_uuid));
joinable!(sso_nonce -> organizations (org_uuid));
joinable!(emergency_access -> users (grantor_uuid));
allow_tables_to_appear_in_same_query!(
attachments,
@ -250,4 +279,5 @@ allow_tables_to_appear_in_same_query!(
users,
users_collections,
users_organizations,
emergency_access,
);

13
src/error.rs

@ -73,7 +73,7 @@ make_error! {
Serde(SerdeErr): _has_source, _api_error,
JWt(JwtErr): _has_source, _api_error,
Handlebars(HbErr): _has_source, _api_error,
//WsError(ws::Error): _has_source, _api_error,
Io(IoErr): _has_source, _api_error,
Time(TimeErr): _has_source, _api_error,
Req(ReqErr): _has_source, _api_error,
@ -119,11 +119,13 @@ impl Error {
Empty {}.into()
}
#[must_use]
pub fn with_msg<M: Into<String>>(mut self, msg: M) -> Self {
self.message = msg.into();
self
}
#[must_use]
pub const fn with_code(mut self, code: u16) -> Self {
self.error_code = code;
self
@ -220,6 +222,15 @@ macro_rules! err {
}};
}
macro_rules! err_silent {
($msg:expr) => {{
return Err(crate::error::Error::new($msg, $msg));
}};
($usr_msg:expr, $log_value:expr) => {{
return Err(crate::error::Error::new($usr_msg, $log_value));
}};
}
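err_silent! mirrors err!'s two arities but is meant for paths where the failure should be returned to the client, presumably without the logging that err! performs. A minimal usage sketch (the function below is hypothetical):
// Hypothetical caller returning EmptyResult:
fn check_invitations_allowed(enabled: bool) -> crate::api::EmptyResult {
    if !enabled {
        err_silent!("Invitations are not allowed");
    }
    Ok(())
}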
#[macro_export]
macro_rules! err_code {
($msg:expr, $err_code: expr) => {{

193
src/mail.rs

@ -1,6 +1,6 @@
use std::str::FromStr;
use chrono::{DateTime, Local};
use chrono::NaiveDateTime;
use percent_encoding::{percent_encode, NON_ALPHANUMERIC};
use lettre::{
@ -13,7 +13,10 @@ use lettre::{
use crate::{
api::EmptyResult,
auth::{encode_jwt, generate_delete_claims, generate_invite_claims, generate_verify_email_claims},
auth::{
encode_jwt, generate_delete_claims, generate_emergency_access_invite_claims, generate_invite_claims,
generate_verify_email_claims,
},
error::Error,
CONFIG,
};
@ -192,6 +195,18 @@ pub fn send_2fa_removed_from_org(address: &str, org_name: &str) -> EmptyResult {
send_email(address, &subject, body_html, body_text)
}
pub fn send_single_org_removed_from_org(address: &str, org_name: &str) -> EmptyResult {
let (subject, body_html, body_text) = get_text(
"email/send_single_org_removed_from_org",
json!({
"url": CONFIG.domain(),
"org_name": org_name,
}),
)?;
send_email(address, &subject, body_html, body_text)
}
pub fn send_invite(
address: &str,
uuid: &str,
@ -224,6 +239,136 @@ pub fn send_invite(
send_email(address, &subject, body_html, body_text)
}
pub fn send_emergency_access_invite(
address: &str,
uuid: &str,
emer_id: Option<String>,
grantor_name: Option<String>,
grantor_email: Option<String>,
) -> EmptyResult {
let claims = generate_emergency_access_invite_claims(
uuid.to_string(),
String::from(address),
emer_id.clone(),
grantor_name.clone(),
grantor_email,
);
let invite_token = encode_jwt(&claims);
let (subject, body_html, body_text) = get_text(
"email/send_emergency_access_invite",
json!({
"url": CONFIG.domain(),
"emer_id": emer_id.unwrap_or_else(|| "_".to_string()),
"email": percent_encode(address.as_bytes(), NON_ALPHANUMERIC).to_string(),
"grantor_name": grantor_name,
"token": invite_token,
}),
)?;
send_email(address, &subject, body_html, body_text)
}
pub fn send_emergency_access_invite_accepted(address: &str, grantee_email: &str) -> EmptyResult {
let (subject, body_html, body_text) = get_text(
"email/emergency_access_invite_accepted",
json!({
"url": CONFIG.domain(),
"grantee_email": grantee_email,
}),
)?;
send_email(address, &subject, body_html, body_text)
}
pub fn send_emergency_access_invite_confirmed(address: &str, grantor_name: &str) -> EmptyResult {
let (subject, body_html, body_text) = get_text(
"email/emergency_access_invite_confirmed",
json!({
"url": CONFIG.domain(),
"grantor_name": grantor_name,
}),
)?;
send_email(address, &subject, body_html, body_text)
}
pub fn send_emergency_access_recovery_approved(address: &str, grantor_name: &str) -> EmptyResult {
let (subject, body_html, body_text) = get_text(
"email/emergency_access_recovery_approved",
json!({
"url": CONFIG.domain(),
"grantor_name": grantor_name,
}),
)?;
send_email(address, &subject, body_html, body_text)
}
pub fn send_emergency_access_recovery_initiated(
address: &str,
grantee_name: &str,
atype: &str,
wait_time_days: &str,
) -> EmptyResult {
let (subject, body_html, body_text) = get_text(
"email/emergency_access_recovery_initiated",
json!({
"url": CONFIG.domain(),
"grantee_name": grantee_name,
"atype": atype,
"wait_time_days": wait_time_days,
}),
)?;
send_email(address, &subject, body_html, body_text)
}
pub fn send_emergency_access_recovery_reminder(
address: &str,
grantee_name: &str,
atype: &str,
days_left: &str,
) -> EmptyResult {
let (subject, body_html, body_text) = get_text(
"email/emergency_access_recovery_reminder",
json!({
"url": CONFIG.domain(),
"grantee_name": grantee_name,
"atype": atype,
"days_left": days_left,
}),
)?;
send_email(address, &subject, body_html, body_text)
}
pub fn send_emergency_access_recovery_rejected(address: &str, grantor_name: &str) -> EmptyResult {
let (subject, body_html, body_text) = get_text(
"email/emergency_access_recovery_rejected",
json!({
"url": CONFIG.domain(),
"grantor_name": grantor_name,
}),
)?;
send_email(address, &subject, body_html, body_text)
}
pub fn send_emergency_access_recovery_timed_out(address: &str, grantee_name: &str, atype: &str) -> EmptyResult {
let (subject, body_html, body_text) = get_text(
"email/emergency_access_recovery_timed_out",
json!({
"url": CONFIG.domain(),
"grantee_name": grantee_name,
"atype": atype,
}),
)?;
send_email(address, &subject, body_html, body_text)
}
pub fn send_invite_accepted(new_user_email: &str, address: &str, org_name: &str) -> EmptyResult {
let (subject, body_html, body_text) = get_text(
"email/invite_accepted",
@ -249,7 +394,7 @@ pub fn send_invite_confirmed(address: &str, org_name: &str) -> EmptyResult {
send_email(address, &subject, body_html, body_text)
}
pub fn send_new_device_logged_in(address: &str, ip: &str, dt: &DateTime<Local>, device: &str) -> EmptyResult {
pub fn send_new_device_logged_in(address: &str, ip: &str, dt: &NaiveDateTime, device: &str) -> EmptyResult {
use crate::util::upcase_first;
let device = upcase_first(device);
@ -260,7 +405,26 @@ pub fn send_new_device_logged_in(address: &str, ip: &str, dt: &DateTime<Local>,
"url": CONFIG.domain(),
"ip": ip,
"device": device,
"datetime": crate::util::format_datetime_local(dt, fmt),
"datetime": crate::util::format_naive_datetime_local(dt, fmt),
}),
)?;
send_email(address, &subject, body_html, body_text)
}
pub fn send_incomplete_2fa_login(address: &str, ip: &str, dt: &NaiveDateTime, device: &str) -> EmptyResult {
use crate::util::upcase_first;
let device = upcase_first(device);
let fmt = "%A, %B %_d, %Y at %r %Z";
let (subject, body_html, body_text) = get_text(
"email/incomplete_2fa_login",
json!({
"url": CONFIG.domain(),
"ip": ip,
"device": device,
"datetime": crate::util::format_naive_datetime_local(dt, fmt),
"time_limit": CONFIG.incomplete_2fa_time_limit(),
}),
)?;
@ -340,15 +504,28 @@ fn send_email(address: &str, subject: &str, body_html: String, body_text: String
// Match some common errors and make them more user friendly
Err(e) => {
if e.is_client() {
debug!("SMTP Client error: {:#?}", e);
err!(format!("SMTP Client error: {}", e));
} else if e.is_transient() {
err!(format!("SMTP 4xx error: {:?}", e));
debug!("SMTP 4xx error: {:#?}", e);
err!(format!("SMTP 4xx error: {}", e));
} else if e.is_permanent() {
err!(format!("SMTP 5xx error: {:?}", e));
debug!("SMTP 5xx error: {:#?}", e);
let mut msg = e.to_string();
// Add a special check for 535 to add a more descriptive message
if msg.contains("(535)") {
msg = format!("{} - Authentication credentials invalid", msg);
}
err!(format!("SMTP 5xx error: {}", msg));
} else if e.is_timeout() {
err!(format!("SMTP timeout error: {:?}", e));
debug!("SMTP timeout error: {:#?}", e);
err!(format!("SMTP timeout error: {}", e));
} else if e.is_tls() {
debug!("SMTP Encryption error: {:#?}", e);
err!(format!("SMTP Encryption error: {}", e));
} else {
Err(e.into())
debug!("SMTP {:#?}", e);
err!(format!("SMTP {}", e));
}
}
}

56
src/main.rs

@ -1,6 +1,10 @@
#![forbid(unsafe_code)]
#![cfg_attr(feature = "unstable", feature(ip))]
#![recursion_limit = "512"]
// The recursion_limit is mainly triggered by the json!() macro.
// The more key/value pairs there are, the more recursion occurs.
// We want to keep this as low as possible, but not higher than 128.
// If you go above 128 it will cause rust-analyzer to fail.
#![recursion_limit = "87"]
extern crate openssl;
#[macro_use]
@ -28,6 +32,7 @@ mod crypto;
#[macro_use]
mod db;
mod mail;
mod ratelimit;
mod util;
pub use config::CONFIG;
@ -71,16 +76,18 @@ const HELP: &str = "\
-v, --version Prints the app version
";
pub const VERSION: Option<&str> = option_env!("VW_VERSION");
fn parse_args() {
const NO_VERSION: &str = "(Version info from Git not present)";
let mut pargs = pico_args::Arguments::from_env();
let version = VERSION.unwrap_or("(Version info from Git not present)");
if pargs.contains(["-h", "--help"]) {
println!("vaultwarden {}", option_env!("BWRS_VERSION").unwrap_or(NO_VERSION));
println!("vaultwarden {}", version);
print!("{}", HELP);
exit(0);
} else if pargs.contains(["-v", "--version"]) {
println!("vaultwarden {}", option_env!("BWRS_VERSION").unwrap_or(NO_VERSION));
println!("vaultwarden {}", version);
exit(0);
}
}
@ -89,7 +96,7 @@ fn launch_info() {
println!("/--------------------------------------------------------------------\\");
println!("| Starting Vaultwarden |");
if let Some(version) = option_env!("BWRS_VERSION") {
if let Some(version) = VERSION {
println!("|{:^68}|", format!("Version {}", version));
}
@ -104,6 +111,14 @@ fn launch_info() {
}
fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
// Depending on the main log level, we either enable or disable logging for trust-dns.
// Otherwise, any timeouts would clutter the logs, since trust-dns logs them at the warn level.
let trust_dns_level = if level >= log::LevelFilter::Debug {
level
} else {
log::LevelFilter::Off
};
let mut logger = fern::Dispatch::new()
.level(level)
// Hide unknown certificate errors if using self-signed
@ -122,6 +137,8 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
.level_for("hyper::client", log::LevelFilter::Off)
// Prevent cookie_store logs
.level_for("cookie_store", log::LevelFilter::Off)
// Variable level for trust-dns used by reqwest
.level_for("trust_dns_proto", trust_dns_level)
.chain(std::io::stdout());
// Enable smtp debug logging only when it is specifically needed.
@ -345,11 +362,40 @@ fn schedule_jobs(pool: db::DbPool) {
}));
}
// Send email notifications about incomplete 2FA logins, which potentially
// indicate that a user's master password has been compromised.
if !CONFIG.incomplete_2fa_schedule().is_empty() {
sched.add(Job::new(CONFIG.incomplete_2fa_schedule().parse().unwrap(), || {
api::send_incomplete_2fa_notifications(pool.clone());
}));
}
// Grant emergency access requests that have met the required wait time.
// This job should run before the emergency access reminders job to avoid
// sending reminders for requests that are about to be granted anyway.
if !CONFIG.emergency_request_timeout_schedule().is_empty() {
sched.add(Job::new(CONFIG.emergency_request_timeout_schedule().parse().unwrap(), || {
api::emergency_request_timeout_job(pool.clone());
}));
}
// Send reminders to emergency access grantors that there are pending
// emergency access requests.
if !CONFIG.emergency_notification_reminder_schedule().is_empty() {
sched.add(Job::new(CONFIG.emergency_notification_reminder_schedule().parse().unwrap(), || {
api::emergency_notification_reminder_job(pool.clone());
}));
}
// Periodically check for jobs to run. We probably won't need any
// jobs that run more often than once a minute, so a default poll
// interval of 30 seconds should be sufficient. Users who want to
// schedule jobs to run more frequently for some reason can reduce
// the poll interval accordingly.
//
// Note that the scheduler checks jobs in the order in which they
// were added, so if two jobs are both eligible to run at a given
// tick, the one that was added earlier will run first.
loop {
sched.tick();
thread::sleep(Duration::from_millis(CONFIG.job_poll_interval_ms()));

38
src/ratelimit.rs

@ -0,0 +1,38 @@
use once_cell::sync::Lazy;
use std::{net::IpAddr, num::NonZeroU32, time::Duration};
use governor::{clock::DefaultClock, state::keyed::DashMapStateStore, Quota, RateLimiter};
use crate::{Error, CONFIG};
type Limiter<T = IpAddr> = RateLimiter<T, DashMapStateStore<T>, DefaultClock>;
static LIMITER_LOGIN: Lazy<Limiter> = Lazy::new(|| {
let seconds = Duration::from_secs(CONFIG.login_ratelimit_seconds());
let burst = NonZeroU32::new(CONFIG.login_ratelimit_max_burst()).expect("Non-zero login ratelimit burst");
RateLimiter::keyed(Quota::with_period(seconds).expect("Non-zero login ratelimit seconds").allow_burst(burst))
});
static LIMITER_ADMIN: Lazy<Limiter> = Lazy::new(|| {
let seconds = Duration::from_secs(CONFIG.admin_ratelimit_seconds());
let burst = NonZeroU32::new(CONFIG.admin_ratelimit_max_burst()).expect("Non-zero admin ratelimit burst");
RateLimiter::keyed(Quota::with_period(seconds).expect("Non-zero admin ratelimit seconds").allow_burst(burst))
});
pub fn check_limit_login(ip: &IpAddr) -> Result<(), Error> {
match LIMITER_LOGIN.check_key(ip) {
Ok(_) => Ok(()),
Err(_e) => {
err_code!("Too many login requests", 429);
}
}
}
pub fn check_limit_admin(ip: &IpAddr) -> Result<(), Error> {
match LIMITER_ADMIN.check_key(ip) {
Ok(_) => Ok(()),
Err(_e) => {
err_code!("Too many admin requests", 429);
}
}
}
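A hedged sketch of where these guards slot in; the route shape is an assumption, and only check_limit_login() comes from the file above:
// Hypothetical check at the top of the login route:
fn login(ip: std::net::IpAddr) -> Result<(), crate::Error> {
    // Fails with an HTTP 429 error once this IP exhausts its burst quota.
    crate::ratelimit::check_limit_login(&ip)?;
    // ... continue with credential validation ...
    Ok(())
}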

19
src/static/global_domains.json

@ -47,7 +47,8 @@
"Type": 5,
"Domains": [
"wellsfargo.com",
"wf.com"
"wf.com",
"wellsfargoadvisors.com"
],
"Excluded": false
},
@ -905,5 +906,21 @@
"protonvpn.com"
],
"Excluded": false
},
{
"Type": 86,
"Domains": [
"ubisoft.com",
"ubi.com"
],
"Excluded": false
},
{
"Type": 87,
"Domains": [
"transferwise.com",
"wise.com"
],
"Excluded": false
}
]

446
src/static/scripts/bootstrap-native.js

@ -1,5 +1,5 @@
/*!
* Native JavaScript for Bootstrap v4.0.4 (https://thednp.github.io/bootstrap.native/)
* Native JavaScript for Bootstrap v4.0.8 (https://thednp.github.io/bootstrap.native/)
* Copyright 2015-2021 © dnp_theme
* Licensed under MIT (https://github.com/thednp/bootstrap.native/blob/master/LICENSE)
*/
@ -7,7 +7,7 @@
typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
typeof define === 'function' && define.amd ? define(factory) :
(global = typeof globalThis !== 'undefined' ? globalThis : global || self, global.BSN = factory());
}(this, (function () { 'use strict';
})(this, (function () { 'use strict';
const transitionEndEvent = 'webkitTransition' in document.head.style ? 'webkitTransitionEnd' : 'transitionend';
@ -188,7 +188,7 @@
element.dispatchEvent(closedAlertEvent);
self.dispose();
element.parentNode.removeChild(element);
element.remove();
}
// ALERT PRIVATE METHOD
@ -1022,9 +1022,9 @@
function isEmptyAnchor(elem) {
const parentAnchor = elem.closest('A');
// anchor href starts with #
return elem && ((elem.href && elem.href.slice(-1) === '#')
return elem && ((elem.hasAttribute('href') && elem.href.slice(-1) === '#')
// OR a child of an anchor with href starts with #
|| (parentAnchor && parentAnchor.href && parentAnchor.href.slice(-1) === '#'));
|| (parentAnchor && parentAnchor.hasAttribute('href') && parentAnchor.href.slice(-1) === '#'));
}
function setFocus(element) {
@ -1467,20 +1467,27 @@
}
}
const modalOpenClass = 'modal-open';
const modalBackdropClass = 'modal-backdrop';
const offcanvasBackdropClass = 'offcanvas-backdrop';
const modalActiveSelector = `.modal.${showClass}`;
const offcanvasActiveSelector = `.offcanvas.${showClass}`;
const overlay = document.createElement('div');
overlay.setAttribute('class', `${modalBackdropClass}`);
function getCurrentOpen() {
return queryElement(`${modalActiveSelector},${offcanvasActiveSelector}`);
}
function appendOverlay(hasFade) {
document.body.appendChild(overlay);
function toggleOverlayType(isModal) {
const targetClass = isModal ? modalBackdropClass : offcanvasBackdropClass;
[modalBackdropClass, offcanvasBackdropClass].forEach((c) => {
removeClass(overlay, c);
});
addClass(overlay, targetClass);
}
function appendOverlay(hasFade, isModal) {
toggleOverlayType(isModal);
document.body.append(overlay);
if (hasFade) addClass(overlay, fadeClass);
}
@ -1494,13 +1501,11 @@
}
function removeOverlay() {
const bd = document.body;
const currentOpen = getCurrentOpen();
if (!currentOpen) {
removeClass(overlay, fadeClass);
removeClass(bd, modalOpenClass);
bd.removeChild(overlay);
overlay.remove();
resetScrollbar();
}
}
@ -1518,7 +1523,6 @@
const modalString = 'modal';
const modalComponent = 'Modal';
const modalSelector = `.${modalString}`;
// const modalActiveSelector = `.${modalString}.${showClass}`;
const modalToggleSelector = `[${dataBsToggle}="${modalString}"]`;
const modalDismissSelector = `[${dataBsDismiss}="${modalString}"]`;
const modalStaticClass = `${modalString}-static`;
@ -1567,8 +1571,11 @@
}
function afterModalHide(self) {
const { triggers } = self;
removeOverlay();
const { triggers, options } = self;
if (!getCurrentOpen()) {
if (options.backdrop) removeOverlay();
resetScrollbar();
}
self.element.style.paddingRight = '';
self.isAnimating = false;
@ -1594,9 +1601,8 @@
element.style.display = 'block';
setModalScrollbar(self);
if (!queryElement(modalActiveSelector)) {
if (!getCurrentOpen()) {
document.body.style.overflow = 'hidden';
addClass(document.body, modalOpenClass);
}
addClass(element, showClass);
@ -1609,16 +1615,15 @@
function beforeModalHide(self, force) {
const {
element, relatedTarget, hasFade,
element, options, relatedTarget, hasFade,
} = self;
const currentOpen = getCurrentOpen();
element.style.display = '';
// force can also be the transitionEvent object, we wanna make sure it's not
// call is not forced and overlay is visible
if (!force && hasFade && hasClass(overlay, showClass)
&& !currentOpen) { // AND no modal is visible
if (options.backdrop && !force && hasFade && hasClass(overlay, showClass)
&& !getCurrentOpen()) { // AND no modal is visible
hideOverlay();
emulateTransitionEnd(overlay, () => afterModalHide(self));
} else {
@ -1666,7 +1671,8 @@
if (self.isAnimating) return;
const { isStatic, modalDialog } = self;
const { options, isStatic, modalDialog } = self;
const { backdrop } = options;
const { target } = e;
const selectedText = document.getSelection().toString().length;
const targetInsideDialog = modalDialog.contains(target);
@ -1676,7 +1682,7 @@
addClass(element, modalStaticClass);
self.isAnimating = true;
emulateTransitionEnd(modalDialog, () => staticTransitionEnd(self));
} else if (dismiss || (!selectedText && !isStatic && !targetInsideDialog)) {
} else if (dismiss || (!selectedText && !isStatic && !targetInsideDialog && backdrop)) {
self.relatedTarget = dismiss || null;
self.hide();
e.preventDefault();
@ -1734,8 +1740,9 @@
show() {
const self = this;
const {
element, isAnimating, hasFade, relatedTarget,
element, options, isAnimating, hasFade, relatedTarget,
} = self;
const { backdrop } = options;
let overlayDelay = 0;
if (hasClass(element, showClass) && !isAnimating) return;
@ -1744,8 +1751,6 @@
element.dispatchEvent(showModalEvent);
if (showModalEvent.defaultPrevented) return;
self.isAnimating = true;
// we elegantly hide any opened modal/offcanvas
const currentOpen = getCurrentOpen();
if (currentOpen && currentOpen !== element) {
@ -1755,18 +1760,24 @@
that.hide();
}
if (!queryElement(`.${modalBackdropClass}`)) {
appendOverlay(hasFade);
}
overlayDelay = getElementTransitionDuration(overlay);
self.isAnimating = true;
if (!hasClass(overlay, showClass)) {
showOverlay();
if (backdrop) {
if (!currentOpen && !hasClass(overlay, showClass)) {
appendOverlay(hasFade, 1);
} else {
toggleOverlayType(1);
}
overlayDelay = getElementTransitionDuration(overlay);
if (!currentOpen) {
if (!hasClass(overlay, showClass)) showOverlay();
setTimeout(() => beforeModalShow(self), overlayDelay);
} else beforeModalShow(self);
} else {
beforeModalShow(self);
if (currentOpen && hasClass(overlay, showClass)) {
hideOverlay();
}
}
}
hide(force) {
@ -1863,7 +1874,6 @@
const { element, options } = self;
if (!options.scroll) {
addClass(document.body, modalOpenClass);
document.body.style.overflow = 'hidden';
setOffCanvasScrollbar(self);
}
@ -1909,15 +1919,15 @@
const self = element[offcanvasComponent];
if (!self) return;
const { options, open, triggers } = self;
const { options, triggers } = self;
const { target } = e;
const trigger = target.closest(offcanvasToggleSelector);
if (trigger && trigger.tagName === 'A') e.preventDefault();
if (open && ((!element.contains(target) && options.backdrop
if ((!element.contains(target) && options.backdrop
&& (!trigger || (trigger && !triggers.includes(trigger))))
|| offCanvasDismiss.contains(target))) {
|| (offCanvasDismiss && offCanvasDismiss.contains(target))) {
self.relatedTarget = target === offCanvasDismiss ? offCanvasDismiss : null;
self.hide();
}
@ -1965,7 +1975,6 @@
element.removeAttribute(ariaModal);
element.removeAttribute('role');
element.style.visibility = '';
self.open = false;
self.isAnimating = false;
if (triggers.length) {
@ -1979,7 +1988,6 @@
if (options.backdrop) removeOverlay();
if (!options.scroll) {
resetScrollbar();
removeClass(document.body, modalOpenClass);
}
}
@ -2005,7 +2013,6 @@
.filter((btn) => getTargetElement(btn) === element);
// additional instance property
self.open = false;
self.isAnimating = false;
self.scrollbarWidth = measureScrollbar();
@ -2017,7 +2024,8 @@
// ========================
toggle() {
const self = this;
return self.open ? self.hide() : self.show();
if (hasClass(self.element, showClass)) self.hide();
else self.show();
}
show() {
@ -2027,7 +2035,7 @@
} = self;
let overlayDelay = 0;
if (self.open || isAnimating) return;
if (hasClass(element, showClass) || isAnimating) return;
showOffcanvasEvent.relatedTarget = relatedTarget || null;
element.dispatchEvent(showOffcanvasEvent);
@ -2043,12 +2051,13 @@
that.hide();
}
self.open = true;
self.isAnimating = true;
if (options.backdrop) {
if (!queryElement(`.${modalBackdropClass}`)) {
if (!currentOpen) {
appendOverlay(1);
} else {
toggleOverlayType();
}
overlayDelay = getElementTransitionDuration(overlay);
@ -2056,14 +2065,19 @@
if (!hasClass(overlay, showClass)) showOverlay();
setTimeout(() => beforeOffcanvasShow(self), overlayDelay);
} else beforeOffcanvasShow(self);
} else {
beforeOffcanvasShow(self);
if (currentOpen && hasClass(overlay, showClass)) {
hideOverlay();
}
}
}
hide(force) {
const self = this;
const { element, isAnimating, relatedTarget } = self;
if (!self.open || isAnimating) return;
if (!hasClass(element, showClass) || isAnimating) return;
hideOffcanvasEvent.relatedTarget = relatedTarget || null;
element.dispatchEvent(hideOffcanvasEvent);
@ -2107,19 +2121,6 @@
.some((mediaType) => element instanceof mediaType);
}
function closestRelative(element) {
let retval = null;
let el = element;
while (el !== document.body) {
el = el.parentElement;
if (getComputedStyle(el).position === 'relative') {
retval = el;
break;
}
}
return retval;
}
// both popovers and tooltips (this, event)
function styleTip(self, e) {
const tipClasses = /\b(top|bottom|start|end)+/;
@ -2133,32 +2134,32 @@
let tipDimensions = { w: tip.offsetWidth, h: tip.offsetHeight };
const windowWidth = (document.documentElement.clientWidth || document.body.clientWidth);
const windowHeight = (document.documentElement.clientHeight || document.body.clientHeight);
const { element, options, arrow } = self;
const {
element, options, arrow, positions,
} = self;
let { container, placement } = options;
let parentIsBody = container === document.body;
const targetPosition = getComputedStyle(element).position;
const parentPosition = getComputedStyle(container).position;
const staticParent = !parentIsBody && parentPosition === 'static';
let relativeParent = !parentIsBody && parentPosition === 'relative';
const relContainer = staticParent && closestRelative(container);
const { elementPosition, containerIsStatic, relContainer } = positions;
let { containerIsRelative } = positions;
// static containers should refer to another relative container or the body
container = relContainer || container;
relativeParent = staticParent && relContainer ? 1 : relativeParent;
containerIsRelative = containerIsStatic && relContainer ? 1 : containerIsRelative;
parentIsBody = container === document.body;
const parentRect = container.getBoundingClientRect();
const leftBoundry = relativeParent ? parentRect.left : 0;
const rightBoundry = relativeParent ? parentRect.right : windowWidth;
const leftBoundry = containerIsRelative ? parentRect.left : 0;
const rightBoundry = containerIsRelative ? parentRect.right : windowWidth;
// this case should not be possible
// absoluteParent = !parentIsBody && parentPosition === 'absolute',
// this case requires a container with placement: relative
const absoluteTarget = targetPosition === 'absolute';
// containerIsAbsolute = !parentIsBody && containerPosition === 'absolute',
// this case requires a container with position: relative
const absoluteTarget = elementPosition === 'absolute';
const targetRect = element.getBoundingClientRect();
const scroll = parentIsBody
? { x: window.pageXOffset, y: window.pageYOffset }
: { x: container.scrollLeft, y: container.scrollTop };
const elemDimensions = { w: element.offsetWidth, h: element.offsetHeight };
const top = relativeParent ? element.offsetTop : targetRect.top;
const left = relativeParent ? element.offsetLeft : targetRect.left;
const top = containerIsRelative ? element.offsetTop : targetRect.top;
const left = containerIsRelative ? element.offsetLeft : targetRect.left;
// reset arrow style
arrow.style.top = '';
arrow.style.left = '';
@ -2230,8 +2231,12 @@
}
} else if (['top', 'bottom'].includes(placement)) {
if (e && isMedia(element)) {
const eX = !relativeParent ? e.pageX : e.layerX + (absoluteTarget ? element.offsetLeft : 0);
const eY = !relativeParent ? e.pageY : e.layerY + (absoluteTarget ? element.offsetTop : 0);
const eX = !containerIsRelative
? e.pageX
: e.layerX + (absoluteTarget ? element.offsetLeft : 0);
const eY = !containerIsRelative
? e.pageY
: e.layerY + (absoluteTarget ? element.offsetTop : 0);
if (placement === 'top') {
topPosition = eY - tipDimensions.h - (isPopover ? arrowWidth : arrowHeight);
@ -2308,6 +2313,36 @@
return modal || navbarFixed || document.body;
}
function closestRelative(element) {
let retval = null;
let el = element;
while (el !== document.body) {
el = el.parentElement;
if (getComputedStyle(el).position === 'relative') {
retval = el;
break;
}
}
return retval;
}
function setHtml(element, content, sanitizeFn) {
if (typeof content === 'string' && !content.length) return;
if (typeof content === 'object') {
element.append(content);
} else {
let dirty = content.trim(); // fixing #233
if (typeof sanitizeFn === 'function') dirty = sanitizeFn(dirty);
const domParser = new DOMParser();
const tempDocument = domParser.parseFromString(dirty, 'text/html');
const method = tempDocument.children.length ? 'innerHTML' : 'innerText';
element[method] = tempDocument.body[method];
}
}
/* Native JavaScript for Bootstrap 5 | Popover
---------------------------------------------- */
@ -2320,12 +2355,13 @@
template: '<div class="popover" role="tooltip"><div class="popover-arrow"></div><h3 class="popover-header"></h3><div class="popover-body"></div></div>', // string
title: null, // string
content: null, // string
sanitizeFn: null, // function
customClass: null, // string
dismissible: false, // boolean
animation: true, // boolean
trigger: 'hover', // string
placement: 'top', // string
btnClose: '<button class="btn-close" aria-label="Close"></button>', // string
sanitizeFn: null, // function
dismissible: false, // boolean
animation: true, // boolean
delay: 200, // number
};
@ -2335,11 +2371,8 @@
const isIphone = navigator.userAgentData
? navigator.userAgentData.brands.some((x) => appleBrands.test(x.brand))
: appleBrands.test(navigator.userAgent);
// popoverArrowClass = `${popoverString}-arrow`,
const popoverHeaderClass = `${popoverString}-header`;
const popoverBodyClass = `${popoverString}-body`;
// close btn for dissmissible popover
let popoverCloseButton = '<button type="button" class="btn-close"></button>';
// POPOVER CUSTOM EVENTS
// =====================
@ -2372,51 +2405,59 @@
const {
animation, customClass, sanitizeFn, placement, dismissible,
} = options;
let { title, content, template } = options;
let {
title, content,
} = options;
const {
template, btnClose,
} = options;
// set initial popover class
const placementClass = `bs-${popoverString}-${tipClassPositions[placement]}`;
// fixing #233
title = title ? title.trim() : null;
content = content ? content.trim() : null;
// sanitize title && content
if (sanitizeFn) {
title = title ? sanitizeFn(title) : null;
content = content ? sanitizeFn(content) : null;
template = template ? sanitizeFn(template) : null;
popoverCloseButton = sanitizeFn(popoverCloseButton);
// load template
let popoverTemplate;
if (typeof template === 'object') {
popoverTemplate = template;
} else {
const htmlMarkup = document.createElement('div');
setHtml(htmlMarkup, template, sanitizeFn);
popoverTemplate = htmlMarkup.firstChild;
}
// set popover markup
self.popover = popoverTemplate.cloneNode(true);
self.popover = document.createElement('div');
const { popover } = self;
// set id and aria-describedby
// set id and role attributes
popover.setAttribute('id', id);
popover.setAttribute('role', 'tooltip');
// load template
const popoverTemplate = document.createElement('div');
popoverTemplate.innerHTML = template.trim();
popover.className = popoverTemplate.firstChild.className;
popover.innerHTML = popoverTemplate.firstChild.innerHTML;
const popoverHeader = queryElement(`.${popoverHeaderClass}`, popover);
const popoverBody = queryElement(`.${popoverBodyClass}`, popover);
// set arrow
// set arrow and enable access for styleTip
self.arrow = queryElement(`.${popoverString}-arrow`, popover);
// set dismissible button
if (dismissible) {
title = title ? title + popoverCloseButton : title;
content = title === null ? +popoverCloseButton : content;
if (title) {
if (title instanceof Element) setHtml(title, btnClose, sanitizeFn);
else title += btnClose;
} else {
if (popoverHeader) popoverHeader.remove();
if (content instanceof Element) setHtml(content, btnClose, sanitizeFn);
else content += btnClose;
}
}
// fill the template with content from options / data attributes
// also sanitize title && content
if (title && popoverHeader) setHtml(popoverHeader, title, sanitizeFn);
if (content && popoverBody) setHtml(popoverBody, content, sanitizeFn);
// fill the template with content from data attributes
if (title && popoverHeader) popoverHeader.innerHTML = title.trim();
if (content && popoverBody) popoverBody.innerHTML = content.trim();
// set btn and enable access for styleTip
[self.btn] = popover.getElementsByClassName('btn-close');
// set popover animation and placement
if (!hasClass(popover, popoverString)) addClass(popover, popoverString);
@ -2428,9 +2469,9 @@
}
function removePopover(self) {
const { element, popover, options } = self;
const { element, popover } = self;
element.removeAttribute(ariaDescribedBy);
options.container.removeChild(popover);
popover.remove();
self.timer = null;
}
@ -2455,12 +2496,11 @@
function dismissHandlerToggle(self, add) {
const action = add ? addEventListener : removeEventListener;
const { options, element, popover } = self;
const { options, element, btn } = self;
const { trigger, dismissible } = options;
if (dismissible) {
const [btnClose] = popover.getElementsByClassName('btn-close');
if (btnClose) btnClose[action]('click', self.hide);
if (btn) btn[action]('click', self.hide);
} else {
if (trigger === 'focus') element[action]('focusout', self.hide);
if (trigger === 'hover') document[action]('touchstart', popoverTouchHandler, passiveHandler);
@ -2473,12 +2513,10 @@
}
function popoverShowTrigger(self) {
dismissHandlerToggle(self, 1);
self.element.dispatchEvent(shownPopoverEvent);
}
function popoverHideTrigger(self) {
dismissHandlerToggle(self);
removePopover(self);
self.element.dispatchEvent(hiddenPopoverEvent);
}
@ -2499,6 +2537,7 @@
self.timer = null;
self.popover = null;
self.arrow = null;
self.btn = null;
self.enabled = false;
// set unique ID for aria-describedby
self.id = `${popoverString}-${getUID(element)}`;
@ -2520,6 +2559,21 @@
// create popover
createPopover(self);
// set positions
const { container } = self.options;
const elementPosition = getComputedStyle(element).position;
const containerPosition = getComputedStyle(container).position;
const parentIsBody = container === document.body;
const containerIsStatic = !parentIsBody && containerPosition === 'static';
const containerIsRelative = !parentIsBody && containerPosition === 'relative';
const relContainer = containerIsStatic && closestRelative(container);
self.positions = {
elementPosition,
containerIsRelative,
containerIsStatic,
relContainer,
};
// bind
self.update = self.update.bind(self);
@ -2548,23 +2602,21 @@
const { container } = options;
clearTimeout(self.timer);
self.timer = setTimeout(() => {
if (!isVisibleTip(popover, container)) {
element.dispatchEvent(showPopoverEvent);
if (showPopoverEvent.defaultPrevented) return;
// append to the container
container.appendChild(popover);
container.append(popover);
element.setAttribute(ariaDescribedBy, id);
self.update(e);
if (!hasClass(popover, showClass)) addClass(popover, showClass);
dismissHandlerToggle(self, 1);
if (options.animation) emulateTransitionEnd(popover, () => popoverShowTrigger(self));
else popoverShowTrigger(self);
}
}, 17);
}
hide(e) {
@ -2581,13 +2633,13 @@
const { element, popover, options } = self;
clearTimeout(self.timer);
self.timer = setTimeout(() => {
if (isVisibleTip(popover, options.container)) {
element.dispatchEvent(hidePopoverEvent);
if (hidePopoverEvent.defaultPrevented) return;
removeClass(popover, showClass);
dismissHandlerToggle(self);
if (options.animation) emulateTransitionEnd(popover, () => popoverHideTrigger(self));
else popoverHideTrigger(self);
@ -2633,7 +2685,7 @@
const { popover, options } = self;
const { container, animation } = options;
if (animation && isVisibleTip(popover, container)) {
options.delay = 0; // reset delay
self.options.delay = 0; // reset delay
self.hide();
emulateTransitionEnd(popover, () => togglePopoverHandlers(self));
} else {
@ -3052,7 +3104,7 @@
const toastSelector = `.${toastString}`;
const toastDismissSelector = `[${dataBsDismiss}="${toastString}"]`;
const showingClass = 'showing';
const hideClass = 'hide';
const hideClass = 'hide'; // marked as deprecated
const toastDefaultOptions = {
animation: true,
autohide: true,
@ -3070,10 +3122,7 @@
// =====================
function showToastComplete(self) {
const { element, options } = self;
if (!options.animation) {
removeClass(element, showingClass);
addClass(element, showClass);
}
element.dispatchEvent(shownToastEvent);
if (options.autohide) self.hide();
@ -3081,13 +3130,15 @@
function hideToastComplete(self) {
const { element } = self;
addClass(element, hideClass);
removeClass(element, showingClass);
removeClass(element, showClass);
addClass(element, hideClass); // B/C
element.dispatchEvent(hiddenToastEvent);
}
function closeToast(self) {
function hideToast(self) {
const { element, options } = self;
removeClass(element, showClass);
addClass(element, showingClass);
if (options.animation) {
reflow(element);
@ -3097,15 +3148,14 @@
}
}
function openToast(self) {
function showToast(self) {
const { element, options } = self;
removeClass(element, hideClass);
if (options.animation) {
removeClass(element, hideClass); // B/C
reflow(element);
addClass(element, showingClass);
addClass(element, showClass);
addClass(element, showingClass);
if (options.animation) {
emulateTransitionEnd(element, () => showToastComplete(self));
} else {
showToastComplete(self);
@ -3133,9 +3183,13 @@
super(toastComponent, target, toastDefaultOptions, config);
// bind
const self = this;
const { element, options } = self;
// set fadeClass, the options.animation will override the markup
if (options.animation && !hasClass(element, fadeClass)) addClass(element, fadeClass);
else if (!options.animation && hasClass(element, fadeClass)) removeClass(element, fadeClass);
// dismiss button
self.dismiss = queryElement(toastDismissSelector, self.element);
self.dismiss = queryElement(toastDismissSelector, element);
// bind
self.show = self.show.bind(self);
@ -3150,13 +3204,12 @@
show() {
const self = this;
const { element } = self;
if (element && hasClass(element, hideClass)) {
if (element && !hasClass(element, showClass)) {
element.dispatchEvent(showToastEvent);
if (showToastEvent.defaultPrevented) return;
addClass(element, fadeClass);
clearTimeout(self.timer);
self.timer = setTimeout(() => openToast(self), 10);
self.timer = setTimeout(() => showToast(self), 10);
}
}
@ -3169,7 +3222,7 @@
if (hideToastEvent.defaultPrevented) return;
clearTimeout(self.timer);
self.timer = setTimeout(() => closeToast(self),
self.timer = setTimeout(() => hideToast(self),
noTimer ? 10 : options.delay);
}
}
@ -3177,7 +3230,7 @@
dispose() {
const self = this;
const { element, options } = self;
self.hide();
self.hide(1);
if (options.animation) emulateTransitionEnd(element, () => completeDisposeToast(self));
else completeDisposeToast(self);
@ -3206,13 +3259,14 @@
const titleAttr = 'title';
const tooltipInnerClass = `${tooltipString}-inner`;
const tooltipDefaultOptions = {
title: null,
template: '<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',
placement: 'top',
animation: true,
customClass: null,
delay: 200,
sanitizeFn: null,
title: null, // string
customClass: null, // string | null
placement: 'top', // string
sanitizeFn: null, // function
animation: true, // bool
html: false, // bool
delay: 200, // number
};
// TOOLTIP CUSTOM EVENTS
@ -3226,51 +3280,48 @@
// =======================
function createTooltip(self) {
const { options, id } = self;
const placementClass = `bs-${tooltipString}-${tipClassPositions[options.placement]}`;
let titleString = options.title.trim();
const {
title, template, customClass, animation, placement, sanitizeFn,
} = options;
const placementClass = `bs-${tooltipString}-${tipClassPositions[placement]}`;
// sanitize stuff
if (options.sanitizeFn) {
titleString = options.sanitizeFn(titleString);
options.template = options.sanitizeFn(options.template);
}
if (!title) return;
if (!titleString) return;
// load template
let tooltipTemplate;
if (typeof template === 'object') {
tooltipTemplate = template;
} else {
const htmlMarkup = document.createElement('div');
setHtml(htmlMarkup, template, sanitizeFn);
tooltipTemplate = htmlMarkup.firstChild;
}
// create tooltip
self.tooltip = document.createElement('div');
self.tooltip = tooltipTemplate.cloneNode(true);
const { tooltip } = self;
// set aria
// set title
setHtml(queryElement(`.${tooltipInnerClass}`, tooltip), title, sanitizeFn);
// set id & role attribute
tooltip.setAttribute('id', id);
// set markup
const tooltipMarkup = document.createElement('div');
tooltipMarkup.innerHTML = options.template.trim();
tooltip.className = tooltipMarkup.firstChild.className;
tooltip.innerHTML = tooltipMarkup.firstChild.innerHTML;
queryElement(`.${tooltipInnerClass}`, tooltip).innerHTML = titleString;
tooltip.setAttribute('role', tooltipString);
// set arrow
self.arrow = queryElement(`.${tooltipString}-arrow`, tooltip);
// set class and role attribute
tooltip.setAttribute('role', tooltipString);
// set classes
if (!hasClass(tooltip, tooltipString)) addClass(tooltip, tooltipString);
if (options.animation && !hasClass(tooltip, fadeClass)) addClass(tooltip, fadeClass);
if (options.customClass && !hasClass(tooltip, options.customClass)) {
addClass(tooltip, options.customClass);
if (animation && !hasClass(tooltip, fadeClass)) addClass(tooltip, fadeClass);
if (customClass && !hasClass(tooltip, customClass)) {
addClass(tooltip, customClass);
}
if (!hasClass(tooltip, placementClass)) addClass(tooltip, placementClass);
}
function removeTooltip(self) {
const { element, options, tooltip } = self;
const { element, tooltip } = self;
element.removeAttribute(ariaDescribedBy);
options.container.removeChild(tooltip);
tooltip.remove();
self.timer = null;
}
@ -3372,6 +3423,21 @@
self.id = `${tooltipString}-${getUID(element)}`;
createTooltip(self);
// set positions
const { container } = self.options;
const elementPosition = getComputedStyle(element).position;
const containerPosition = getComputedStyle(container).position;
const parentIsBody = container === document.body;
const containerIsStatic = !parentIsBody && containerPosition === 'static';
const containerIsRelative = !parentIsBody && containerPosition === 'relative';
const relContainer = containerIsStatic && closestRelative(container);
self.positions = {
elementPosition,
containerIsRelative,
containerIsStatic,
relContainer,
};
// attach events
toggleTooltipHandlers(self, 1);
}
@ -3383,22 +3449,23 @@
const {
options, tooltip, element, id,
} = self;
const {
container, animation,
} = options;
clearTimeout(self.timer);
self.timer = setTimeout(() => {
if (!isVisibleTip(tooltip, options.container)) {
if (!isVisibleTip(tooltip, container)) {
element.dispatchEvent(showTooltipEvent);
if (showTooltipEvent.defaultPrevented) return;
// append to container
options.container.appendChild(tooltip);
container.append(tooltip);
element.setAttribute(ariaDescribedBy, id);
self.update(e);
if (!hasClass(tooltip, showClass)) addClass(tooltip, showClass);
if (options.animation) emulateTransitionEnd(tooltip, () => tooltipShownAction(self));
if (animation) emulateTransitionEnd(tooltip, () => tooltipShownAction(self));
else tooltipShownAction(self);
}
}, 20);
}
hide(e) {
@@ -3483,20 +3550,9 @@
constructor: Tooltip,
};
var version = "4.0.4";
// import { alertInit } from '../components/alert-native.js';
// import { buttonInit } from '../components/button-native.js';
// import { carouselInit } from '../components/carousel-native.js';
// import { collapseInit } from '../components/collapse-native.js';
// import { dropdownInit } from '../components/dropdown-native.js';
// import { modalInit } from '../components/modal-native.js';
// import { offcanvasInit } from '../components/offcanvas-native.js';
// import { popoverInit } from '../components/popover-native.js';
// import { scrollSpyInit } from '../components/scrollspy-native.js';
// import { tabInit } from '../components/tab-native.js';
// import { toastInit } from '../components/toast-native.js';
// import { tooltipInit } from '../components/tooltip-native.js';
var version = "4.0.8";
const Version = version;
const componentsInit = {
Alert: Alert.init,
@@ -3532,7 +3588,7 @@
document.addEventListener('DOMContentLoaded', () => initCallback(), { once: true });
}
var index = {
const BSN = {
Alert,
Button,
Carousel,
@@ -3547,9 +3603,9 @@
Tooltip,
initCallback,
Version: version,
Version,
};
return index;
return BSN;
})));
}));
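The createTooltip rewrite in the hunks above replaces string-based innerHTML assembly with cloning a template node and injecting the title via setHtml. A minimal standalone sketch of that technique (illustrative only, not the bootstrap.native source; buildTooltip, its default markup, and its option names are assumptions):

// Minimal sketch of the node-cloning technique adopted above (illustrative only,
// not the bootstrap.native source; buildTooltip and its defaults are hypothetical).
function buildTooltip(id, title, { template, sanitizeFn, customClass } = {}) {
  const markup = template
    || '<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>';
  let node;
  if (typeof markup === 'object') {
    node = markup.cloneNode(true); // reuse a prepared template node directly
  } else {
    const wrapper = document.createElement('div');
    wrapper.innerHTML = typeof sanitizeFn === 'function' ? sanitizeFn(markup) : markup;
    node = wrapper.firstChild;
  }
  node.setAttribute('id', id);
  node.setAttribute('role', 'tooltip');
  const inner = node.querySelector('.tooltip-inner');
  if (inner) inner.innerHTML = typeof sanitizeFn === 'function' ? sanitizeFn(title) : title;
  if (customClass) node.classList.add(customClass);
  return node;
}

// e.g. document.body.append(buildTooltip('tooltip-1', 'Saved!', { customClass: 'bs-tooltip-top' }));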

77
src/static/scripts/bootstrap.css

@@ -1,6 +1,6 @@
@charset "UTF-8";
/*!
* Bootstrap v5.1.0 (https://getbootstrap.com/)
* Bootstrap v5.1.3 (https://getbootstrap.com/)
* Copyright 2011-2021 The Bootstrap Authors
* Copyright 2011-2021 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
@@ -46,7 +46,8 @@
--bs-dark-rgb: 33, 37, 41;
--bs-white-rgb: 255, 255, 255;
--bs-black-rgb: 0, 0, 0;
--bs-body-rgb: 33, 37, 41;
--bs-body-color-rgb: 33, 37, 41;
--bs-body-bg-rgb: 255, 255, 255;
--bs-font-sans-serif: system-ui, -apple-system, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", "Liberation Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
--bs-font-monospace: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
--bs-gradient: linear-gradient(180deg, rgba(255, 255, 255, 0.15), rgba(255, 255, 255, 0));
@@ -447,6 +448,10 @@ legend + * {
padding: 0;
}
::-webkit-file-upload-button {
font: inherit;
}
::file-selector-button {
font: inherit;
}
@@ -660,16 +665,16 @@ progress {
--bs-gutter-y: 0;
display: flex;
flex-wrap: wrap;
margin-top: calc(var(--bs-gutter-y) * -1);
margin-right: calc(var(--bs-gutter-x) * -.5);
margin-left: calc(var(--bs-gutter-x) * -.5);
margin-top: calc(-1 * var(--bs-gutter-y));
margin-right: calc(-0.5 * var(--bs-gutter-x));
margin-left: calc(-0.5 * var(--bs-gutter-x));
}
.row > * {
flex-shrink: 0;
width: 100%;
max-width: 100%;
padding-right: calc(var(--bs-gutter-x) * .5);
padding-left: calc(var(--bs-gutter-x) * .5);
padding-right: calc(var(--bs-gutter-x) * 0.5);
padding-left: calc(var(--bs-gutter-x) * 0.5);
margin-top: var(--bs-gutter-y);
}
@@ -1973,8 +1978,8 @@ progress {
.table > thead {
vertical-align: bottom;
}
.table > :not(:last-child) > :last-child > * {
border-bottom-color: currentColor;
.table > :not(:first-child) {
border-top: 2px solid currentColor;
}
.caption-top {
@@ -1995,8 +2000,11 @@ progress {
.table-borderless > :not(caption) > * > * {
border-bottom-width: 0;
}
.table-borderless > :not(:first-child) {
border-top-width: 0;
}
.table-striped > tbody > tr:nth-of-type(odd) {
.table-striped > tbody > tr:nth-of-type(odd) > * {
--bs-table-accent-bg: var(--bs-table-striped-bg);
color: var(--bs-table-striped-color);
}
@@ -2006,7 +2014,7 @@ progress {
color: var(--bs-table-active-color);
}
.table-hover > tbody > tr:hover {
.table-hover > tbody > tr:hover > * {
--bs-table-accent-bg: var(--bs-table-hover-bg);
color: var(--bs-table-hover-color);
}
@@ -2222,6 +2230,22 @@ progress {
background-color: #e9ecef;
opacity: 1;
}
.form-control::-webkit-file-upload-button {
padding: 0.375rem 0.75rem;
margin: -0.375rem -0.75rem;
-webkit-margin-end: 0.75rem;
margin-inline-end: 0.75rem;
color: #212529;
background-color: #e9ecef;
pointer-events: none;
border-color: inherit;
border-style: solid;
border-width: 0;
border-inline-end-width: 1px;
border-radius: 0;
-webkit-transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out;
transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out;
}
.form-control::file-selector-button {
padding: 0.375rem 0.75rem;
margin: -0.375rem -0.75rem;
@@ -2238,10 +2262,17 @@ progress {
transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out;
}
@media (prefers-reduced-motion: reduce) {
.form-control::-webkit-file-upload-button {
-webkit-transition: none;
transition: none;
}
.form-control::file-selector-button {
transition: none;
}
}
.form-control:hover:not(:disabled):not([readonly])::-webkit-file-upload-button {
background-color: #dde0e3;
}
.form-control:hover:not(:disabled):not([readonly])::file-selector-button {
background-color: #dde0e3;
}
@@ -2293,6 +2324,12 @@ progress {
font-size: 0.875rem;
border-radius: 0.2rem;
}
.form-control-sm::-webkit-file-upload-button {
padding: 0.25rem 0.5rem;
margin: -0.25rem -0.5rem;
-webkit-margin-end: 0.5rem;
margin-inline-end: 0.5rem;
}
.form-control-sm::file-selector-button {
padding: 0.25rem 0.5rem;
margin: -0.25rem -0.5rem;
@@ -2312,6 +2349,12 @@ progress {
font-size: 1.25rem;
border-radius: 0.3rem;
}
.form-control-lg::-webkit-file-upload-button {
padding: 0.5rem 1rem;
margin: -0.5rem -1rem;
-webkit-margin-end: 1rem;
margin-inline-end: 1rem;
}
.form-control-lg::file-selector-button {
padding: 0.5rem 1rem;
margin: -0.5rem -1rem;
@@ -2400,6 +2443,7 @@ textarea.form-control-lg {
padding-bottom: 0.25rem;
padding-left: 0.5rem;
font-size: 0.875rem;
border-radius: 0.2rem;
}
.form-select-lg {
@@ -2407,6 +2451,7 @@ textarea.form-control-lg {
padding-bottom: 0.5rem;
padding-left: 1rem;
font-size: 1.25rem;
border-radius: 0.3rem;
}
.form-check {
@@ -6443,15 +6488,15 @@ textarea.form-control-lg {
}
.ratio-4x3 {
--bs-aspect-ratio: calc(3 / 4 * 100%);
--bs-aspect-ratio: 75%;
}
.ratio-16x9 {
--bs-aspect-ratio: calc(9 / 16 * 100%);
--bs-aspect-ratio: 56.25%;
}
.ratio-21x9 {
--bs-aspect-ratio: calc(9 / 21 * 100%);
--bs-aspect-ratio: 42.8571428571%;
}
.fixed-top {
@@ -7699,7 +7744,7 @@ textarea.form-control-lg {
.text-body {
--bs-text-opacity: 1;
color: rgba(var(--bs-body-rgb), var(--bs-text-opacity)) !important;
color: rgba(var(--bs-body-color-rgb), var(--bs-text-opacity)) !important;
}
.text-muted {
@@ -7790,7 +7835,7 @@ textarea.form-control-lg {
.bg-body {
--bs-bg-opacity: 1;
background-color: rgba(var(--bs-body-rgb), var(--bs-bg-opacity)) !important;
background-color: rgba(var(--bs-body-bg-rgb), var(--bs-bg-opacity)) !important;
}
.bg-transparent {
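Upgrade note on the utility hunks above: Bootstrap 5.1.3 splits the old --bs-body-rgb custom property into --bs-body-color-rgb and --bs-body-bg-rgb, and .text-body/.bg-body now read the new names, so custom CSS still referencing --bs-body-rgb resolves to an empty value. A quick console check, as a hedged sketch assuming the page loads this stylesheet (expected values follow the diff above):

// Quick console check (illustrative) that a served build includes the 5.1.3 split;
// run on a page that loads the stylesheet above.
const rootStyle = getComputedStyle(document.documentElement);
console.log(rootStyle.getPropertyValue('--bs-body-color-rgb').trim()); // "33, 37, 41"
console.log(rootStyle.getPropertyValue('--bs-body-bg-rgb').trim());    // "255, 255, 255"
console.log(rootStyle.getPropertyValue('--bs-body-rgb').trim());       // "" once 5.1.3 is in place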

110
src/static/scripts/datatables.css

@@ -4,13 +4,94 @@
*
* To rebuild or modify this file with the latest versions of the included
* software please visit:
* https://datatables.net/download/#bs5/dt-1.10.25
* https://datatables.net/download/#bs5/dt-1.11.3
*
* Included libraries:
* DataTables 1.10.25
* DataTables 1.11.3
*/
@charset "UTF-8";
td.dt-control {
background: url("https://www.datatables.net/examples/resources/details_open.png") no-repeat center center;
cursor: pointer;
}
tr.dt-hasChild td.dt-control {
background: url("https://www.datatables.net/examples/resources/details_close.png") no-repeat center center;
}
table.dataTable th.dt-left,
table.dataTable td.dt-left {
text-align: left;
}
table.dataTable th.dt-center,
table.dataTable td.dt-center,
table.dataTable td.dataTables_empty {
text-align: center;
}
table.dataTable th.dt-right,
table.dataTable td.dt-right {
text-align: right;
}
table.dataTable th.dt-justify,
table.dataTable td.dt-justify {
text-align: justify;
}
table.dataTable th.dt-nowrap,
table.dataTable td.dt-nowrap {
white-space: nowrap;
}
table.dataTable thead th.dt-head-left,
table.dataTable thead td.dt-head-left,
table.dataTable tfoot th.dt-head-left,
table.dataTable tfoot td.dt-head-left {
text-align: left;
}
table.dataTable thead th.dt-head-center,
table.dataTable thead td.dt-head-center,
table.dataTable tfoot th.dt-head-center,
table.dataTable tfoot td.dt-head-center {
text-align: center;
}
table.dataTable thead th.dt-head-right,
table.dataTable thead td.dt-head-right,
table.dataTable tfoot th.dt-head-right,
table.dataTable tfoot td.dt-head-right {
text-align: right;
}
table.dataTable thead th.dt-head-justify,
table.dataTable thead td.dt-head-justify,
table.dataTable tfoot th.dt-head-justify,
table.dataTable tfoot td.dt-head-justify {
text-align: justify;
}
table.dataTable thead th.dt-head-nowrap,
table.dataTable thead td.dt-head-nowrap,
table.dataTable tfoot th.dt-head-nowrap,
table.dataTable tfoot td.dt-head-nowrap {
white-space: nowrap;
}
table.dataTable tbody th.dt-body-left,
table.dataTable tbody td.dt-body-left {
text-align: left;
}
table.dataTable tbody th.dt-body-center,
table.dataTable tbody td.dt-body-center {
text-align: center;
}
table.dataTable tbody th.dt-body-right,
table.dataTable tbody td.dt-body-right {
text-align: right;
}
table.dataTable tbody th.dt-body-justify,
table.dataTable tbody td.dt-body-justify {
text-align: justify;
}
table.dataTable tbody th.dt-body-nowrap,
table.dataTable tbody td.dt-body-nowrap {
white-space: nowrap;
}
/*! Bootstrap 5 integration for DataTables
*
* ©2020 SpryMedia Ltd, all rights reserved.
@@ -143,21 +224,21 @@ div.dataTables_scrollHead table.dataTable {
margin-bottom: 0 !important;
}
div.dataTables_scrollBody table {
div.dataTables_scrollBody > table {
border-top: none;
margin-top: 0 !important;
margin-bottom: 0 !important;
}
div.dataTables_scrollBody table thead .sorting:before,
div.dataTables_scrollBody table thead .sorting_asc:before,
div.dataTables_scrollBody table thead .sorting_desc:before,
div.dataTables_scrollBody table thead .sorting:after,
div.dataTables_scrollBody table thead .sorting_asc:after,
div.dataTables_scrollBody table thead .sorting_desc:after {
div.dataTables_scrollBody > table > thead .sorting:before,
div.dataTables_scrollBody > table > thead .sorting_asc:before,
div.dataTables_scrollBody > table > thead .sorting_desc:before,
div.dataTables_scrollBody > table > thead .sorting:after,
div.dataTables_scrollBody > table > thead .sorting_asc:after,
div.dataTables_scrollBody > table > thead .sorting_desc:after {
display: none;
}
div.dataTables_scrollBody table tbody tr:first-child th,
div.dataTables_scrollBody table tbody tr:first-child td {
div.dataTables_scrollBody > table > tbody tr:first-child th,
div.dataTables_scrollBody > table > tbody tr:first-child td {
border-top: none;
}
@@ -235,4 +316,11 @@ div.table-responsive > div.dataTables_wrapper > div.row > div[class^=col-]:last-
padding-right: 0;
}
table.dataTable.table-striped > tbody > tr:nth-of-type(2n+1) {
--bs-table-accent-bg: transparent;
}
table.dataTable.table-striped > tbody > tr.odd {
--bs-table-accent-bg: var(--bs-table-striped-bg);
}
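The two overrides just above key the stripe color to the .odd class that DataTables assigns, rather than to DOM position, so stripes stay correct after client-side sorting and filtering. A hypothetical initialization showing the context in which this matters (the table id and options are illustrative, not taken from this repo's admin pages):

// Hypothetical DataTables setup; with the stylesheet above, stripes track the
// .odd class that DataTables reassigns after sorting/filtering, not row position.
$(document).ready(function () {
  $('#users-table').DataTable({
    paging: false,
    searching: true,
    ordering: true,
  });
});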

1019
src/static/scripts/datatables.js

File diff suppressed because it is too large

18
src/static/templates/admin/base.hbs

@@ -4,9 +4,9 @@
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no" />
<meta name="robots" content="noindex,nofollow" />
<link rel="icon" type="image/png" href="{{urlpath}}/bwrs_static/vaultwarden-icon.png">
<link rel="icon" type="image/png" href="{{urlpath}}/vw_static/vaultwarden-icon.png">
<title>Vaultwarden Admin Panel</title>
<link rel="stylesheet" href="{{urlpath}}/bwrs_static/bootstrap.css" />
<link rel="stylesheet" href="{{urlpath}}/vw_static/bootstrap.css" />
<style>
body {
padding-top: 75px;
@@ -21,7 +21,7 @@
margin: -5px 0 0 0;
}
</style>
<script src="{{urlpath}}/bwrs_static/identicon.js"></script>
<script src="{{urlpath}}/vw_static/identicon.js"></script>
<script>
'use strict';
@@ -62,8 +62,8 @@
headers: { "Content-Type": "application/json" }
}).then( resp => {
if (resp.ok) { msg(successMsg, reload_page); return Promise.reject({error: false}); }
respStatus = resp.status;
respStatusText = resp.statusText;
const respStatus = resp.status;
const respStatusText = resp.statusText;
return resp.text();
}).then( respText => {
try {
@@ -85,7 +85,7 @@
<body class="bg-light">
<nav class="navbar navbar-expand-md navbar-dark bg-dark mb-4 shadow fixed-top">
<div class="container-xl">
<a class="navbar-brand" href="{{urlpath}}/admin"><img class="vaultwarden-icon" src="{{urlpath}}/bwrs_static/vaultwarden-icon.png" alt="V">aultwarden Admin</a>
<a class="navbar-brand" href="{{urlpath}}/admin"><img class="vaultwarden-icon" src="{{urlpath}}/vw_static/vaultwarden-icon.png" alt="V">aultwarden Admin</a>
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarCollapse"
aria-controls="navbarCollapse" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
@@ -126,15 +126,15 @@
// get current URL path and assign 'active' class to the correct nav-item
(() => {
var pathname = window.location.pathname;
const pathname = window.location.pathname;
if (pathname === "") return;
var navItem = document.querySelectorAll('.navbar-nav .nav-item a[href="'+pathname+'"]');
let navItem = document.querySelectorAll('.navbar-nav .nav-item a[href="'+pathname+'"]');
if (navItem.length === 1) {
navItem[0].className = navItem[0].className + ' active';
navItem[0].setAttribute('aria-current', 'page');
}
})();
</script>
<script src="{{urlpath}}/bwrs_static/bootstrap-native.js"></script>
<script src="{{urlpath}}/vw_static/bootstrap-native.js"></script>
</body>
</html>
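A side note on the respStatus/respStatusText change above: the template runs under 'use strict', where assigning to undeclared variables throws, so the added const declarations are a correctness fix, not just style. A simplified sketch of the corrected pattern (postJson is a hypothetical name, not the template's actual helper):

'use strict';
// Hypothetical helper distilling the corrected pattern: block-scoped consts capture
// the status before the body is read; under 'use strict', the old implicit-global
// assignments would have thrown a ReferenceError instead.
function postJson(url, body) {
  return fetch(url, {
    method: 'POST',
    body: JSON.stringify(body),
    headers: { 'Content-Type': 'application/json' },
  }).then((resp) => {
    if (resp.ok) return resp;
    const respStatus = resp.status;
    const respStatusText = resp.statusText;
    return resp.text().then((respText) => {
      throw new Error(`Error ${respStatus} (${respStatusText}): ${respText}`);
    });
  });
}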

6
src/static/templates/admin/diagnostics.hbs

@@ -58,7 +58,7 @@
<dt class="col-sm-5">Running within Docker</dt>
<dd class="col-sm-7">
{{#if page_data.running_within_docker}}
<span class="d-block"><b>Yes</b></span>
<span class="d-block"><b>Yes (Base: {{ page_data.docker_base_image }})</b></span>
{{/if}}
{{#unless page_data.running_within_docker}}
<span class="d-block"><b>No</b></span>
@@ -150,7 +150,7 @@
<dt class="col-sm-5">Domain configuration
<span class="badge bg-success d-none" id="domain-success" title="The domain variable matches the browser location and seems to be configured correctly.">Match</span>
<span class="badge bg-danger d-none" id="domain-warning" title="The domain variable does not matches the browsers location.&#013;&#010;The domain variable does not seem to be configured correctly.&#013;&#010;Some features may not work as expected!">No Match</span>
<span class="badge bg-danger d-none" id="domain-warning" title="The domain variable does not match the browser location.&#013;&#010;The domain variable does not seem to be configured correctly.&#013;&#010;Some features may not work as expected!">No Match</span>
<span class="badge bg-success d-none" id="https-success" title="Configurued to use HTTPS">HTTPS</span>
<span class="badge bg-danger d-none" id="https-warning" title="Not configured to use HTTPS.&#013;&#010;Some features may not work as expected!">No HTTPS</span>
</dt>
@@ -329,7 +329,7 @@
supportString += "* Vaultwarden version: v{{ version }}\n";
supportString += "* Web-vault version: v{{ page_data.web_vault_version }}\n";
supportString += "* Running within Docker: {{ page_data.running_within_docker }}\n";
supportString += "* Running within Docker: {{ page_data.running_within_docker }} (Base: {{ page_data.docker_base_image }})\n";
supportString += "* Environment settings overridden: ";
{{#if page_data.overrides}}
supportString += "true\n"

10
src/static/templates/admin/organizations.hbs

@@ -37,8 +37,8 @@
<span class="d-block"><strong>Size:</strong> {{attachment_size}}</span>
{{/if}}
</td>
<td class="text-end pe-2 small">
<a class="d-block" href="#" onclick='deleteOrganization({{jsesc Id}}, {{jsesc Name}}, {{jsesc BillingEmail}})'>Delete Organization</a>
<td class="text-end px-0 small">
<button type="button" class="btn btn-sm btn-link p-0 border-0" onclick='deleteOrganization({{jsesc Id}}, {{jsesc Name}}, {{jsesc BillingEmail}})'>Delete Organization</button>
</td>
</tr>
{{/each}}
@@ -48,9 +48,9 @@
</div>
</main>
<link rel="stylesheet" href="{{urlpath}}/bwrs_static/datatables.css" />
<script src="{{urlpath}}/bwrs_static/jquery-3.6.0.slim.js"></script>
<script src="{{urlpath}}/bwrs_static/datatables.js"></script>
<link rel="stylesheet" href="{{urlpath}}/vw_static/datatables.css" />
<script src="{{urlpath}}/vw_static/jquery-3.6.0.slim.js"></script>
<script src="{{urlpath}}/vw_static/datatables.js"></script>
<script>
'use strict';

22
src/static/templates/admin/settings.hbs

@@ -12,9 +12,7 @@
{{#each config}}
{{#if groupdoc}}
<div class="card bg-light mb-3">
<div class="card-header" role="button" data-bs-toggle="collapse" data-bs-target="#g_{{group}}">
<button type="button" class="btn btn-link text-decoration-none collapsed" data-bs-toggle="collapse" data-bs-target="#g_{{group}}">{{groupdoc}}</button>
</div>
<button id="b_{{group}}" type="button" class="card-header text-start btn btn-link text-decoration-none" aria-expanded="false" aria-controls="g_{{group}}" data-bs-toggle="collapse" data-bs-target="#g_{{group}}">{{groupdoc}}</button>
<div id="g_{{group}}" class="card-body collapse">
{{#each elements}}
{{#if editable}}
@@ -61,10 +59,8 @@
{{/each}}
<div class="card bg-light mb-3">
<div class="card-header" role="button" data-bs-toggle="collapse" data-bs-target="#g_readonly">
<button type="button" class="btn btn-link text-decoration-none collapsed" data-bs-toggle="collapse" data-bs-target="#g_readonly">Read-Only Config</button>
</div>
<button id="b_readonly" type="button" class="card-header text-start btn btn-link text-decoration-none" aria-expanded="false" aria-controls="g_readonly"
data-bs-toggle="collapse" data-bs-target="#g_readonly">Read-Only Config</button>
<div id="g_readonly" class="card-body collapse">
<div class="small mb-3">
NOTE: These options can't be modified in the editor because they would require the server
@@ -109,9 +105,8 @@
{{#if can_backup}}
<div class="card bg-light mb-3">
<div class="card-header" role="button" data-bs-toggle="collapse" data-bs-target="#g_database">
<button type="button" class="btn btn-link text-decoration-none collapsed" data-bs-toggle="collapse" data-bs-target="#g_database">Backup Database</button>
</div>
<button id="b_database" type="button" class="card-header text-start btn btn-link text-decoration-none" aria-expanded="false" aria-controls="g_database"
data-bs-toggle="collapse" data-bs-target="#g_database">Backup Database</button>
<div id="g_database" class="card-body collapse">
<div class="small mb-3">
WARNING: This function only creates a backup copy of the SQLite database.
@@ -224,11 +219,10 @@
onChange(); // Trigger the event initially
checkbox.addEventListener("change", onChange);
}
// These are formatted because otherwise the
// VSCode formatter breaks But they still work
// {{#each config}} {{#if grouptoggle}}
{{#each config}} {{#if grouptoggle}}
masterCheck("input_{{grouptoggle}}", "#g_{{group}} input");
// {{/if}} {{/each}}
{{/if}} {{/each}}
// Two functions to help check if there were changes to the form fields
// Useful, for example, during the SMTP test to prevent people from clicking save before testing their new settings
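For context on the masterCheck calls that the now-uncommented Handlebars block emits, a plausible reconstruction of the helper is sketched below (hypothetical: only its last two lines are visible in this hunk, and the real definition lives earlier in the template):

// Plausible reconstruction of the masterCheck helper invoked above (hypothetical;
// only the tail, onChange() plus the change listener, appears in this hunk).
// A group-toggle checkbox disables or enables the rest of the inputs in its group.
function masterCheck(checkboxId, inputSelector) {
  const checkbox = document.getElementById(checkboxId);
  if (!checkbox) return;
  function onChange() {
    document.querySelectorAll(inputSelector).forEach((input) => {
      if (input !== checkbox) input.disabled = !checkbox.checked;
    });
  }
  onChange(); // Trigger the event initially
  checkbox.addEventListener("change", onChange);
}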

18
src/static/templates/admin/users.hbs

@@ -61,16 +61,16 @@
{{/each}}
</div>
</td>
<td class="text-end pe-2 small">
<td class="text-end px-0 small">
{{#if TwoFactorEnabled}}
<a class="d-block" href="#" onclick='remove2fa({{jsesc Id}})'>Remove all 2FA</a>
<button type="button" class="btn btn-sm btn-link p-0 border-0" onclick='remove2fa({{jsesc Id}})'>Remove all 2FA</button>
{{/if}}
<a class="d-block" href="#" onclick='deauthUser({{jsesc Id}})'>Deauthorize sessions</a>
<a class="d-block" href="#" onclick='deleteUser({{jsesc Id}}, {{jsesc Email}})'>Delete User</a>
<button type="button" class="btn btn-sm btn-link p-0 border-0" onclick='deauthUser({{jsesc Id}})'>Deauthorize sessions</button>
<button type="button" class="btn btn-sm btn-link p-0 border-0" onclick='deleteUser({{jsesc Id}}, {{jsesc Email}})'>Delete User</button>
{{#if user_enabled}}
<a class="d-block" href="#" onclick='disableUser({{jsesc Id}}, {{jsesc Email}})'>Disable User</a>
<button type="button" class="btn btn-sm btn-link p-0 border-0" onclick='disableUser({{jsesc Id}}, {{jsesc Email}})'>Disable User</button>
{{else}}
<a class="d-block" href="#" onclick='enableUser({{jsesc Id}}, {{jsesc Email}})'>Enable User</a>
<button type="button" class="btn btn-sm btn-link p-0 border-0" onclick='enableUser({{jsesc Id}}, {{jsesc Email}})'>Enable User</button>
{{/if}}
</td>
</tr>
@@ -135,9 +135,9 @@
</div>
</main>
<link rel="stylesheet" href="{{urlpath}}/bwrs_static/datatables.css" />
<script src="{{urlpath}}/bwrs_static/jquery-3.6.0.slim.js"></script>
<script src="{{urlpath}}/bwrs_static/datatables.js"></script>
<link rel="stylesheet" href="{{urlpath}}/vw_static/datatables.css" />
<script src="{{urlpath}}/vw_static/jquery-3.6.0.slim.js"></script>
<script src="{{urlpath}}/vw_static/datatables.js"></script>
<script>
'use strict';

2
src/static/templates/email/email_footer.hbs

@@ -10,7 +10,7 @@
<td class="aligncenter social-icons" align="center" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; padding: 15px 0 0 0;" valign="top">
<table cellpadding="0" cellspacing="0" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0 auto;">
<tr style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0;">
<td style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; padding: 0 10px;" valign="top"><a href="https://github.com/dani-garcia/vaultwarden" target="_blank" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; text-decoration: underline;"><img src="{{url}}/bwrs_static/mail-github.png" alt="GitHub" width="30" height="30" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; border: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0; max-width: 100%;" /></a></td>
<td style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; padding: 0 10px;" valign="top"><a href="https://github.com/dani-garcia/vaultwarden" target="_blank" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #999; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 12px; line-height: 20px; margin: 0; text-decoration: underline;"><img src="{{url}}/vw_static/mail-github.png" alt="GitHub" width="30" height="30" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; border: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0; max-width: 100%;" /></a></td>
</tr>
</table>
</td>

2
src/static/templates/email/email_header.hbs

@@ -81,7 +81,7 @@
<table class="body-wrap" cellpadding="0" cellspacing="0" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0; width: 100%;" bgcolor="#f6f6f6">
<tr style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0;">
<td valign="middle" class="aligncenter middle logo" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0; padding: 20px 0 10px;" align="center">
<img src="{{url}}/bwrs_static/logo-gray.png" alt="" width="190" height="39" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; border: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0; max-width: 100%;" />
<img src="{{url}}/vw_static/logo-gray.png" alt="" width="190" height="39" style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; border: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0; max-width: 100%;" />
</td>
</tr>
<tr style="-webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none; box-sizing: border-box; color: #333; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; font-size: 16px; line-height: 25px; margin: 0;">

8
src/static/templates/email/emergency_access_invite_accepted.hbs

@@ -0,0 +1,8 @@
Emergency access contact {{{grantee_email}}} accepted
<!---------------->
This email is to notify you that {{grantee_email}} has accepted your invitation to become an emergency access contact.
To confirm this user, log into the web vault ({{url}}), go to settings and confirm the user.
If you do not wish to confirm this user, you can also remove them on the same page.
{{> email/email_footer_text }}

21
src/static/templates/email/emergency_access_invite_accepted.html.hbs

@@ -0,0 +1,21 @@
Emergency access contact {{{grantee_email}}} accepted
<!---------------->
{{> email/email_header }}
<table width="100%" cellpadding="0" cellspacing="0" style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top">
This email is to notify you that {{grantee_email}} has accepted your invitation to become an emergency access contact.
</td>
</tr>
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top">
To confirm this user, log into the <a href="{{url}}/">web vault</a>, go to settings and confirm the user.
</td>
</tr>
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
<td class="content-block last" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0; -webkit-text-size-adjust: none;" valign="top">
If you do not wish to confirm this user, you can also remove them on the same page.
</td>
</tr>
</table>
{{> email/email_footer }}

6
src/static/templates/email/emergency_access_invite_confirmed.hbs

@@ -0,0 +1,6 @@
Emergency access contact for {{{grantor_name}}} confirmed
<!---------------->
This email is to notify you that you have been confirmed as an emergency access contact for *{{grantor_name}}*.
You can now initiate emergency access requests from the web vault ({{url}}).
{{> email/email_footer_text }}

16
src/static/templates/email/emergency_access_invite_confirmed.html.hbs

@@ -0,0 +1,16 @@
Emergency access contact for {{{grantor_name}}} confirmed
<!---------------->
{{> email/email_header }}
<table width="100%" cellpadding="0" cellspacing="0" style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top">
This email is to notify you that you have been confirmed as an emergency access contact for <b style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">{{grantor_name}}</b>.
</td>
</tr>
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
<td class="content-block last" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0; -webkit-text-size-adjust: none;" valign="top">
You can now initiate emergency access requests from the <a href="{{url}}/">web vault</a>.
</td>
</tr>
</table>
{{> email/email_footer }}

Some files were not shown because too many files changed in this diff
