diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7ed85943..985ad1ff 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -31,9 +31,6 @@ on: jobs: build: name: Build and Test ${{ matrix.channel }} - permissions: - actions: write - contents: read runs-on: ubuntu-24.04 timeout-minutes: 120 # Make warnings errors, this is to prevent warnings slipping through. @@ -69,9 +66,9 @@ jobs: CHANNEL: ${{ matrix.channel }} run: | if [[ "${CHANNEL}" == 'rust-toolchain' ]]; then - RUST_TOOLCHAIN="$(grep -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml)" + RUST_TOOLCHAIN="$(grep -m1 -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml)" elif [[ "${CHANNEL}" == 'msrv' ]]; then - RUST_TOOLCHAIN="$(grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml)" + RUST_TOOLCHAIN="$(grep -m1 -oP 'rust-version\s.*"(\K.*?)(?=")' Cargo.toml)" else RUST_TOOLCHAIN="${CHANNEL}" fi @@ -81,7 +78,7 @@ jobs: # Only install the clippy and rustfmt components on the default rust-toolchain - name: "Install rust-toolchain version" - uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master @ Aug 23, 2025, 3:20 AM GMT+2 + uses: dtolnay/rust-toolchain@6d653acede28d24f02e3cd41383119e8b1b35921 # master @ Sep 16, 2025, 8:37 PM GMT+2 if: ${{ matrix.channel == 'rust-toolchain' }} with: toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}" @@ -91,7 +88,7 @@ jobs: # Install the any other channel to be used for which we do not execute clippy and rustfmt - name: "Install MSRV version" - uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master @ Aug 23, 2025, 3:20 AM GMT+2 + uses: dtolnay/rust-toolchain@6d653acede28d24f02e3cd41383119e8b1b35921 # master @ Sep 16, 2025, 8:37 PM GMT+2 if: ${{ matrix.channel != 'rust-toolchain' }} with: toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}" @@ -116,60 +113,60 @@ jobs: # Enable Rust Caching - name: Rust Caching - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # 
v2.8.0 + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: # Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes. # Like changing the build host from Ubuntu 20.04 to 22.04 for example. # Only update when really needed! Use a .[.] format. - prefix-key: "v2023.07-rust" + prefix-key: "v2025.09-rust" # End Enable Rust Caching # Run cargo tests # First test all features together, afterwards test them separately. - - name: "test features: sqlite,mysql,postgresql,enable_mimalloc,query_logger" - id: test_sqlite_mysql_postgresql_mimalloc_logger + - name: "test features: sqlite,mysql,postgresql,enable_mimalloc,s3" + id: test_sqlite_mysql_postgresql_mimalloc_s3 if: ${{ !cancelled() }} run: | - cargo test --features sqlite,mysql,postgresql,enable_mimalloc,query_logger + cargo test --profile ci --features sqlite,mysql,postgresql,enable_mimalloc,s3 - name: "test features: sqlite,mysql,postgresql,enable_mimalloc" id: test_sqlite_mysql_postgresql_mimalloc if: ${{ !cancelled() }} run: | - cargo test --features sqlite,mysql,postgresql,enable_mimalloc + cargo test --profile ci --features sqlite,mysql,postgresql,enable_mimalloc - name: "test features: sqlite,mysql,postgresql" id: test_sqlite_mysql_postgresql if: ${{ !cancelled() }} run: | - cargo test --features sqlite,mysql,postgresql + cargo test --profile ci --features sqlite,mysql,postgresql - name: "test features: sqlite" id: test_sqlite if: ${{ !cancelled() }} run: | - cargo test --features sqlite + cargo test --profile ci --features sqlite - name: "test features: mysql" id: test_mysql if: ${{ !cancelled() }} run: | - cargo test --features mysql + cargo test --profile ci --features mysql - name: "test features: postgresql" id: test_postgresql if: ${{ !cancelled() }} run: | - cargo test --features postgresql + cargo test --profile ci --features postgresql # End Run cargo tests # Run cargo clippy, and fail on warnings - - name: "clippy features: 
sqlite,mysql,postgresql,enable_mimalloc" + - name: "clippy features: sqlite,mysql,postgresql,enable_mimalloc,s3" id: clippy if: ${{ !cancelled() && matrix.channel == 'rust-toolchain' }} run: | - cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc + cargo clippy --profile ci --features sqlite,mysql,postgresql,enable_mimalloc,s3 # End Run cargo clippy @@ -187,7 +184,7 @@ jobs: - name: "Some checks failed" if: ${{ failure() }} env: - TEST_DB_M_L: ${{ steps.test_sqlite_mysql_postgresql_mimalloc_logger.outcome }} + TEST_DB_M_S3: ${{ steps.test_sqlite_mysql_postgresql_mimalloc_s3.outcome }} TEST_DB_M: ${{ steps.test_sqlite_mysql_postgresql_mimalloc.outcome }} TEST_DB: ${{ steps.test_sqlite_mysql_postgresql.outcome }} TEST_SQLITE: ${{ steps.test_sqlite.outcome }} @@ -200,13 +197,13 @@ jobs: echo "" >> "${GITHUB_STEP_SUMMARY}" echo "|Job|Status|" >> "${GITHUB_STEP_SUMMARY}" echo "|---|------|" >> "${GITHUB_STEP_SUMMARY}" - echo "|test (sqlite,mysql,postgresql,enable_mimalloc,query_logger)|${TEST_DB_M_L}|" >> "${GITHUB_STEP_SUMMARY}" + echo "|test (sqlite,mysql,postgresql,enable_mimalloc,s3)|${TEST_DB_M_S3}|" >> "${GITHUB_STEP_SUMMARY}" echo "|test (sqlite,mysql,postgresql,enable_mimalloc)|${TEST_DB_M}|" >> "${GITHUB_STEP_SUMMARY}" echo "|test (sqlite,mysql,postgresql)|${TEST_DB}|" >> "${GITHUB_STEP_SUMMARY}" echo "|test (sqlite)|${TEST_SQLITE}|" >> "${GITHUB_STEP_SUMMARY}" echo "|test (mysql)|${TEST_MYSQL}|" >> "${GITHUB_STEP_SUMMARY}" echo "|test (postgresql)|${TEST_POSTGRESQL}|" >> "${GITHUB_STEP_SUMMARY}" - echo "|clippy (sqlite,mysql,postgresql,enable_mimalloc)|${CLIPPY}|" >> "${GITHUB_STEP_SUMMARY}" + echo "|clippy (sqlite,mysql,postgresql,enable_mimalloc,s3)|${CLIPPY}|" >> "${GITHUB_STEP_SUMMARY}" echo "|fmt|${FMT}|" >> "${GITHUB_STEP_SUMMARY}" echo "" >> "${GITHUB_STEP_SUMMARY}" echo "Please check the failed jobs and fix where needed." 
>> "${GITHUB_STEP_SUMMARY}" diff --git a/.github/workflows/check-templates.yml b/.github/workflows/check-templates.yml index 943235fe..7a6a764a 100644 --- a/.github/workflows/check-templates.yml +++ b/.github/workflows/check-templates.yml @@ -6,8 +6,6 @@ on: [ push, pull_request ] jobs: docker-templates: name: Validate docker templates - permissions: - contents: read runs-on: ubuntu-24.04 timeout-minutes: 30 diff --git a/.github/workflows/hadolint.yml b/.github/workflows/hadolint.yml index 75d3a95d..9dfd7a59 100644 --- a/.github/workflows/hadolint.yml +++ b/.github/workflows/hadolint.yml @@ -1,13 +1,12 @@ name: Hadolint -permissions: {} on: [ push, pull_request ] +permissions: {} + jobs: hadolint: name: Validate Dockerfile syntax - permissions: - contents: read runs-on: ubuntu-24.04 timeout-minutes: 30 @@ -31,7 +30,7 @@ jobs: sudo curl -L https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \ sudo chmod +x /usr/local/bin/hadolint env: - HADOLINT_VERSION: 2.12.0 + HADOLINT_VERSION: 2.14.0 # End Download hadolint # Checkout the repo - name: Checkout diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 114fa00f..22b031ab 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -21,10 +21,10 @@ jobs: name: Build Vaultwarden containers if: ${{ github.repository == 'dani-garcia/vaultwarden' }} permissions: - packages: write + packages: write # Needed to upload packages and artifacts contents: read - attestations: write - id-token: write + attestations: write # Needed to generate an artifact attestation for a build + id-token: write # Needed to mint the OIDC token necessary to request a Sigstore signing certificate runs-on: ubuntu-24.04 timeout-minutes: 120 # Start a local docker registry to extract the compiled binaries to upload as artifacts and attest them @@ -103,7 +103,7 @@ jobs: # Login to Docker Hub - name: Login to Docker Hub - 
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0 + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} @@ -119,7 +119,7 @@ jobs: # Login to GitHub Container Registry - name: Login to GitHub Container Registry - uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0 + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 with: registry: ghcr.io username: ${{ github.repository_owner }} @@ -136,7 +136,7 @@ jobs: # Login to Quay.io - name: Login to Quay.io - uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0 + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 with: registry: quay.io username: ${{ secrets.QUAY_USERNAME }} @@ -204,7 +204,7 @@ jobs: # Attest container images - name: Attest - docker.io - ${{ matrix.base_image }} if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}} - uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # v2.4.0 + uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0 with: subject-name: ${{ vars.DOCKERHUB_REPO }} subject-digest: ${{ env.DIGEST_SHA }} @@ -212,7 +212,7 @@ jobs: - name: Attest - ghcr.io - ${{ matrix.base_image }} if: ${{ env.HAVE_GHCR_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}} - uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # v2.4.0 + uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0 with: subject-name: ${{ vars.GHCR_REPO }} subject-digest: ${{ env.DIGEST_SHA }} @@ -220,7 +220,7 @@ jobs: - name: Attest - quay.io - ${{ matrix.base_image }} if: ${{ env.HAVE_QUAY_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}} - uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # v2.4.0 + uses: 
actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0 with: subject-name: ${{ vars.QUAY_REPO }} subject-digest: ${{ env.DIGEST_SHA }} @@ -299,7 +299,7 @@ jobs: path: vaultwarden-armv6-${{ matrix.base_image }} - name: "Attest artifacts ${{ matrix.base_image }}" - uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # v2.4.0 + uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0 with: subject-path: vaultwarden-* # End Upload artifacts to Github Actions diff --git a/.github/workflows/releasecache-cleanup.yml b/.github/workflows/releasecache-cleanup.yml index f62fccd3..22d98fa2 100644 --- a/.github/workflows/releasecache-cleanup.yml +++ b/.github/workflows/releasecache-cleanup.yml @@ -16,7 +16,7 @@ jobs: releasecache-cleanup: name: Releasecache Cleanup permissions: - packages: write + packages: write # To be able to cleanup old caches runs-on: ubuntu-24.04 continue-on-error: true timeout-minutes: 30 diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 0d52da5a..59b06211 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -23,9 +23,7 @@ jobs: if: ${{ github.repository == 'dani-garcia/vaultwarden' }} name: Trivy Scan permissions: - contents: read - actions: read - security-events: write + security-events: write # To write the security report runs-on: ubuntu-24.04 timeout-minutes: 30 @@ -36,7 +34,7 @@ jobs: persist-credentials: false - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.0 + b6643a2 + uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # 0.33.1 env: TRIVY_DB_REPOSITORY: docker.io/aquasec/trivy-db:2,public.ecr.aws/aquasecurity/trivy-db:2,ghcr.io/aquasecurity/trivy-db:2 TRIVY_JAVA_DB_REPOSITORY: docker.io/aquasec/trivy-java-db:1,public.ecr.aws/aquasecurity/trivy-java-db:1,ghcr.io/aquasecurity/trivy-java-db:1 @@ -48,6 +46,6 @@ jobs: 
severity: CRITICAL,HIGH - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 + uses: github/codeql-action/upload-sarif@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3.30.6 with: sarif_file: 'trivy-results.sarif' diff --git a/.github/workflows/typos.yml b/.github/workflows/typos.yml new file mode 100644 index 00000000..fd58005f --- /dev/null +++ b/.github/workflows/typos.yml @@ -0,0 +1,22 @@ +name: Code Spell Checking + +on: [ push, pull_request ] +permissions: {} + +jobs: + typos: + name: Run typos spell checking + runs-on: ubuntu-24.04 + timeout-minutes: 30 + + steps: + # Checkout the repo + - name: Checkout + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 + with: + persist-credentials: false + # End Checkout the repo + + # When this version is updated, do not forget to update this in `.pre-commit-config.yaml` too + - name: Spell Check Repo + uses: crate-ci/typos@40156d6074bf731adb169cfb8234954971dbc487 # v1.37.1 diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index fde1f217..a3cd0df2 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -13,7 +13,7 @@ jobs: name: Run zizmor runs-on: ubuntu-latest permissions: - security-events: write + security-events: write # To write the security report steps: - name: Checkout repository uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5.0.0 @@ -21,7 +21,7 @@ jobs: persist-credentials: false - name: Run zizmor - uses: zizmorcore/zizmor-action@5ca5fc7a4779c5263a3ffa0e1f693009994446d1 # v0.1.2 + uses: zizmorcore/zizmor-action@e673c3917a1aef3c65c972347ed84ccd013ecda4 # v0.2.0 with: # intentionally not scanning the entire repository, # since it contains integration tests. 
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a967edbf..4f0ab121 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ --- repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v6.0.0 + rev: 3e8a8703264a2f4a69428a0aa4dcb512790b2c8c # v6.0.0 hooks: - id: check-yaml - id: check-json @@ -50,3 +50,8 @@ repos: args: - "-c" - "cd docker && make" +# When this version is updated, do not forget to update this in `.github/workflows/typos.yaml` too +- repo: https://github.com/crate-ci/typos + rev: 40156d6074bf731adb169cfb8234954971dbc487 # v1.37.1 + hooks: + - id: typos diff --git a/.typos.toml b/.typos.toml new file mode 100644 index 00000000..59f6d7d6 --- /dev/null +++ b/.typos.toml @@ -0,0 +1,26 @@ +[files] +extend-exclude = [ + ".git/", + "playwright/", + "*.js", # Ignore all JavaScript files + "!admin*.js", # Except our own JavaScript files +] +ignore-hidden = false + +[default] +extend-ignore-re = [ + # We use this in place of the reserved type identifier at some places + "typ", + # In SMTP it's called HELO, so ignore it + "(?i)helo_name", + "Server name sent during.+HELO", + # COSE Is short for CBOR Object Signing and Encryption, ignore these specific items + "COSEKey", + "COSEAlgorithm", + # Ignore this specific string as it's valid + "Ensure they are valid OTPs", + # This word is misspelled upstream + # https://github.com/bitwarden/server/blob/dff9f1cf538198819911cf2c20f8cda3307701c5/src/Notifications/HubHelpers.cs#L86 + # https://github.com/bitwarden/clients/blob/9612a4ac45063e372a6fbe87eb253c7cb3c588fb/libs/common/src/auth/services/anonymous-hub.service.ts#L45 + "AuthRequestResponseRecieved", +] diff --git a/Cargo.lock b/Cargo.lock index 39a3d942..ef4e20da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5736,7 +5736,6 @@ dependencies = [ "mini-moka", "num-derive", "num-traits", - "once_cell", "opendal", "openidconnect", "openssl", diff --git a/Cargo.toml b/Cargo.toml index c62bc929..0f6e7dd3 100644 --- 
a/Cargo.toml +++ b/Cargo.toml @@ -1,3 +1,10 @@ +[workspace.package] +edition = "2021" +rust-version = "1.88.0" +license = "AGPL-3.0-only" +repository = "https://github.com/dani-garcia/vaultwarden" +publish = false + [workspace] members = ["macros"] @@ -5,15 +12,15 @@ members = ["macros"] name = "vaultwarden" version = "1.0.0" authors = ["Daniel GarcĂ­a "] -edition = "2021" -rust-version = "1.87.0" -resolver = "2" - -repository = "https://github.com/dani-garcia/vaultwarden" readme = "README.md" -license = "AGPL-3.0-only" -publish = false build = "build.rs" +resolver = "2" +repository.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +publish.workspace = true + [features] # default = ["sqlite"] @@ -57,9 +64,6 @@ tracing = { version = "0.1.41", features = ["log"] } # Needed to have lettre and # A `dotenv` implementation for Rust dotenvy = { version = "0.15.7", default-features = false } -# Lazy initialization -once_cell = "1.21.3" - # Numerical libraries num-traits = "0.2.19" num-derive = "0.4.2" @@ -209,24 +213,15 @@ reqsign = { version = "0.16.5", optional = true } strip = "debuginfo" lto = "fat" codegen-units = 1 - -# A little bit of a speedup -[profile.dev] -split-debuginfo = "unpacked" - -# Always build argon2 using opt-level 3 -# This is a huge speed improvement during testing -[profile.dev.package.argon2] -opt-level = 3 +debug = "line-tables-only" # Optimize for size [profile.release-micro] inherits = "release" -opt-level = "z" strip = "symbols" -lto = "fat" -codegen-units = 1 +opt-level = "z" panic = "abort" +debug = false # Profile for systems with low resources # It will use less resources during build @@ -236,6 +231,32 @@ strip = "symbols" lto = "thin" codegen-units = 16 +# Used for profiling and debugging like valgrind or heaptrack +# Inherits release to be sure all optimizations have been done +[profile.dbg] +inherits = "release" +strip = "none" +split-debuginfo = "off" +debug = "full" + +# A little bit of 
a speedup for generic building +[profile.dev] +split-debuginfo = "unpacked" +debug = "line-tables-only" + +# Used for CI builds to improve compile time +[profile.ci] +inherits = "dev" +debug = false +debug-assertions = false +strip = "symbols" +panic = "abort" + +# Always build argon2 using opt-level 3 +# This is a huge speed improvement during testing +[profile.dev.package.argon2] +opt-level = 3 + # Linting config # https://doc.rust-lang.org/rustc/lints/groups.html [workspace.lints.rust] @@ -245,15 +266,16 @@ non_ascii_idents = "forbid" # Deny deprecated_in_future = "deny" +deprecated_safe = { level = "deny", priority = -1 } future_incompatible = { level = "deny", priority = -1 } keyword_idents = { level = "deny", priority = -1 } let_underscore = { level = "deny", priority = -1 } +nonstandard_style = { level = "deny", priority = -1 } noop_method_call = "deny" refining_impl_trait = { level = "deny", priority = -1 } rust_2018_idioms = { level = "deny", priority = -1 } rust_2021_compatibility = { level = "deny", priority = -1 } rust_2024_compatibility = { level = "deny", priority = -1 } -edition_2024_expr_fragment_specifier = "allow" # Once changed to Rust 2024 this should be removed and macro's should be validated again single_use_lifetimes = "deny" trivial_casts = "deny" trivial_numeric_casts = "deny" @@ -263,7 +285,8 @@ unused_lifetimes = "deny" unused_qualifications = "deny" variant_size_differences = "deny" # Allow the following lints since these cause issues with Rust v1.84.0 or newer -# Building Vaultwarden with Rust v1.85.0 and edition 2024 also works without issues +# Building Vaultwarden with Rust v1.85.0 with edition 2024 also works without issues +edition_2024_expr_fragment_specifier = "allow" # Once changed to Rust 2024 this should be removed and macro's should be validated again if_let_rescope = "allow" tail_expr_drop_order = "allow" @@ -277,10 +300,12 @@ todo = "warn" result_large_err = "allow" # Deny +branches_sharing_code = "deny" 
case_sensitive_file_extension_comparisons = "deny" cast_lossless = "deny" clone_on_ref_ptr = "deny" equatable_if_let = "deny" +excessive_precision = "deny" filter_map_next = "deny" float_cmp_const = "deny" implicit_clone = "deny" @@ -294,15 +319,19 @@ manual_instant_elapsed = "deny" manual_string_new = "deny" match_wildcard_for_single_variants = "deny" mem_forget = "deny" +needless_borrow = "deny" +needless_collect = "deny" needless_continue = "deny" needless_lifetimes = "deny" option_option = "deny" +redundant_clone = "deny" string_add_assign = "deny" unnecessary_join = "deny" unnecessary_self_imports = "deny" unnested_or_patterns = "deny" unused_async = "deny" unused_self = "deny" +useless_let_if_seq = "deny" verbose_file_reads = "deny" zero_sized_map_values = "deny" diff --git a/docker/DockerSettings.yaml b/docker/DockerSettings.yaml index 68fa7532..f6f49ce3 100644 --- a/docker/DockerSettings.yaml +++ b/docker/DockerSettings.yaml @@ -5,7 +5,7 @@ vault_image_digest: "sha256:41c2b51c87882248f405d5a0ab37210d2672a312ec5d4f3b9afc # We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts # https://github.com/tonistiigi/xx | https://hub.docker.com/r/tonistiigi/xx/tags xx_image_digest: "sha256:9c207bead753dda9430bdd15425c6518fc7a03d866103c516a2c6889188f5894" -rust_version: 1.89.0 # Rust version to be used +rust_version: 1.90.0 # Rust version to be used debian_version: trixie # Debian release name to be used alpine_version: "3.22" # Alpine version to be used # For which platforms/architectures will we try to build images diff --git a/docker/Dockerfile.alpine b/docker/Dockerfile.alpine index 09477b3e..ff85832b 100644 --- a/docker/Dockerfile.alpine +++ b/docker/Dockerfile.alpine @@ -32,10 +32,10 @@ FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:41c2b51c87882 ########################## ALPINE BUILD IMAGES ########################## ## NOTE: The Alpine Base Images do not support other platforms 
then linux/amd64 ## And for Alpine we define all build images here, they will only be loaded when actually used -FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.89.0 AS build_amd64 -FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.89.0 AS build_arm64 -FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.89.0 AS build_armv7 -FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.89.0 AS build_armv6 +FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.90.0 AS build_amd64 +FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.90.0 AS build_arm64 +FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.90.0 AS build_armv7 +FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.90.0 AS build_armv6 ########################## BUILD IMAGE ########################## # hadolint ignore=DL3006 diff --git a/docker/Dockerfile.debian b/docker/Dockerfile.debian index 9d1af57f..aaf3a0ea 100644 --- a/docker/Dockerfile.debian +++ b/docker/Dockerfile.debian @@ -36,7 +36,7 @@ FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:9c207bead753dda9430bd ########################## BUILD IMAGE ########################## # hadolint ignore=DL3006 -FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.89.0-slim-trixie AS build +FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.90.0-slim-trixie AS build COPY --from=xx / / ARG TARGETARCH ARG TARGETVARIANT diff --git a/docker/README.md b/docker/README.md index f76cd35d..d64d5789 100644 --- a/docker/README.md +++ b/docker/README.md @@ -116,7 +116,7 @@ docker/bake.sh ``` You can append both `alpine` and `debian` with `-amd64`, `-arm64`, `-armv7` or `-armv6`, which will trigger a build for that specific platform.
-This will also append those values to the tag so you can see the builded container when running `docker images`. +This will also append those values to the tag so you can see the built container when running `docker images`. You can also append extra arguments after the target if you want. This can be useful for example to print what bake will use. ```bash @@ -162,7 +162,7 @@ You can append extra arguments after the target if you want. This can be useful For the podman builds you can, just like the `bake.sh` script, also append the architecture to build for that specific platform.
-### Testing podman builded images +### Testing podman built images The command to start a podman built container is almost the same as for the docker/bake built containers. The images start with `localhost/`, so you need to prepend that. diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 34e4ae04..9724275a 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -1,7 +1,11 @@ [package] name = "macros" version = "0.1.0" -edition = "2021" +repository.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +publish.workspace = true [lib] name = "macros" diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 8729fb13..41f3e4af 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "1.89.0" +channel = "1.90.0" components = [ "rustfmt", "clippy" ] profile = "minimal" diff --git a/src/api/admin.rs b/src/api/admin.rs index d52e24ef..b29914c5 100644 --- a/src/api/admin.rs +++ b/src/api/admin.rs @@ -1,17 +1,16 @@ -use once_cell::sync::Lazy; -use reqwest::Method; -use serde::de::DeserializeOwned; -use serde_json::Value; -use std::env; +use std::{env, sync::LazyLock}; -use rocket::serde::json::Json; +use reqwest::Method; use rocket::{ form::Form, http::{Cookie, CookieJar, MediaType, SameSite, Status}, request::{FromRequest, Outcome, Request}, response::{content::RawHtml as Html, Redirect}, + serde::json::Json, Catcher, Route, }; +use serde::de::DeserializeOwned; +use serde_json::Value; use crate::{ api::{ @@ -75,7 +74,7 @@ pub fn catchers() -> Vec { } } -static DB_TYPE: Lazy<&str> = Lazy::new(|| { +static DB_TYPE: LazyLock<&str> = LazyLock::new(|| { DbConnType::from_url(&CONFIG.database_url()) .map(|t| match t { DbConnType::sqlite => "SQLite", @@ -85,8 +84,8 @@ static DB_TYPE: Lazy<&str> = Lazy::new(|| { .unwrap_or("Unknown") }); -static CAN_BACKUP: Lazy = - Lazy::new(|| DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false)); +static 
CAN_BACKUP: LazyLock = + LazyLock::new(|| DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false)); #[get("/")] fn admin_disabled() -> &'static str { @@ -148,10 +147,10 @@ fn admin_login(request: &Request<'_>) -> ApiResult> { err_code!("Authorization failed.", Status::Unauthorized.code); } let redirect = request.segments::(0..).unwrap_or_default().display().to_string(); - render_admin_login(None, Some(redirect)) + render_admin_login(None, Some(&redirect)) } -fn render_admin_login(msg: Option<&str>, redirect: Option) -> ApiResult> { +fn render_admin_login(msg: Option<&str>, redirect: Option<&str>) -> ApiResult> { // If there is an error, show it let msg = msg.map(|msg| format!("Error: {msg}")); let json = json!({ @@ -185,14 +184,17 @@ fn post_admin_login( if crate::ratelimit::check_limit_admin(&ip.ip).is_err() { return Err(AdminResponse::TooManyRequests(render_admin_login( Some("Too many requests, try again later."), - redirect, + redirect.as_deref(), ))); } // If the token is invalid, redirect to login page if !_validate_token(&data.token) { error!("Invalid admin token. 
IP: {}", ip.ip); - Err(AdminResponse::Unauthorized(render_admin_login(Some("Invalid admin token, please try again."), redirect))) + Err(AdminResponse::Unauthorized(render_admin_login( + Some("Invalid admin token, please try again."), + redirect.as_deref(), + ))) } else { // If the token received is valid, generate JWT and save it as a cookie let claims = generate_admin_claims(); @@ -299,7 +301,7 @@ async fn invite_user(data: Json, _token: AdminToken, mut conn: DbCon err_code!("User already exists", Status::Conflict.code) } - let mut user = User::new(data.email, None); + let mut user = User::new(&data.email, None); async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult { if CONFIG.mail_enabled() { @@ -816,11 +818,7 @@ impl<'r> FromRequest<'r> for AdminToken { _ => err_handler!("Error getting Client IP"), }; - if CONFIG.disable_admin_token() { - Outcome::Success(Self { - ip, - }) - } else { + if !CONFIG.disable_admin_token() { let cookies = request.cookies(); let access_token = match cookies.get(COOKIE_NAME) { @@ -844,10 +842,10 @@ impl<'r> FromRequest<'r> for AdminToken { error!("Invalid or expired admin JWT. IP: {}.", &ip.ip); return Outcome::Error((Status::Unauthorized, "Session expired")); } - - Outcome::Success(Self { - ip, - }) } + + Outcome::Success(Self { + ip, + }) } } diff --git a/src/api/core/accounts.rs b/src/api/core/accounts.rs index c14bcef2..03808499 100644 --- a/src/api/core/accounts.rs +++ b/src/api/core/accounts.rs @@ -277,7 +277,7 @@ pub async fn _register(data: Json, email_verification: bool, mut c || CONFIG.is_signup_allowed(&email) || pending_emergency_access.is_some() { - User::new(email.clone(), None) + User::new(&email, None) } else { err!("Registration not allowed or user already exists") } @@ -287,7 +287,7 @@ pub async fn _register(data: Json, email_verification: bool, mut c // Make sure we don't leave a lingering invitation. 
Invitation::take(&email, &mut conn).await; - set_kdf_data(&mut user, data.kdf)?; + set_kdf_data(&mut user, &data.kdf)?; user.set_password(&data.master_password_hash, Some(data.key), true, None); user.password_hint = password_hint; @@ -350,7 +350,7 @@ async fn post_set_password(data: Json, headers: Headers, mut co let password_hint = clean_password_hint(&data.master_password_hint); enforce_password_hint_setting(&password_hint)?; - set_kdf_data(&mut user, data.kdf)?; + set_kdf_data(&mut user, &data.kdf)?; user.set_password( &data.master_password_hash, @@ -548,7 +548,7 @@ struct ChangeKdfData { key: String, } -fn set_kdf_data(user: &mut User, data: KDFData) -> EmptyResult { +fn set_kdf_data(user: &mut User, data: &KDFData) -> EmptyResult { if data.kdf == UserKdfType::Pbkdf2 as i32 && data.kdf_iterations < 100_000 { err!("PBKDF2 KDF iterations must be at least 100000.") } @@ -592,7 +592,7 @@ async fn post_kdf(data: Json, headers: Headers, mut conn: DbConn, err!("Invalid password") } - set_kdf_data(&mut user, data.kdf)?; + set_kdf_data(&mut user, &data.kdf)?; user.set_password(&data.new_master_password_hash, Some(data.key), true, None); let save_result = user.save(&mut conn).await; @@ -1261,10 +1261,11 @@ async fn rotate_api_key(data: Json, headers: Headers, conn: D #[get("/devices/knowndevice")] async fn get_known_device(device: KnownDevice, mut conn: DbConn) -> JsonResult { - let mut result = false; - if let Some(user) = User::find_by_mail(&device.email, &mut conn).await { - result = Device::find_by_uuid_and_user(&device.uuid, &user.uuid, &mut conn).await.is_some(); - } + let result = if let Some(user) = User::find_by_mail(&device.email, &mut conn).await { + Device::find_by_uuid_and_user(&device.uuid, &user.uuid, &mut conn).await.is_some() + } else { + false + }; Ok(Json(json!(result))) } diff --git a/src/api/core/ciphers.rs b/src/api/core/ciphers.rs index d8e622f2..c2ef1f05 100644 --- a/src/api/core/ciphers.rs +++ b/src/api/core/ciphers.rs @@ -1275,7 +1275,7 @@ async 
fn save_attachment( attachment.save(&mut conn).await.expect("Error saving attachment"); } - save_temp_file(PathType::Attachments, &format!("{cipher_id}/{file_id}"), data.data, true).await?; + save_temp_file(&PathType::Attachments, &format!("{cipher_id}/{file_id}"), data.data, true).await?; nt.send_cipher_update( UpdateType::SyncCipherUpdate, diff --git a/src/api/core/emergency_access.rs b/src/api/core/emergency_access.rs index b6b77df1..90bdb859 100644 --- a/src/api/core/emergency_access.rs +++ b/src/api/core/emergency_access.rs @@ -239,7 +239,7 @@ async fn send_invite(data: Json, headers: Headers, mu invitation.save(&mut conn).await?; } - let mut user = User::new(email.clone(), None); + let mut user = User::new(&email, None); user.save(&mut conn).await?; (user, true) } diff --git a/src/api/core/organizations.rs b/src/api/core/organizations.rs index 22712003..f74345ae 100644 --- a/src/api/core/organizations.rs +++ b/src/api/core/organizations.rs @@ -194,7 +194,7 @@ async fn create_organization(headers: Headers, data: Json, mut conn: Db (None, None) }; - let org = Organization::new(data.name, data.billing_email, private_key, public_key); + let org = Organization::new(data.name, &data.billing_email, private_key, public_key); let mut member = Membership::new(headers.user.uuid, org.uuid.clone(), None); let collection = Collection::new(org.uuid.clone(), data.collection_name, None); @@ -1127,7 +1127,7 @@ async fn send_invite( Invitation::new(email).save(&mut conn).await?; } - let mut new_user = User::new(email.clone(), None); + let mut new_user = User::new(email, None); new_user.save(&mut conn).await?; user_created = true; new_user @@ -1600,7 +1600,7 @@ async fn edit_member( // HACK: We need the raw user-type to be sure custom role is selected to determine the access_all permission // The from_str() will convert the custom role type into a manager role type let raw_type = &data.r#type.into_string(); - // MembershipTyp::from_str will convert custom (4) to manager (3) + // 
MembershipType::from_str will convert custom (4) to manager (3) let Some(new_type) = MembershipType::from_str(raw_type) else { err!("Invalid type") }; diff --git a/src/api/core/public.rs b/src/api/core/public.rs index 46b59290..d96cbc03 100644 --- a/src/api/core/public.rs +++ b/src/api/core/public.rs @@ -89,7 +89,7 @@ async fn ldap_import(data: Json, token: PublicToken, mut conn: Db Some(user) => user, // exists in vaultwarden None => { // User does not exist yet - let mut new_user = User::new(user_data.email.clone(), None); + let mut new_user = User::new(&user_data.email, None); new_user.save(&mut conn).await?; if !CONFIG.mail_enabled() { diff --git a/src/api/core/sends.rs b/src/api/core/sends.rs index 96bf71a0..9c9643d6 100644 --- a/src/api/core/sends.rs +++ b/src/api/core/sends.rs @@ -1,13 +1,12 @@ -use std::path::Path; -use std::time::Duration; +use std::{path::Path, sync::LazyLock, time::Duration}; use chrono::{DateTime, TimeDelta, Utc}; use num_traits::ToPrimitive; -use once_cell::sync::Lazy; -use rocket::form::Form; -use rocket::fs::NamedFile; -use rocket::fs::TempFile; -use rocket::serde::json::Json; +use rocket::{ + form::Form, + fs::{NamedFile, TempFile}, + serde::json::Json, +}; use serde_json::Value; use crate::{ @@ -20,7 +19,7 @@ use crate::{ }; const SEND_INACCESSIBLE_MSG: &str = "Send does not exist or is no longer available"; -static ANON_PUSH_DEVICE: Lazy = Lazy::new(|| { +static ANON_PUSH_DEVICE: LazyLock = LazyLock::new(|| { let dt = crate::util::parse_date("1970-01-01T00:00:00.000000Z"); Device { uuid: String::from("00000000-0000-0000-0000-000000000000").into(), @@ -271,7 +270,7 @@ async fn post_send_file(data: Form>, headers: Headers, mut conn: let file_id = crate::crypto::generate_send_file_id(); - save_temp_file(PathType::Sends, &format!("{}/{file_id}", send.uuid), data, true).await?; + save_temp_file(&PathType::Sends, &format!("{}/{file_id}", send.uuid), data, true).await?; let mut data_value: Value = serde_json::from_str(&send.data)?; if 
let Some(o) = data_value.as_object_mut() { @@ -423,7 +422,7 @@ async fn post_send_file_v2_data( let file_path = format!("{send_id}/{file_id}"); - save_temp_file(PathType::Sends, &file_path, data.data, false).await?; + save_temp_file(&PathType::Sends, &file_path, data.data, false).await?; nt.send_send_update( UpdateType::SyncSendCreate, @@ -564,7 +563,7 @@ async fn post_access_file( } async fn download_url(host: &Host, send_id: &SendId, file_id: &SendFileId) -> Result { - let operator = CONFIG.opendal_operator_for_path_type(PathType::Sends)?; + let operator = CONFIG.opendal_operator_for_path_type(&PathType::Sends)?; if operator.info().scheme() == opendal::Scheme::Fs { let token_claims = crate::auth::generate_send_claims(send_id, file_id); diff --git a/src/api/core/two_factor/authenticator.rs b/src/api/core/two_factor/authenticator.rs index e5ffeedc..3a72477d 100644 --- a/src/api/core/two_factor/authenticator.rs +++ b/src/api/core/two_factor/authenticator.rs @@ -31,7 +31,7 @@ async fn generate_authenticator(data: Json, headers: Headers, let (enabled, key) = match twofactor { Some(tf) => (true, tf.data), - _ => (false, crypto::encode_random_bytes::<20>(BASE32)), + _ => (false, crypto::encode_random_bytes::<20>(&BASE32)), }; // Upstream seems to also return `userVerificationToken`, but doesn't seem to be used at all. 
diff --git a/src/api/core/two_factor/mod.rs b/src/api/core/two_factor/mod.rs index cfe0be86..2481606e 100644 --- a/src/api/core/two_factor/mod.rs +++ b/src/api/core/two_factor/mod.rs @@ -120,7 +120,7 @@ async fn recover(data: Json, client_headers: ClientHeaders, mu async fn _generate_recover_code(user: &mut User, conn: &mut DbConn) { if user.totp_recover.is_none() { - let totp_recover = crypto::encode_random_bytes::<20>(BASE32); + let totp_recover = crypto::encode_random_bytes::<20>(&BASE32); user.totp_recover = Some(totp_recover); user.save(conn).await.ok(); } diff --git a/src/api/icons.rs b/src/api/icons.rs index df340e77..4e2aef1c 100644 --- a/src/api/icons.rs +++ b/src/api/icons.rs @@ -1,13 +1,13 @@ use std::{ collections::HashMap, net::IpAddr, - sync::Arc, + sync::{Arc, LazyLock}, time::{Duration, SystemTime}, }; use bytes::{Bytes, BytesMut}; use futures::{stream::StreamExt, TryFutureExt}; -use once_cell::sync::Lazy; +use html5gum::{Emitter, HtmlString, Readable, StringReader, Tokenizer}; use regex::Regex; use reqwest::{ header::{self, HeaderMap, HeaderValue}, @@ -16,8 +16,6 @@ use reqwest::{ use rocket::{http::ContentType, response::Redirect, Route}; use svg_hush::{data_url_filter, Filter}; -use html5gum::{Emitter, HtmlString, Readable, StringReader, Tokenizer}; - use crate::{ config::PathType, error::Error, @@ -33,7 +31,7 @@ pub fn routes() -> Vec { } } -static CLIENT: Lazy = Lazy::new(|| { +static CLIENT: LazyLock = LazyLock::new(|| { // Generate the default headers let mut default_headers = HeaderMap::new(); default_headers.insert( @@ -78,7 +76,7 @@ static CLIENT: Lazy = Lazy::new(|| { }); // Build Regex only once since this takes a lot of time. 
-static ICON_SIZE_REGEX: Lazy = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap()); +static ICON_SIZE_REGEX: LazyLock = LazyLock::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap()); // The function name `icon_external` is checked in the `on_response` function in `AppHeaders` // It is used to prevent sending a specific header which breaks icon downloads. @@ -220,7 +218,7 @@ async fn get_cached_icon(path: &str) -> Option> { } // Try to read the cached icon, and return it if it exists - if let Ok(operator) = CONFIG.opendal_operator_for_path_type(PathType::IconCache) { + if let Ok(operator) = CONFIG.opendal_operator_for_path_type(&PathType::IconCache) { if let Ok(buf) = operator.read(path).await { return Some(buf.to_vec()); } @@ -230,7 +228,7 @@ async fn get_cached_icon(path: &str) -> Option> { } async fn file_is_expired(path: &str, ttl: u64) -> Result { - let operator = CONFIG.opendal_operator_for_path_type(PathType::IconCache)?; + let operator = CONFIG.opendal_operator_for_path_type(&PathType::IconCache)?; let meta = operator.stat(path).await?; let modified = meta.last_modified().ok_or_else(|| std::io::Error::other(format!("No last modified time for `{path}`")))?; @@ -246,7 +244,7 @@ async fn icon_is_negcached(path: &str) -> bool { match expired { // No longer negatively cached, drop the marker Ok(true) => { - match CONFIG.opendal_operator_for_path_type(PathType::IconCache) { + match CONFIG.opendal_operator_for_path_type(&PathType::IconCache) { Ok(operator) => { if let Err(e) = operator.delete(&miss_indicator).await { error!("Could not remove negative cache indicator for icon {path:?}: {e:?}"); @@ -462,8 +460,8 @@ async fn get_page_with_referer(url: &str, referer: &str) -> Result u8 { - static PRIORITY_MAP: Lazy> = - Lazy::new(|| [(".png", 10), (".jpg", 20), (".jpeg", 20)].into_iter().collect()); + static PRIORITY_MAP: LazyLock> = + LazyLock::new(|| [(".png", 10), (".jpg", 20), (".jpeg", 20)].into_iter().collect()); // Check if there is a dimension set let 
(width, height) = parse_sizes(sizes); @@ -597,7 +595,7 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> { } async fn save_icon(path: &str, icon: Vec) { - let operator = match CONFIG.opendal_operator_for_path_type(PathType::IconCache) { + let operator = match CONFIG.opendal_operator_for_path_type(&PathType::IconCache) { Ok(operator) => operator, Err(e) => { warn!("Failed to get OpenDAL operator while saving icon: {e}"); diff --git a/src/api/identity.rs b/src/api/identity.rs index 04863b58..9c9f19fe 100644 --- a/src/api/identity.rs +++ b/src/api/identity.rs @@ -248,7 +248,7 @@ async fn _sso_login( _ => (), } - let mut user = User::new(user_infos.email, user_infos.user_name); + let mut user = User::new(&user_infos.email, user_infos.user_name); user.verified_at = Some(now); user.save(conn).await?; @@ -1066,7 +1066,7 @@ async fn oidcsignin_redirect( wrapper: impl FnOnce(OIDCState) -> sso::OIDCCodeWrapper, conn: &DbConn, ) -> ApiResult { - let state = sso::decode_state(base64_state)?; + let state = sso::decode_state(&base64_state)?; let code = sso::encode_code_claims(wrapper(state.clone())); let nonce = match SsoNonce::find(&state, conn).await { diff --git a/src/api/notifications.rs b/src/api/notifications.rs index a885e9b4..50f53943 100644 --- a/src/api/notifications.rs +++ b/src/api/notifications.rs @@ -1,11 +1,14 @@ -use std::{net::IpAddr, sync::Arc, time::Duration}; +use std::{ + net::IpAddr, + sync::{Arc, LazyLock}, + time::Duration, +}; use chrono::{NaiveDateTime, Utc}; use rmpv::Value; use rocket::{futures::StreamExt, Route}; -use tokio::sync::mpsc::Sender; - use rocket_ws::{Message, WebSocket}; +use tokio::sync::mpsc::Sender; use crate::{ auth::{ClientIp, WsAccessTokenHeader}, @@ -16,15 +19,13 @@ use crate::{ Error, CONFIG, }; -use once_cell::sync::Lazy; - -pub static WS_USERS: Lazy> = Lazy::new(|| { +pub static WS_USERS: LazyLock> = LazyLock::new(|| { Arc::new(WebSocketUsers { map: Arc::new(dashmap::DashMap::new()), }) }); -pub 
static WS_ANONYMOUS_SUBSCRIPTIONS: Lazy> = Lazy::new(|| { +pub static WS_ANONYMOUS_SUBSCRIPTIONS: LazyLock> = LazyLock::new(|| { Arc::new(AnonymousWebSocketSubscriptions { map: Arc::new(dashmap::DashMap::new()), }) @@ -35,7 +36,7 @@ use super::{ push_send_update, push_user_update, }; -static NOTIFICATIONS_DISABLED: Lazy = Lazy::new(|| !CONFIG.enable_websocket() && !CONFIG.push_enabled()); +static NOTIFICATIONS_DISABLED: LazyLock = LazyLock::new(|| !CONFIG.enable_websocket() && !CONFIG.push_enabled()); pub fn routes() -> Vec { if CONFIG.enable_websocket() { @@ -109,8 +110,7 @@ fn websockets_hub<'r>( ip: ClientIp, header_token: WsAccessTokenHeader, ) -> Result { - let addr = ip.ip; - info!("Accepting Rocket WS connection from {addr}"); + info!("Accepting Rocket WS connection from {}", ip.ip); let token = if let Some(token) = data.access_token { token @@ -133,7 +133,7 @@ fn websockets_hub<'r>( users.map.entry(claims.sub.to_string()).or_default().push((entry_uuid, tx)); // Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map - (rx, WSEntryMapGuard::new(users, claims.sub, entry_uuid, addr)) + (rx, WSEntryMapGuard::new(users, claims.sub, entry_uuid, ip.ip)) }; Ok({ @@ -189,8 +189,7 @@ fn websockets_hub<'r>( #[allow(tail_expr_drop_order)] #[get("/anonymous-hub?")] fn anonymous_websockets_hub<'r>(ws: WebSocket, token: String, ip: ClientIp) -> Result { - let addr = ip.ip; - info!("Accepting Anonymous Rocket WS connection from {addr}"); + info!("Accepting Anonymous Rocket WS connection from {}", ip.ip); let (mut rx, guard) = { let subscriptions = Arc::clone(&WS_ANONYMOUS_SUBSCRIPTIONS); @@ -200,7 +199,7 @@ fn anonymous_websockets_hub<'r>(ws: WebSocket, token: String, ip: ClientIp) -> R subscriptions.map.insert(token.clone(), tx); // Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map - (rx, WSAnonymousEntryMapGuard::new(subscriptions, token, addr)) + 
(rx, WSAnonymousEntryMapGuard::new(subscriptions, token, ip.ip)) }; Ok({ @@ -257,11 +256,11 @@ fn anonymous_websockets_hub<'r>(ws: WebSocket, token: String, ip: ClientIp) -> R // Websockets server // -fn serialize(val: Value) -> Vec { +fn serialize(val: &Value) -> Vec { use rmpv::encode::write_value; let mut buf = Vec::new(); - write_value(&mut buf, &val).expect("Error encoding MsgPack"); + write_value(&mut buf, val).expect("Error encoding MsgPack"); // Add size bytes at the start // Extracted from BinaryMessageFormat.js @@ -558,7 +557,7 @@ impl AnonymousWebSocketSubscriptions { let data = create_anonymous_update( vec![("Id".into(), auth_request_id.to_string().into()), ("UserId".into(), user_id.to_string().into())], UpdateType::AuthRequestResponse, - user_id.clone(), + user_id, ); self.send_update(auth_request_id, &data).await; } @@ -594,16 +593,19 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_id: ])]), ]); - serialize(value) + serialize(&value) } -fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id: UserId) -> Vec { +fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id: &UserId) -> Vec { use rmpv::Value as V; let value = V::Array(vec![ 1.into(), V::Map(vec![]), V::Nil, + // This word is misspelled, but upstream has this too + // https://github.com/bitwarden/server/blob/dff9f1cf538198819911cf2c20f8cda3307701c5/src/Notifications/HubHelpers.cs#L86 + // https://github.com/bitwarden/clients/blob/9612a4ac45063e372a6fbe87eb253c7cb3c588fb/libs/common/src/auth/services/anonymous-hub.service.ts#L45 "AuthRequestResponseRecieved".into(), V::Array(vec![V::Map(vec![ ("Type".into(), (ut as i32).into()), @@ -612,11 +614,11 @@ fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id ])]), ]); - serialize(value) + serialize(&value) } fn create_ping() -> Vec { - serialize(Value::Array(vec![6.into()])) + serialize(&Value::Array(vec![6.into()])) } // 
https://github.com/bitwarden/server/blob/375af7c43b10d9da03525d41452f95de3f921541/src/Core/Enums/PushType.cs diff --git a/src/api/push.rs b/src/api/push.rs index f3ade9b0..91f12f50 100644 --- a/src/api/push.rs +++ b/src/api/push.rs @@ -1,3 +1,8 @@ +use std::{ + sync::LazyLock, + time::{Duration, Instant}, +}; + use reqwest::{ header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE}, Method, @@ -13,9 +18,6 @@ use crate::{ CONFIG, }; -use once_cell::sync::Lazy; -use std::time::{Duration, Instant}; - #[derive(Deserialize)] struct AuthPushToken { access_token: String, @@ -29,7 +31,7 @@ struct LocalAuthPushToken { } async fn get_auth_api_token() -> ApiResult { - static API_TOKEN: Lazy> = Lazy::new(|| { + static API_TOKEN: LazyLock> = LazyLock::new(|| { RwLock::new(LocalAuthPushToken { access_token: String::new(), valid_until: Instant::now(), diff --git a/src/auth.rs b/src/auth.rs index a4a0b22c..d6337ab2 100644 --- a/src/auth.rs +++ b/src/auth.rs @@ -1,12 +1,15 @@ -// JWT Handling +use std::{ + env, + net::IpAddr, + sync::{LazyLock, OnceLock}, +}; + use chrono::{DateTime, TimeDelta, Utc}; use jsonwebtoken::{errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header}; use num_traits::FromPrimitive; -use once_cell::sync::{Lazy, OnceCell}; use openssl::rsa::Rsa; use serde::de::DeserializeOwned; use serde::ser::Serialize; -use std::{env, net::IpAddr}; use crate::{ api::ApiResult, @@ -22,27 +25,30 @@ use crate::{ const JWT_ALGORITHM: Algorithm = Algorithm::RS256; // Limit when BitWarden consider the token as expired -pub static BW_EXPIRATION: Lazy = Lazy::new(|| TimeDelta::try_minutes(5).unwrap()); - -pub static DEFAULT_REFRESH_VALIDITY: Lazy = Lazy::new(|| TimeDelta::try_days(30).unwrap()); -pub static MOBILE_REFRESH_VALIDITY: Lazy = Lazy::new(|| TimeDelta::try_days(90).unwrap()); -pub static DEFAULT_ACCESS_VALIDITY: Lazy = Lazy::new(|| TimeDelta::try_hours(2).unwrap()); -static JWT_HEADER: Lazy
= Lazy::new(|| Header::new(JWT_ALGORITHM)); - -pub static JWT_LOGIN_ISSUER: Lazy = Lazy::new(|| format!("{}|login", CONFIG.domain_origin())); -static JWT_INVITE_ISSUER: Lazy = Lazy::new(|| format!("{}|invite", CONFIG.domain_origin())); -static JWT_EMERGENCY_ACCESS_INVITE_ISSUER: Lazy = - Lazy::new(|| format!("{}|emergencyaccessinvite", CONFIG.domain_origin())); -static JWT_DELETE_ISSUER: Lazy = Lazy::new(|| format!("{}|delete", CONFIG.domain_origin())); -static JWT_VERIFYEMAIL_ISSUER: Lazy = Lazy::new(|| format!("{}|verifyemail", CONFIG.domain_origin())); -static JWT_ADMIN_ISSUER: Lazy = Lazy::new(|| format!("{}|admin", CONFIG.domain_origin())); -static JWT_SEND_ISSUER: Lazy = Lazy::new(|| format!("{}|send", CONFIG.domain_origin())); -static JWT_ORG_API_KEY_ISSUER: Lazy = Lazy::new(|| format!("{}|api.organization", CONFIG.domain_origin())); -static JWT_FILE_DOWNLOAD_ISSUER: Lazy = Lazy::new(|| format!("{}|file_download", CONFIG.domain_origin())); -static JWT_REGISTER_VERIFY_ISSUER: Lazy = Lazy::new(|| format!("{}|register_verify", CONFIG.domain_origin())); - -static PRIVATE_RSA_KEY: OnceCell = OnceCell::new(); -static PUBLIC_RSA_KEY: OnceCell = OnceCell::new(); +pub static BW_EXPIRATION: LazyLock = LazyLock::new(|| TimeDelta::try_minutes(5).unwrap()); + +pub static DEFAULT_REFRESH_VALIDITY: LazyLock = LazyLock::new(|| TimeDelta::try_days(30).unwrap()); +pub static MOBILE_REFRESH_VALIDITY: LazyLock = LazyLock::new(|| TimeDelta::try_days(90).unwrap()); +pub static DEFAULT_ACCESS_VALIDITY: LazyLock = LazyLock::new(|| TimeDelta::try_hours(2).unwrap()); +static JWT_HEADER: LazyLock
= LazyLock::new(|| Header::new(JWT_ALGORITHM)); + +pub static JWT_LOGIN_ISSUER: LazyLock = LazyLock::new(|| format!("{}|login", CONFIG.domain_origin())); +static JWT_INVITE_ISSUER: LazyLock = LazyLock::new(|| format!("{}|invite", CONFIG.domain_origin())); +static JWT_EMERGENCY_ACCESS_INVITE_ISSUER: LazyLock = + LazyLock::new(|| format!("{}|emergencyaccessinvite", CONFIG.domain_origin())); +static JWT_DELETE_ISSUER: LazyLock = LazyLock::new(|| format!("{}|delete", CONFIG.domain_origin())); +static JWT_VERIFYEMAIL_ISSUER: LazyLock = LazyLock::new(|| format!("{}|verifyemail", CONFIG.domain_origin())); +static JWT_ADMIN_ISSUER: LazyLock = LazyLock::new(|| format!("{}|admin", CONFIG.domain_origin())); +static JWT_SEND_ISSUER: LazyLock = LazyLock::new(|| format!("{}|send", CONFIG.domain_origin())); +static JWT_ORG_API_KEY_ISSUER: LazyLock = + LazyLock::new(|| format!("{}|api.organization", CONFIG.domain_origin())); +static JWT_FILE_DOWNLOAD_ISSUER: LazyLock = + LazyLock::new(|| format!("{}|file_download", CONFIG.domain_origin())); +static JWT_REGISTER_VERIFY_ISSUER: LazyLock = + LazyLock::new(|| format!("{}|register_verify", CONFIG.domain_origin())); + +static PRIVATE_RSA_KEY: OnceLock = OnceLock::new(); +static PUBLIC_RSA_KEY: OnceLock = OnceLock::new(); pub async fn initialize_keys() -> Result<(), Error> { use std::io::Error; @@ -54,7 +60,7 @@ pub async fn initialize_keys() -> Result<(), Error> { .ok_or_else(|| Error::other("Private RSA key path filename is not valid UTF-8"))? 
.to_string(); - let operator = CONFIG.opendal_operator_for_path_type(PathType::RsaKey).map_err(Error::other)?; + let operator = CONFIG.opendal_operator_for_path_type(&PathType::RsaKey).map_err(Error::other)?; let priv_key_buffer = match operator.read(&rsa_key_filename).await { Ok(buffer) => Some(buffer), @@ -457,7 +463,7 @@ pub fn generate_delete_claims(uuid: String) -> BasicJwtClaims { } } -pub fn generate_verify_email_claims(user_id: UserId) -> BasicJwtClaims { +pub fn generate_verify_email_claims(user_id: &UserId) -> BasicJwtClaims { let time_now = Utc::now(); let expire_hours = i64::from(CONFIG.invitation_expiration_hours()); BasicJwtClaims { @@ -696,9 +702,9 @@ impl<'r> FromRequest<'r> for OrgHeaders { // First check the path, if this is not a valid uuid, try the query values. let url_org_id: Option = { if let Some(Ok(org_id)) = request.param::(1) { - Some(org_id.clone()) + Some(org_id) } else if let Some(Ok(org_id)) = request.query_value::("organizationId") { - Some(org_id.clone()) + Some(org_id) } else { None } diff --git a/src/config.rs b/src/config.rs index 116c9096..a3c47fb4 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,5 +1,6 @@ use std::{ env::consts::EXE_SUFFIX, + fmt, process::exit, sync::{ atomic::{AtomicBool, Ordering}, @@ -8,8 +9,8 @@ use std::{ }; use job_scheduler_ng::Schedule; -use once_cell::sync::Lazy; use reqwest::Url; +use serde::de::{self, Deserialize, Deserializer, MapAccess, Visitor}; use crate::{ db::DbConnType, @@ -17,7 +18,7 @@ use crate::{ util::{get_env, get_env_bool, get_web_vault_version, is_valid_email, parse_experimental_client_feature_flags}, }; -static CONFIG_FILE: Lazy = Lazy::new(|| { +static CONFIG_FILE: LazyLock = LazyLock::new(|| { let data_folder = get_env("DATA_FOLDER").unwrap_or_else(|| String::from("data")); get_env("CONFIG_FILE").unwrap_or_else(|| format!("{data_folder}/config.json")) }); @@ -34,7 +35,7 @@ static CONFIG_FILENAME: LazyLock = LazyLock::new(|| { pub static SKIP_CONFIG_VALIDATION: AtomicBool = 
AtomicBool::new(false); -pub static CONFIG: Lazy = Lazy::new(|| { +pub static CONFIG: LazyLock = LazyLock::new(|| { std::thread::spawn(|| { let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap_or_else(|e| { println!("Error loading config:\n {e:?}\n"); @@ -56,6 +57,41 @@ pub static CONFIG: Lazy = Lazy::new(|| { pub type Pass = String; macro_rules! make_config { + // Support string print + ( @supportstr $name:ident, $value:expr, Pass, option ) => { serde_json::to_value(&$value.as_ref().map(|_| String::from("***"))).unwrap() }; // Optional pass, we map to an Option with "***" + ( @supportstr $name:ident, $value:expr, Pass, $none_action:ident ) => { "***".into() }; // Required pass, we return "***" + ( @supportstr $name:ident, $value:expr, $ty:ty, option ) => { serde_json::to_value(&$value).unwrap() }; // Optional other or string, we convert to json + ( @supportstr $name:ident, $value:expr, String, $none_action:ident ) => { $value.as_str().into() }; // Required string value, we convert to json + ( @supportstr $name:ident, $value:expr, $ty:ty, $none_action:ident ) => { ($value).into() }; // Required other value, we return as is or convert to json + + // Group or empty string + ( @show ) => { "" }; + ( @show $lit:literal ) => { $lit }; + + // Wrap the optionals in an Option type + ( @type $ty:ty, option) => { Option<$ty> }; + ( @type $ty:ty, $id:ident) => { $ty }; + + // Generate the values depending on none_action + ( @build $value:expr, $config:expr, option, ) => { $value }; + ( @build $value:expr, $config:expr, def, $default:expr ) => { $value.unwrap_or($default) }; + ( @build $value:expr, $config:expr, auto, $default_fn:expr ) => {{ + match $value { + Some(v) => v, + None => { + let f: &dyn Fn(&ConfigItems) -> _ = &$default_fn; + f($config) + } + } + }}; + ( @build $value:expr, $config:expr, generated, $default_fn:expr ) => {{ + let f: &dyn Fn(&ConfigItems) -> _ = &$default_fn; + f($config) + }}; + + ( @getenv $name:expr, bool ) => { 
get_env_bool($name) }; + ( @getenv $name:expr, $ty:ident ) => { get_env($name) }; + ($( $(#[doc = $groupdoc:literal])? $group:ident $(: $group_enabled:ident)? { @@ -75,10 +111,103 @@ macro_rules! make_config { _env: ConfigBuilder, _usr: ConfigBuilder, - _overrides: Vec, + _overrides: Vec<&'static str>, + } + + // Custom Deserialize for ConfigBuilder, mainly based upon https://serde.rs/deserialize-struct.html + // This deserialize doesn't care if there are keys missing, or if there are duplicate keys + // In case of duplicate keys (which should never be possible unless manually edited), the last value is used! + // Main reason for this is removing the `visit_seq` function, which causes a lot of code generation not needed or used for this struct. + impl<'de> Deserialize<'de> for ConfigBuilder { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + $($( + stringify!($name), + )+)+ + ]; + + #[allow(non_camel_case_types)] + enum Field { + $($( + $name, + )+)+ + __ignore, + } + + impl<'de> Deserialize<'de> for Field { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct FieldVisitor; + + impl Visitor<'_> for FieldVisitor { + type Value = Field; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("ConfigBuilder field identifier") + } + + #[inline] + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + match value { + $($( + stringify!($name) => Ok(Field::$name), + )+)+ + _ => Ok(Field::__ignore), + } + } + } + + deserializer.deserialize_identifier(FieldVisitor) + } + } + + struct ConfigBuilderVisitor; + + impl<'de> Visitor<'de> for ConfigBuilderVisitor { + type Value = ConfigBuilder; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("struct ConfigBuilder") + } + + #[inline] + fn visit_map(self, mut map: A) -> Result + where + A: MapAccess<'de>, + { + let mut builder = 
ConfigBuilder::default(); + while let Some(key) = map.next_key()? { + match key { + $($( + Field::$name => { + if builder.$name.is_some() { + return Err(de::Error::duplicate_field(stringify!($name))); + } + builder.$name = map.next_value()?; + } + )+)+ + Field::__ignore => { + let _ = map.next_value::()?; + } + } + } + Ok(builder) + } + } + + deserializer.deserialize_struct("ConfigBuilder", FIELDS, ConfigBuilderVisitor) + } } - #[derive(Clone, Default, Deserialize, Serialize)] + #[derive(Clone, Default, Serialize)] pub struct ConfigBuilder { $($( #[serde(skip_serializing_if = "Option::is_none")] @@ -87,7 +216,6 @@ macro_rules! make_config { } impl ConfigBuilder { - #[allow(clippy::field_reassign_with_default)] fn from_env() -> Self { let env_file = get_env("ENV_FILE").unwrap_or_else(|| String::from(".env")); match dotenvy::from_path(&env_file) { @@ -149,14 +277,14 @@ macro_rules! make_config { /// Merges the values of both builders into a new builder. /// If both have the same element, `other` wins. - fn merge(&self, other: &Self, show_overrides: bool, overrides: &mut Vec) -> Self { + fn merge(&self, other: &Self, show_overrides: bool, overrides: &mut Vec<&str>) -> Self { let mut builder = self.clone(); $($( if let v @Some(_) = &other.$name { builder.$name = v.clone(); if self.$name.is_some() { - overrides.push(pastey::paste!(stringify!([<$name:upper>])).into()); + overrides.push(pastey::paste!(stringify!([<$name:upper>]))); } } )+)+ @@ -197,6 +325,32 @@ macro_rules! 
make_config { #[derive(Clone, Default)] struct ConfigItems { $($( $name: make_config!{@type $ty, $none_action}, )+)+ } + #[derive(Serialize)] + struct ElementDoc { + name: &'static str, + description: &'static str, + } + + #[derive(Serialize)] + struct ElementData { + editable: bool, + name: &'static str, + value: serde_json::Value, + default: serde_json::Value, + #[serde(rename = "type")] + r#type: &'static str, + doc: ElementDoc, + overridden: bool, + } + + #[derive(Serialize)] + pub struct GroupData { + group: &'static str, + grouptoggle: &'static str, + groupdoc: &'static str, + elements: Vec, + } + #[allow(unused)] impl Config { $($( @@ -208,11 +362,12 @@ macro_rules! make_config { pub fn prepare_json(&self) -> serde_json::Value { let (def, cfg, overridden) = { + // Lock the inner as short as possible and clone what is needed to prevent deadlocks let inner = &self.inner.read().unwrap(); (inner._env.build(), inner.config.clone(), inner._overrides.clone()) }; - fn _get_form_type(rust_type: &str) -> &'static str { + fn _get_form_type(rust_type: &'static str) -> &'static str { match rust_type { "Pass" => "password", "String" => "text", @@ -221,48 +376,36 @@ macro_rules! make_config { } } - fn _get_doc(doc: &str) -> serde_json::Value { - let mut split = doc.split("|>").map(str::trim); - - // We do not use the json!() macro here since that causes a lot of macro recursion. 
- // This slows down compile time and it also causes issues with rust-analyzer - serde_json::Value::Object({ - let mut doc_json = serde_json::Map::new(); - doc_json.insert("name".into(), serde_json::to_value(split.next()).unwrap()); - doc_json.insert("description".into(), serde_json::to_value(split.next()).unwrap()); - doc_json - }) + fn _get_doc(doc_str: &'static str) -> ElementDoc { + let mut split = doc_str.split("|>").map(str::trim); + ElementDoc { + name: split.next().unwrap_or_default(), + description: split.next().unwrap_or_default(), + } } - // We do not use the json!() macro here since that causes a lot of macro recursion. - // This slows down compile time and it also causes issues with rust-analyzer - serde_json::Value::Array(<[_]>::into_vec(Box::new([ - $( - serde_json::Value::Object({ - let mut group = serde_json::Map::new(); - group.insert("group".into(), (stringify!($group)).into()); - group.insert("grouptoggle".into(), (stringify!($($group_enabled)?)).into()); - group.insert("groupdoc".into(), (make_config!{ @show $($groupdoc)? 
}).into()); - - group.insert("elements".into(), serde_json::Value::Array(<[_]>::into_vec(Box::new([ - $( - serde_json::Value::Object({ - let mut element = serde_json::Map::new(); - element.insert("editable".into(), ($editable).into()); - element.insert("name".into(), (stringify!($name)).into()); - element.insert("value".into(), serde_json::to_value(cfg.$name).unwrap()); - element.insert("default".into(), serde_json::to_value(def.$name).unwrap()); - element.insert("type".into(), (_get_form_type(stringify!($ty))).into()); - element.insert("doc".into(), (_get_doc(concat!($($doc),+))).into()); - element.insert("overridden".into(), (overridden.contains(&pastey::paste!(stringify!([<$name:upper>])).into())).into()); - element - }), - )+ - ])))); - group - }), - )+ - ]))) + let data: Vec = vec![ + $( // This repetition is for each group + GroupData { + group: stringify!($group), + grouptoggle: stringify!($($group_enabled)?), + groupdoc: (make_config!{ @show $($groupdoc)? }), + + elements: vec![ + $( // This repetition is for each element within a group + ElementData { + editable: $editable, + name: stringify!($name), + value: serde_json::to_value(&cfg.$name).unwrap_or_default(), + default: serde_json::to_value(&def.$name).unwrap_or_default(), + r#type: _get_form_type(stringify!($ty)), + doc: _get_doc(concat!($($doc),+)), + overridden: overridden.contains(&pastey::paste!(stringify!([<$name:upper>]))), + }, + )+], // End of elements repetition + }, + )+]; // End of groups repetition + serde_json::to_value(data).unwrap() } pub fn get_support_json(&self) -> serde_json::Value { @@ -270,8 +413,8 @@ macro_rules! make_config { // Pass types will always be masked and no need to put them in the list. // Besides Pass, only String types will be masked via _privacy_mask. const PRIVACY_CONFIG: &[&str] = &[ - "allowed_iframe_ancestors", "allowed_connect_src", + "allowed_iframe_ancestors", "database_url", "domain_origin", "domain_path", @@ -279,16 +422,18 @@ macro_rules! 
make_config { "helo_name", "org_creation_users", "signups_domains_whitelist", + "_smtp_img_src", + "smtp_from_name", "smtp_from", "smtp_host", "smtp_username", - "_smtp_img_src", - "sso_client_id", "sso_authority", "sso_callback_path", + "sso_client_id", ]; let cfg = { + // Lock the inner as short as possible and clone what is needed to prevent deadlocks let inner = &self.inner.read().unwrap(); inner.config.clone() }; @@ -318,13 +463,21 @@ macro_rules! make_config { serde_json::Value::Object({ let mut json = serde_json::Map::new(); $($( - json.insert(stringify!($name).into(), make_config!{ @supportstr $name, cfg.$name, $ty, $none_action }); + json.insert(String::from(stringify!($name)), make_config!{ @supportstr $name, cfg.$name, $ty, $none_action }); )+)+; + // Loop through all privacy sensitive keys and mask them + for mask_key in PRIVACY_CONFIG { + if let Some(value) = json.get_mut(*mask_key) { + if let Some(s) = value.as_str() { + *value = _privacy_mask(s).into(); + } + } + } json }) } - pub fn get_overrides(&self) -> Vec { + pub fn get_overrides(&self) -> Vec<&'static str> { let overrides = { let inner = &self.inner.read().unwrap(); inner._overrides.clone() @@ -333,55 +486,6 @@ macro_rules! 
make_config { } } }; - - // Support string print - ( @supportstr $name:ident, $value:expr, Pass, option ) => { serde_json::to_value($value.as_ref().map(|_| String::from("***"))).unwrap() }; // Optional pass, we map to an Option with "***" - ( @supportstr $name:ident, $value:expr, Pass, $none_action:ident ) => { "***".into() }; // Required pass, we return "***" - ( @supportstr $name:ident, $value:expr, String, option ) => { // Optional other value, we return as is or convert to string to apply the privacy config - if PRIVACY_CONFIG.contains(&stringify!($name)) { - serde_json::to_value($value.as_ref().map(|x| _privacy_mask(x) )).unwrap() - } else { - serde_json::to_value($value).unwrap() - } - }; - ( @supportstr $name:ident, $value:expr, String, $none_action:ident ) => { // Required other value, we return as is or convert to string to apply the privacy config - if PRIVACY_CONFIG.contains(&stringify!($name)) { - _privacy_mask(&$value).into() - } else { - ($value).into() - } - }; - ( @supportstr $name:ident, $value:expr, $ty:ty, option ) => { serde_json::to_value($value).unwrap() }; // Optional other value, we return as is or convert to string to apply the privacy config - ( @supportstr $name:ident, $value:expr, $ty:ty, $none_action:ident ) => { ($value).into() }; // Required other value, we return as is or convert to string to apply the privacy config - - // Group or empty string - ( @show ) => { "" }; - ( @show $lit:literal ) => { $lit }; - - // Wrap the optionals in an Option type - ( @type $ty:ty, option) => { Option<$ty> }; - ( @type $ty:ty, $id:ident) => { $ty }; - - // Generate the values depending on none_action - ( @build $value:expr, $config:expr, option, ) => { $value }; - ( @build $value:expr, $config:expr, def, $default:expr ) => { $value.unwrap_or($default) }; - ( @build $value:expr, $config:expr, auto, $default_fn:expr ) => {{ - match $value { - Some(v) => v, - None => { - let f: &dyn Fn(&ConfigItems) -> _ = &$default_fn; - f($config) - } - } - }}; - ( 
@build $value:expr, $config:expr, generated, $default_fn:expr ) => {{ - let f: &dyn Fn(&ConfigItems) -> _ = &$default_fn; - f($config) - }}; - - ( @getenv $name:expr, bool ) => { get_env_bool($name) }; - ( @getenv $name:expr, $ty:ident ) => { get_env($name) }; - } //STRUCTURE: @@ -1512,7 +1616,7 @@ impl Config { if let Some(akey) = self._duo_akey() { akey } else { - let akey_s = crate::crypto::encode_random_bytes::<64>(data_encoding::BASE64); + let akey_s = crate::crypto::encode_random_bytes::<64>(&data_encoding::BASE64); // Save the new value let builder = ConfigBuilder { @@ -1536,7 +1640,7 @@ impl Config { token.is_some() && !token.unwrap().trim().is_empty() } - pub fn opendal_operator_for_path_type(&self, path_type: PathType) -> Result { + pub fn opendal_operator_for_path_type(&self, path_type: &PathType) -> Result { let path = match path_type { PathType::Data => self.data_folder(), PathType::IconCache => self.icon_cache_folder(), @@ -1728,7 +1832,7 @@ fn to_json<'reg, 'rc>( // Configure the web-vault version as an integer so it can be used as a comparison smaller or greater then. // The default is based upon the version since this feature is added. -static WEB_VAULT_VERSION: Lazy = Lazy::new(|| { +static WEB_VAULT_VERSION: LazyLock = LazyLock::new(|| { let vault_version = get_web_vault_version(); // Use a single regex capture to extract version components let re = regex::Regex::new(r"(\d{4})\.(\d{1,2})\.(\d{1,2})").unwrap(); @@ -1744,7 +1848,7 @@ static WEB_VAULT_VERSION: Lazy = Lazy::new(|| { // Configure the Vaultwarden version as an integer so it can be used as a comparison smaller or greater then. // The default is based upon the version since this feature is added. 
-static VW_VERSION: Lazy = Lazy::new(|| { +static VW_VERSION: LazyLock = LazyLock::new(|| { let vw_version = crate::VERSION.unwrap_or("1.32.5"); // Use a single regex capture to extract version components let re = regex::Regex::new(r"(\d{1})\.(\d{1,2})\.(\d{1,2})").unwrap(); diff --git a/src/crypto.rs b/src/crypto.rs index ada0a26a..e2add1c6 100644 --- a/src/crypto.rs +++ b/src/crypto.rs @@ -48,7 +48,7 @@ pub fn get_random_bytes() -> [u8; N] { } /// Encode random bytes using the provided function. -pub fn encode_random_bytes(e: Encoding) -> String { +pub fn encode_random_bytes(e: &Encoding) -> String { e.encode(&get_random_bytes::()) } @@ -81,7 +81,7 @@ pub fn get_random_string_alphanum(num_chars: usize) -> String { } pub fn generate_id() -> String { - encode_random_bytes::(HEXLOWER) + encode_random_bytes::(&HEXLOWER) } pub fn generate_send_file_id() -> String { diff --git a/src/db/models/attachment.rs b/src/db/models/attachment.rs index aafb8766..0f9beb55 100644 --- a/src/db/models/attachment.rs +++ b/src/db/models/attachment.rs @@ -45,7 +45,7 @@ impl Attachment { } pub async fn get_url(&self, host: &str) -> Result { - let operator = CONFIG.opendal_operator_for_path_type(PathType::Attachments)?; + let operator = CONFIG.opendal_operator_for_path_type(&PathType::Attachments)?; if operator.info().scheme() == opendal::Scheme::Fs { let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone())); @@ -118,7 +118,7 @@ impl Attachment { .map_res("Error deleting attachment") }}?; - let operator = CONFIG.opendal_operator_for_path_type(PathType::Attachments)?; + let operator = CONFIG.opendal_operator_for_path_type(&PathType::Attachments)?; let file_path = self.get_file_path(); if let Err(e) = operator.delete(&file_path).await { diff --git a/src/db/models/device.rs b/src/db/models/device.rs index 005d942d..57f827c6 100644 --- a/src/db/models/device.rs +++ b/src/db/models/device.rs @@ -48,7 +48,7 @@ impl Device { } pub fn 
refresh_twofactor_remember(&mut self) -> String { - let twofactor_remember = crypto::encode_random_bytes::<180>(BASE64); + let twofactor_remember = crypto::encode_random_bytes::<180>(&BASE64); self.twofactor_remember = Some(twofactor_remember.clone()); twofactor_remember @@ -135,7 +135,7 @@ impl Device { push_uuid: Some(PushId(get_uuid())), push_token: None, - refresh_token: crypto::encode_random_bytes::<64>(BASE64URL), + refresh_token: crypto::encode_random_bytes::<64>(&BASE64URL), twofactor_remember: None, }; diff --git a/src/db/models/organization.rs b/src/db/models/organization.rs index 77cf91c0..47daac27 100644 --- a/src/db/models/organization.rs +++ b/src/db/models/organization.rs @@ -169,7 +169,7 @@ impl PartialOrd for i32 { /// Local methods impl Organization { - pub fn new(name: String, billing_email: String, private_key: Option, public_key: Option) -> Self { + pub fn new(name: String, billing_email: &str, private_key: Option, public_key: Option) -> Self { let billing_email = billing_email.to_lowercase(); Self { uuid: OrganizationId(crate::util::get_uuid()), diff --git a/src/db/models/send.rs b/src/db/models/send.rs index bf82c181..52c3dbbf 100644 --- a/src/db/models/send.rs +++ b/src/db/models/send.rs @@ -226,7 +226,7 @@ impl Send { self.update_users_revision(conn).await; if self.atype == SendType::File as i32 { - let operator = CONFIG.opendal_operator_for_path_type(PathType::Sends)?; + let operator = CONFIG.opendal_operator_for_path_type(&PathType::Sends)?; operator.remove_all(&self.uuid).await.ok(); } diff --git a/src/db/models/user.rs b/src/db/models/user.rs index 3a3b5157..624d3efc 100644 --- a/src/db/models/user.rs +++ b/src/db/models/user.rs @@ -106,7 +106,7 @@ impl User { pub const CLIENT_KDF_TYPE_DEFAULT: i32 = UserKdfType::Pbkdf2 as i32; pub const CLIENT_KDF_ITER_DEFAULT: i32 = 600_000; - pub fn new(email: String, name: Option) -> Self { + pub fn new(email: &str, name: Option) -> Self { let now = Utc::now().naive_utc(); let email = 
email.to_lowercase(); diff --git a/src/error.rs b/src/error.rs index 06ebf3aa..7b46df09 100644 --- a/src/error.rs +++ b/src/error.rs @@ -3,6 +3,7 @@ // use crate::db::models::EventType; use crate::http_client::CustomHttpClientError; +use serde::ser::{Serialize, SerializeStruct, Serializer}; use std::error::Error as StdError; macro_rules! make_error { @@ -72,7 +73,7 @@ make_error! { Empty(Empty): _no_source, _serialize, // Used to represent err! calls Simple(String): _no_source, _api_error, - Compact(Compact): _no_source, _api_error_small, + Compact(Compact): _no_source, _compact_api_error, // Used in our custom http client to handle non-global IPs and blocked domains CustomHttpClient(CustomHttpClientError): _has_source, _api_error, @@ -128,6 +129,10 @@ impl Error { (usr_msg, log_msg.into()).into() } + pub fn new_msg + Clone>(usr_msg: M) -> Self { + (usr_msg.clone(), usr_msg.into()).into() + } + pub fn empty() -> Self { Empty {}.into() } @@ -194,38 +199,97 @@ fn _no_source(_: T) -> Option { None } -fn _serialize(e: &impl serde::Serialize, _msg: &str) -> String { +fn _serialize(e: &impl Serialize, _msg: &str) -> String { serde_json::to_string(e).unwrap() } +/// This will serialize the default ApiErrorResponse +/// It will add the needed fields which are mostly empty or have multiple copies of the message +/// This is more efficient than having a larger struct and use the Serialize derive +/// It also prevents using `json!()` calls to create the final output +impl Serialize for ApiErrorResponse<'_> { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + #[derive(serde::Serialize)] + struct ErrorModel<'a> { + message: &'a str, + object: &'static str, + } + + let mut state = serializer.serialize_struct("ApiErrorResponse", 9)?; + + state.serialize_field("message", self.0.message)?; + + let mut validation_errors = std::collections::HashMap::with_capacity(1); + validation_errors.insert("", vec![self.0.message]); + 
state.serialize_field("validationErrors", &validation_errors)?; + + let error_model = ErrorModel { + message: self.0.message, + object: "error", + }; + state.serialize_field("errorModel", &error_model)?; + + state.serialize_field("error", "")?; + state.serialize_field("error_description", "")?; + state.serialize_field("exceptionMessage", &None::<()>)?; + state.serialize_field("exceptionStackTrace", &None::<()>)?; + state.serialize_field("innerExceptionMessage", &None::<()>)?; + state.serialize_field("object", "error")?; + + state.end() + } +} + +/// This will serialize the smaller CompactApiErrorResponse +/// It will add the needed fields which are mostly empty +/// This is more efficient than having a larger struct and using the Serialize derive +/// It also prevents using `json!()` calls to create the final output +impl Serialize for CompactApiErrorResponse<'_> { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut state = serializer.serialize_struct("CompactApiErrorResponse", 6)?; + + state.serialize_field("message", self.0.message)?; + state.serialize_field("validationErrors", &None::<()>)?; + state.serialize_field("exceptionMessage", &None::<()>)?; + state.serialize_field("exceptionStackTrace", &None::<()>)?; + state.serialize_field("innerExceptionMessage", &None::<()>)?; + state.serialize_field("object", "error")?; + + state.end() + } +} + +/// Main API Error struct template +/// This struct can be used by both ApiErrorResponse and CompactApiErrorResponse +/// is small and doesn't contain unneeded empty fields.
This is more memory efficient, but also less code to compile +struct ApiErrorMsg<'a> { + message: &'a str, +} +/// Default API Error response struct +/// The custom serialization adds all other needed fields +struct ApiErrorResponse<'a>(ApiErrorMsg<'a>); +/// Compact API Error response struct used for some newer error responses +/// The custom serialization adds all other needed fields +struct CompactApiErrorResponse<'a>(ApiErrorMsg<'a>); + fn _api_error(_: &impl std::any::Any, msg: &str) -> String { - let json = json!({ - "message": msg, - "error": "", - "error_description": "", - "validationErrors": {"": [ msg ]}, - "errorModel": { - "message": msg, - "object": "error" - }, - "exceptionMessage": null, - "exceptionStackTrace": null, - "innerExceptionMessage": null, - "object": "error" - }); - _serialize(&json, "") + let response = ApiErrorMsg { + message: msg, + }; + serde_json::to_string(&ApiErrorResponse(response)).unwrap() } -fn _api_error_small(_: &impl std::any::Any, msg: &str) -> String { - let json = json!({ - "message": msg, - "validationErrors": null, - "exceptionMessage": null, - "exceptionStackTrace": null, - "innerExceptionMessage": null, - "object": "error" - }); - _serialize(&json, "") +fn _compact_api_error(_: &impl std::any::Any, msg: &str) -> String { + let response = ApiErrorMsg { + message: msg, + }; + serde_json::to_string(&CompactApiErrorResponse(response)).unwrap() } // @@ -256,34 +320,41 @@ impl Responder<'_, 'static> for Error { #[macro_export] macro_rules! 
err { ($kind:ident, $msg:expr) => {{ - error!("{}", $msg); - return Err($crate::error::Error::new($msg, $msg).with_kind($crate::error::ErrorKind::$kind($crate::error::$kind {}))); + let msg = $msg; + error!("{msg}"); + return Err($crate::error::Error::new_msg(msg).with_kind($crate::error::ErrorKind::$kind($crate::error::$kind {}))); }}; ($msg:expr) => {{ - error!("{}", $msg); - return Err($crate::error::Error::new($msg, $msg)); + let msg = $msg; + error!("{msg}"); + return Err($crate::error::Error::new_msg(msg)); }}; ($msg:expr, ErrorEvent $err_event:tt) => {{ - error!("{}", $msg); - return Err($crate::error::Error::new($msg, $msg).with_event($crate::error::ErrorEvent $err_event)); + let msg = $msg; + error!("{msg}"); + return Err($crate::error::Error::new_msg(msg).with_event($crate::error::ErrorEvent $err_event)); }}; ($usr_msg:expr, $log_value:expr) => {{ - error!("{}. {}", $usr_msg, $log_value); - return Err($crate::error::Error::new($usr_msg, $log_value)); + let usr_msg = $usr_msg; + let log_value = $log_value; + error!("{usr_msg}. {log_value}"); + return Err($crate::error::Error::new(usr_msg, log_value)); }}; ($usr_msg:expr, $log_value:expr, ErrorEvent $err_event:tt) => {{ - error!("{}. {}", $usr_msg, $log_value); - return Err($crate::error::Error::new($usr_msg, $log_value).with_event($crate::error::ErrorEvent $err_event)); + let usr_msg = $usr_msg; + let log_value = $log_value; + error!("{usr_msg}. {log_value}"); + return Err($crate::error::Error::new(usr_msg, log_value).with_event($crate::error::ErrorEvent $err_event)); }}; } #[macro_export] macro_rules! 
err_silent { ($msg:expr) => {{ - return Err($crate::error::Error::new($msg, $msg)); + return Err($crate::error::Error::new_msg($msg)); }}; ($msg:expr, ErrorEvent $err_event:tt) => {{ - return Err($crate::error::Error::new($msg, $msg).with_event($crate::error::ErrorEvent $err_event)); + return Err($crate::error::Error::new_msg($msg).with_event($crate::error::ErrorEvent $err_event)); }}; ($usr_msg:expr, $log_value:expr) => {{ return Err($crate::error::Error::new($usr_msg, $log_value)); @@ -296,12 +367,15 @@ macro_rules! err_silent { #[macro_export] macro_rules! err_code { ($msg:expr, $err_code:expr) => {{ - error!("{}", $msg); - return Err($crate::error::Error::new($msg, $msg).with_code($err_code)); + let msg = $msg; + error!("{msg}"); + return Err($crate::error::Error::new_msg(msg).with_code($err_code)); }}; ($usr_msg:expr, $log_value:expr, $err_code:expr) => {{ - error!("{}. {}", $usr_msg, $log_value); - return Err($crate::error::Error::new($usr_msg, $log_value).with_code($err_code)); + let usr_msg = $usr_msg; + let log_value = $log_value; + error!("{usr_msg}. {log_value}"); + return Err($crate::error::Error::new(usr_msg, log_value).with_code($err_code)); }}; } @@ -309,7 +383,7 @@ macro_rules! err_code { macro_rules! err_discard { ($msg:expr, $data:expr) => {{ std::io::copy(&mut $data.open(), &mut std::io::sink()).ok(); - return Err($crate::error::Error::new($msg, $msg)); + return Err($crate::error::Error::new_msg($msg)); }}; ($usr_msg:expr, $log_value:expr, $data:expr) => {{ std::io::copy(&mut $data.open(), &mut std::io::sink()).ok(); @@ -334,7 +408,9 @@ macro_rules! err_handler { return ::rocket::request::Outcome::Error((rocket::http::Status::Unauthorized, $expr)); }}; ($usr_msg:expr, $log_value:expr) => {{ - error!(target: "auth", "Unauthorized Error: {}. 
{}", $usr_msg, $log_value); - return ::rocket::request::Outcome::Error((rocket::http::Status::Unauthorized, $usr_msg)); + let usr_msg = $usr_msg; + let log_value = $log_value; + error!(target: "auth", "Unauthorized Error: {usr_msg}. {log_value}"); + return ::rocket::request::Outcome::Error((rocket::http::Status::Unauthorized, usr_msg)); }}; } diff --git a/src/http_client.rs b/src/http_client.rs index fc3b1b42..b48f340c 100644 --- a/src/http_client.rs +++ b/src/http_client.rs @@ -2,12 +2,11 @@ use std::{ fmt, net::{IpAddr, SocketAddr}, str::FromStr, - sync::{Arc, Mutex}, + sync::{Arc, LazyLock, Mutex}, time::Duration, }; use hickory_resolver::{name_server::TokioConnectionProvider, TokioResolver}; -use once_cell::sync::Lazy; use regex::Regex; use reqwest::{ dns::{Name, Resolve, Resolving}, @@ -25,9 +24,10 @@ pub fn make_http_request(method: reqwest::Method, url: &str) -> Result = Lazy::new(|| get_reqwest_client_builder().build().expect("Failed to build client")); + static INSTANCE: LazyLock = + LazyLock::new(|| get_reqwest_client_builder().build().expect("Failed to build client")); Ok(INSTANCE.request(method, url)) } @@ -45,7 +45,7 @@ pub fn get_reqwest_client_builder() -> ClientBuilder { return attempt.error("Invalid host"); }; - if let Err(e) = should_block_host(host) { + if let Err(e) = should_block_host(&host) { return attempt.error(e); } @@ -100,11 +100,11 @@ fn should_block_address_regex(domain_or_ip: &str) -> bool { is_match } -fn should_block_host(host: Host<&str>) -> Result<(), CustomHttpClientError> { +fn should_block_host(host: &Host<&str>) -> Result<(), CustomHttpClientError> { let (ip, host_str): (Option, String) = match host { - Host::Ipv4(ip) => (Some(ip.into()), ip.to_string()), - Host::Ipv6(ip) => (Some(ip.into()), ip.to_string()), - Host::Domain(d) => (None, d.to_string()), + Host::Ipv4(ip) => (Some(IpAddr::V4(*ip)), ip.to_string()), + Host::Ipv6(ip) => (Some(IpAddr::V6(*ip)), ip.to_string()), + Host::Domain(d) => (None, (*d).to_string()), }; if let 
Some(ip) = ip { @@ -179,7 +179,7 @@ type BoxError = Box; impl CustomDnsResolver { fn instance() -> Arc { - static INSTANCE: Lazy> = Lazy::new(CustomDnsResolver::new); + static INSTANCE: LazyLock> = LazyLock::new(CustomDnsResolver::new); Arc::clone(&*INSTANCE) } diff --git a/src/mail.rs b/src/mail.rs index ca5b7eb5..9571c750 100644 --- a/src/mail.rs +++ b/src/mail.rs @@ -184,7 +184,7 @@ pub async fn send_delete_account(address: &str, user_id: &UserId) -> EmptyResult } pub async fn send_verify_email(address: &str, user_id: &UserId) -> EmptyResult { - let claims = generate_verify_email_claims(user_id.clone()); + let claims = generate_verify_email_claims(user_id); let verify_email_token = encode_jwt(&claims); let (subject, body_html, body_text) = get_text( @@ -235,7 +235,7 @@ pub async fn send_welcome(address: &str) -> EmptyResult { } pub async fn send_welcome_must_verify(address: &str, user_id: &UserId) -> EmptyResult { - let claims = generate_verify_email_claims(user_id.clone()); + let claims = generate_verify_email_claims(user_id); let verify_email_token = encode_jwt(&claims); let (subject, body_html, body_text) = get_text( diff --git a/src/main.rs b/src/main.rs index 3195300b..61c0ee54 100644 --- a/src/main.rs +++ b/src/main.rs @@ -467,7 +467,7 @@ async fn check_data_folder() { if data_folder.starts_with("s3://") { if let Err(e) = CONFIG - .opendal_operator_for_path_type(PathType::Data) + .opendal_operator_for_path_type(&PathType::Data) .unwrap_or_else(|e| { error!("Failed to create S3 operator for data folder '{data_folder}': {e:?}"); exit(1); diff --git a/src/ratelimit.rs b/src/ratelimit.rs index c85ce7ad..854bcc53 100644 --- a/src/ratelimit.rs +++ b/src/ratelimit.rs @@ -1,5 +1,4 @@ -use once_cell::sync::Lazy; -use std::{net::IpAddr, num::NonZeroU32, time::Duration}; +use std::{net::IpAddr, num::NonZeroU32, sync::LazyLock, time::Duration}; use governor::{clock::DefaultClock, state::keyed::DashMapStateStore, Quota, RateLimiter}; @@ -7,13 +6,13 @@ use 
crate::{Error, CONFIG}; type Limiter = RateLimiter, DefaultClock>; -static LIMITER_LOGIN: Lazy = Lazy::new(|| { +static LIMITER_LOGIN: LazyLock = LazyLock::new(|| { let seconds = Duration::from_secs(CONFIG.login_ratelimit_seconds()); let burst = NonZeroU32::new(CONFIG.login_ratelimit_max_burst()).expect("Non-zero login ratelimit burst"); RateLimiter::keyed(Quota::with_period(seconds).expect("Non-zero login ratelimit seconds").allow_burst(burst)) }); -static LIMITER_ADMIN: Lazy = Lazy::new(|| { +static LIMITER_ADMIN: LazyLock = LazyLock::new(|| { let seconds = Duration::from_secs(CONFIG.admin_ratelimit_seconds()); let burst = NonZeroU32::new(CONFIG.admin_ratelimit_max_burst()).expect("Non-zero admin ratelimit burst"); RateLimiter::keyed(Quota::with_period(seconds).expect("Non-zero admin ratelimit seconds").allow_burst(burst)) diff --git a/src/sso.rs b/src/sso.rs index 8e746114..11afc575 100644 --- a/src/sso.rs +++ b/src/sso.rs @@ -1,12 +1,11 @@ +use std::{sync::LazyLock, time::Duration}; + use chrono::Utc; use derive_more::{AsRef, Deref, Display, From}; +use mini_moka::sync::Cache; use regex::Regex; -use std::time::Duration; use url::Url; -use mini_moka::sync::Cache; -use once_cell::sync::Lazy; - use crate::{ api::ApiResult, auth, @@ -21,12 +20,12 @@ use crate::{ pub static FAKE_IDENTIFIER: &str = "Vaultwarden"; -static AC_CACHE: Lazy> = - Lazy::new(|| Cache::builder().max_capacity(1000).time_to_live(Duration::from_secs(10 * 60)).build()); +static AC_CACHE: LazyLock> = + LazyLock::new(|| Cache::builder().max_capacity(1000).time_to_live(Duration::from_secs(10 * 60)).build()); -static SSO_JWT_ISSUER: Lazy = Lazy::new(|| format!("{}|sso", CONFIG.domain_origin())); +static SSO_JWT_ISSUER: LazyLock = LazyLock::new(|| format!("{}|sso", CONFIG.domain_origin())); -pub static NONCE_EXPIRATION: Lazy = Lazy::new(|| chrono::TimeDelta::try_minutes(10).unwrap()); +pub static NONCE_EXPIRATION: LazyLock = LazyLock::new(|| chrono::TimeDelta::try_minutes(10).unwrap()); #[derive( 
Clone, @@ -151,7 +150,7 @@ fn decode_token_claims(token_name: &str, token: &str) -> ApiResult ApiResult { +pub fn decode_state(base64_state: &str) -> ApiResult { let state = match data_encoding::BASE64.decode(base64_state.as_bytes()) { Ok(vec) => match String::from_utf8(vec) { Ok(valid) => OIDCState(valid), diff --git a/src/sso_client.rs b/src/sso_client.rs index 3d2a3c48..5dc614e4 100644 --- a/src/sso_client.rs +++ b/src/sso_client.rs @@ -1,13 +1,9 @@ -use regex::Regex; -use std::borrow::Cow; -use std::time::Duration; -use url::Url; +use std::{borrow::Cow, sync::LazyLock, time::Duration}; use mini_moka::sync::Cache; -use once_cell::sync::Lazy; -use openidconnect::core::*; -use openidconnect::reqwest; -use openidconnect::*; +use openidconnect::{core::*, reqwest, *}; +use regex::Regex; +use url::Url; use crate::{ api::{ApiResult, EmptyResult}, @@ -16,8 +12,8 @@ use crate::{ CONFIG, }; -static CLIENT_CACHE_KEY: Lazy = Lazy::new(|| "sso-client".to_string()); -static CLIENT_CACHE: Lazy> = Lazy::new(|| { +static CLIENT_CACHE_KEY: LazyLock = LazyLock::new(|| "sso-client".to_string()); +static CLIENT_CACHE: LazyLock> = LazyLock::new(|| { Cache::builder().max_capacity(1).time_to_live(Duration::from_secs(CONFIG.sso_client_cache_expiration())).build() }); @@ -162,7 +158,7 @@ impl Client { if CONFIG.sso_pkce() { match nonce.verifier { None => err!(format!("Missing verifier in the DB nonce table")), - Some(secret) => exchange = exchange.set_pkce_verifier(PkceCodeVerifier::new(secret.clone())), + Some(secret) => exchange = exchange.set_pkce_verifier(PkceCodeVerifier::new(secret)), } } diff --git a/src/util.rs b/src/util.rs index 3048ff92..57747480 100644 --- a/src/util.rs +++ b/src/util.rs @@ -841,7 +841,7 @@ pub fn is_global(ip: std::net::IpAddr) -> bool { /// Saves a Rocket temporary file to the OpenDAL Operator at the given path. pub async fn save_temp_file( - path_type: PathType, + path_type: &PathType, path: &str, temp_file: rocket::fs::TempFile<'_>, overwrite: bool,