diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7ed85943..65ec08c9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -69,9 +69,9 @@ jobs: CHANNEL: ${{ matrix.channel }} run: | if [[ "${CHANNEL}" == 'rust-toolchain' ]]; then - RUST_TOOLCHAIN="$(grep -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml)" + RUST_TOOLCHAIN="$(grep -m1 -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml)" elif [[ "${CHANNEL}" == 'msrv' ]]; then - RUST_TOOLCHAIN="$(grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml)" + RUST_TOOLCHAIN="$(grep -m1 -oP 'rust-version\s.*"(\K.*?)(?=")' Cargo.toml)" else RUST_TOOLCHAIN="${CHANNEL}" fi @@ -116,7 +116,7 @@ jobs: # Enable Rust Caching - name: Rust Caching - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: # Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes. # Like changing the build host from Ubuntu 20.04 to 22.04 for example. @@ -126,18 +126,6 @@ jobs: # Run cargo tests # First test all features together, afterwards test them separately. 
- - name: "test features: sqlite,mysql,postgresql,enable_mimalloc,query_logger" - id: test_sqlite_mysql_postgresql_mimalloc_logger - if: ${{ !cancelled() }} - run: | - cargo test --features sqlite,mysql,postgresql,enable_mimalloc,query_logger - - - name: "test features: sqlite,mysql,postgresql,enable_mimalloc" - id: test_sqlite_mysql_postgresql_mimalloc - if: ${{ !cancelled() }} - run: | - cargo test --features sqlite,mysql,postgresql,enable_mimalloc - - name: "test features: sqlite,mysql,postgresql" id: test_sqlite_mysql_postgresql if: ${{ !cancelled() }} @@ -187,8 +175,6 @@ jobs: - name: "Some checks failed" if: ${{ failure() }} env: - TEST_DB_M_L: ${{ steps.test_sqlite_mysql_postgresql_mimalloc_logger.outcome }} - TEST_DB_M: ${{ steps.test_sqlite_mysql_postgresql_mimalloc.outcome }} TEST_DB: ${{ steps.test_sqlite_mysql_postgresql.outcome }} TEST_SQLITE: ${{ steps.test_sqlite.outcome }} TEST_MYSQL: ${{ steps.test_mysql.outcome }} @@ -200,8 +186,6 @@ jobs: echo "" >> "${GITHUB_STEP_SUMMARY}" echo "|Job|Status|" >> "${GITHUB_STEP_SUMMARY}" echo "|---|------|" >> "${GITHUB_STEP_SUMMARY}" - echo "|test (sqlite,mysql,postgresql,enable_mimalloc,query_logger)|${TEST_DB_M_L}|" >> "${GITHUB_STEP_SUMMARY}" - echo "|test (sqlite,mysql,postgresql,enable_mimalloc)|${TEST_DB_M}|" >> "${GITHUB_STEP_SUMMARY}" echo "|test (sqlite,mysql,postgresql)|${TEST_DB}|" >> "${GITHUB_STEP_SUMMARY}" echo "|test (sqlite)|${TEST_SQLITE}|" >> "${GITHUB_STEP_SUMMARY}" echo "|test (mysql)|${TEST_MYSQL}|" >> "${GITHUB_STEP_SUMMARY}" diff --git a/.github/workflows/hadolint.yml b/.github/workflows/hadolint.yml index 75d3a95d..be546eed 100644 --- a/.github/workflows/hadolint.yml +++ b/.github/workflows/hadolint.yml @@ -31,7 +31,7 @@ jobs: sudo curl -L https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \ sudo chmod +x /usr/local/bin/hadolint env: - HADOLINT_VERSION: 2.12.0 + HADOLINT_VERSION: 2.13.1 # End Download 
hadolint # Checkout the repo - name: Checkout diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 114fa00f..03fff1e4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -204,7 +204,7 @@ jobs: # Attest container images - name: Attest - docker.io - ${{ matrix.base_image }} if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}} - uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # v2.4.0 + uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0 with: subject-name: ${{ vars.DOCKERHUB_REPO }} subject-digest: ${{ env.DIGEST_SHA }} @@ -212,7 +212,7 @@ jobs: - name: Attest - ghcr.io - ${{ matrix.base_image }} if: ${{ env.HAVE_GHCR_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}} - uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # v2.4.0 + uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0 with: subject-name: ${{ vars.GHCR_REPO }} subject-digest: ${{ env.DIGEST_SHA }} @@ -220,7 +220,7 @@ jobs: - name: Attest - quay.io - ${{ matrix.base_image }} if: ${{ env.HAVE_QUAY_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}} - uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # v2.4.0 + uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0 with: subject-name: ${{ vars.QUAY_REPO }} subject-digest: ${{ env.DIGEST_SHA }} @@ -299,7 +299,7 @@ jobs: path: vaultwarden-armv6-${{ matrix.base_image }} - name: "Attest artifacts ${{ matrix.base_image }}" - uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # v2.4.0 + uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0 with: subject-path: vaultwarden-* # End Upload artifacts to Github Actions diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 0d52da5a..d9e185d3 
100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -36,7 +36,7 @@ jobs: persist-credentials: false - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.0 + b6643a2 + uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # 0.33.1 env: TRIVY_DB_REPOSITORY: docker.io/aquasec/trivy-db:2,public.ecr.aws/aquasecurity/trivy-db:2,ghcr.io/aquasecurity/trivy-db:2 TRIVY_JAVA_DB_REPOSITORY: docker.io/aquasec/trivy-java-db:1,public.ecr.aws/aquasecurity/trivy-java-db:1,ghcr.io/aquasecurity/trivy-java-db:1 @@ -48,6 +48,6 @@ jobs: severity: CRITICAL,HIGH - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 + uses: github/codeql-action/upload-sarif@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 with: sarif_file: 'trivy-results.sarif' diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index fde1f217..6f94aae6 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -21,7 +21,7 @@ jobs: persist-credentials: false - name: Run zizmor - uses: zizmorcore/zizmor-action@5ca5fc7a4779c5263a3ffa0e1f693009994446d1 # v0.1.2 + uses: zizmorcore/zizmor-action@e673c3917a1aef3c65c972347ed84ccd013ecda4 # v0.2.0 with: # intentionally not scanning the entire repository, # since it contains integration tests. 
diff --git a/Cargo.lock b/Cargo.lock index 39a3d942..f984b713 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -70,12 +70,6 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - [[package]] name = "android_system_properties" version = "0.1.5" @@ -87,9 +81,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.99" +version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "argon2" @@ -167,27 +161,22 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.28" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6448dfb3960f0b038e88c781ead1e7eb7929dfc3a71a1336ec9086c00f6d1e75" +checksum = "977eb15ea9efd848bb8a4a1a2500347ed7f0bf794edf0dc3ddcf439f43d36b23" dependencies = [ - "brotli", "compression-codecs", "compression-core", - "flate2", "futures-core", - "memchr", "pin-project-lite", "tokio", - "zstd", - "zstd-safe", ] [[package]] name = "async-executor" -version = "1.13.2" +version = "1.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb812ffb58524bdd10860d7d974e2f01cc0950c2438a74ee5ec2e2280c6c4ffa" +checksum = "497c00e0fd83a72a79a39fcbd8e3e2f055d6f6c7e025f3b3d91f4f8e76527fb8" dependencies = [ "async-task", "concurrent-queue", @@ -214,11 +203,11 @@ dependencies = [ [[package]] name = "async-io" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"19634d6336019ef220f09fd31168ce5c184b295cbf80345437cc36094ef223ca" +checksum = "456b8a8feb6f42d237746d4b3e9a178494627745c3c56c6ea55d92ba50d026fc" dependencies = [ - "async-lock", + "autocfg", "cfg-if", "concurrent-queue", "futures-io", @@ -227,7 +216,7 @@ dependencies = [ "polling", "rustix", "slab", - "windows-sys 0.60.2", + "windows-sys 0.61.0", ] [[package]] @@ -243,9 +232,9 @@ dependencies = [ [[package]] name = "async-process" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65daa13722ad51e6ab1a1b9c01299142bc75135b337923cfa10e79bbbd669f00" +checksum = "fc50921ec0055cdd8a16de48773bfeec5c972598674347252c0399676be7da75" dependencies = [ "async-channel 2.5.0", "async-io", @@ -261,9 +250,9 @@ dependencies = [ [[package]] name = "async-signal" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f567af260ef69e1d52c2b560ce0ea230763e6fbb9214a85d768760a920e3e3c1" +checksum = "43c070bbf59cd3570b6b2dd54cd772527c7c3620fce8be898406dd3ed6adc64c" dependencies = [ "async-io", "async-lock", @@ -274,7 +263,7 @@ dependencies = [ "rustix", "signal-hook-registry", "slab", - "windows-sys 0.60.2", + "windows-sys 0.61.0", ] [[package]] @@ -372,9 +361,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-config" -version = "1.8.5" +version = "1.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c478f5b10ce55c9a33f87ca3404ca92768b144fc1bfdede7c0121214a8283a25" +checksum = "8bc1b40fb26027769f16960d2f4a6bc20c4bb755d403e552c8c1a73af433c246" dependencies = [ "aws-credential-types", "aws-runtime", @@ -402,9 +391,9 @@ dependencies = [ [[package]] name = "aws-credential-types" -version = "1.2.5" +version = "1.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1541072f81945fa1251f8795ef6c92c4282d74d59f88498ae7d4bf00f0ebdad9" +checksum = 
"d025db5d9f52cbc413b167136afb3d8aeea708c0d8884783cf6253be5e22f6f2" dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", @@ -438,9 +427,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.81.0" +version = "1.84.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79ede098271e3471036c46957cba2ba30888f53bda2515bf04b560614a30a36e" +checksum = "357a841807f6b52cb26123878b3326921e2a25faca412fabdd32bd35b7edd5d3" dependencies = [ "aws-credential-types", "aws-runtime", @@ -460,9 +449,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.82.0" +version = "1.85.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43326f724ba2cc957e6f3deac0ca1621a3e5d4146f5970c24c8a108dac33070f" +checksum = "67e05f33b6c9026fecfe9b3b6740f34d41bc6ff641a6a32dabaab60209245b75" dependencies = [ "aws-credential-types", "aws-runtime", @@ -482,9 +471,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.84.0" +version = "1.86.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91abcdbfb48c38a0419eb75e0eac772a4783a96750392680e4f3c25a8a0535b9" +checksum = "e7d835f123f307cafffca7b9027c14979f1d403b417d8541d67cf252e8a21e35" dependencies = [ "aws-credential-types", "aws-runtime", @@ -558,9 +547,9 @@ dependencies = [ [[package]] name = "aws-smithy-json" -version = "0.61.4" +version = "0.61.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a16e040799d29c17412943bdbf488fd75db04112d0c0d4b9290bacf5ae0014b9" +checksum = "eaa31b350998e703e9826b2104dd6f63be0508666e1aba88137af060e8944047" dependencies = [ "aws-smithy-types", ] @@ -586,9 +575,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "1.9.0" +version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3d57c8b53a72d15c8e190475743acf34e4996685e346a3448dd54ef696fc6e0" +checksum = 
"4fa63ad37685ceb7762fa4d73d06f1d5493feb88e3f27259b9ed277f4c01b185" dependencies = [ "aws-smithy-async", "aws-smithy-http", @@ -762,9 +751,9 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bitflags" -version = "2.9.3" +version = "2.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34efbcccd345379ca2868b2b2c9d3782e9cc58ba87bc7d79d5b53d9c9ae6f25d" +checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" [[package]] name = "blake2" @@ -891,7 +880,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9225bdcf4e4a9a4c08bf16607908eb2fbf746828d5e0b5e019726dbf6571f201" dependencies = [ - "darling", + "darling 0.20.11", "proc-macro2", "quote", "syn", @@ -905,11 +894,11 @@ checksum = "ade8366b8bd5ba243f0a58f036cc0ca8a2f069cff1a2351ef1cac6b083e16fc0" [[package]] name = "camino" -version = "1.1.11" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d07aa9a93b00c76f71bc35d598bed923f6d4f3a9ca5c24b7737ae1a292841c0" +checksum = "e1de8bc0aa9e9385ceb3bf0c152e3a9b9544f6c4a912c8ae504e80c1f0368603" dependencies = [ - "serde", + "serde_core", ] [[package]] @@ -945,10 +934,11 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.34" +version = "1.2.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42bc4aea80032b7bf409b0bc7ccad88853858911b7713a8062fdc0623867bedc" +checksum = "80f41ae168f955c12fb8960b057d70d0ca153fb83182b57d86380443527be7e9" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -968,17 +958,16 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +checksum = 
"145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ - "android-tzdata", "iana-time-zone", "js-sys", "num-traits", "serde", "wasm-bindgen", - "windows-link", + "windows-link 0.2.0", ] [[package]] @@ -1019,25 +1008,23 @@ checksum = "b9e769b5c8c8283982a987c6e948e540254f1058d5a74b8794914d4ef5fc2a24" [[package]] name = "compression-codecs" -version = "0.4.28" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46cc6539bf1c592cff488b9f253b30bc0ec50d15407c2cf45e27bd8f308d5905" +checksum = "485abf41ac0c8047c07c87c72c8fb3eb5197f6e9d7ded615dfd1a00ae00a0f64" dependencies = [ "brotli", "compression-core", "flate2", - "futures-core", "memchr", - "pin-project-lite", "zstd", "zstd-safe", ] [[package]] name = "compression-core" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2957e823c15bde7ecf1e8b64e537aa03a6be5fda0e2334e99887669e75b12e01" +checksum = "e47641d3deaf41fb1538ac1f54735925e275eaf3bf4d55c81b137fba797e5cbb" [[package]] name = "concurrent-queue" @@ -1103,6 +1090,24 @@ dependencies = [ "url", ] +[[package]] +name = "cookie_store" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fc4bff745c9b4c7fb1e97b25d13153da2bc7796260141df62378998d070207f" +dependencies = [ + "cookie", + "document-features", + "idna", + "log", + "publicsuffix", + "serde", + "serde_derive", + "serde_json", + "time", + "url", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -1258,8 +1263,18 @@ version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.20.11", + "darling_macro 0.20.11", +] + +[[package]] +name = "darling" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +dependencies = [ + "darling_core 0.21.3", + "darling_macro 0.21.3", ] [[package]] @@ -1276,13 +1291,38 @@ dependencies = [ "syn", ] +[[package]] +name = "darling_core" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn", +] + [[package]] name = "darling_macro" version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ - "darling_core", + "darling_core 0.20.11", + "quote", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +dependencies = [ + "darling_core 0.21.3", "quote", "syn", ] @@ -1353,9 +1393,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.4.0" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +checksum = "d630bccd429a5bb5a64b5e94f693bfc48c9f8566418fda4c494cc94f911f87cc" dependencies = [ "powerfmt", "serde", @@ -1376,7 +1416,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" dependencies = [ - "darling", + "darling 0.20.11", "proc-macro2", "quote", "syn", @@ -1448,15 +1488,16 @@ dependencies = [ [[package]] name = "diesel" -version = "2.2.12" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229850a212cd9b84d4f0290ad9d294afc0ae70fccaa8949dbe8b43ffafa1e20c" +checksum = 
"e8496eeb328dce26ee9d9b73275d396d9bddb433fa30106cf6056dd8c3c2764c" dependencies = [ "bigdecimal", "bitflags", "byteorder", "chrono", "diesel_derives", + "downcast-rs", "itoa", "libsqlite3-sys", "mysqlclient-sys", @@ -1466,6 +1507,7 @@ dependencies = [ "percent-encoding", "pq-sys", "r2d2", + "sqlite-wasm-rs", "time", "url", ] @@ -1483,9 +1525,9 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.2.7" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b96984c469425cb577bf6f17121ecb3e4fe1e81de5d8f780dd372802858d756" +checksum = "09af0e983035368439f1383011cd87c46f41da81d0f21dc3727e2857d5a43c8e" dependencies = [ "diesel_table_macro_syntax", "dsl_auto_type", @@ -1494,21 +1536,11 @@ dependencies = [ "syn", ] -[[package]] -name = "diesel_logger" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8074833fffb675cf22a6ee669124f65f02971e48dd520bb80c7473ff70aeaf95" -dependencies = [ - "diesel", - "log", -] - [[package]] name = "diesel_migrations" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a73ce704bad4231f001bff3314d91dce4aba0770cee8b233991859abc15c1f6" +checksum = "ee060f709c3e3b1cadd83fcd0f61711f7a8cf493348f758d3a1c1147d70b3c97" dependencies = [ "diesel", "migrations_internals", @@ -1517,9 +1549,9 @@ dependencies = [ [[package]] name = "diesel_table_macro_syntax" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" +checksum = "fe2444076b48641147115697648dc743c2c00b61adade0f01ce67133c7babe8c" dependencies = [ "syn", ] @@ -1571,13 +1603,19 @@ version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" +[[package]] +name = "downcast-rs" +version = "2.0.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "117240f60069e65410b3ae1bb213295bd828f707b5bec6596a1afc8793ce0cbc" + [[package]] name = "dsl_auto_type" -version = "0.1.3" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "139ae9aca7527f85f26dd76483eb38533fd84bd571065da1739656ef71c5ff5b" +checksum = "dd122633e4bef06db27737f21d3738fb89c8f6d5360d6d9d7635dda142a7757e" dependencies = [ - "darling", + "darling 0.21.3", "either", "heck", "proc-macro2", @@ -1710,12 +1748,12 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.60.2", + "windows-sys 0.61.0", ] [[package]] @@ -1802,6 +1840,12 @@ dependencies = [ "version_check", ] +[[package]] +name = "find-msvc-tools" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ced73b1dacfc750a6db6c0a0c3a3853c8b41997e2e2c563dc90804ae6867959" + [[package]] name = "flate2" version = "1.1.2" @@ -1848,6 +1892,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fragile" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619" + [[package]] name = "futures" version = "0.3.31" @@ -2017,7 +2067,7 @@ dependencies = [ "js-sys", "libc", "r-efi", - "wasi 0.14.2+wasi-0.2.4", + "wasi 0.14.7+wasi-0.2.4", "wasm-bindgen", ] @@ -2075,7 +2125,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d9e3df7f0222ce5184154973d247c591d9aadc28ce7a73c6cd31100c9facff6" dependencies = [ "codemap", - "indexmap 2.11.0", + "indexmap 2.11.4", "lasso", 
"once_cell", "phf 0.11.3", @@ -2104,7 +2154,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.3.1", - "indexmap 2.11.0", + "indexmap 2.11.4", "slab", "tokio", "tokio-util", @@ -2161,6 +2211,12 @@ dependencies = [ "foldhash", ] +[[package]] +name = "hashbrown" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" + [[package]] name = "heck" version = "0.5.0" @@ -2260,7 +2316,7 @@ checksum = "a56f203cd1c76362b69e3863fd987520ac36cf70a8c92627449b2f64a8cf7d65" dependencies = [ "cfg-if", "libc", - "windows-link", + "windows-link 0.1.3", ] [[package]] @@ -2394,11 +2450,11 @@ dependencies = [ "http 1.3.1", "hyper 1.7.0", "hyper-util", - "rustls 0.23.31", + "rustls 0.23.32", "rustls-native-certs", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.2", + "tokio-rustls 0.26.3", "tower-service", "webpki-roots", ] @@ -2421,9 +2477,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e" +checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" dependencies = [ "base64 0.22.1", "bytes", @@ -2447,9 +2503,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.63" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2457,7 +2513,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core", + "windows-core 0.62.0", ] [[package]] @@ -2595,13 +2651,14 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.11.0" +version = "2.11.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", - "hashbrown 0.15.5", + "hashbrown 0.16.0", "serde", + "serde_core", ] [[package]] @@ -2714,9 +2771,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "852f13bec5eba4ba9afbeb93fd7c13fe56147f055939ae21c43a29a0ecb2702e" dependencies = [ "once_cell", "wasm-bindgen", @@ -2786,12 +2843,12 @@ dependencies = [ "nom 8.0.0", "percent-encoding", "quoted_printable", - "rustls 0.23.31", + "rustls 0.23.32", "rustls-native-certs", "serde", "socket2 0.6.0", "tokio", - "tokio-rustls 0.26.2", + "tokio-rustls 0.26.3", "tracing", "url", ] @@ -2831,9 +2888,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.9.4" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" @@ -2859,9 +2916,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.27" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" dependencies = [ "value-bag", ] @@ -2910,11 +2967,11 @@ dependencies = [ [[package]] name = "matchers" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +checksum = 
"d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" dependencies = [ - "regex-automata 0.1.10", + "regex-automata", ] [[package]] @@ -2935,19 +2992,19 @@ checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" [[package]] name = "migrations_internals" -version = "2.2.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bda1634d70d5bd53553cf15dca9842a396e8c799982a3ad22998dc44d961f24" +checksum = "36c791ecdf977c99f45f23280405d7723727470f6689a5e6dbf513ac547ae10d" dependencies = [ "serde", - "toml 0.9.5", + "toml 0.9.7", ] [[package]] name = "migrations_macros" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb161cc72176cb37aa47f1fc520d3ef02263d67d661f44f05d05a079e1237fd" +checksum = "36fc5ac76be324cfd2d3f2cf0fdf5d5d3c4f14ed8aaebadb09e304ba42282703" dependencies = [ "migrations_internals", "proc-macro2", @@ -3103,12 +3160,11 @@ checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" [[package]] name = "nu-ansi-term" -version = "0.46.0" +version = "0.50.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" dependencies = [ - "overload", - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -3405,12 +3461,6 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - [[package]] name = "p256" version = "0.13.2" @@ -3547,9 +3597,9 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pest" 
-version = "2.8.1" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323" +checksum = "21e0a3a33733faeaf8651dfee72dd0f388f0c8e5ad496a3478fa5a922f49cfa8" dependencies = [ "memchr", "thiserror 2.0.16", @@ -3558,9 +3608,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.8.1" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb056d9e8ea77922845ec74a1c4e8fb17e7c218cc4fc11a15c5d25e189aa40bc" +checksum = "bc58706f770acb1dbd0973e6530a3cff4746fb721207feb3a8a6064cd0b6c663" dependencies = [ "pest", "pest_generator", @@ -3568,9 +3618,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.8.1" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87e404e638f781eb3202dc82db6760c8ae8a1eeef7fb3fa8264b2ef280504966" +checksum = "6d4f36811dfe07f7b8573462465d5cb8965fffc2e71ae377a33aecf14c2c9a2f" dependencies = [ "pest", "pest_meta", @@ -3581,9 +3631,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.8.1" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edd1101f170f5903fde0914f899bb503d9ff5271d7ba76bbb70bea63690cc0d5" +checksum = "42919b05089acbd0a5dcd5405fb304d17d1053847b81163d09c4ad18ce8e8420" dependencies = [ "pest", "sha2", @@ -3724,16 +3774,16 @@ checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "polling" -version = "3.10.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5bd19146350fe804f7cb2669c851c03d69da628803dab0d98018142aaa5d829" +checksum = "5d0e4f59085d47d8241c88ead0f274e8a0cb551f3625263c05eb8dd897c34218" dependencies = [ "cfg-if", "concurrent-queue", "hermit-abi", "pin-project-lite", "rustix", - "windows-sys 0.60.2", + "windows-sys 0.61.0", ] [[package]] @@ -3744,9 +3794,9 @@ checksum 
= "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" [[package]] name = "potential_utf" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" dependencies = [ "zerovec", ] @@ -3876,9 +3926,9 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" dependencies = [ "bytes", "cfg_aliases", @@ -3886,8 +3936,8 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.23.31", - "socket2 0.5.10", + "rustls 0.23.32", + "socket2 0.6.0", "thiserror 2.0.16", "tokio", "tracing", @@ -3896,9 +3946,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.12" +version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" dependencies = [ "bytes", "getrandom 0.3.3", @@ -3906,7 +3956,7 @@ dependencies = [ "rand 0.9.2", "ring", "rustc-hash", - "rustls 0.23.31", + "rustls 0.23.32", "rustls-pki-types", "slab", "thiserror 2.0.16", @@ -3917,16 +3967,16 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.13" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.5.10", + "socket2 0.6.0", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] 
[[package]] @@ -4022,9 +4072,9 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "11.5.0" +version = "11.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6df7ab838ed27997ba19a4664507e6f82b41fe6e20be42929332156e5e85146" +checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186" dependencies = [ "bitflags", ] @@ -4066,17 +4116,8 @@ checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.10", - "regex-syntax 0.8.6", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" -dependencies = [ - "regex-syntax 0.6.29", + "regex-automata", + "regex-syntax", ] [[package]] @@ -4087,7 +4128,7 @@ checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.6", + "regex-syntax", ] [[package]] @@ -4096,12 +4137,6 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "943f41321c63ef1c92fd763bfe054d2668f7f225a5c29f0105903dc2fc04ba30" -[[package]] -name = "regex-syntax" -version = "0.6.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - [[package]] name = "regex-syntax" version = "0.8.6" @@ -4162,7 +4197,7 @@ dependencies = [ "base64 0.22.1", "bytes", "cookie", - "cookie_store", + "cookie_store 0.21.1", "encoding_rs", "futures-channel", "futures-core", @@ -4182,7 +4217,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.31", + "rustls 0.23.32", "rustls-native-certs", "rustls-pki-types", "serde", @@ -4191,7 +4226,7 @@ dependencies = [ "sync_wrapper", "tokio", "tokio-native-tls", - "tokio-rustls 0.26.2", + "tokio-rustls 0.26.3", 
"tokio-util", "tower", "tower-http", @@ -4206,9 +4241,9 @@ dependencies = [ [[package]] name = "resolv-conf" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3" +checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799" [[package]] name = "rfc6979" @@ -4269,7 +4304,7 @@ dependencies = [ "either", "figment", "futures", - "indexmap 2.11.0", + "indexmap 2.11.4", "log", "memchr", "multer", @@ -4301,7 +4336,7 @@ checksum = "575d32d7ec1a9770108c879fc7c47815a80073f96ca07ff9525a94fcede1dd46" dependencies = [ "devise", "glob", - "indexmap 2.11.0", + "indexmap 2.11.4", "proc-macro2", "quote", "rocket_http", @@ -4321,7 +4356,7 @@ dependencies = [ "futures", "http 0.2.12", "hyper 0.14.32", - "indexmap 2.11.0", + "indexmap 2.11.4", "log", "memchr", "pear", @@ -4394,13 +4429,12 @@ dependencies = [ [[package]] name = "rust-ini" -version = "0.21.1" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e310ef0e1b6eeb79169a1171daf9abcb87a2e17c03bee2c4bb100b55c75409f" +checksum = "796e8d2b6696392a43bea58116b667fb4c29727dc5abd27d6acf338bb4f688c7" dependencies = [ "cfg-if", "ordered-multimap", - "trim-in-place", ] [[package]] @@ -4435,15 +4469,15 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.8" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ "bitflags", "errno", "libc", "linux-raw-sys", - "windows-sys 0.60.2", + "windows-sys 0.61.0", ] [[package]] @@ -4460,15 +4494,15 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.31" +version = "0.23.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" +checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" dependencies = [ "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.4", + "rustls-webpki 0.103.6", "subtle", "zeroize", ] @@ -4482,7 +4516,7 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.3.0", + "security-framework 3.4.0", ] [[package]] @@ -4516,9 +4550,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.4" +version = "0.103.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" +checksum = "8572f3c2cb9934231157b45499fc41e1f58c589fdfb81a844ba873265e80f8eb" dependencies = [ "ring", "rustls-pki-types", @@ -4557,11 +4591,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.0", ] [[package]] @@ -4659,9 +4693,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80fb1d92c5028aa318b4b8bd7302a5bfcf48be96a37fc6fc790f806b0004ee0c" +checksum = "60b369d18893388b345804dc0007963c99b7d665ae71d275812d828c6f089640" dependencies = [ "bitflags", "core-foundation 0.10.1", @@ -4672,9 +4706,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.14.0" +version = "2.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" 
dependencies = [ "core-foundation-sys", "libc", @@ -4682,19 +4716,21 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" dependencies = [ "serde", + "serde_core", ] [[package]] name = "serde" -version = "1.0.219" +version = "1.0.225" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "fd6c24dee235d0da097043389623fb913daddf92c76e9f5a1db88607a0bcbd1d" dependencies = [ + "serde_core", "serde_derive", ] @@ -4718,11 +4754,20 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_core" +version = "1.0.225" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "659356f9a0cb1e529b24c01e43ad2bdf520ec4ceaf83047b83ddcc2251f96383" +dependencies = [ + "serde_derive", +] + [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.225" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "0ea936adf78b1f766949a4977b91d2f5595825bd6ec079aa9543ad2685fc4516" dependencies = [ "proc-macro2", "quote", @@ -4731,24 +4776,26 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.143" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ "itoa", "memchr", "ryu", "serde", + "serde_core", ] [[package]] name = "serde_path_to_error" -version = "0.1.17" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" +checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" dependencies = [ "itoa", "serde", + "serde_core", ] [[package]] @@ -4771,11 +4818,11 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40734c41988f7306bb04f0ecf60ec0f3f1caa34290e4e8ea471dcd3346483b83" +checksum = "5417783452c2be558477e104686f7de5dae53dba813c28435e0e70f82d9b04ee" dependencies = [ - "serde", + "serde_core", ] [[package]] @@ -4792,15 +4839,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.14.0" +version = "3.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" +checksum = "c522100790450cf78eeac1507263d0a350d4d5b30df0c8e1fe051a10c22b376e" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.11.0", + "indexmap 2.11.4", "schemars 0.9.0", "schemars 1.0.4", "serde", @@ -4812,11 +4859,11 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.14.0" +version = "3.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" +checksum = "327ada00f7d64abaac1e55a6911e90cf665aa051b9a561c7006c157f4633135e" dependencies = [ - "darling", + "darling 0.21.3", "proc-macro2", "quote", "syn", @@ -4978,6 +5025,24 @@ dependencies = [ "der", ] +[[package]] +name = "sqlite-wasm-rs" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0894a1b91dc660fbf1e6ea6f287562708e01ca1a18fa4e2c6dae0df5a05199c5" +dependencies = [ + "fragile", + "js-sys", + "once_cell", + "parking_lot", + "thiserror 2.0.16", + "tokio", + "wasm-array-cp", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "stable-pattern" 
version = "0.1.0" @@ -5113,15 +5178,15 @@ checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" [[package]] name = "tempfile" -version = "3.21.0" +version = "3.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15b61f8f20e3a6f7e0649d825294eaf317edce30f82cf6026e7e4cb9222a7d1e" +checksum = "84fa4d11fadde498443cca10fd3ac23c951f0dc59e080e9f4b93d4df4e4eea53" dependencies = [ "fastrand", "getrandom 0.3.3", "once_cell", "rustix", - "windows-sys 0.60.2", + "windows-sys 0.61.0", ] [[package]] @@ -5184,9 +5249,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.41" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" dependencies = [ "deranged", "itoa", @@ -5201,15 +5266,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" [[package]] name = "time-macros" -version = "0.2.22" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" dependencies = [ "num-conv", "time-core", @@ -5302,11 +5367,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.2" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +checksum = "05f63835928ca123f1bef57abbcd23bb2ba0ac9ae1235f1e65bda0d06e7786bd" dependencies = [ - "rustls 0.23.31", + "rustls 0.23.32", "tokio", ] @@ -5361,16 
+5426,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.9.5" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75129e1dc5000bfbaa9fee9d1b21f974f9fbad9daec557a521ee6e080825f6e8" +checksum = "00e5e5d9bf2475ac9d4f0d9edab68cc573dc2fd644b0dba36b0c30a92dd9eaa0" dependencies = [ - "indexmap 2.11.0", - "serde", - "serde_spanned 1.0.0", - "toml_datetime 0.7.0", + "serde_core", + "serde_spanned 1.0.2", + "toml_datetime 0.7.2", "toml_parser", - "toml_writer", "winnow 0.7.13", ] @@ -5385,11 +5448,11 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bade1c3e902f58d73d3f294cd7f20391c1cb2fbcb643b73566bc773971df91e3" +checksum = "32f1085dec27c2b6632b04c80b3bb1b4300d6495d1e129693bdda7d91e72eec1" dependencies = [ - "serde", + "serde_core", ] [[package]] @@ -5398,7 +5461,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.11.0", + "indexmap 2.11.4", "serde", "serde_spanned 0.6.9", "toml_datetime 0.6.11", @@ -5408,9 +5471,9 @@ dependencies = [ [[package]] name = "toml_parser" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b551886f449aa90d4fe2bdaa9f4a2577ad2dde302c61ecf262d80b116db95c10" +checksum = "4cf893c33be71572e0e9aa6dd15e6677937abd686b066eac3f8cd3531688a627" dependencies = [ "winnow 0.7.13", ] @@ -5421,12 +5484,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" -[[package]] -name = "toml_writer" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc842091f2def52017664b53082ecbbeb5c7731092bad69d2c63050401dfd64" - [[package]] name = 
"totp-lite" version = "2.0.1" @@ -5530,14 +5587,14 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" dependencies = [ "matchers", "nu-ansi-term", "once_cell", - "regex", + "regex-automata", "sharded-slab", "smallvec", "thread_local", @@ -5546,12 +5603,6 @@ dependencies = [ "tracing-log", ] -[[package]] -name = "trim-in-place" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343e926fc669bc8cde4fa3129ab681c63671bae288b1f1081ceee6d9d37904fc" - [[package]] name = "triomphe" version = "0.1.14" @@ -5622,9 +5673,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" [[package]] name = "unicode-xid" @@ -5670,9 +5721,9 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.18.0" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f33196643e165781c20a5ead5582283a7dacbb87855d867fbc2df3f81eddc1be" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ "getrandom 0.3.3", "js-sys", @@ -5707,14 +5758,13 @@ dependencies = [ "chrono", "chrono-tz", "cookie", - "cookie_store", + "cookie_store 0.22.0", "dashmap 6.1.0", "data-encoding", "data-url", "derive_more", "diesel", "diesel-derive-newtype", - "diesel_logger", "diesel_migrations", "dotenvy", "email_address", @@ -5817,30 +5867,50 @@ checksum = 
"ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" -version = "0.14.2+wasi-0.2.4" +version = "0.14.7+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +checksum = "883478de20367e224c0090af9cf5f9fa85bed63a95c1abf3afc5c083ebc06e8c" dependencies = [ - "wit-bindgen-rt", + "wasip2", +] + +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasm-array-cp" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb633b3e235f0ebe0a35162adc1e0293fc4b7e3f3a6fc7b5374d80464267ff84" +dependencies = [ + "js-sys", + "wasm-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "ab10a69fbd0a177f5f649ad4d8d3305499c42bab9aef2f7ff592d0ec8f833819" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", + "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "0bb702423545a6007bbc368fde243ba47ca275e549c8a28617f56f6ba53b1d1c" dependencies = [ "bumpalo", "log", @@ -5852,9 +5922,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = 
"a0b221ff421256839509adbb55998214a70d829d3a28c69b4a6672e9d2a42f67" dependencies = [ "cfg-if", "js-sys", @@ -5865,9 +5935,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "fc65f4f411d91494355917b605e1480033152658d71f722a90647f56a70c88a0" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5875,9 +5945,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "ffc003a991398a8ee604a401e194b6b3a39677b3173d6e74495eb51b82e99a32" dependencies = [ "proc-macro2", "quote", @@ -5888,9 +5958,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "293c37f4efa430ca14db3721dfbe48d8c33308096bd44d80ebaa775ab71ba1cf" dependencies = [ "unicode-ident", ] @@ -5910,9 +5980,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "fbe734895e869dc429d78c4b433f8d17d95f8d05317440b4fad5ab2d33e596dc" dependencies = [ "js-sys", "wasm-bindgen", @@ -6040,11 +6110,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0978bf7171b3d90bac376700cb56d606feb40f251a475a5d6634613564460b22" +checksum = 
"c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.60.2", + "windows-sys 0.61.0", ] [[package]] @@ -6069,9 +6139,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" dependencies = [ "windows-collections", - "windows-core", + "windows-core 0.61.2", "windows-future", - "windows-link", + "windows-link 0.1.3", "windows-numerics", ] @@ -6081,7 +6151,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" dependencies = [ - "windows-core", + "windows-core 0.61.2", ] [[package]] @@ -6092,9 +6162,22 @@ checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ "windows-implement", "windows-interface", - "windows-link", - "windows-result", - "windows-strings", + "windows-link 0.1.3", + "windows-result 0.3.4", + "windows-strings 0.4.2", +] + +[[package]] +name = "windows-core" +version = "0.62.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57fe7168f7de578d2d8a05b07fd61870d2e73b4020e9f49aa00da8471723497c" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link 0.2.0", + "windows-result 0.4.0", + "windows-strings 0.5.0", ] [[package]] @@ -6103,8 +6186,8 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" dependencies = [ - "windows-core", - "windows-link", + "windows-core 0.61.2", + "windows-link 0.1.3", "windows-threading", ] @@ -6136,14 +6219,20 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" +[[package]] +name = "windows-link" +version = "0.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" + [[package]] name = "windows-numerics" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" dependencies = [ - "windows-core", - "windows-link", + "windows-core 0.61.2", + "windows-link 0.1.3", ] [[package]] @@ -6152,9 +6241,9 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" dependencies = [ - "windows-link", - "windows-result", - "windows-strings", + "windows-link 0.1.3", + "windows-result 0.3.4", + "windows-strings 0.4.2", ] [[package]] @@ -6163,7 +6252,16 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ - "windows-link", + "windows-link 0.1.3", +] + +[[package]] +name = "windows-result" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7084dcc306f89883455a206237404d3eaf961e5bd7e0f312f7c91f57eb44167f" +dependencies = [ + "windows-link 0.2.0", ] [[package]] @@ -6172,7 +6270,16 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ - "windows-link", + "windows-link 0.1.3", +] + +[[package]] +name = "windows-strings" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7218c655a553b0bed4426cf54b20d7ba363ef543b52d515b3e48d7fd55318dda" +dependencies = [ + "windows-link 0.2.0", ] [[package]] @@ -6211,6 +6318,15 @@ dependencies = [ "windows-targets 0.53.3", ] +[[package]] +name = "windows-sys" +version = "0.61.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e201184e40b2ede64bc2ea34968b28e33622acdbbf37104f0e4a33f7abe657aa" +dependencies = [ + "windows-link 0.2.0", +] + [[package]] name = "windows-targets" version = "0.48.5" @@ -6248,7 +6364,7 @@ version = "0.53.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" dependencies = [ - "windows-link", + "windows-link 0.1.3", "windows_aarch64_gnullvm 0.53.0", "windows_aarch64_msvc 0.53.0", "windows_i686_gnu 0.53.0", @@ -6265,7 +6381,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" dependencies = [ - "windows-link", + "windows-link 0.1.3", ] [[package]] @@ -6441,13 +6557,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904" [[package]] -name = "wit-bindgen-rt" -version = "0.39.0" +name = "wit-bindgen" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" -dependencies = [ - "bitflags", -] +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "writeable" @@ -6535,18 +6648,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.26" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.26" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +checksum = 
"88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", @@ -6633,9 +6746,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.15+zstd.1.5.7" +version = "2.0.16+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index c62bc929..c17bb81f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,11 @@ publish = false build = "build.rs" [features] -# default = ["sqlite"] +default = [ + # "sqlite", + # "mysql", + # "postgresql", +] # Empty to keep compatibility, prefer to set USE_SYSLOG=true enable_syslog = [] mysql = ["diesel/mysql", "diesel_migrations/mysql"] @@ -27,11 +31,6 @@ vendored_openssl = ["openssl/vendored"] # Enable MiMalloc memory allocator to replace the default malloc # This can improve performance for Alpine builds enable_mimalloc = ["dep:mimalloc"] -# This is a development dependency, and should only be used during development! -# It enables the usage of the diesel_logger crate, which is able to output the generated queries. -# You also need to set an env variable `QUERY_LOGGER=1` to fully activate this so you do not have to re-compile -# if you want to turn off the logging for a specific run. 
-query_logger = ["dep:diesel_logger"] s3 = ["opendal/services-s3", "dep:aws-config", "dep:aws-credential-types", "dep:aws-smithy-runtime-api", "dep:anyhow", "dep:http", "dep:reqsign"] # OIDC specific features @@ -50,7 +49,7 @@ syslog = "7.0.0" macros = { path = "./macros" } # Logging -log = "0.4.27" +log = "0.4.28" fern = { version = "0.7.1", features = ["syslog-7", "reopen-1"] } tracing = { version = "0.1.41", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work @@ -81,13 +80,12 @@ tokio = { version = "1.47.1", features = ["rt-multi-thread", "fs", "io-util", "p tokio-util = { version = "0.7.16", features = ["compat"]} # A generic serialization/deserialization framework -serde = { version = "1.0.219", features = ["derive"] } -serde_json = "1.0.143" +serde = { version = "1.0.225", features = ["derive"] } +serde_json = "1.0.145" # A safe, extensible ORM and Query builder -diesel = { version = "2.2.12", features = ["chrono", "r2d2", "numeric"] } -diesel_migrations = "2.2.0" -diesel_logger = { version = "0.4.0", optional = true } +diesel = { version = "2.3.2", features = ["chrono", "r2d2", "numeric"] } +diesel_migrations = "2.3.0" derive_more = { version = "2.0.1", features = ["from", "into", "as_ref", "deref", "display"] } diesel-derive-newtype = "2.1.2" @@ -101,12 +99,12 @@ ring = "0.17.14" subtle = "2.6.1" # UUID generation -uuid = { version = "1.18.0", features = ["v4"] } +uuid = { version = "1.18.1", features = ["v4"] } # Date and time libraries -chrono = { version = "0.4.41", features = ["clock", "serde"], default-features = false } +chrono = { version = "0.4.42", features = ["clock", "serde"], default-features = false } chrono-tz = "0.10.4" -time = "0.3.41" +time = "0.3.44" # Job scheduler job_scheduler_ng = "2.3.0" @@ -157,7 +155,7 @@ cached = { version = "0.56.0", features = ["async"] } # Used for custom short lived cookie jar during favicon extraction cookie = "0.18.1" -cookie_store = "0.21.1" +cookie_store = "0.22.0" # Used by 
U2F, JWT and PostgreSQL openssl = "0.10.73" @@ -174,7 +172,7 @@ openidconnect = { version = "4.0.1", features = ["reqwest", "native-tls"] } mini-moka = "0.10.3" # Check client versions for specific features. -semver = "1.0.26" +semver = "1.0.27" # Allow overriding the default memory allocator # Mainly used for the musl builds, since the default musl malloc is very slow @@ -195,9 +193,9 @@ grass_compiler = { version = "0.13.4", default-features = false } opendal = { version = "0.54.0", features = ["services-fs"], default-features = false } # For retrieving AWS credentials, including temporary SSO credentials -anyhow = { version = "1.0.99", optional = true } -aws-config = { version = "1.8.5", features = ["behavior-version-latest", "rt-tokio", "credentials-process", "sso"], default-features = false, optional = true } -aws-credential-types = { version = "1.2.5", optional = true } +anyhow = { version = "1.0.100", optional = true } +aws-config = { version = "1.8.6", features = ["behavior-version-latest", "rt-tokio", "credentials-process", "sso"], default-features = false, optional = true } +aws-credential-types = { version = "1.2.6", optional = true } aws-smithy-runtime-api = { version = "1.9.0", optional = true } http = { version = "1.3.1", optional = true } reqsign = { version = "0.16.5", optional = true } diff --git a/build.rs b/build.rs index 1dbb1a0b..4a831737 100644 --- a/build.rs +++ b/build.rs @@ -9,8 +9,6 @@ fn main() { println!("cargo:rustc-cfg=mysql"); #[cfg(feature = "postgresql")] println!("cargo:rustc-cfg=postgresql"); - #[cfg(feature = "query_logger")] - println!("cargo:rustc-cfg=query_logger"); #[cfg(feature = "s3")] println!("cargo:rustc-cfg=s3"); @@ -24,7 +22,6 @@ fn main() { println!("cargo::rustc-check-cfg=cfg(sqlite)"); println!("cargo::rustc-check-cfg=cfg(mysql)"); println!("cargo::rustc-check-cfg=cfg(postgresql)"); - println!("cargo::rustc-check-cfg=cfg(query_logger)"); println!("cargo::rustc-check-cfg=cfg(s3)"); // Rerun when these paths are 
changed. @@ -34,9 +31,6 @@ fn main() { println!("cargo:rerun-if-changed=.git/index"); println!("cargo:rerun-if-changed=.git/refs/tags"); - #[cfg(all(not(debug_assertions), feature = "query_logger"))] - compile_error!("Query Logging is only allowed during development, it is not intended for production usage!"); - // Support $BWRS_VERSION for legacy compatibility, but default to $VW_VERSION. // If neither exist, read from git. let maybe_vaultwarden_version = diff --git a/docker/Dockerfile.alpine b/docker/Dockerfile.alpine index 09477b3e..9a13146b 100644 --- a/docker/Dockerfile.alpine +++ b/docker/Dockerfile.alpine @@ -53,9 +53,9 @@ ENV DEBIAN_FRONTEND=noninteractive \ TERM=xterm-256color \ CARGO_HOME="/root/.cargo" \ USER="root" \ - # Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11 - # Debian Bookworm already contains libpq v15 - PQ_LIB_DIR="/usr/local/musl/pq15/lib" + # Use PostgreSQL v17 during Alpine/MUSL builds instead of the default v16 + # Debian Trixie uses libpq v17 + PQ_LIB_DIR="/usr/local/musl/pq17/lib" # Create CARGO_HOME folder and don't download rust docs diff --git a/docker/Dockerfile.j2 b/docker/Dockerfile.j2 index 98a3a3f9..c1f2a032 100644 --- a/docker/Dockerfile.j2 +++ b/docker/Dockerfile.j2 @@ -63,9 +63,9 @@ ENV DEBIAN_FRONTEND=noninteractive \ CARGO_HOME="/root/.cargo" \ USER="root" {%- if base == "alpine" %} \ - # Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11 - # Debian Bookworm already contains libpq v15 - PQ_LIB_DIR="/usr/local/musl/pq15/lib" + # Use PostgreSQL v17 during Alpine/MUSL builds instead of the default v16 + # Debian Trixie uses libpq v17 + PQ_LIB_DIR="/usr/local/musl/pq17/lib" {% endif %} {% if base == "debian" %} diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 34e4ae04..bf318309 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -10,7 +10,7 @@ proc-macro = true [dependencies] quote = "1.0.40" -syn = "2.0.105" +syn = "2.0.106" [lints] workspace = true diff --git 
a/src/api/admin.rs b/src/api/admin.rs index d52e24ef..59c21acf 100644 --- a/src/api/admin.rs +++ b/src/api/admin.rs @@ -20,7 +20,14 @@ use crate::{ }, auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp, Secure}, config::ConfigBuilder, - db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType}, + db::{ + backup_sqlite, get_sql_server_version, + models::{ + Attachment, Cipher, Collection, Device, Event, EventType, Group, Invitation, Membership, MembershipId, + MembershipType, OrgPolicy, OrgPolicyErr, Organization, OrganizationId, SsoUser, TwoFactor, User, UserId, + }, + DbConn, DbConnType, ACTIVE_DB_TYPE, + }, error::{Error, MapResult}, http_client::make_http_request, mail, @@ -75,18 +82,20 @@ pub fn catchers() -> Vec { } } -static DB_TYPE: Lazy<&str> = Lazy::new(|| { - DbConnType::from_url(&CONFIG.database_url()) - .map(|t| match t { - DbConnType::sqlite => "SQLite", - DbConnType::mysql => "MySQL", - DbConnType::postgresql => "PostgreSQL", - }) - .unwrap_or("Unknown") +static DB_TYPE: Lazy<&str> = Lazy::new(|| match ACTIVE_DB_TYPE.get() { + #[cfg(mysql)] + Some(DbConnType::Mysql) => "MySQL", + #[cfg(postgresql)] + Some(DbConnType::Postgresql) => "PostgreSQL", + #[cfg(sqlite)] + Some(DbConnType::Sqlite) => "SQLite", + _ => "Unknown", }); -static CAN_BACKUP: Lazy = - Lazy::new(|| DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false)); +#[cfg(sqlite)] +static CAN_BACKUP: Lazy = Lazy::new(|| ACTIVE_DB_TYPE.get().map(|t| *t == DbConnType::Sqlite).unwrap_or(false)); +#[cfg(not(sqlite))] +static CAN_BACKUP: Lazy = Lazy::new(|| false); #[get("/")] fn admin_disabled() -> &'static str { @@ -284,7 +293,7 @@ struct InviteData { email: String, } -async fn get_user_or_404(user_id: &UserId, conn: &mut DbConn) -> ApiResult { +async fn get_user_or_404(user_id: &UserId, conn: &DbConn) -> ApiResult { if let Some(user) = User::find_by_uuid(user_id, conn).await { Ok(user) } else { @@ -293,15 +302,15 @@ async fn 
get_user_or_404(user_id: &UserId, conn: &mut DbConn) -> ApiResult } #[post("/invite", format = "application/json", data = "")] -async fn invite_user(data: Json, _token: AdminToken, mut conn: DbConn) -> JsonResult { +async fn invite_user(data: Json, _token: AdminToken, conn: DbConn) -> JsonResult { let data: InviteData = data.into_inner(); - if User::find_by_mail(&data.email, &mut conn).await.is_some() { + if User::find_by_mail(&data.email, &conn).await.is_some() { err_code!("User already exists", Status::Conflict.code) } let mut user = User::new(data.email, None); - async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult { + async fn _generate_invite(user: &User, conn: &DbConn) -> EmptyResult { if CONFIG.mail_enabled() { let org_id: OrganizationId = FAKE_ADMIN_UUID.to_string().into(); let member_id: MembershipId = FAKE_ADMIN_UUID.to_string().into(); @@ -312,10 +321,10 @@ async fn invite_user(data: Json, _token: AdminToken, mut conn: DbCon } } - _generate_invite(&user, &mut conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?; - user.save(&mut conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?; + _generate_invite(&user, &conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?; + user.save(&conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?; - Ok(Json(user.to_json(&mut conn).await)) + Ok(Json(user.to_json(&conn).await)) } #[post("/test/smtp", format = "application/json", data = "")] @@ -336,14 +345,14 @@ fn logout(cookies: &CookieJar<'_>) -> Redirect { } #[get("/users")] -async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json { - let users = User::get_all(&mut conn).await; +async fn get_users_json(_token: AdminToken, conn: DbConn) -> Json { + let users = User::get_all(&conn).await; let mut users_json = Vec::with_capacity(users.len()); for (u, _) in users { - let mut usr = u.to_json(&mut conn).await; + let mut usr = u.to_json(&conn).await; usr["userEnabled"] = 
json!(u.enabled); usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT)); - usr["lastActive"] = match u.last_active(&mut conn).await { + usr["lastActive"] = match u.last_active(&conn).await { Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)), None => json!(None::), }; @@ -354,17 +363,17 @@ async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json { } #[get("/users/overview")] -async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult> { - let users = User::get_all(&mut conn).await; +async fn users_overview(_token: AdminToken, conn: DbConn) -> ApiResult> { + let users = User::get_all(&conn).await; let mut users_json = Vec::with_capacity(users.len()); for (u, sso_u) in users { - let mut usr = u.to_json(&mut conn).await; - usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await); - usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await); - usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await)); + let mut usr = u.to_json(&conn).await; + usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &conn).await); + usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &conn).await); + usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &conn).await)); usr["user_enabled"] = json!(u.enabled); usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT)); - usr["last_active"] = match u.last_active(&mut conn).await { + usr["last_active"] = match u.last_active(&conn).await { Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)), None => json!("Never"), }; @@ -379,9 +388,9 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult")] -async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult { - if let Some(u) = User::find_by_mail(mail, &mut conn).await { - let mut usr = u.to_json(&mut 
conn).await; +async fn get_user_by_mail_json(mail: &str, _token: AdminToken, conn: DbConn) -> JsonResult { + if let Some(u) = User::find_by_mail(mail, &conn).await { + let mut usr = u.to_json(&conn).await; usr["userEnabled"] = json!(u.enabled); usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT)); Ok(Json(usr)) @@ -391,21 +400,21 @@ async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn) } #[get("/users/")] -async fn get_user_json(user_id: UserId, _token: AdminToken, mut conn: DbConn) -> JsonResult { - let u = get_user_or_404(&user_id, &mut conn).await?; - let mut usr = u.to_json(&mut conn).await; +async fn get_user_json(user_id: UserId, _token: AdminToken, conn: DbConn) -> JsonResult { + let u = get_user_or_404(&user_id, &conn).await?; + let mut usr = u.to_json(&conn).await; usr["userEnabled"] = json!(u.enabled); usr["createdAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT)); Ok(Json(usr)) } #[post("/users//delete", format = "application/json")] -async fn delete_user(user_id: UserId, token: AdminToken, mut conn: DbConn) -> EmptyResult { - let user = get_user_or_404(&user_id, &mut conn).await?; +async fn delete_user(user_id: UserId, token: AdminToken, conn: DbConn) -> EmptyResult { + let user = get_user_or_404(&user_id, &conn).await?; // Get the membership records before deleting the actual user - let memberships = Membership::find_any_state_by_user(&user_id, &mut conn).await; - let res = user.delete(&mut conn).await; + let memberships = Membership::find_any_state_by_user(&user_id, &conn).await; + let res = user.delete(&conn).await; for membership in memberships { log_event( @@ -415,7 +424,7 @@ async fn delete_user(user_id: UserId, token: AdminToken, mut conn: DbConn) -> Em &ACTING_ADMIN_USER.into(), 14, // Use UnknownBrowser type &token.ip.ip, - &mut conn, + &conn, ) .await; } @@ -424,9 +433,9 @@ async fn delete_user(user_id: UserId, token: AdminToken, mut conn: DbConn) -> Em } #[delete("/users//sso", 
format = "application/json")] -async fn delete_sso_user(user_id: UserId, token: AdminToken, mut conn: DbConn) -> EmptyResult { - let memberships = Membership::find_any_state_by_user(&user_id, &mut conn).await; - let res = SsoUser::delete(&user_id, &mut conn).await; +async fn delete_sso_user(user_id: UserId, token: AdminToken, conn: DbConn) -> EmptyResult { + let memberships = Membership::find_any_state_by_user(&user_id, &conn).await; + let res = SsoUser::delete(&user_id, &conn).await; for membership in memberships { log_event( @@ -436,7 +445,7 @@ async fn delete_sso_user(user_id: UserId, token: AdminToken, mut conn: DbConn) - &ACTING_ADMIN_USER.into(), 14, // Use UnknownBrowser type &token.ip.ip, - &mut conn, + &conn, ) .await; } @@ -445,13 +454,13 @@ async fn delete_sso_user(user_id: UserId, token: AdminToken, mut conn: DbConn) - } #[post("/users//deauth", format = "application/json")] -async fn deauth_user(user_id: UserId, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - let mut user = get_user_or_404(&user_id, &mut conn).await?; +async fn deauth_user(user_id: UserId, _token: AdminToken, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + let mut user = get_user_or_404(&user_id, &conn).await?; - nt.send_logout(&user, None, &mut conn).await; + nt.send_logout(&user, None, &conn).await; if CONFIG.push_enabled() { - for device in Device::find_push_devices_by_user(&user.uuid, &mut conn).await { + for device in Device::find_push_devices_by_user(&user.uuid, &conn).await { match unregister_push_device(&device.push_uuid).await { Ok(r) => r, Err(e) => error!("Unable to unregister devices from Bitwarden server: {e}"), @@ -459,46 +468,46 @@ async fn deauth_user(user_id: UserId, _token: AdminToken, mut conn: DbConn, nt: } } - Device::delete_all_by_user(&user.uuid, &mut conn).await?; + Device::delete_all_by_user(&user.uuid, &conn).await?; user.reset_security_stamp(); - user.save(&mut conn).await + user.save(&conn).await } #[post("/users//disable", format = 
"application/json")] -async fn disable_user(user_id: UserId, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - let mut user = get_user_or_404(&user_id, &mut conn).await?; - Device::delete_all_by_user(&user.uuid, &mut conn).await?; +async fn disable_user(user_id: UserId, _token: AdminToken, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + let mut user = get_user_or_404(&user_id, &conn).await?; + Device::delete_all_by_user(&user.uuid, &conn).await?; user.reset_security_stamp(); user.enabled = false; - let save_result = user.save(&mut conn).await; + let save_result = user.save(&conn).await; - nt.send_logout(&user, None, &mut conn).await; + nt.send_logout(&user, None, &conn).await; save_result } #[post("/users//enable", format = "application/json")] -async fn enable_user(user_id: UserId, _token: AdminToken, mut conn: DbConn) -> EmptyResult { - let mut user = get_user_or_404(&user_id, &mut conn).await?; +async fn enable_user(user_id: UserId, _token: AdminToken, conn: DbConn) -> EmptyResult { + let mut user = get_user_or_404(&user_id, &conn).await?; user.enabled = true; - user.save(&mut conn).await + user.save(&conn).await } #[post("/users//remove-2fa", format = "application/json")] -async fn remove_2fa(user_id: UserId, token: AdminToken, mut conn: DbConn) -> EmptyResult { - let mut user = get_user_or_404(&user_id, &mut conn).await?; - TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?; - two_factor::enforce_2fa_policy(&user, &ACTING_ADMIN_USER.into(), 14, &token.ip.ip, &mut conn).await?; +async fn remove_2fa(user_id: UserId, token: AdminToken, conn: DbConn) -> EmptyResult { + let mut user = get_user_or_404(&user_id, &conn).await?; + TwoFactor::delete_all_by_user(&user.uuid, &conn).await?; + two_factor::enforce_2fa_policy(&user, &ACTING_ADMIN_USER.into(), 14, &token.ip.ip, &conn).await?; user.totp_recover = None; - user.save(&mut conn).await + user.save(&conn).await } #[post("/users//invite/resend", format = "application/json")] -async fn 
resend_user_invite(user_id: UserId, _token: AdminToken, mut conn: DbConn) -> EmptyResult { - if let Some(user) = User::find_by_uuid(&user_id, &mut conn).await { +async fn resend_user_invite(user_id: UserId, _token: AdminToken, conn: DbConn) -> EmptyResult { + if let Some(user) = User::find_by_uuid(&user_id, &conn).await { //TODO: replace this with user.status check when it will be available (PR#3397) if !user.password_hash.is_empty() { err_code!("User already accepted invitation", Status::BadRequest.code); @@ -524,10 +533,10 @@ struct MembershipTypeData { } #[post("/users/org_type", format = "application/json", data = "")] -async fn update_membership_type(data: Json, token: AdminToken, mut conn: DbConn) -> EmptyResult { +async fn update_membership_type(data: Json, token: AdminToken, conn: DbConn) -> EmptyResult { let data: MembershipTypeData = data.into_inner(); - let Some(mut member_to_edit) = Membership::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &mut conn).await + let Some(mut member_to_edit) = Membership::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &conn).await else { err!("The specified user isn't member of the organization") }; @@ -539,7 +548,7 @@ async fn update_membership_type(data: Json, token: AdminToke if member_to_edit.atype == MembershipType::Owner && new_type != MembershipType::Owner { // Removing owner permission, check that there is at least one other confirmed owner - if Membership::count_confirmed_by_org_and_type(&data.org_uuid, MembershipType::Owner, &mut conn).await <= 1 { + if Membership::count_confirmed_by_org_and_type(&data.org_uuid, MembershipType::Owner, &conn).await <= 1 { err!("Can't change the type of the last owner") } } @@ -547,11 +556,11 @@ async fn update_membership_type(data: Json, token: AdminToke // This check is also done at api::organizations::{accept_invite, _confirm_invite, _activate_member, edit_member}, update_membership_type // It returns different error messages per function. 
if new_type < MembershipType::Admin { - match OrgPolicy::is_user_allowed(&member_to_edit.user_uuid, &member_to_edit.org_uuid, true, &mut conn).await { + match OrgPolicy::is_user_allowed(&member_to_edit.user_uuid, &member_to_edit.org_uuid, true, &conn).await { Ok(_) => {} Err(OrgPolicyErr::TwoFactorMissing) => { if CONFIG.email_2fa_auto_fallback() { - two_factor::email::find_and_activate_email_2fa(&member_to_edit.user_uuid, &mut conn).await?; + two_factor::email::find_and_activate_email_2fa(&member_to_edit.user_uuid, &conn).await?; } else { err!("You cannot modify this user to this type because they have not setup 2FA"); } @@ -569,32 +578,32 @@ async fn update_membership_type(data: Json, token: AdminToke &ACTING_ADMIN_USER.into(), 14, // Use UnknownBrowser type &token.ip.ip, - &mut conn, + &conn, ) .await; member_to_edit.atype = new_type; - member_to_edit.save(&mut conn).await + member_to_edit.save(&conn).await } #[post("/users/update_revision", format = "application/json")] -async fn update_revision_users(_token: AdminToken, mut conn: DbConn) -> EmptyResult { - User::update_all_revisions(&mut conn).await +async fn update_revision_users(_token: AdminToken, conn: DbConn) -> EmptyResult { + User::update_all_revisions(&conn).await } #[get("/organizations/overview")] -async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult> { - let organizations = Organization::get_all(&mut conn).await; +async fn organizations_overview(_token: AdminToken, conn: DbConn) -> ApiResult> { + let organizations = Organization::get_all(&conn).await; let mut organizations_json = Vec::with_capacity(organizations.len()); for o in organizations { let mut org = o.to_json(); - org["user_count"] = json!(Membership::count_by_org(&o.uuid, &mut conn).await); - org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &mut conn).await); - org["collection_count"] = json!(Collection::count_by_org(&o.uuid, &mut conn).await); - org["group_count"] = json!(Group::count_by_org(&o.uuid, 
&mut conn).await); - org["event_count"] = json!(Event::count_by_org(&o.uuid, &mut conn).await); - org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await); - org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await)); + org["user_count"] = json!(Membership::count_by_org(&o.uuid, &conn).await); + org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &conn).await); + org["collection_count"] = json!(Collection::count_by_org(&o.uuid, &conn).await); + org["group_count"] = json!(Group::count_by_org(&o.uuid, &conn).await); + org["event_count"] = json!(Event::count_by_org(&o.uuid, &conn).await); + org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &conn).await); + org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &conn).await)); organizations_json.push(org); } @@ -603,9 +612,9 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu } #[post("/organizations//delete", format = "application/json")] -async fn delete_organization(org_id: OrganizationId, _token: AdminToken, mut conn: DbConn) -> EmptyResult { - let org = Organization::find_by_uuid(&org_id, &mut conn).await.map_res("Organization doesn't exist")?; - org.delete(&mut conn).await +async fn delete_organization(org_id: OrganizationId, _token: AdminToken, conn: DbConn) -> EmptyResult { + let org = Organization::find_by_uuid(&org_id, &conn).await.map_res("Organization doesn't exist")?; + org.delete(&conn).await } #[derive(Deserialize)] @@ -693,7 +702,7 @@ async fn get_ntp_time(has_http_access: bool) -> String { } #[get("/diagnostics")] -async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn) -> ApiResult> { +async fn diagnostics(_token: AdminToken, ip_header: IpHeader, conn: DbConn) -> ApiResult> { use chrono::prelude::*; use std::net::ToSocketAddrs; @@ -747,7 +756,7 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn) 
"uses_proxy": uses_proxy, "enable_websocket": &CONFIG.enable_websocket(), "db_type": *DB_TYPE, - "db_version": get_sql_server_version(&mut conn).await, + "db_version": get_sql_server_version(&conn).await, "admin_url": format!("{}/diagnostics", admin_url()), "overrides": &CONFIG.get_overrides().join(", "), "host_arch": env::consts::ARCH, @@ -791,9 +800,9 @@ async fn delete_config(_token: AdminToken) -> EmptyResult { } #[post("/config/backup_db", format = "application/json")] -async fn backup_db(_token: AdminToken, mut conn: DbConn) -> ApiResult { +fn backup_db(_token: AdminToken) -> ApiResult { if *CAN_BACKUP { - match backup_database(&mut conn).await { + match backup_sqlite() { Ok(f) => Ok(format!("Backup to '{f}' was successful")), Err(e) => err!(format!("Backup was unsuccessful {e}")), } diff --git a/src/api/core/accounts.rs b/src/api/core/accounts.rs index c14bcef2..a900241d 100644 --- a/src/api/core/accounts.rs +++ b/src/api/core/accounts.rs @@ -13,7 +13,14 @@ use crate::{ }, auth::{decode_delete, decode_invite, decode_verify_email, ClientHeaders, Headers}, crypto, - db::{models::*, DbConn}, + db::{ + models::{ + AuthRequest, AuthRequestId, Cipher, CipherId, Device, DeviceId, DeviceType, EmergencyAccess, + EmergencyAccessId, EventType, Folder, FolderId, Invitation, Membership, MembershipId, OrgPolicy, + OrgPolicyType, Organization, OrganizationId, Send, SendId, User, UserId, UserKdfType, + }, + DbConn, + }, mail, util::{format_date, NumberOrString}, CONFIG, @@ -142,7 +149,7 @@ fn enforce_password_hint_setting(password_hint: &Option) -> EmptyResult } Ok(()) } -async fn is_email_2fa_required(member_id: Option, conn: &mut DbConn) -> bool { +async fn is_email_2fa_required(member_id: Option, conn: &DbConn) -> bool { if !CONFIG._enable_email_2fa() { return false; } @@ -160,7 +167,7 @@ async fn register(data: Json, conn: DbConn) -> JsonResult { _register(data, false, conn).await } -pub async fn _register(data: Json, email_verification: bool, mut conn: DbConn) -> 
JsonResult { +pub async fn _register(data: Json, email_verification: bool, conn: DbConn) -> JsonResult { let mut data: RegisterData = data.into_inner(); let email = data.email.to_lowercase(); @@ -242,7 +249,7 @@ pub async fn _register(data: Json, email_verification: bool, mut c let password_hint = clean_password_hint(&data.master_password_hint); enforce_password_hint_setting(&password_hint)?; - let mut user = match User::find_by_mail(&email, &mut conn).await { + let mut user = match User::find_by_mail(&email, &conn).await { Some(user) => { if !user.password_hash.is_empty() { err!("Registration not allowed or user already exists") @@ -257,12 +264,12 @@ pub async fn _register(data: Json, email_verification: bool, mut c } else { err!("Registration email does not match invite email") } - } else if Invitation::take(&email, &mut conn).await { - Membership::accept_user_invitations(&user.uuid, &mut conn).await?; + } else if Invitation::take(&email, &conn).await { + Membership::accept_user_invitations(&user.uuid, &conn).await?; user } else if CONFIG.is_signup_allowed(&email) || (CONFIG.emergency_access_allowed() - && EmergencyAccess::find_invited_by_grantee_email(&email, &mut conn).await.is_some()) + && EmergencyAccess::find_invited_by_grantee_email(&email, &conn).await.is_some()) { user } else { @@ -273,7 +280,7 @@ pub async fn _register(data: Json, email_verification: bool, mut c // Order is important here; the invitation check must come first // because the vaultwarden admin can invite anyone, regardless // of other signup restrictions. - if Invitation::take(&email, &mut conn).await + if Invitation::take(&email, &conn).await || CONFIG.is_signup_allowed(&email) || pending_emergency_access.is_some() { @@ -285,7 +292,7 @@ pub async fn _register(data: Json, email_verification: bool, mut c }; // Make sure we don't leave a lingering invitation. 
- Invitation::take(&email, &mut conn).await; + Invitation::take(&email, &conn).await; set_kdf_data(&mut user, data.kdf)?; @@ -316,17 +323,17 @@ pub async fn _register(data: Json, email_verification: bool, mut c error!("Error sending welcome email: {e:#?}"); } - if email_verified && is_email_2fa_required(data.organization_user_id, &mut conn).await { - email::activate_email_2fa(&user, &mut conn).await.ok(); + if email_verified && is_email_2fa_required(data.organization_user_id, &conn).await { + email::activate_email_2fa(&user, &conn).await.ok(); } } - user.save(&mut conn).await?; + user.save(&conn).await?; // accept any open emergency access invitations if !CONFIG.mail_enabled() && CONFIG.emergency_access_allowed() { - for mut emergency_invite in EmergencyAccess::find_all_invited_by_grantee_email(&user.email, &mut conn).await { - emergency_invite.accept_invite(&user.uuid, &user.email, &mut conn).await.ok(); + for mut emergency_invite in EmergencyAccess::find_all_invited_by_grantee_email(&user.email, &conn).await { + emergency_invite.accept_invite(&user.uuid, &user.email, &conn).await.ok(); } } @@ -337,7 +344,7 @@ pub async fn _register(data: Json, email_verification: bool, mut c } #[post("/accounts/set-password", data = "")] -async fn post_set_password(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn post_set_password(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let data: SetPasswordData = data.into_inner(); let mut user = headers.user; @@ -367,30 +374,30 @@ async fn post_set_password(data: Json, headers: Headers, mut co if let Some(identifier) = data.org_identifier { if identifier != crate::sso::FAKE_IDENTIFIER { - let org = match Organization::find_by_name(&identifier, &mut conn).await { + let org = match Organization::find_by_name(&identifier, &conn).await { None => err!("Failed to retrieve the associated organization"), Some(org) => org, }; - let membership = match Membership::find_by_user_and_org(&user.uuid, &org.uuid, 
&mut conn).await { + let membership = match Membership::find_by_user_and_org(&user.uuid, &org.uuid, &conn).await { None => err!("Failed to retrieve the invitation"), Some(org) => org, }; - accept_org_invite(&user, membership, None, &mut conn).await?; + accept_org_invite(&user, membership, None, &conn).await?; } } if CONFIG.mail_enabled() { mail::send_welcome(&user.email.to_lowercase()).await?; } else { - Membership::accept_user_invitations(&user.uuid, &mut conn).await?; + Membership::accept_user_invitations(&user.uuid, &conn).await?; } - log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn) + log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &conn) .await; - user.save(&mut conn).await?; + user.save(&conn).await?; Ok(Json(json!({ "Object": "set-password", @@ -399,8 +406,8 @@ async fn post_set_password(data: Json, headers: Headers, mut co } #[get("/accounts/profile")] -async fn profile(headers: Headers, mut conn: DbConn) -> Json { - Json(headers.user.to_json(&mut conn).await) +async fn profile(headers: Headers, conn: DbConn) -> Json { + Json(headers.user.to_json(&conn).await) } #[derive(Debug, Deserialize)] @@ -416,7 +423,7 @@ async fn put_profile(data: Json, headers: Headers, conn: DbConn) -> } #[post("/accounts/profile", data = "")] -async fn post_profile(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn post_profile(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let data: ProfileData = data.into_inner(); // Check if the length of the username exceeds 50 characters (Same is Upstream Bitwarden) @@ -428,8 +435,8 @@ async fn post_profile(data: Json, headers: Headers, mut conn: DbCon let mut user = headers.user; user.name = data.name; - user.save(&mut conn).await?; - Ok(Json(user.to_json(&mut conn).await)) + user.save(&conn).await?; + Ok(Json(user.to_json(&conn).await)) } #[derive(Deserialize)] @@ -439,7 +446,7 @@ 
struct AvatarData { } #[put("/accounts/avatar", data = "")] -async fn put_avatar(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn put_avatar(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let data: AvatarData = data.into_inner(); // It looks like it only supports the 6 hex color format. @@ -454,13 +461,13 @@ async fn put_avatar(data: Json, headers: Headers, mut conn: DbConn) let mut user = headers.user; user.avatar_color = data.avatar_color; - user.save(&mut conn).await?; - Ok(Json(user.to_json(&mut conn).await)) + user.save(&conn).await?; + Ok(Json(user.to_json(&conn).await)) } #[get("/users//public-key")] -async fn get_public_keys(user_id: UserId, _headers: Headers, mut conn: DbConn) -> JsonResult { - let user = match User::find_by_uuid(&user_id, &mut conn).await { +async fn get_public_keys(user_id: UserId, _headers: Headers, conn: DbConn) -> JsonResult { + let user = match User::find_by_uuid(&user_id, &conn).await { Some(user) if user.public_key.is_some() => user, Some(_) => err_code!("User has no public_key", Status::NotFound.code), None => err_code!("User doesn't exist", Status::NotFound.code), @@ -474,7 +481,7 @@ async fn get_public_keys(user_id: UserId, _headers: Headers, mut conn: DbConn) - } #[post("/accounts/keys", data = "")] -async fn post_keys(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn post_keys(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let data: KeysData = data.into_inner(); let mut user = headers.user; @@ -482,7 +489,7 @@ async fn post_keys(data: Json, headers: Headers, mut conn: DbConn) -> user.private_key = Some(data.encrypted_private_key); user.public_key = Some(data.public_key); - user.save(&mut conn).await?; + user.save(&conn).await?; Ok(Json(json!({ "privateKey": user.private_key, @@ -501,7 +508,7 @@ struct ChangePassData { } #[post("/accounts/password", data = "")] -async fn post_password(data: Json, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> 
EmptyResult { +async fn post_password(data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { let data: ChangePassData = data.into_inner(); let mut user = headers.user; @@ -512,7 +519,7 @@ async fn post_password(data: Json, headers: Headers, mut conn: D user.password_hint = clean_password_hint(&data.master_password_hint); enforce_password_hint_setting(&user.password_hint)?; - log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn) + log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &conn) .await; user.set_password( @@ -527,12 +534,12 @@ async fn post_password(data: Json, headers: Headers, mut conn: D ]), ); - let save_result = user.save(&mut conn).await; + let save_result = user.save(&conn).await; // Prevent logging out the client where the user requested this endpoint from. // If you do logout the user it will causes issues at the client side. // Adding the device uuid will prevent this. 
- nt.send_logout(&user, Some(headers.device.uuid.clone()), &mut conn).await; + nt.send_logout(&user, Some(headers.device.uuid.clone()), &conn).await; save_result } @@ -584,7 +591,7 @@ fn set_kdf_data(user: &mut User, data: KDFData) -> EmptyResult { } #[post("/accounts/kdf", data = "")] -async fn post_kdf(data: Json, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { +async fn post_kdf(data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { let data: ChangeKdfData = data.into_inner(); let mut user = headers.user; @@ -595,9 +602,9 @@ async fn post_kdf(data: Json, headers: Headers, mut conn: DbConn, set_kdf_data(&mut user, data.kdf)?; user.set_password(&data.new_master_password_hash, Some(data.key), true, None); - let save_result = user.save(&mut conn).await; + let save_result = user.save(&conn).await; - nt.send_logout(&user, Some(headers.device.uuid.clone()), &mut conn).await; + nt.send_logout(&user, Some(headers.device.uuid.clone()), &conn).await; save_result } @@ -752,7 +759,7 @@ fn validate_keydata( } #[post("/accounts/key-management/rotate-user-account-keys", data = "")] -async fn post_rotatekey(data: Json, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { +async fn post_rotatekey(data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { // TODO: See if we can wrap everything within a SQL Transaction. If something fails it should revert everything. let data: KeyData = data.into_inner(); @@ -770,13 +777,13 @@ async fn post_rotatekey(data: Json, headers: Headers, mut conn: DbConn, // TODO: Ideally we'd do everything after this point in a single transaction. 
- let mut existing_ciphers = Cipher::find_owned_by_user(user_id, &mut conn).await; - let mut existing_folders = Folder::find_by_user(user_id, &mut conn).await; - let mut existing_emergency_access = EmergencyAccess::find_all_by_grantor_uuid(user_id, &mut conn).await; - let mut existing_memberships = Membership::find_by_user(user_id, &mut conn).await; + let mut existing_ciphers = Cipher::find_owned_by_user(user_id, &conn).await; + let mut existing_folders = Folder::find_by_user(user_id, &conn).await; + let mut existing_emergency_access = EmergencyAccess::find_all_by_grantor_uuid(user_id, &conn).await; + let mut existing_memberships = Membership::find_by_user(user_id, &conn).await; // We only rotate the reset password key if it is set. existing_memberships.retain(|m| m.reset_password_key.is_some()); - let mut existing_sends = Send::find_by_user(user_id, &mut conn).await; + let mut existing_sends = Send::find_by_user(user_id, &conn).await; validate_keydata( &data, @@ -798,7 +805,7 @@ async fn post_rotatekey(data: Json, headers: Headers, mut conn: DbConn, }; saved_folder.name = folder_data.name; - saved_folder.save(&mut conn).await? + saved_folder.save(&conn).await? } } @@ -811,7 +818,7 @@ async fn post_rotatekey(data: Json, headers: Headers, mut conn: DbConn, }; saved_emergency_access.key_encrypted = Some(emergency_access_data.key_encrypted); - saved_emergency_access.save(&mut conn).await? + saved_emergency_access.save(&conn).await? } // Update reset password data @@ -823,7 +830,7 @@ async fn post_rotatekey(data: Json, headers: Headers, mut conn: DbConn, }; membership.reset_password_key = Some(reset_password_data.reset_password_key); - membership.save(&mut conn).await? + membership.save(&conn).await? 
} // Update send data @@ -832,7 +839,7 @@ async fn post_rotatekey(data: Json, headers: Headers, mut conn: DbConn, err!("Send doesn't exist") }; - update_send_from_data(send, send_data, &headers, &mut conn, &nt, UpdateType::None).await?; + update_send_from_data(send, send_data, &headers, &conn, &nt, UpdateType::None).await?; } // Update cipher data @@ -848,7 +855,7 @@ async fn post_rotatekey(data: Json, headers: Headers, mut conn: DbConn, // Prevent triggering cipher updates via WebSockets by settings UpdateType::None // The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues. // We force the users to logout after the user has been saved to try and prevent these issues. - update_cipher_from_data(saved_cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await? + update_cipher_from_data(saved_cipher, cipher_data, &headers, None, &conn, &nt, UpdateType::None).await? } } @@ -863,28 +870,28 @@ async fn post_rotatekey(data: Json, headers: Headers, mut conn: DbConn, None, ); - let save_result = user.save(&mut conn).await; + let save_result = user.save(&conn).await; // Prevent logging out the client where the user requested this endpoint from. // If you do logout the user it will causes issues at the client side. // Adding the device uuid will prevent this. 
- nt.send_logout(&user, Some(headers.device.uuid.clone()), &mut conn).await; + nt.send_logout(&user, Some(headers.device.uuid.clone()), &conn).await; save_result } #[post("/accounts/security-stamp", data = "")] -async fn post_sstamp(data: Json, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { +async fn post_sstamp(data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { let data: PasswordOrOtpData = data.into_inner(); let mut user = headers.user; - data.validate(&user, true, &mut conn).await?; + data.validate(&user, true, &conn).await?; - Device::delete_all_by_user(&user.uuid, &mut conn).await?; + Device::delete_all_by_user(&user.uuid, &conn).await?; user.reset_security_stamp(); - let save_result = user.save(&mut conn).await; + let save_result = user.save(&conn).await; - nt.send_logout(&user, None, &mut conn).await; + nt.send_logout(&user, None, &conn).await; save_result } @@ -897,7 +904,7 @@ struct EmailTokenData { } #[post("/accounts/email-token", data = "")] -async fn post_email_token(data: Json, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn post_email_token(data: Json, headers: Headers, conn: DbConn) -> EmptyResult { if !CONFIG.email_change_allowed() { err!("Email change is not allowed."); } @@ -909,7 +916,7 @@ async fn post_email_token(data: Json, headers: Headers, mut conn err!("Invalid password") } - if User::find_by_mail(&data.new_email, &mut conn).await.is_some() { + if User::find_by_mail(&data.new_email, &conn).await.is_some() { if CONFIG.mail_enabled() { if let Err(e) = mail::send_change_email_existing(&data.new_email, &user.email).await { error!("Error sending change-email-existing email: {e:#?}"); @@ -934,7 +941,7 @@ async fn post_email_token(data: Json, headers: Headers, mut conn user.email_new = Some(data.new_email); user.email_new_token = Some(token); - user.save(&mut conn).await + user.save(&conn).await } #[derive(Deserialize)] @@ -949,7 +956,7 @@ struct ChangeEmailData { } 
#[post("/accounts/email", data = "")] -async fn post_email(data: Json, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { +async fn post_email(data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { if !CONFIG.email_change_allowed() { err!("Email change is not allowed."); } @@ -961,7 +968,7 @@ async fn post_email(data: Json, headers: Headers, mut conn: DbC err!("Invalid password") } - if User::find_by_mail(&data.new_email, &mut conn).await.is_some() { + if User::find_by_mail(&data.new_email, &conn).await.is_some() { err!("Email already in use"); } @@ -995,9 +1002,9 @@ async fn post_email(data: Json, headers: Headers, mut conn: DbC user.set_password(&data.new_master_password_hash, Some(data.key), true, None); - let save_result = user.save(&mut conn).await; + let save_result = user.save(&conn).await; - nt.send_logout(&user, None, &mut conn).await; + nt.send_logout(&user, None, &conn).await; save_result } @@ -1025,10 +1032,10 @@ struct VerifyEmailTokenData { } #[post("/accounts/verify-email-token", data = "")] -async fn post_verify_email_token(data: Json, mut conn: DbConn) -> EmptyResult { +async fn post_verify_email_token(data: Json, conn: DbConn) -> EmptyResult { let data: VerifyEmailTokenData = data.into_inner(); - let Some(mut user) = User::find_by_uuid(&data.user_id, &mut conn).await else { + let Some(mut user) = User::find_by_uuid(&data.user_id, &conn).await else { err!("User doesn't exist") }; @@ -1041,7 +1048,7 @@ async fn post_verify_email_token(data: Json, mut conn: DbC user.verified_at = Some(Utc::now().naive_utc()); user.last_verifying_at = None; user.login_verify_count = 0; - if let Err(e) = user.save(&mut conn).await { + if let Err(e) = user.save(&conn).await { error!("Error saving email verification: {e:#?}"); } @@ -1055,11 +1062,11 @@ struct DeleteRecoverData { } #[post("/accounts/delete-recover", data = "")] -async fn post_delete_recover(data: Json, mut conn: DbConn) -> EmptyResult { +async fn 
post_delete_recover(data: Json, conn: DbConn) -> EmptyResult { let data: DeleteRecoverData = data.into_inner(); if CONFIG.mail_enabled() { - if let Some(user) = User::find_by_mail(&data.email, &mut conn).await { + if let Some(user) = User::find_by_mail(&data.email, &conn).await { if let Err(e) = mail::send_delete_account(&user.email, &user.uuid).await { error!("Error sending delete account email: {e:#?}"); } @@ -1082,21 +1089,21 @@ struct DeleteRecoverTokenData { } #[post("/accounts/delete-recover-token", data = "")] -async fn post_delete_recover_token(data: Json, mut conn: DbConn) -> EmptyResult { +async fn post_delete_recover_token(data: Json, conn: DbConn) -> EmptyResult { let data: DeleteRecoverTokenData = data.into_inner(); let Ok(claims) = decode_delete(&data.token) else { err!("Invalid claim") }; - let Some(user) = User::find_by_uuid(&data.user_id, &mut conn).await else { + let Some(user) = User::find_by_uuid(&data.user_id, &conn).await else { err!("User doesn't exist") }; if claims.sub != *user.uuid { err!("Invalid claim"); } - user.delete(&mut conn).await + user.delete(&conn).await } #[post("/accounts/delete", data = "")] @@ -1105,13 +1112,13 @@ async fn post_delete_account(data: Json, headers: Headers, co } #[delete("/accounts", data = "")] -async fn delete_account(data: Json, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn delete_account(data: Json, headers: Headers, conn: DbConn) -> EmptyResult { let data: PasswordOrOtpData = data.into_inner(); let user = headers.user; - data.validate(&user, true, &mut conn).await?; + data.validate(&user, true, &conn).await?; - user.delete(&mut conn).await + user.delete(&conn).await } #[get("/accounts/revision-date")] @@ -1127,7 +1134,7 @@ struct PasswordHintData { } #[post("/accounts/password-hint", data = "")] -async fn password_hint(data: Json, mut conn: DbConn) -> EmptyResult { +async fn password_hint(data: Json, conn: DbConn) -> EmptyResult { if !CONFIG.password_hints_allowed() || 
(!CONFIG.mail_enabled() && !CONFIG.show_password_hint()) { err!("This server is not configured to provide password hints."); } @@ -1137,7 +1144,7 @@ async fn password_hint(data: Json, mut conn: DbConn) -> EmptyR let data: PasswordHintData = data.into_inner(); let email = &data.email; - match User::find_by_mail(email, &mut conn).await { + match User::find_by_mail(email, &conn).await { None => { // To prevent user enumeration, act as if the user exists. if CONFIG.mail_enabled() { @@ -1179,10 +1186,10 @@ async fn prelogin(data: Json, conn: DbConn) -> Json { _prelogin(data, conn).await } -pub async fn _prelogin(data: Json, mut conn: DbConn) -> Json { +pub async fn _prelogin(data: Json, conn: DbConn) -> Json { let data: PreloginData = data.into_inner(); - let (kdf_type, kdf_iter, kdf_mem, kdf_para) = match User::find_by_mail(&data.email, &mut conn).await { + let (kdf_type, kdf_iter, kdf_mem, kdf_para) = match User::find_by_mail(&data.email, &conn).await { Some(user) => (user.client_kdf_type, user.client_kdf_iter, user.client_kdf_memory, user.client_kdf_parallelism), None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT, None, None), }; @@ -1203,7 +1210,7 @@ struct SecretVerificationRequest { } // Change the KDF Iterations if necessary -pub async fn kdf_upgrade(user: &mut User, pwd_hash: &str, conn: &mut DbConn) -> ApiResult<()> { +pub async fn kdf_upgrade(user: &mut User, pwd_hash: &str, conn: &DbConn) -> ApiResult<()> { if user.password_iterations < CONFIG.password_iterations() { user.password_iterations = CONFIG.password_iterations(); user.set_password(pwd_hash, None, false, None); @@ -1216,7 +1223,7 @@ pub async fn kdf_upgrade(user: &mut User, pwd_hash: &str, conn: &mut DbConn) -> } #[post("/accounts/verify-password", data = "")] -async fn verify_password(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn verify_password(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let data: SecretVerificationRequest = 
data.into_inner(); let mut user = headers.user; @@ -1224,22 +1231,22 @@ async fn verify_password(data: Json, headers: Headers err!("Invalid password") } - kdf_upgrade(&mut user, &data.master_password_hash, &mut conn).await?; + kdf_upgrade(&mut user, &data.master_password_hash, &conn).await?; Ok(Json(master_password_policy(&user, &conn).await)) } -async fn _api_key(data: Json, rotate: bool, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn _api_key(data: Json, rotate: bool, headers: Headers, conn: DbConn) -> JsonResult { use crate::util::format_date; let data: PasswordOrOtpData = data.into_inner(); let mut user = headers.user; - data.validate(&user, true, &mut conn).await?; + data.validate(&user, true, &conn).await?; if rotate || user.api_key.is_none() { user.api_key = Some(crypto::generate_api_key()); - user.save(&mut conn).await.expect("Error saving API key"); + user.save(&conn).await.expect("Error saving API key"); } Ok(Json(json!({ @@ -1260,10 +1267,10 @@ async fn rotate_api_key(data: Json, headers: Headers, conn: D } #[get("/devices/knowndevice")] -async fn get_known_device(device: KnownDevice, mut conn: DbConn) -> JsonResult { +async fn get_known_device(device: KnownDevice, conn: DbConn) -> JsonResult { let mut result = false; - if let Some(user) = User::find_by_mail(&device.email, &mut conn).await { - result = Device::find_by_uuid_and_user(&device.uuid, &user.uuid, &mut conn).await.is_some(); + if let Some(user) = User::find_by_mail(&device.email, &conn).await { + result = Device::find_by_uuid_and_user(&device.uuid, &user.uuid, &conn).await.is_some(); } Ok(Json(json!(result))) } @@ -1306,8 +1313,8 @@ impl<'r> FromRequest<'r> for KnownDevice { } #[get("/devices")] -async fn get_all_devices(headers: Headers, mut conn: DbConn) -> JsonResult { - let devices = Device::find_with_auth_request_by_user(&headers.user.uuid, &mut conn).await; +async fn get_all_devices(headers: Headers, conn: DbConn) -> JsonResult { + let devices = 
Device::find_with_auth_request_by_user(&headers.user.uuid, &conn).await; let devices = devices.iter().map(|device| device.to_json()).collect::>(); Ok(Json(json!({ @@ -1318,8 +1325,8 @@ async fn get_all_devices(headers: Headers, mut conn: DbConn) -> JsonResult { } #[get("/devices/identifier/")] -async fn get_device(device_id: DeviceId, headers: Headers, mut conn: DbConn) -> JsonResult { - let Some(device) = Device::find_by_uuid_and_user(&device_id, &headers.user.uuid, &mut conn).await else { +async fn get_device(device_id: DeviceId, headers: Headers, conn: DbConn) -> JsonResult { + let Some(device) = Device::find_by_uuid_and_user(&device_id, &headers.user.uuid, &conn).await else { err!("No device found"); }; Ok(Json(device.to_json())) @@ -1337,17 +1344,11 @@ async fn post_device_token(device_id: DeviceId, data: Json, headers: } #[put("/devices/identifier//token", data = "")] -async fn put_device_token( - device_id: DeviceId, - data: Json, - headers: Headers, - mut conn: DbConn, -) -> EmptyResult { +async fn put_device_token(device_id: DeviceId, data: Json, headers: Headers, conn: DbConn) -> EmptyResult { let data = data.into_inner(); let token = data.push_token; - let Some(mut device) = Device::find_by_uuid_and_user(&headers.device.uuid, &headers.user.uuid, &mut conn).await - else { + let Some(mut device) = Device::find_by_uuid_and_user(&headers.device.uuid, &headers.user.uuid, &conn).await else { err!(format!("Error: device {device_id} should be present before a token can be assigned")) }; @@ -1360,17 +1361,17 @@ async fn put_device_token( } device.push_token = Some(token); - if let Err(e) = device.save(&mut conn).await { + if let Err(e) = device.save(&conn).await { err!(format!("An error occurred while trying to save the device push token: {e}")); } - register_push_device(&mut device, &mut conn).await?; + register_push_device(&mut device, &conn).await?; Ok(()) } #[put("/devices/identifier//clear-token")] -async fn put_clear_device_token(device_id: DeviceId, mut 
conn: DbConn) -> EmptyResult { +async fn put_clear_device_token(device_id: DeviceId, conn: DbConn) -> EmptyResult { // This only clears push token // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Controllers/DevicesController.cs#L215 // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Services/Implementations/DeviceService.cs#L37 @@ -1382,8 +1383,8 @@ async fn put_clear_device_token(device_id: DeviceId, mut conn: DbConn) -> EmptyR return Ok(()); } - if let Some(device) = Device::find_by_uuid(&device_id, &mut conn).await { - Device::clear_push_token_by_uuid(&device_id, &mut conn).await?; + if let Some(device) = Device::find_by_uuid(&device_id, &conn).await { + Device::clear_push_token_by_uuid(&device_id, &conn).await?; unregister_push_device(&device.push_uuid).await?; } @@ -1412,17 +1413,17 @@ struct AuthRequestRequest { async fn post_auth_request( data: Json, client_headers: ClientHeaders, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> JsonResult { let data = data.into_inner(); - let Some(user) = User::find_by_mail(&data.email, &mut conn).await else { + let Some(user) = User::find_by_mail(&data.email, &conn).await else { err!("AuthRequest doesn't exist", "User not found") }; // Validate device uuid and type - let device = match Device::find_by_uuid_and_user(&data.device_identifier, &user.uuid, &mut conn).await { + let device = match Device::find_by_uuid_and_user(&data.device_identifier, &user.uuid, &conn).await { Some(device) if device.atype == client_headers.device_type => device, _ => err!("AuthRequest doesn't exist", "Device verification failed"), }; @@ -1435,16 +1436,16 @@ async fn post_auth_request( data.access_code, data.public_key, ); - auth_request.save(&mut conn).await?; + auth_request.save(&conn).await?; - nt.send_auth_request(&user.uuid, &auth_request.uuid, &device, &mut conn).await; + nt.send_auth_request(&user.uuid, &auth_request.uuid, &device, &conn).await; 
log_user_event( EventType::UserRequestedDeviceApproval as i32, &user.uuid, client_headers.device_type, &client_headers.ip.ip, - &mut conn, + &conn, ) .await; @@ -1464,8 +1465,8 @@ async fn post_auth_request( } #[get("/auth-requests/")] -async fn get_auth_request(auth_request_id: AuthRequestId, headers: Headers, mut conn: DbConn) -> JsonResult { - let Some(auth_request) = AuthRequest::find_by_uuid_and_user(&auth_request_id, &headers.user.uuid, &mut conn).await +async fn get_auth_request(auth_request_id: AuthRequestId, headers: Headers, conn: DbConn) -> JsonResult { + let Some(auth_request) = AuthRequest::find_by_uuid_and_user(&auth_request_id, &headers.user.uuid, &conn).await else { err!("AuthRequest doesn't exist", "Record not found or user uuid does not match") }; @@ -1501,13 +1502,12 @@ async fn put_auth_request( auth_request_id: AuthRequestId, data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, ant: AnonymousNotify<'_>, nt: Notify<'_>, ) -> JsonResult { let data = data.into_inner(); - let Some(mut auth_request) = - AuthRequest::find_by_uuid_and_user(&auth_request_id, &headers.user.uuid, &mut conn).await + let Some(mut auth_request) = AuthRequest::find_by_uuid_and_user(&auth_request_id, &headers.user.uuid, &conn).await else { err!("AuthRequest doesn't exist", "Record not found or user uuid does not match") }; @@ -1529,28 +1529,28 @@ async fn put_auth_request( auth_request.master_password_hash = data.master_password_hash; auth_request.response_device_id = Some(data.device_identifier.clone()); auth_request.response_date = Some(response_date); - auth_request.save(&mut conn).await?; + auth_request.save(&conn).await?; ant.send_auth_response(&auth_request.user_uuid, &auth_request.uuid).await; - nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, &headers.device, &mut conn).await; + nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, &headers.device, &conn).await; log_user_event( EventType::OrganizationUserApprovedAuthRequest 
as i32, &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; } else { // If denied, there's no reason to keep the request - auth_request.delete(&mut conn).await?; + auth_request.delete(&conn).await?; log_user_event( EventType::OrganizationUserRejectedAuthRequest as i32, &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; } @@ -1575,9 +1575,9 @@ async fn get_auth_request_response( auth_request_id: AuthRequestId, code: &str, client_headers: ClientHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { - let Some(auth_request) = AuthRequest::find_by_uuid(&auth_request_id, &mut conn).await else { + let Some(auth_request) = AuthRequest::find_by_uuid(&auth_request_id, &conn).await else { err!("AuthRequest doesn't exist", "User not found") }; @@ -1606,8 +1606,8 @@ async fn get_auth_request_response( } #[get("/auth-requests")] -async fn get_auth_requests(headers: Headers, mut conn: DbConn) -> JsonResult { - let auth_requests = AuthRequest::find_by_user(&headers.user.uuid, &mut conn).await; +async fn get_auth_requests(headers: Headers, conn: DbConn) -> JsonResult { + let auth_requests = AuthRequest::find_by_user(&headers.user.uuid, &conn).await; Ok(Json(json!({ "data": auth_requests @@ -1637,8 +1637,8 @@ async fn get_auth_requests(headers: Headers, mut conn: DbConn) -> JsonResult { pub async fn purge_auth_requests(pool: DbPool) { debug!("Purging auth requests"); - if let Ok(mut conn) = pool.get().await { - AuthRequest::purge_expired_auth_requests(&mut conn).await; + if let Ok(conn) = pool.get().await { + AuthRequest::purge_expired_auth_requests(&conn).await; } else { error!("Failed to get DB connection while purging trashed ciphers") } diff --git a/src/api/core/ciphers.rs b/src/api/core/ciphers.rs index d8e622f2..803dc291 100644 --- a/src/api/core/ciphers.rs +++ b/src/api/core/ciphers.rs @@ -17,7 +17,14 @@ use crate::{ auth::Headers, config::PathType, crypto, - db::{models::*, DbConn, DbPool}, 
+ db::{ + models::{ + Attachment, AttachmentId, Cipher, CipherId, Collection, CollectionCipher, CollectionGroup, CollectionId, + CollectionUser, EventType, Favorite, Folder, FolderCipher, FolderId, Group, Membership, MembershipType, + OrgPolicy, OrgPolicyType, OrganizationId, RepromptType, Send, UserId, + }, + DbConn, DbPool, + }, CONFIG, }; @@ -93,8 +100,8 @@ pub fn routes() -> Vec { pub async fn purge_trashed_ciphers(pool: DbPool) { debug!("Purging trashed ciphers"); - if let Ok(mut conn) = pool.get().await { - Cipher::purge_trash(&mut conn).await; + if let Ok(conn) = pool.get().await { + Cipher::purge_trash(&conn).await; } else { error!("Failed to get DB connection while purging trashed ciphers") } @@ -107,11 +114,11 @@ struct SyncData { } #[get("/sync?")] -async fn sync(data: SyncData, headers: Headers, client_version: Option, mut conn: DbConn) -> JsonResult { - let user_json = headers.user.to_json(&mut conn).await; +async fn sync(data: SyncData, headers: Headers, client_version: Option, conn: DbConn) -> JsonResult { + let user_json = headers.user.to_json(&conn).await; // Get all ciphers which are visible by the user - let mut ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &mut conn).await; + let mut ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn).await; // Filter out SSH keys if the client version is less than 2024.12.0 let show_ssh_keys = if let Some(client_version) = client_version { @@ -124,31 +131,30 @@ async fn sync(data: SyncData, headers: Headers, client_version: Option = - Folder::find_by_user(&headers.user.uuid, &mut conn).await.iter().map(Folder::to_json).collect(); + Folder::find_by_user(&headers.user.uuid, &conn).await.iter().map(Folder::to_json).collect(); let sends_json: Vec = - Send::find_by_user(&headers.user.uuid, &mut conn).await.iter().map(Send::to_json).collect(); + Send::find_by_user(&headers.user.uuid, &conn).await.iter().map(Send::to_json).collect(); let policies_json: Vec = - 
OrgPolicy::find_confirmed_by_user(&headers.user.uuid, &mut conn).await.iter().map(OrgPolicy::to_json).collect(); + OrgPolicy::find_confirmed_by_user(&headers.user.uuid, &conn).await.iter().map(OrgPolicy::to_json).collect(); let domains_json = if data.exclude_domains { Value::Null @@ -169,15 +175,14 @@ async fn sync(data: SyncData, headers: Headers, client_version: Option JsonResult { - let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &mut conn).await; - let cipher_sync_data = CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &mut conn).await; +async fn get_ciphers(headers: Headers, conn: DbConn) -> JsonResult { + let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn).await; + let cipher_sync_data = CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &conn).await; let mut ciphers_json = Vec::with_capacity(ciphers.len()); for c in ciphers { ciphers_json.push( - c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &mut conn) - .await?, + c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &conn).await?, ); } @@ -189,16 +194,16 @@ async fn get_ciphers(headers: Headers, mut conn: DbConn) -> JsonResult { } #[get("/ciphers/")] -async fn get_cipher(cipher_id: CipherId, headers: Headers, mut conn: DbConn) -> JsonResult { - let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { +async fn get_cipher(cipher_id: CipherId, headers: Headers, conn: DbConn) -> JsonResult { + let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else { err!("Cipher doesn't exist") }; - if !cipher.is_accessible_to_user(&headers.user.uuid, &mut conn).await { + if !cipher.is_accessible_to_user(&headers.user.uuid, &conn).await { err!("Cipher is not owned by user") } - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?)) + Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, 
CipherSyncType::User, &conn).await?)) } #[get("/ciphers//admin")] @@ -291,7 +296,7 @@ async fn post_ciphers_admin(data: Json, headers: Headers, conn: async fn post_ciphers_create( data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> JsonResult { let mut data: ShareCipherData = data.into_inner(); @@ -305,11 +310,11 @@ async fn post_ciphers_create( // This check is usually only needed in update_cipher_from_data(), but we // need it here as well to avoid creating an empty cipher in the call to // cipher.save() below. - enforce_personal_ownership_policy(Some(&data.cipher), &headers, &mut conn).await?; + enforce_personal_ownership_policy(Some(&data.cipher), &headers, &conn).await?; let mut cipher = Cipher::new(data.cipher.r#type, data.cipher.name.clone()); cipher.user_uuid = Some(headers.user.uuid.clone()); - cipher.save(&mut conn).await?; + cipher.save(&conn).await?; // When cloning a cipher, the Bitwarden clients seem to set this field // based on the cipher being cloned (when creating a new cipher, it's set @@ -319,12 +324,12 @@ async fn post_ciphers_create( // or otherwise), we can just ignore this field entirely. data.cipher.last_known_revision_date = None; - share_cipher_by_uuid(&cipher.uuid, data, &headers, &mut conn, &nt, None).await + share_cipher_by_uuid(&cipher.uuid, data, &headers, &conn, &nt, None).await } /// Called when creating a new user-owned cipher. 
#[post("/ciphers", data = "")] -async fn post_ciphers(data: Json, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { +async fn post_ciphers(data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { let mut data: CipherData = data.into_inner(); // The web/browser clients set this field to null as expected, but the @@ -334,9 +339,9 @@ async fn post_ciphers(data: Json, headers: Headers, mut conn: DbConn data.last_known_revision_date = None; let mut cipher = Cipher::new(data.r#type, data.name.clone()); - update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherCreate).await?; + update_cipher_from_data(&mut cipher, data, &headers, None, &conn, &nt, UpdateType::SyncCipherCreate).await?; - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?)) + Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &conn).await?)) } /// Enforces the personal ownership policy on user-owned ciphers, if applicable. @@ -346,11 +351,7 @@ async fn post_ciphers(data: Json, headers: Headers, mut conn: DbConn /// allowed to delete or share such ciphers to an org, however. 
/// /// Ref: https://bitwarden.com/help/article/policies/#personal-ownership -async fn enforce_personal_ownership_policy( - data: Option<&CipherData>, - headers: &Headers, - conn: &mut DbConn, -) -> EmptyResult { +async fn enforce_personal_ownership_policy(data: Option<&CipherData>, headers: &Headers, conn: &DbConn) -> EmptyResult { if data.is_none() || data.unwrap().organization_id.is_none() { let user_id = &headers.user.uuid; let policy_type = OrgPolicyType::PersonalOwnership; @@ -366,7 +367,7 @@ pub async fn update_cipher_from_data( data: CipherData, headers: &Headers, shared_to_collections: Option>, - conn: &mut DbConn, + conn: &DbConn, nt: &Notify<'_>, ut: UpdateType, ) -> EmptyResult { @@ -559,13 +560,8 @@ struct RelationsData { } #[post("/ciphers/import", data = "")] -async fn post_ciphers_import( - data: Json, - headers: Headers, - mut conn: DbConn, - nt: Notify<'_>, -) -> EmptyResult { - enforce_personal_ownership_policy(None, &headers, &mut conn).await?; +async fn post_ciphers_import(data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + enforce_personal_ownership_policy(None, &headers, &conn).await?; let data: ImportData = data.into_inner(); @@ -577,14 +573,14 @@ async fn post_ciphers_import( // Read and create the folders let existing_folders: HashSet> = - Folder::find_by_user(&headers.user.uuid, &mut conn).await.into_iter().map(|f| Some(f.uuid)).collect(); + Folder::find_by_user(&headers.user.uuid, &conn).await.into_iter().map(|f| Some(f.uuid)).collect(); let mut folders: Vec = Vec::with_capacity(data.folders.len()); for folder in data.folders.into_iter() { let folder_id = if existing_folders.contains(&folder.id) { folder.id.unwrap() } else { let mut new_folder = Folder::new(headers.user.uuid.clone(), folder.name); - new_folder.save(&mut conn).await?; + new_folder.save(&conn).await?; new_folder.uuid }; @@ -604,12 +600,12 @@ async fn post_ciphers_import( cipher_data.folder_id = folder_id; let mut cipher = 
Cipher::new(cipher_data.r#type, cipher_data.name.clone()); - update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await?; + update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &conn, &nt, UpdateType::None).await?; } let mut user = headers.user; - user.update_revision(&mut conn).await?; - nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &mut conn).await; + user.update_revision(&conn).await?; + nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &conn).await; Ok(()) } @@ -653,12 +649,12 @@ async fn put_cipher( cipher_id: CipherId, data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> JsonResult { let data: CipherData = data.into_inner(); - let Some(mut cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { + let Some(mut cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else { err!("Cipher doesn't exist") }; @@ -667,13 +663,13 @@ async fn put_cipher( // cipher itself, so the user shouldn't need write access to change these. // Interestingly, upstream Bitwarden doesn't properly handle this either. 
- if !cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await { + if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn).await { err!("Cipher is not write accessible") } - update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?; + update_cipher_from_data(&mut cipher, data, &headers, None, &conn, &nt, UpdateType::SyncCipherUpdate).await?; - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?)) + Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &conn).await?)) } #[post("/ciphers//partial", data = "")] @@ -692,26 +688,26 @@ async fn put_cipher_partial( cipher_id: CipherId, data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { let data: PartialCipherData = data.into_inner(); - let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { + let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else { err!("Cipher doesn't exist") }; if let Some(ref folder_id) = data.folder_id { - if Folder::find_by_uuid_and_user(folder_id, &headers.user.uuid, &mut conn).await.is_none() { + if Folder::find_by_uuid_and_user(folder_id, &headers.user.uuid, &conn).await.is_none() { err!("Invalid folder", "Folder does not exist or belongs to another user"); } } // Move cipher - cipher.move_to_folder(data.folder_id.clone(), &headers.user.uuid, &mut conn).await?; + cipher.move_to_folder(data.folder_id.clone(), &headers.user.uuid, &conn).await?; // Update favorite - cipher.set_favorite(Some(data.favorite), &headers.user.uuid, &mut conn).await?; + cipher.set_favorite(Some(data.favorite), &headers.user.uuid, &conn).await?; - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?)) + Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &conn).await?)) } #[derive(Deserialize)] @@ -764,35 +760,34 
@@ async fn post_collections_update( cipher_id: CipherId, data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> JsonResult { let data: CollectionsAdminData = data.into_inner(); - let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { + let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else { err!("Cipher doesn't exist") }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await { + if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn).await { err!("Cipher is not write accessible") } let posted_collections = HashSet::::from_iter(data.collection_ids); let current_collections = - HashSet::::from_iter(cipher.get_collections(headers.user.uuid.clone(), &mut conn).await); + HashSet::::from_iter(cipher.get_collections(headers.user.uuid.clone(), &conn).await); for collection in posted_collections.symmetric_difference(¤t_collections) { - match Collection::find_by_uuid_and_org(collection, cipher.organization_uuid.as_ref().unwrap(), &mut conn).await - { + match Collection::find_by_uuid_and_org(collection, cipher.organization_uuid.as_ref().unwrap(), &conn).await { None => err!("Invalid collection ID provided"), Some(collection) => { - if collection.is_writable_by_user(&headers.user.uuid, &mut conn).await { + if collection.is_writable_by_user(&headers.user.uuid, &conn).await { if posted_collections.contains(&collection.uuid) { // Add to collection - CollectionCipher::save(&cipher.uuid, &collection.uuid, &mut conn).await?; + CollectionCipher::save(&cipher.uuid, &collection.uuid, &conn).await?; } else { // Remove from collection - CollectionCipher::delete(&cipher.uuid, &collection.uuid, &mut conn).await?; + CollectionCipher::delete(&cipher.uuid, &collection.uuid, &conn).await?; } } else { err!("No rights to modify the collection") @@ -804,10 +799,10 @@ async fn post_collections_update( nt.send_cipher_update( UpdateType::SyncCipherUpdate, &cipher, - 
&cipher.update_users_revision(&mut conn).await, + &cipher.update_users_revision(&conn).await, &headers.device, Some(Vec::from_iter(posted_collections)), - &mut conn, + &conn, ) .await; @@ -818,11 +813,11 @@ async fn post_collections_update( &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?)) + Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &conn).await?)) } #[put("/ciphers//collections-admin", data = "")] @@ -841,35 +836,34 @@ async fn post_collections_admin( cipher_id: CipherId, data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { let data: CollectionsAdminData = data.into_inner(); - let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { + let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else { err!("Cipher doesn't exist") }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await { + if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn).await { err!("Cipher is not write accessible") } let posted_collections = HashSet::::from_iter(data.collection_ids); let current_collections = - HashSet::::from_iter(cipher.get_admin_collections(headers.user.uuid.clone(), &mut conn).await); + HashSet::::from_iter(cipher.get_admin_collections(headers.user.uuid.clone(), &conn).await); for collection in posted_collections.symmetric_difference(¤t_collections) { - match Collection::find_by_uuid_and_org(collection, cipher.organization_uuid.as_ref().unwrap(), &mut conn).await - { + match Collection::find_by_uuid_and_org(collection, cipher.organization_uuid.as_ref().unwrap(), &conn).await { None => err!("Invalid collection ID provided"), Some(collection) => { - if collection.is_writable_by_user(&headers.user.uuid, &mut conn).await { + if collection.is_writable_by_user(&headers.user.uuid, 
&conn).await { if posted_collections.contains(&collection.uuid) { // Add to collection - CollectionCipher::save(&cipher.uuid, &collection.uuid, &mut conn).await?; + CollectionCipher::save(&cipher.uuid, &collection.uuid, &conn).await?; } else { // Remove from collection - CollectionCipher::delete(&cipher.uuid, &collection.uuid, &mut conn).await?; + CollectionCipher::delete(&cipher.uuid, &collection.uuid, &conn).await?; } } else { err!("No rights to modify the collection") @@ -881,10 +875,10 @@ async fn post_collections_admin( nt.send_cipher_update( UpdateType::SyncCipherUpdate, &cipher, - &cipher.update_users_revision(&mut conn).await, + &cipher.update_users_revision(&conn).await, &headers.device, Some(Vec::from_iter(posted_collections)), - &mut conn, + &conn, ) .await; @@ -895,7 +889,7 @@ async fn post_collections_admin( &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; @@ -916,12 +910,12 @@ async fn post_cipher_share( cipher_id: CipherId, data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> JsonResult { let data: ShareCipherData = data.into_inner(); - share_cipher_by_uuid(&cipher_id, data, &headers, &mut conn, &nt, None).await + share_cipher_by_uuid(&cipher_id, data, &headers, &conn, &nt, None).await } #[put("/ciphers//share", data = "")] @@ -929,12 +923,12 @@ async fn put_cipher_share( cipher_id: CipherId, data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> JsonResult { let data: ShareCipherData = data.into_inner(); - share_cipher_by_uuid(&cipher_id, data, &headers, &mut conn, &nt, None).await + share_cipher_by_uuid(&cipher_id, data, &headers, &conn, &nt, None).await } #[derive(Deserialize)] @@ -948,7 +942,7 @@ struct ShareSelectedCipherData { async fn put_cipher_share_selected( data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { let mut data: ShareSelectedCipherData = data.into_inner(); @@ -975,14 +969,14 
@@ async fn put_cipher_share_selected( match shared_cipher_data.cipher.id.take() { Some(id) => { - share_cipher_by_uuid(&id, shared_cipher_data, &headers, &mut conn, &nt, Some(UpdateType::None)).await? + share_cipher_by_uuid(&id, shared_cipher_data, &headers, &conn, &nt, Some(UpdateType::None)).await? } None => err!("Request missing ids field"), }; } // Multi share actions do not send out a push for each cipher, we need to send a general sync here - nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &mut conn).await; + nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &conn).await; Ok(()) } @@ -991,7 +985,7 @@ async fn share_cipher_by_uuid( cipher_id: &CipherId, data: ShareCipherData, headers: &Headers, - conn: &mut DbConn, + conn: &DbConn, nt: &Notify<'_>, override_ut: Option, ) -> JsonResult { @@ -1050,17 +1044,17 @@ async fn get_attachment( cipher_id: CipherId, attachment_id: AttachmentId, headers: Headers, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { - let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { + let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else { err!("Cipher doesn't exist") }; - if !cipher.is_accessible_to_user(&headers.user.uuid, &mut conn).await { + if !cipher.is_accessible_to_user(&headers.user.uuid, &conn).await { err!("Cipher is not accessible") } - match Attachment::find_by_id(&attachment_id, &mut conn).await { + match Attachment::find_by_id(&attachment_id, &conn).await { Some(attachment) if cipher_id == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host).await?)), Some(_) => err!("Attachment doesn't belong to cipher"), None => err!("Attachment doesn't exist"), @@ -1090,13 +1084,13 @@ async fn post_attachment_v2( cipher_id: CipherId, data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { - let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { + let 
Some(cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else { err!("Cipher doesn't exist") }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await { + if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn).await { err!("Cipher is not write accessible") } @@ -1109,7 +1103,7 @@ async fn post_attachment_v2( let attachment_id = crypto::generate_attachment_id(); let attachment = Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.file_name, file_size, Some(data.key)); - attachment.save(&mut conn).await.expect("Error saving attachment"); + attachment.save(&conn).await.expect("Error saving attachment"); let url = format!("/ciphers/{}/attachment/{attachment_id}", cipher.uuid); let response_key = match data.admin_request { @@ -1122,7 +1116,7 @@ async fn post_attachment_v2( "attachmentId": attachment_id, "url": url, "fileUploadType": FileUploadType::Direct as i32, - response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?, + response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &conn).await?, }))) } @@ -1145,7 +1139,7 @@ async fn save_attachment( cipher_id: CipherId, data: Form>, headers: &Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> Result<(Cipher, DbConn), crate::error::Error> { let data = data.into_inner(); @@ -1157,11 +1151,11 @@ async fn save_attachment( err!("Attachment size can't be negative") } - let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else { + let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &conn).await else { err!("Cipher doesn't exist") }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await { + if !cipher.is_write_accessible_to_user(&headers.user.uuid, &conn).await { err!("Cipher is not write accessible") } @@ -1176,7 +1170,7 @@ async fn save_attachment( match CONFIG.user_attachment_limit() { Some(0) => err!("Attachments are disabled"), 
Some(limit_kb) => { - let already_used = Attachment::size_by_user(user_id, &mut conn).await; + let already_used = Attachment::size_by_user(user_id, &conn).await; let left = limit_kb .checked_mul(1024) .and_then(|l| l.checked_sub(already_used)) @@ -1198,7 +1192,7 @@ async fn save_attachment( match CONFIG.org_attachment_limit() { Some(0) => err!("Attachments are disabled"), Some(limit_kb) => { - let already_used = Attachment::size_by_org(org_id, &mut conn).await; + let already_used = Attachment::size_by_org(org_id, &conn).await; let left = limit_kb .checked_mul(1024) .and_then(|l| l.checked_sub(already_used)) @@ -1249,10 +1243,10 @@ async fn save_attachment( if size != attachment.file_size { // Update the attachment with the actual file size. attachment.file_size = size; - attachment.save(&mut conn).await.expect("Error updating attachment"); + attachment.save(&conn).await.expect("Error updating attachment"); } } else { - attachment.delete(&mut conn).await.ok(); + attachment.delete(&conn).await.ok(); err!(format!("Attachment size mismatch (expected within [{min_size}, {max_size}], got {size})")); } @@ -1272,7 +1266,7 @@ async fn save_attachment( } let attachment = Attachment::new(file_id.clone(), cipher_id.clone(), encrypted_filename.unwrap(), size, data.key); - attachment.save(&mut conn).await.expect("Error saving attachment"); + attachment.save(&conn).await.expect("Error saving attachment"); } save_temp_file(PathType::Attachments, &format!("{cipher_id}/{file_id}"), data.data, true).await?; @@ -1280,10 +1274,10 @@ async fn save_attachment( nt.send_cipher_update( UpdateType::SyncCipherUpdate, &cipher, - &cipher.update_users_revision(&mut conn).await, + &cipher.update_users_revision(&conn).await, &headers.device, None, - &mut conn, + &conn, ) .await; @@ -1295,7 +1289,7 @@ async fn save_attachment( &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; } @@ -1313,10 +1307,10 @@ async fn post_attachment_v2_data( attachment_id: 
AttachmentId, data: Form>, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { - let attachment = match Attachment::find_by_id(&attachment_id, &mut conn).await { + let attachment = match Attachment::find_by_id(&attachment_id, &conn).await { Some(attachment) if cipher_id == attachment.cipher_uuid => Some(attachment), Some(_) => err!("Attachment doesn't belong to cipher"), None => err!("Attachment doesn't exist"), @@ -1340,9 +1334,9 @@ async fn post_attachment( // the attachment database record as well as saving the data to disk. let attachment = None; - let (cipher, mut conn) = save_attachment(attachment, cipher_id, data, &headers, conn, nt).await?; + let (cipher, conn) = save_attachment(attachment, cipher_id, data, &headers, conn, nt).await?; - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?)) + Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &conn).await?)) } #[post("/ciphers//attachment-admin", format = "multipart/form-data", data = "")] @@ -1362,10 +1356,10 @@ async fn post_attachment_share( attachment_id: AttachmentId, data: Form>, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> JsonResult { - _delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &mut conn, &nt).await?; + _delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &conn, &nt).await?; post_attachment(cipher_id, data, headers, conn, nt).await } @@ -1396,10 +1390,10 @@ async fn delete_attachment( cipher_id: CipherId, attachment_id: AttachmentId, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> JsonResult { - _delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &mut conn, &nt).await + _delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &conn, &nt).await } #[delete("/ciphers//attachment//admin")] @@ -1407,55 +1401,45 @@ async fn delete_attachment_admin( 
cipher_id: CipherId, attachment_id: AttachmentId, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> JsonResult { - _delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &mut conn, &nt).await + _delete_cipher_attachment_by_id(&cipher_id, &attachment_id, &headers, &conn, &nt).await } #[post("/ciphers//delete")] -async fn delete_cipher_post(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - _delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, &CipherDeleteOptions::HardSingle, &nt).await +async fn delete_cipher_post(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + _delete_cipher_by_uuid(&cipher_id, &headers, &conn, &CipherDeleteOptions::HardSingle, &nt).await // permanent delete } #[post("/ciphers//delete-admin")] -async fn delete_cipher_post_admin( - cipher_id: CipherId, - headers: Headers, - mut conn: DbConn, - nt: Notify<'_>, -) -> EmptyResult { - _delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, &CipherDeleteOptions::HardSingle, &nt).await +async fn delete_cipher_post_admin(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + _delete_cipher_by_uuid(&cipher_id, &headers, &conn, &CipherDeleteOptions::HardSingle, &nt).await // permanent delete } #[put("/ciphers//delete")] -async fn delete_cipher_put(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - _delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, &CipherDeleteOptions::SoftSingle, &nt).await +async fn delete_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + _delete_cipher_by_uuid(&cipher_id, &headers, &conn, &CipherDeleteOptions::SoftSingle, &nt).await // soft delete } #[put("/ciphers//delete-admin")] -async fn delete_cipher_put_admin( - cipher_id: CipherId, - headers: Headers, - mut conn: DbConn, - nt: Notify<'_>, -) -> EmptyResult { - 
_delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, &CipherDeleteOptions::SoftSingle, &nt).await +async fn delete_cipher_put_admin(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + _delete_cipher_by_uuid(&cipher_id, &headers, &conn, &CipherDeleteOptions::SoftSingle, &nt).await // soft delete } #[delete("/ciphers/")] -async fn delete_cipher(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - _delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, &CipherDeleteOptions::HardSingle, &nt).await +async fn delete_cipher(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + _delete_cipher_by_uuid(&cipher_id, &headers, &conn, &CipherDeleteOptions::HardSingle, &nt).await // permanent delete } #[delete("/ciphers//admin")] -async fn delete_cipher_admin(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - _delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, &CipherDeleteOptions::HardSingle, &nt).await +async fn delete_cipher_admin(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + _delete_cipher_by_uuid(&cipher_id, &headers, &conn, &CipherDeleteOptions::HardSingle, &nt).await // permanent delete } @@ -1526,38 +1510,33 @@ async fn delete_cipher_selected_put_admin( } #[put("/ciphers//restore")] -async fn restore_cipher_put(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { - _restore_cipher_by_uuid(&cipher_id, &headers, false, &mut conn, &nt).await +async fn restore_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { + _restore_cipher_by_uuid(&cipher_id, &headers, false, &conn, &nt).await } #[put("/ciphers//restore-admin")] -async fn restore_cipher_put_admin( - cipher_id: CipherId, - headers: Headers, - mut conn: DbConn, - nt: Notify<'_>, -) -> JsonResult { - _restore_cipher_by_uuid(&cipher_id, &headers, 
false, &mut conn, &nt).await +async fn restore_cipher_put_admin(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { + _restore_cipher_by_uuid(&cipher_id, &headers, false, &conn, &nt).await } #[put("/ciphers/restore-admin", data = "")] async fn restore_cipher_selected_admin( data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> JsonResult { - _restore_multiple_ciphers(data, &headers, &mut conn, &nt).await + _restore_multiple_ciphers(data, &headers, &conn, &nt).await } #[put("/ciphers/restore", data = "")] async fn restore_cipher_selected( data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> JsonResult { - _restore_multiple_ciphers(data, &headers, &mut conn, &nt).await + _restore_multiple_ciphers(data, &headers, &conn, &nt).await } #[derive(Deserialize)] @@ -1571,14 +1550,14 @@ struct MoveCipherData { async fn move_cipher_selected( data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { let data = data.into_inner(); let user_id = &headers.user.uuid; if let Some(ref folder_id) = data.folder_id { - if Folder::find_by_uuid_and_user(folder_id, user_id, &mut conn).await.is_none() { + if Folder::find_by_uuid_and_user(folder_id, user_id, &conn).await.is_none() { err!("Invalid folder", "Folder does not exist or belongs to another user"); } } @@ -1588,10 +1567,10 @@ async fn move_cipher_selected( // TODO: Convert this to use a single query (or at least less) to update all items // Find all ciphers a user has access to, all others will be ignored - let accessible_ciphers = Cipher::find_by_user_and_ciphers(user_id, &data.ids, &mut conn).await; + let accessible_ciphers = Cipher::find_by_user_and_ciphers(user_id, &data.ids, &conn).await; let accessible_ciphers_count = accessible_ciphers.len(); for cipher in accessible_ciphers { - cipher.move_to_folder(data.folder_id.clone(), user_id, &mut conn).await?; + 
cipher.move_to_folder(data.folder_id.clone(), user_id, &conn).await?; if cipher_count == 1 { single_cipher = Some(cipher); } @@ -1604,12 +1583,12 @@ async fn move_cipher_selected( std::slice::from_ref(user_id), &headers.device, None, - &mut conn, + &conn, ) .await; } else { // Multi move actions do not send out a push for each cipher, we need to send a general sync here - nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &mut conn).await; + nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &conn).await; } if cipher_count != accessible_ciphers_count { @@ -1642,23 +1621,23 @@ async fn delete_all( organization: Option, data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { let data: PasswordOrOtpData = data.into_inner(); let mut user = headers.user; - data.validate(&user, true, &mut conn).await?; + data.validate(&user, true, &conn).await?; match organization { Some(org_data) => { // Organization ID in query params, purging organization vault - match Membership::find_by_user_and_org(&user.uuid, &org_data.org_id, &mut conn).await { + match Membership::find_by_user_and_org(&user.uuid, &org_data.org_id, &conn).await { None => err!("You don't have permission to purge the organization vault"), Some(member) => { if member.atype == MembershipType::Owner { - Cipher::delete_all_by_organization(&org_data.org_id, &mut conn).await?; - nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &mut conn).await; + Cipher::delete_all_by_organization(&org_data.org_id, &conn).await?; + nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &conn).await; log_event( EventType::OrganizationPurgedVault as i32, @@ -1667,7 +1646,7 @@ async fn delete_all( &user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; @@ -1681,17 +1660,17 @@ async fn delete_all( None => { // No organization ID in query params, purging 
user vault // Delete ciphers and their attachments - for cipher in Cipher::find_owned_by_user(&user.uuid, &mut conn).await { - cipher.delete(&mut conn).await?; + for cipher in Cipher::find_owned_by_user(&user.uuid, &conn).await { + cipher.delete(&conn).await?; } // Delete folders - for f in Folder::find_by_user(&user.uuid, &mut conn).await { - f.delete(&mut conn).await?; + for f in Folder::find_by_user(&user.uuid, &conn).await { + f.delete(&conn).await?; } - user.update_revision(&mut conn).await?; - nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &mut conn).await; + user.update_revision(&conn).await?; + nt.send_user_update(UpdateType::SyncVault, &user, &headers.device.push_uuid, &conn).await; Ok(()) } @@ -1709,7 +1688,7 @@ pub enum CipherDeleteOptions { async fn _delete_cipher_by_uuid( cipher_id: &CipherId, headers: &Headers, - conn: &mut DbConn, + conn: &DbConn, delete_options: &CipherDeleteOptions, nt: &Notify<'_>, ) -> EmptyResult { @@ -1775,20 +1754,20 @@ struct CipherIdsData { async fn _delete_multiple_ciphers( data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, delete_options: CipherDeleteOptions, nt: Notify<'_>, ) -> EmptyResult { let data = data.into_inner(); for cipher_id in data.ids { - if let error @ Err(_) = _delete_cipher_by_uuid(&cipher_id, &headers, &mut conn, &delete_options, &nt).await { + if let error @ Err(_) = _delete_cipher_by_uuid(&cipher_id, &headers, &conn, &delete_options, &nt).await { return error; }; } // Multi delete actions do not send out a push for each cipher, we need to send a general sync here - nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &mut conn).await; + nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &conn).await; Ok(()) } @@ -1797,7 +1776,7 @@ async fn _restore_cipher_by_uuid( cipher_id: &CipherId, headers: &Headers, multi_restore: bool, - conn: &mut DbConn, + conn: &DbConn, nt: &Notify<'_>, ) -> 
JsonResult { let Some(mut cipher) = Cipher::find_by_uuid(cipher_id, conn).await else { @@ -1842,7 +1821,7 @@ async fn _restore_cipher_by_uuid( async fn _restore_multiple_ciphers( data: Json, headers: &Headers, - conn: &mut DbConn, + conn: &DbConn, nt: &Notify<'_>, ) -> JsonResult { let data = data.into_inner(); @@ -1869,7 +1848,7 @@ async fn _delete_cipher_attachment_by_id( cipher_id: &CipherId, attachment_id: &AttachmentId, headers: &Headers, - conn: &mut DbConn, + conn: &DbConn, nt: &Notify<'_>, ) -> JsonResult { let Some(attachment) = Attachment::find_by_id(attachment_id, conn).await else { @@ -1938,7 +1917,7 @@ pub enum CipherSyncType { } impl CipherSyncData { - pub async fn new(user_id: &UserId, sync_type: CipherSyncType, conn: &mut DbConn) -> Self { + pub async fn new(user_id: &UserId, sync_type: CipherSyncType, conn: &DbConn) -> Self { let cipher_folders: HashMap; let cipher_favorites: HashSet; match sync_type { diff --git a/src/api/core/emergency_access.rs b/src/api/core/emergency_access.rs index b6b77df1..6f4c8eb2 100644 --- a/src/api/core/emergency_access.rs +++ b/src/api/core/emergency_access.rs @@ -8,7 +8,13 @@ use crate::{ EmptyResult, JsonResult, }, auth::{decode_emergency_access_invite, Headers}, - db::{models::*, DbConn, DbPool}, + db::{ + models::{ + Cipher, EmergencyAccess, EmergencyAccessId, EmergencyAccessStatus, EmergencyAccessType, Invitation, + Membership, MembershipType, OrgPolicy, TwoFactor, User, UserId, + }, + DbConn, DbPool, + }, mail, util::NumberOrString, CONFIG, @@ -40,7 +46,7 @@ pub fn routes() -> Vec { // region get #[get("/emergency-access/trusted")] -async fn get_contacts(headers: Headers, mut conn: DbConn) -> Json { +async fn get_contacts(headers: Headers, conn: DbConn) -> Json { if !CONFIG.emergency_access_allowed() { return Json(json!({ "data": [{ @@ -58,10 +64,10 @@ async fn get_contacts(headers: Headers, mut conn: DbConn) -> Json { "continuationToken": null })); } - let emergency_access_list = 
EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await; + let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &conn).await; let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len()); for ea in emergency_access_list { - if let Some(grantee) = ea.to_json_grantee_details(&mut conn).await { + if let Some(grantee) = ea.to_json_grantee_details(&conn).await { emergency_access_list_json.push(grantee) } } @@ -74,15 +80,15 @@ async fn get_contacts(headers: Headers, mut conn: DbConn) -> Json { } #[get("/emergency-access/granted")] -async fn get_grantees(headers: Headers, mut conn: DbConn) -> Json { +async fn get_grantees(headers: Headers, conn: DbConn) -> Json { let emergency_access_list = if CONFIG.emergency_access_allowed() { - EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await + EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &conn).await } else { Vec::new() }; let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len()); for ea in emergency_access_list { - emergency_access_list_json.push(ea.to_json_grantor_details(&mut conn).await); + emergency_access_list_json.push(ea.to_json_grantor_details(&conn).await); } Json(json!({ @@ -93,12 +99,12 @@ async fn get_grantees(headers: Headers, mut conn: DbConn) -> Json { } #[get("/emergency-access/")] -async fn get_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn get_emergency_access(emer_id: EmergencyAccessId, headers: Headers, conn: DbConn) -> JsonResult { check_emergency_access_enabled()?; - match EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await { + match EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &conn).await { Some(emergency_access) => Ok(Json( - emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but 
does not!"), + emergency_access.to_json_grantee_details(&conn).await.expect("Grantee user should exist but does not!"), )), None => err!("Emergency access not valid."), } @@ -131,14 +137,14 @@ async fn post_emergency_access( emer_id: EmergencyAccessId, data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { check_emergency_access_enabled()?; let data: EmergencyAccessUpdateData = data.into_inner(); let Some(mut emergency_access) = - EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await + EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &conn).await else { err!("Emergency access not valid.") }; @@ -154,7 +160,7 @@ async fn post_emergency_access( emergency_access.key_encrypted = data.key_encrypted; } - emergency_access.save(&mut conn).await?; + emergency_access.save(&conn).await?; Ok(Json(emergency_access.to_json())) } @@ -163,12 +169,12 @@ async fn post_emergency_access( // region delete #[delete("/emergency-access/")] -async fn delete_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn delete_emergency_access(emer_id: EmergencyAccessId, headers: Headers, conn: DbConn) -> EmptyResult { check_emergency_access_enabled()?; let emergency_access = match ( - EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await, - EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &headers.user.uuid, &mut conn).await, + EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &conn).await, + EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &headers.user.uuid, &conn).await, ) { (Some(grantor_emer), None) => { info!("Grantor deleted emergency access {emer_id}"); @@ -181,7 +187,7 @@ async fn delete_emergency_access(emer_id: EmergencyAccessId, headers: Headers, m _ => err!("Emergency access not valid."), }; - emergency_access.delete(&mut conn).await?; + 
emergency_access.delete(&conn).await?; Ok(()) } @@ -203,7 +209,7 @@ struct EmergencyAccessInviteData { } #[post("/emergency-access/invite", data = "")] -async fn send_invite(data: Json, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn send_invite(data: Json, headers: Headers, conn: DbConn) -> EmptyResult { check_emergency_access_enabled()?; let data: EmergencyAccessInviteData = data.into_inner(); @@ -224,7 +230,7 @@ async fn send_invite(data: Json, headers: Headers, mu err!("You can not set yourself as an emergency contact.") } - let (grantee_user, new_user) = match User::find_by_mail(&email, &mut conn).await { + let (grantee_user, new_user) = match User::find_by_mail(&email, &conn).await { None => { if !CONFIG.invitations_allowed() { err!(format!("Grantee user does not exist: {email}")) @@ -236,11 +242,11 @@ async fn send_invite(data: Json, headers: Headers, mu if !CONFIG.mail_enabled() { let invitation = Invitation::new(&email); - invitation.save(&mut conn).await?; + invitation.save(&conn).await?; } let mut user = User::new(email.clone(), None); - user.save(&mut conn).await?; + user.save(&conn).await?; (user, true) } Some(user) if user.password_hash.is_empty() => (user, true), @@ -251,7 +257,7 @@ async fn send_invite(data: Json, headers: Headers, mu &grantor_user.uuid, &grantee_user.uuid, &grantee_user.email, - &mut conn, + &conn, ) .await .is_some() @@ -261,7 +267,7 @@ async fn send_invite(data: Json, headers: Headers, mu let mut new_emergency_access = EmergencyAccess::new(grantor_user.uuid, grantee_user.email, emergency_access_status, new_type, wait_time_days); - new_emergency_access.save(&mut conn).await?; + new_emergency_access.save(&conn).await?; if CONFIG.mail_enabled() { mail::send_emergency_access_invite( @@ -274,18 +280,18 @@ async fn send_invite(data: Json, headers: Headers, mu .await?; } else if !new_user { // if mail is not enabled immediately accept the invitation for existing users - 
new_emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?; + new_emergency_access.accept_invite(&grantee_user.uuid, &email, &conn).await?; } Ok(()) } #[post("/emergency-access//reinvite")] -async fn resend_invite(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn resend_invite(emer_id: EmergencyAccessId, headers: Headers, conn: DbConn) -> EmptyResult { check_emergency_access_enabled()?; let Some(mut emergency_access) = - EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await + EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &conn).await else { err!("Emergency access not valid.") }; @@ -298,7 +304,7 @@ async fn resend_invite(emer_id: EmergencyAccessId, headers: Headers, mut conn: D err!("Email not valid.") }; - let Some(grantee_user) = User::find_by_mail(&email, &mut conn).await else { + let Some(grantee_user) = User::find_by_mail(&email, &conn).await else { err!("Grantee user not found.") }; @@ -315,10 +321,10 @@ async fn resend_invite(emer_id: EmergencyAccessId, headers: Headers, mut conn: D .await?; } else if !grantee_user.password_hash.is_empty() { // accept the invitation for existing user - emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?; - } else if CONFIG.invitations_allowed() && Invitation::find_by_mail(&email, &mut conn).await.is_none() { + emergency_access.accept_invite(&grantee_user.uuid, &email, &conn).await?; + } else if CONFIG.invitations_allowed() && Invitation::find_by_mail(&email, &conn).await.is_none() { let invitation = Invitation::new(&email); - invitation.save(&mut conn).await?; + invitation.save(&conn).await?; } Ok(()) @@ -335,7 +341,7 @@ async fn accept_invite( emer_id: EmergencyAccessId, data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { check_emergency_access_enabled()?; @@ -349,9 +355,9 @@ async fn accept_invite( err!("Claim email does not match 
current users email") } - let grantee_user = match User::find_by_mail(&claims.email, &mut conn).await { + let grantee_user = match User::find_by_mail(&claims.email, &conn).await { Some(user) => { - Invitation::take(&claims.email, &mut conn).await; + Invitation::take(&claims.email, &conn).await; user } None => err!("Invited user not found"), @@ -360,13 +366,13 @@ async fn accept_invite( // We need to search for the uuid in combination with the email, since we do not yet store the uuid of the grantee in the database. // The uuid of the grantee gets stored once accepted. let Some(mut emergency_access) = - EmergencyAccess::find_by_uuid_and_grantee_email(&emer_id, &headers.user.email, &mut conn).await + EmergencyAccess::find_by_uuid_and_grantee_email(&emer_id, &headers.user.email, &conn).await else { err!("Emergency access not valid.") }; // get grantor user to send Accepted email - let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else { + let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await else { err!("Grantor user not found.") }; @@ -374,7 +380,7 @@ async fn accept_invite( && grantor_user.name == claims.grantor_name && grantor_user.email == claims.grantor_email { - emergency_access.accept_invite(&grantee_user.uuid, &grantee_user.email, &mut conn).await?; + emergency_access.accept_invite(&grantee_user.uuid, &grantee_user.email, &conn).await?; if CONFIG.mail_enabled() { mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?; @@ -397,7 +403,7 @@ async fn confirm_emergency_access( emer_id: EmergencyAccessId, data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { check_emergency_access_enabled()?; @@ -406,7 +412,7 @@ async fn confirm_emergency_access( let key = data.key; let Some(mut emergency_access) = - EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &confirming_user.uuid, &mut conn).await + 
EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &confirming_user.uuid, &conn).await else { err!("Emergency access not valid.") }; @@ -417,12 +423,12 @@ async fn confirm_emergency_access( err!("Emergency access not valid.") } - let Some(grantor_user) = User::find_by_uuid(&confirming_user.uuid, &mut conn).await else { + let Some(grantor_user) = User::find_by_uuid(&confirming_user.uuid, &conn).await else { err!("Grantor user not found.") }; if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() { - let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &mut conn).await else { + let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &conn).await else { err!("Grantee user not found.") }; @@ -430,7 +436,7 @@ async fn confirm_emergency_access( emergency_access.key_encrypted = Some(key); emergency_access.email = None; - emergency_access.save(&mut conn).await?; + emergency_access.save(&conn).await?; if CONFIG.mail_enabled() { mail::send_emergency_access_invite_confirmed(&grantee_user.email, &grantor_user.name).await?; @@ -446,12 +452,12 @@ async fn confirm_emergency_access( // region access emergency access #[post("/emergency-access//initiate")] -async fn initiate_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn initiate_emergency_access(emer_id: EmergencyAccessId, headers: Headers, conn: DbConn) -> JsonResult { check_emergency_access_enabled()?; let initiating_user = headers.user; let Some(mut emergency_access) = - EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &initiating_user.uuid, &mut conn).await + EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &initiating_user.uuid, &conn).await else { err!("Emergency access not valid.") }; @@ -460,7 +466,7 @@ async fn initiate_emergency_access(emer_id: EmergencyAccessId, headers: Headers, err!("Emergency access not valid.") } - let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else { + 
let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await else { err!("Grantor user not found.") }; @@ -469,7 +475,7 @@ async fn initiate_emergency_access(emer_id: EmergencyAccessId, headers: Headers, emergency_access.updated_at = now; emergency_access.recovery_initiated_at = Some(now); emergency_access.last_notification_at = Some(now); - emergency_access.save(&mut conn).await?; + emergency_access.save(&conn).await?; if CONFIG.mail_enabled() { mail::send_emergency_access_recovery_initiated( @@ -484,11 +490,11 @@ async fn initiate_emergency_access(emer_id: EmergencyAccessId, headers: Headers, } #[post("/emergency-access//approve")] -async fn approve_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn approve_emergency_access(emer_id: EmergencyAccessId, headers: Headers, conn: DbConn) -> JsonResult { check_emergency_access_enabled()?; let Some(mut emergency_access) = - EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await + EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &conn).await else { err!("Emergency access not valid.") }; @@ -497,17 +503,17 @@ async fn approve_emergency_access(emer_id: EmergencyAccessId, headers: Headers, err!("Emergency access not valid.") } - let Some(grantor_user) = User::find_by_uuid(&headers.user.uuid, &mut conn).await else { + let Some(grantor_user) = User::find_by_uuid(&headers.user.uuid, &conn).await else { err!("Grantor user not found.") }; if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() { - let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &mut conn).await else { + let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &conn).await else { err!("Grantee user not found.") }; emergency_access.status = EmergencyAccessStatus::RecoveryApproved as i32; - emergency_access.save(&mut conn).await?; + emergency_access.save(&conn).await?; if CONFIG.mail_enabled() { 
mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name).await?; @@ -519,11 +525,11 @@ async fn approve_emergency_access(emer_id: EmergencyAccessId, headers: Headers, } #[post("/emergency-access//reject")] -async fn reject_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn reject_emergency_access(emer_id: EmergencyAccessId, headers: Headers, conn: DbConn) -> JsonResult { check_emergency_access_enabled()?; let Some(mut emergency_access) = - EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &mut conn).await + EmergencyAccess::find_by_uuid_and_grantor_uuid(&emer_id, &headers.user.uuid, &conn).await else { err!("Emergency access not valid.") }; @@ -535,12 +541,12 @@ async fn reject_emergency_access(emer_id: EmergencyAccessId, headers: Headers, m } if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() { - let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &mut conn).await else { + let Some(grantee_user) = User::find_by_uuid(grantee_uuid, &conn).await else { err!("Grantee user not found.") }; emergency_access.status = EmergencyAccessStatus::Confirmed as i32; - emergency_access.save(&mut conn).await?; + emergency_access.save(&conn).await?; if CONFIG.mail_enabled() { mail::send_emergency_access_recovery_rejected(&grantee_user.email, &headers.user.name).await?; @@ -556,11 +562,11 @@ async fn reject_emergency_access(emer_id: EmergencyAccessId, headers: Headers, m // region action #[post("/emergency-access//view")] -async fn view_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn view_emergency_access(emer_id: EmergencyAccessId, headers: Headers, conn: DbConn) -> JsonResult { check_emergency_access_enabled()?; let Some(emergency_access) = - EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &headers.user.uuid, &mut conn).await + EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, 
&headers.user.uuid, &conn).await else { err!("Emergency access not valid.") }; @@ -569,8 +575,8 @@ async fn view_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut err!("Emergency access not valid.") } - let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &mut conn).await; - let cipher_sync_data = CipherSyncData::new(&emergency_access.grantor_uuid, CipherSyncType::User, &mut conn).await; + let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &conn).await; + let cipher_sync_data = CipherSyncData::new(&emergency_access.grantor_uuid, CipherSyncType::User, &conn).await; let mut ciphers_json = Vec::with_capacity(ciphers.len()); for c in ciphers { @@ -580,7 +586,7 @@ async fn view_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut &emergency_access.grantor_uuid, Some(&cipher_sync_data), CipherSyncType::User, - &mut conn, + &conn, ) .await?, ); @@ -594,12 +600,12 @@ async fn view_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut } #[post("/emergency-access//takeover")] -async fn takeover_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn takeover_emergency_access(emer_id: EmergencyAccessId, headers: Headers, conn: DbConn) -> JsonResult { check_emergency_access_enabled()?; let requesting_user = headers.user; let Some(emergency_access) = - EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &mut conn).await + EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &conn).await else { err!("Emergency access not valid.") }; @@ -608,7 +614,7 @@ async fn takeover_emergency_access(emer_id: EmergencyAccessId, headers: Headers, err!("Emergency access not valid.") } - let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else { + let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await else { err!("Grantor user not 
found.") }; @@ -636,7 +642,7 @@ async fn password_emergency_access( emer_id: EmergencyAccessId, data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { check_emergency_access_enabled()?; @@ -646,7 +652,7 @@ async fn password_emergency_access( let requesting_user = headers.user; let Some(emergency_access) = - EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &mut conn).await + EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &conn).await else { err!("Emergency access not valid.") }; @@ -655,21 +661,21 @@ async fn password_emergency_access( err!("Emergency access not valid.") } - let Some(mut grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else { + let Some(mut grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await else { err!("Grantor user not found.") }; // change grantor_user password grantor_user.set_password(new_master_password_hash, Some(data.key), true, None); - grantor_user.save(&mut conn).await?; + grantor_user.save(&conn).await?; // Disable TwoFactor providers since they will otherwise block logins - TwoFactor::delete_all_by_user(&grantor_user.uuid, &mut conn).await?; + TwoFactor::delete_all_by_user(&grantor_user.uuid, &conn).await?; // Remove grantor from all organisations unless Owner - for member in Membership::find_any_state_by_user(&grantor_user.uuid, &mut conn).await { + for member in Membership::find_any_state_by_user(&grantor_user.uuid, &conn).await { if member.atype != MembershipType::Owner as i32 { - member.delete(&mut conn).await?; + member.delete(&conn).await?; } } Ok(()) @@ -678,10 +684,10 @@ async fn password_emergency_access( // endregion #[get("/emergency-access//policies")] -async fn policies_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn policies_emergency_access(emer_id: EmergencyAccessId, headers: Headers, conn: DbConn) -> 
JsonResult { let requesting_user = headers.user; let Some(emergency_access) = - EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &mut conn).await + EmergencyAccess::find_by_uuid_and_grantee_uuid(&emer_id, &requesting_user.uuid, &conn).await else { err!("Emergency access not valid.") }; @@ -690,11 +696,11 @@ async fn policies_emergency_access(emer_id: EmergencyAccessId, headers: Headers, err!("Emergency access not valid.") } - let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await else { + let Some(grantor_user) = User::find_by_uuid(&emergency_access.grantor_uuid, &conn).await else { err!("Grantor user not found.") }; - let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &mut conn); + let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &conn); let policies_json: Vec = policies.await.iter().map(OrgPolicy::to_json).collect(); Ok(Json(json!({ @@ -728,8 +734,8 @@ pub async fn emergency_request_timeout_job(pool: DbPool) { return; } - if let Ok(mut conn) = pool.get().await { - let emergency_access_list = EmergencyAccess::find_all_recoveries_initiated(&mut conn).await; + if let Ok(conn) = pool.get().await { + let emergency_access_list = EmergencyAccess::find_all_recoveries_initiated(&conn).await; if emergency_access_list.is_empty() { debug!("No emergency request timeout to approve"); @@ -743,18 +749,18 @@ pub async fn emergency_request_timeout_job(pool: DbPool) { if recovery_allowed_at.le(&now) { // Only update the access status // Updating the whole record could cause issues when the emergency_notification_reminder_job is also active - emer.update_access_status_and_save(EmergencyAccessStatus::RecoveryApproved as i32, &now, &mut conn) + emer.update_access_status_and_save(EmergencyAccessStatus::RecoveryApproved as i32, &now, &conn) .await .expect("Unable to update emergency access status"); if CONFIG.mail_enabled() { // get grantor user to send Accepted email let 
grantor_user = - User::find_by_uuid(&emer.grantor_uuid, &mut conn).await.expect("Grantor user not found"); + User::find_by_uuid(&emer.grantor_uuid, &conn).await.expect("Grantor user not found"); // get grantee user to send Accepted email let grantee_user = - User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid"), &mut conn) + User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid"), &conn) .await .expect("Grantee user not found"); @@ -783,8 +789,8 @@ pub async fn emergency_notification_reminder_job(pool: DbPool) { return; } - if let Ok(mut conn) = pool.get().await { - let emergency_access_list = EmergencyAccess::find_all_recoveries_initiated(&mut conn).await; + if let Ok(conn) = pool.get().await { + let emergency_access_list = EmergencyAccess::find_all_recoveries_initiated(&conn).await; if emergency_access_list.is_empty() { debug!("No emergency request reminder notification to send"); @@ -805,18 +811,18 @@ pub async fn emergency_notification_reminder_job(pool: DbPool) { if final_recovery_reminder_at.le(&now) && next_recovery_reminder_at.le(&now) { // Only update the last notification date // Updating the whole record could cause issues when the emergency_request_timeout_job is also active - emer.update_last_notification_date_and_save(&now, &mut conn) + emer.update_last_notification_date_and_save(&now, &conn) .await .expect("Unable to update emergency access notification date"); if CONFIG.mail_enabled() { // get grantor user to send Accepted email let grantor_user = - User::find_by_uuid(&emer.grantor_uuid, &mut conn).await.expect("Grantor user not found"); + User::find_by_uuid(&emer.grantor_uuid, &conn).await.expect("Grantor user not found"); // get grantee user to send Accepted email let grantee_user = - User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid"), &mut conn) + User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid"), &conn) .await .expect("Grantee user not found"); diff 
--git a/src/api/core/events.rs b/src/api/core/events.rs index 597c6ad6..2f33a407 100644 --- a/src/api/core/events.rs +++ b/src/api/core/events.rs @@ -31,12 +31,7 @@ struct EventRange { // Upstream: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/AdminConsole/Controllers/EventsController.cs#L87 #[get("/organizations//events?")] -async fn get_org_events( - org_id: OrganizationId, - data: EventRange, - headers: AdminHeaders, - mut conn: DbConn, -) -> JsonResult { +async fn get_org_events(org_id: OrganizationId, data: EventRange, headers: AdminHeaders, conn: DbConn) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } @@ -53,7 +48,7 @@ async fn get_org_events( parse_date(&data.end) }; - Event::find_by_organization_uuid(&org_id, &start_date, &end_date, &mut conn) + Event::find_by_organization_uuid(&org_id, &start_date, &end_date, &conn) .await .iter() .map(|e| e.to_json()) @@ -68,14 +63,14 @@ async fn get_org_events( } #[get("/ciphers//events?")] -async fn get_cipher_events(cipher_id: CipherId, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn get_cipher_events(cipher_id: CipherId, data: EventRange, headers: Headers, conn: DbConn) -> JsonResult { // Return an empty vec when we org events are disabled. 
// This prevents client errors let events_json: Vec = if !CONFIG.org_events_enabled() { Vec::with_capacity(0) } else { let mut events_json = Vec::with_capacity(0); - if Membership::user_has_ge_admin_access_to_cipher(&headers.user.uuid, &cipher_id, &mut conn).await { + if Membership::user_has_ge_admin_access_to_cipher(&headers.user.uuid, &cipher_id, &conn).await { let start_date = parse_date(&data.start); let end_date = if let Some(before_date) = &data.continuation_token { parse_date(before_date) @@ -83,7 +78,7 @@ async fn get_cipher_events(cipher_id: CipherId, data: EventRange, headers: Heade parse_date(&data.end) }; - events_json = Event::find_by_cipher_uuid(&cipher_id, &start_date, &end_date, &mut conn) + events_json = Event::find_by_cipher_uuid(&cipher_id, &start_date, &end_date, &conn) .await .iter() .map(|e| e.to_json()) @@ -105,7 +100,7 @@ async fn get_user_events( member_id: MembershipId, data: EventRange, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -122,7 +117,7 @@ async fn get_user_events( parse_date(&data.end) }; - Event::find_by_org_and_member(&org_id, &member_id, &start_date, &end_date, &mut conn) + Event::find_by_org_and_member(&org_id, &member_id, &start_date, &end_date, &conn) .await .iter() .map(|e| e.to_json()) @@ -172,7 +167,7 @@ struct EventCollection { // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Events/Controllers/CollectController.cs // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Services/Implementations/EventService.cs #[post("/collect", format = "application/json", data = "")] -async fn post_events_collect(data: Json>, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn post_events_collect(data: Json>, headers: Headers, conn: DbConn) -> EmptyResult { if !CONFIG.org_events_enabled() { return Ok(()); 
} @@ -187,7 +182,7 @@ async fn post_events_collect(data: Json>, headers: Headers, headers.device.atype, Some(event_date), &headers.ip.ip, - &mut conn, + &conn, ) .await; } @@ -201,14 +196,14 @@ async fn post_events_collect(data: Json>, headers: Headers, headers.device.atype, Some(event_date), &headers.ip.ip, - &mut conn, + &conn, ) .await; } } _ => { if let Some(cipher_uuid) = &event.cipher_id { - if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await { + if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &conn).await { if let Some(org_id) = cipher.organization_uuid { _log_event( event.r#type, @@ -218,7 +213,7 @@ async fn post_events_collect(data: Json>, headers: Headers, headers.device.atype, Some(event_date), &headers.ip.ip, - &mut conn, + &conn, ) .await; } @@ -230,7 +225,7 @@ async fn post_events_collect(data: Json>, headers: Headers, Ok(()) } -pub async fn log_user_event(event_type: i32, user_id: &UserId, device_type: i32, ip: &IpAddr, conn: &mut DbConn) { +pub async fn log_user_event(event_type: i32, user_id: &UserId, device_type: i32, ip: &IpAddr, conn: &DbConn) { if !CONFIG.org_events_enabled() { return; } @@ -243,7 +238,7 @@ async fn _log_user_event( device_type: i32, event_date: Option, ip: &IpAddr, - conn: &mut DbConn, + conn: &DbConn, ) { let memberships = Membership::find_by_user(user_id, conn).await; let mut events: Vec = Vec::with_capacity(memberships.len() + 1); // We need an event per org and one without an org @@ -278,7 +273,7 @@ pub async fn log_event( act_user_id: &UserId, device_type: i32, ip: &IpAddr, - conn: &mut DbConn, + conn: &DbConn, ) { if !CONFIG.org_events_enabled() { return; @@ -295,7 +290,7 @@ async fn _log_event( device_type: i32, event_date: Option, ip: &IpAddr, - conn: &mut DbConn, + conn: &DbConn, ) { // Create a new empty event let mut event = Event::new(event_type, event_date); @@ -340,8 +335,8 @@ pub async fn event_cleanup_job(pool: DbPool) { return; } - if let Ok(mut conn) = pool.get().await { - 
Event::clean_events(&mut conn).await.ok(); + if let Ok(conn) = pool.get().await { + Event::clean_events(&conn).await.ok(); } else { error!("Failed to get DB connection while trying to cleanup the events table") } diff --git a/src/api/core/folders.rs b/src/api/core/folders.rs index c0769dad..dc971a13 100644 --- a/src/api/core/folders.rs +++ b/src/api/core/folders.rs @@ -4,7 +4,10 @@ use serde_json::Value; use crate::{ api::{EmptyResult, JsonResult, Notify, UpdateType}, auth::Headers, - db::{models::*, DbConn}, + db::{ + models::{Folder, FolderId}, + DbConn, + }, }; pub fn routes() -> Vec { @@ -12,8 +15,8 @@ pub fn routes() -> Vec { } #[get("/folders")] -async fn get_folders(headers: Headers, mut conn: DbConn) -> Json { - let folders = Folder::find_by_user(&headers.user.uuid, &mut conn).await; +async fn get_folders(headers: Headers, conn: DbConn) -> Json { + let folders = Folder::find_by_user(&headers.user.uuid, &conn).await; let folders_json: Vec = folders.iter().map(Folder::to_json).collect(); Json(json!({ @@ -24,8 +27,8 @@ async fn get_folders(headers: Headers, mut conn: DbConn) -> Json { } #[get("/folders/")] -async fn get_folder(folder_id: FolderId, headers: Headers, mut conn: DbConn) -> JsonResult { - match Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &mut conn).await { +async fn get_folder(folder_id: FolderId, headers: Headers, conn: DbConn) -> JsonResult { + match Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &conn).await { Some(folder) => Ok(Json(folder.to_json())), _ => err!("Invalid folder", "Folder does not exist or belongs to another user"), } @@ -39,13 +42,13 @@ pub struct FolderData { } #[post("/folders", data = "")] -async fn post_folders(data: Json, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { +async fn post_folders(data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { let data: FolderData = data.into_inner(); let mut folder = Folder::new(headers.user.uuid, data.name); - 
folder.save(&mut conn).await?; - nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device, &mut conn).await; + folder.save(&conn).await?; + nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device, &conn).await; Ok(Json(folder.to_json())) } @@ -66,19 +69,19 @@ async fn put_folder( folder_id: FolderId, data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> JsonResult { let data: FolderData = data.into_inner(); - let Some(mut folder) = Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &mut conn).await else { + let Some(mut folder) = Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &conn).await else { err!("Invalid folder", "Folder does not exist or belongs to another user") }; folder.name = data.name; - folder.save(&mut conn).await?; - nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device, &mut conn).await; + folder.save(&conn).await?; + nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device, &conn).await; Ok(Json(folder.to_json())) } @@ -89,14 +92,14 @@ async fn delete_folder_post(folder_id: FolderId, headers: Headers, conn: DbConn, } #[delete("/folders/")] -async fn delete_folder(folder_id: FolderId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - let Some(folder) = Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &mut conn).await else { +async fn delete_folder(folder_id: FolderId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + let Some(folder) = Folder::find_by_uuid_and_user(&folder_id, &headers.user.uuid, &conn).await else { err!("Invalid folder", "Folder does not exist or belongs to another user") }; // Delete the actual folder entry - folder.delete(&mut conn).await?; + folder.delete(&conn).await?; - nt.send_folder_update(UpdateType::SyncFolderDelete, &folder, &headers.device, &mut conn).await; + nt.send_folder_update(UpdateType::SyncFolderDelete, &folder, 
&headers.device, &conn).await; Ok(()) } diff --git a/src/api/core/mod.rs b/src/api/core/mod.rs index 28737bdf..d5ca0cc9 100644 --- a/src/api/core/mod.rs +++ b/src/api/core/mod.rs @@ -52,7 +52,10 @@ use rocket::{serde::json::Json, serde::json::Value, Catcher, Route}; use crate::{ api::{EmptyResult, JsonResult, Notify, UpdateType}, auth::Headers, - db::{models::*, DbConn}, + db::{ + models::{Membership, MembershipStatus, MembershipType, OrgPolicy, OrgPolicyErr, Organization, User}, + DbConn, + }, error::Error, http_client::make_http_request, mail, @@ -106,12 +109,7 @@ struct EquivDomainData { } #[post("/settings/domains", data = "")] -async fn post_eq_domains( - data: Json, - headers: Headers, - mut conn: DbConn, - nt: Notify<'_>, -) -> JsonResult { +async fn post_eq_domains(data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { let data: EquivDomainData = data.into_inner(); let excluded_globals = data.excluded_global_equivalent_domains.unwrap_or_default(); @@ -123,9 +121,9 @@ async fn post_eq_domains( user.excluded_globals = to_string(&excluded_globals).unwrap_or_else(|_| "[]".to_string()); user.equivalent_domains = to_string(&equivalent_domains).unwrap_or_else(|_| "[]".to_string()); - user.save(&mut conn).await?; + user.save(&conn).await?; - nt.send_user_update(UpdateType::SyncSettings, &user, &headers.device.push_uuid, &mut conn).await; + nt.send_user_update(UpdateType::SyncSettings, &user, &headers.device.push_uuid, &conn).await; Ok(Json(json!({}))) } @@ -265,7 +263,7 @@ async fn accept_org_invite( user: &User, mut member: Membership, reset_password_key: Option, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { if member.status != MembershipStatus::Invited as i32 { err!("User already accepted the invitation"); diff --git a/src/api/core/organizations.rs b/src/api/core/organizations.rs index 22712003..5d57b41f 100644 --- a/src/api/core/organizations.rs +++ b/src/api/core/organizations.rs @@ -11,7 +11,14 @@ use crate::{ EmptyResult, 
JsonResult, Notify, PasswordOrOtpData, UpdateType, }, auth::{decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OrgMemberHeaders, OwnerHeaders}, - db::{models::*, DbConn}, + db::{ + models::{ + Cipher, CipherId, Collection, CollectionCipher, CollectionGroup, CollectionId, CollectionUser, EventType, + Group, GroupId, GroupUser, Invitation, Membership, MembershipId, MembershipStatus, MembershipType, + OrgPolicy, OrgPolicyErr, OrgPolicyType, Organization, OrganizationApiKey, OrganizationId, User, UserId, + }, + DbConn, + }, mail, util::{convert_json_key_lcase_first, get_uuid, NumberOrString}, CONFIG, @@ -176,11 +183,11 @@ struct BulkMembershipIds { } #[post("/organizations", data = "")] -async fn create_organization(headers: Headers, data: Json, mut conn: DbConn) -> JsonResult { +async fn create_organization(headers: Headers, data: Json, conn: DbConn) -> JsonResult { if !CONFIG.is_org_creation_allowed(&headers.user.email) { err!("User not allowed to create organizations") } - if OrgPolicy::is_applicable_to_user(&headers.user.uuid, OrgPolicyType::SingleOrg, None, &mut conn).await { + if OrgPolicy::is_applicable_to_user(&headers.user.uuid, OrgPolicyType::SingleOrg, None, &conn).await { err!( "You may not create an organization. You belong to an organization which has a policy that prohibits you from being a member of any other organization." 
) @@ -203,9 +210,9 @@ async fn create_organization(headers: Headers, data: Json, mut conn: Db member.atype = MembershipType::Owner as i32; member.status = MembershipStatus::Confirmed as i32; - org.save(&mut conn).await?; - member.save(&mut conn).await?; - collection.save(&mut conn).await?; + org.save(&conn).await?; + member.save(&conn).await?; + collection.save(&conn).await?; Ok(Json(org.to_json())) } @@ -215,18 +222,18 @@ async fn delete_organization( org_id: OrganizationId, data: Json, headers: OwnerHeaders, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } let data: PasswordOrOtpData = data.into_inner(); - data.validate(&headers.user, true, &mut conn).await?; + data.validate(&headers.user, true, &conn).await?; - match Organization::find_by_uuid(&org_id, &mut conn).await { + match Organization::find_by_uuid(&org_id, &conn).await { None => err!("Organization not found"), - Some(org) => org.delete(&mut conn).await, + Some(org) => org.delete(&conn).await, } } @@ -241,12 +248,12 @@ async fn post_delete_organization( } #[post("/organizations//leave")] -async fn leave_organization(org_id: OrganizationId, headers: Headers, mut conn: DbConn) -> EmptyResult { - match Membership::find_by_user_and_org(&headers.user.uuid, &org_id, &mut conn).await { +async fn leave_organization(org_id: OrganizationId, headers: Headers, conn: DbConn) -> EmptyResult { + match Membership::find_by_user_and_org(&headers.user.uuid, &org_id, &conn).await { None => err!("User not part of organization"), Some(member) => { if member.atype == MembershipType::Owner - && Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &mut conn).await <= 1 + && Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &conn).await <= 1 { err!("The last owner can't leave") } @@ -258,21 +265,21 @@ async fn leave_organization(org_id: OrganizationId, headers: Headers, mut conn: 
&headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; - member.delete(&mut conn).await + member.delete(&conn).await } } } #[get("/organizations/")] -async fn get_organization(org_id: OrganizationId, headers: OwnerHeaders, mut conn: DbConn) -> JsonResult { +async fn get_organization(org_id: OrganizationId, headers: OwnerHeaders, conn: DbConn) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } - match Organization::find_by_uuid(&org_id, &mut conn).await { + match Organization::find_by_uuid(&org_id, &conn).await { Some(organization) => Ok(Json(organization.to_json())), None => err!("Can't find organization details"), } @@ -293,7 +300,7 @@ async fn post_organization( org_id: OrganizationId, headers: OwnerHeaders, data: Json, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -301,14 +308,14 @@ async fn post_organization( let data: OrganizationUpdateData = data.into_inner(); - let Some(mut org) = Organization::find_by_uuid(&org_id, &mut conn).await else { + let Some(mut org) = Organization::find_by_uuid(&org_id, &conn).await else { err!("Organization not found") }; org.name = data.name; org.billing_email = data.billing_email.to_lowercase(); - org.save(&mut conn).await?; + org.save(&conn).await?; log_event( EventType::OrganizationUpdated as i32, @@ -317,7 +324,7 @@ async fn post_organization( &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; @@ -326,10 +333,10 @@ async fn post_organization( // GET /api/collections?writeOnly=false #[get("/collections")] -async fn get_user_collections(headers: Headers, mut conn: DbConn) -> Json { +async fn get_user_collections(headers: Headers, conn: DbConn) -> Json { Json(json!({ "data": - Collection::find_by_user_uuid(headers.user.uuid, &mut conn).await + 
Collection::find_by_user_uuid(headers.user.uuid, &conn).await .iter() .map(Collection::to_json) .collect::(), @@ -342,20 +349,20 @@ async fn get_user_collections(headers: Headers, mut conn: DbConn) -> Json // The `identifier` should be the value returned by `get_org_domain_sso_details` // The returned `Id` will then be passed to `get_master_password_policy` which will mainly ignore it #[get("/organizations//auto-enroll-status")] -async fn get_auto_enroll_status(identifier: &str, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn get_auto_enroll_status(identifier: &str, headers: Headers, conn: DbConn) -> JsonResult { let org = if identifier == crate::sso::FAKE_IDENTIFIER { - match Membership::find_main_user_org(&headers.user.uuid, &mut conn).await { - Some(member) => Organization::find_by_uuid(&member.org_uuid, &mut conn).await, + match Membership::find_main_user_org(&headers.user.uuid, &conn).await { + Some(member) => Organization::find_by_uuid(&member.org_uuid, &conn).await, None => None, } } else { - Organization::find_by_name(identifier, &mut conn).await + Organization::find_by_name(identifier, &conn).await }; let (id, identifier, rp_auto_enroll) = match org { None => (get_uuid(), identifier.to_string(), false), Some(org) => { - (org.uuid.to_string(), org.name, OrgPolicy::org_is_reset_password_auto_enroll(&org.uuid, &mut conn).await) + (org.uuid.to_string(), org.name, OrgPolicy::org_is_reset_password_auto_enroll(&org.uuid, &conn).await) } }; @@ -367,47 +374,42 @@ async fn get_auto_enroll_status(identifier: &str, headers: Headers, mut conn: Db } #[get("/organizations//collections")] -async fn get_org_collections(org_id: OrganizationId, headers: ManagerHeadersLoose, mut conn: DbConn) -> JsonResult { +async fn get_org_collections(org_id: OrganizationId, headers: ManagerHeadersLoose, conn: DbConn) -> JsonResult { if org_id != headers.membership.org_uuid { err!("Organization not found", "Organization id's do not match"); } Ok(Json(json!({ - "data": 
_get_org_collections(&org_id, &mut conn).await, + "data": _get_org_collections(&org_id, &conn).await, "object": "list", "continuationToken": null, }))) } #[get("/organizations//collections/details")] -async fn get_org_collections_details( - org_id: OrganizationId, - headers: ManagerHeadersLoose, - mut conn: DbConn, -) -> JsonResult { +async fn get_org_collections_details(org_id: OrganizationId, headers: ManagerHeadersLoose, conn: DbConn) -> JsonResult { if org_id != headers.membership.org_uuid { err!("Organization not found", "Organization id's do not match"); } let mut data = Vec::new(); - let Some(member) = Membership::find_by_user_and_org(&headers.user.uuid, &org_id, &mut conn).await else { + let Some(member) = Membership::find_by_user_and_org(&headers.user.uuid, &org_id, &conn).await else { err!("User is not part of organization") }; // get all collection memberships for the current organization - let col_users = CollectionUser::find_by_organization_swap_user_uuid_with_member_uuid(&org_id, &mut conn).await; + let col_users = CollectionUser::find_by_organization_swap_user_uuid_with_member_uuid(&org_id, &conn).await; // Generate a HashMap to get the correct MembershipType per user to determine the manage permission // We use the uuid instead of the user_uuid here, since that is what is used in CollectionUser let membership_type: HashMap = - Membership::find_confirmed_by_org(&org_id, &mut conn).await.into_iter().map(|m| (m.uuid, m.atype)).collect(); + Membership::find_confirmed_by_org(&org_id, &conn).await.into_iter().map(|m| (m.uuid, m.atype)).collect(); // check if current user has full access to the organization (either directly or via any group) let has_full_access_to_org = member.access_all - || (CONFIG.org_groups_enabled() - && GroupUser::has_full_access_by_member(&org_id, &member.uuid, &mut conn).await); + || (CONFIG.org_groups_enabled() && GroupUser::has_full_access_by_member(&org_id, &member.uuid, &conn).await); // Get all admins, owners and managers who 
can manage/access all // Those are currently not listed in the col_users but need to be listed too. - let manage_all_members: Vec = Membership::find_confirmed_and_manage_all_by_org(&org_id, &mut conn) + let manage_all_members: Vec = Membership::find_confirmed_and_manage_all_by_org(&org_id, &conn) .await .into_iter() .map(|member| { @@ -420,12 +422,12 @@ async fn get_org_collections_details( }) .collect(); - for col in Collection::find_by_organization(&org_id, &mut conn).await { + for col in Collection::find_by_organization(&org_id, &conn).await { // check whether the current user has access to the given collection let assigned = has_full_access_to_org - || CollectionUser::has_access_to_collection_by_user(&col.uuid, &member.user_uuid, &mut conn).await + || CollectionUser::has_access_to_collection_by_user(&col.uuid, &member.user_uuid, &conn).await || (CONFIG.org_groups_enabled() - && GroupUser::has_access_to_collection_by_member(&col.uuid, &member.uuid, &mut conn).await); + && GroupUser::has_access_to_collection_by_member(&col.uuid, &member.uuid, &conn).await); // get the users assigned directly to the given collection let mut users: Vec = col_users @@ -441,7 +443,7 @@ async fn get_org_collections_details( // get the group details for the given collection let groups: Vec = if CONFIG.org_groups_enabled() { - CollectionGroup::find_by_collection(&col.uuid, &mut conn) + CollectionGroup::find_by_collection(&col.uuid, &conn) .await .iter() .map(|collection_group| collection_group.to_json_details_for_group()) @@ -450,7 +452,7 @@ async fn get_org_collections_details( Vec::with_capacity(0) }; - let mut json_object = col.to_json_details(&headers.user.uuid, None, &mut conn).await; + let mut json_object = col.to_json_details(&headers.user.uuid, None, &conn).await; json_object["assigned"] = json!(assigned); json_object["users"] = json!(users); json_object["groups"] = json!(groups); @@ -466,7 +468,7 @@ async fn get_org_collections_details( }))) } -async fn 
_get_org_collections(org_id: &OrganizationId, conn: &mut DbConn) -> Value { +async fn _get_org_collections(org_id: &OrganizationId, conn: &DbConn) -> Value { Collection::find_by_organization(org_id, conn).await.iter().map(Collection::to_json).collect::() } @@ -475,19 +477,19 @@ async fn post_organization_collections( org_id: OrganizationId, headers: ManagerHeadersLoose, data: Json, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.membership.org_uuid { err!("Organization not found", "Organization id's do not match"); } let data: FullCollectionData = data.into_inner(); - let Some(org) = Organization::find_by_uuid(&org_id, &mut conn).await else { + let Some(org) = Organization::find_by_uuid(&org_id, &conn).await else { err!("Can't find organization details") }; let collection = Collection::new(org.uuid, data.name, data.external_id); - collection.save(&mut conn).await?; + collection.save(&conn).await?; log_event( EventType::CollectionCreated as i32, @@ -496,18 +498,18 @@ async fn post_organization_collections( &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; for group in data.groups { CollectionGroup::new(collection.uuid.clone(), group.id, group.read_only, group.hide_passwords, group.manage) - .save(&mut conn) + .save(&conn) .await?; } for user in data.users { - let Some(member) = Membership::find_by_uuid_and_org(&user.id, &org_id, &mut conn).await else { + let Some(member) = Membership::find_by_uuid_and_org(&user.id, &org_id, &conn).await else { err!("User is not part of organization") }; @@ -521,16 +523,16 @@ async fn post_organization_collections( user.read_only, user.hide_passwords, user.manage, - &mut conn, + &conn, ) .await?; } if headers.membership.atype == MembershipType::Manager && !headers.membership.access_all { - CollectionUser::save(&headers.membership.user_uuid, &collection.uuid, false, false, false, &mut conn).await?; + CollectionUser::save(&headers.membership.user_uuid, &collection.uuid, 
false, false, false, &conn).await?; } - Ok(Json(collection.to_json_details(&headers.membership.user_uuid, None, &mut conn).await)) + Ok(Json(collection.to_json_details(&headers.membership.user_uuid, None, &conn).await)) } #[derive(Deserialize)] @@ -546,24 +548,24 @@ async fn post_bulk_access_collections( org_id: OrganizationId, headers: ManagerHeadersLoose, data: Json, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { if org_id != headers.membership.org_uuid { err!("Organization not found", "Organization id's do not match"); } let data: BulkCollectionAccessData = data.into_inner(); - if Organization::find_by_uuid(&org_id, &mut conn).await.is_none() { + if Organization::find_by_uuid(&org_id, &conn).await.is_none() { err!("Can't find organization details") }; for col_id in data.collection_ids { - let Some(collection) = Collection::find_by_uuid_and_org(&col_id, &org_id, &mut conn).await else { + let Some(collection) = Collection::find_by_uuid_and_org(&col_id, &org_id, &conn).await else { err!("Collection not found") }; // update collection modification date - collection.save(&mut conn).await?; + collection.save(&conn).await?; log_event( EventType::CollectionUpdated as i32, @@ -572,20 +574,20 @@ async fn post_bulk_access_collections( &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; - CollectionGroup::delete_all_by_collection(&col_id, &mut conn).await?; + CollectionGroup::delete_all_by_collection(&col_id, &conn).await?; for group in &data.groups { CollectionGroup::new(col_id.clone(), group.id.clone(), group.read_only, group.hide_passwords, group.manage) - .save(&mut conn) + .save(&conn) .await?; } - CollectionUser::delete_all_by_collection(&col_id, &mut conn).await?; + CollectionUser::delete_all_by_collection(&col_id, &conn).await?; for user in &data.users { - let Some(member) = Membership::find_by_uuid_and_org(&user.id, &org_id, &mut conn).await else { + let Some(member) = Membership::find_by_uuid_and_org(&user.id, &org_id, 
&conn).await else { err!("User is not part of organization") }; @@ -593,15 +595,8 @@ async fn post_bulk_access_collections( continue; } - CollectionUser::save( - &member.user_uuid, - &col_id, - user.read_only, - user.hide_passwords, - user.manage, - &mut conn, - ) - .await?; + CollectionUser::save(&member.user_uuid, &col_id, user.read_only, user.hide_passwords, user.manage, &conn) + .await?; } } @@ -625,18 +620,18 @@ async fn post_organization_collection_update( col_id: CollectionId, headers: ManagerHeaders, data: Json, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } let data: FullCollectionData = data.into_inner(); - if Organization::find_by_uuid(&org_id, &mut conn).await.is_none() { + if Organization::find_by_uuid(&org_id, &conn).await.is_none() { err!("Can't find organization details") }; - let Some(mut collection) = Collection::find_by_uuid_and_org(&col_id, &org_id, &mut conn).await else { + let Some(mut collection) = Collection::find_by_uuid_and_org(&col_id, &org_id, &conn).await else { err!("Collection not found") }; @@ -646,7 +641,7 @@ async fn post_organization_collection_update( _ => None, }; - collection.save(&mut conn).await?; + collection.save(&conn).await?; log_event( EventType::CollectionUpdated as i32, @@ -655,22 +650,22 @@ async fn post_organization_collection_update( &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; - CollectionGroup::delete_all_by_collection(&col_id, &mut conn).await?; + CollectionGroup::delete_all_by_collection(&col_id, &conn).await?; for group in data.groups { CollectionGroup::new(col_id.clone(), group.id, group.read_only, group.hide_passwords, group.manage) - .save(&mut conn) + .save(&conn) .await?; } - CollectionUser::delete_all_by_collection(&col_id, &mut conn).await?; + CollectionUser::delete_all_by_collection(&col_id, &conn).await?; for user in data.users { - let Some(member) = 
Membership::find_by_uuid_and_org(&user.id, &org_id, &mut conn).await else { + let Some(member) = Membership::find_by_uuid_and_org(&user.id, &org_id, &conn).await else { err!("User is not part of organization") }; @@ -678,11 +673,11 @@ async fn post_organization_collection_update( continue; } - CollectionUser::save(&member.user_uuid, &col_id, user.read_only, user.hide_passwords, user.manage, &mut conn) + CollectionUser::save(&member.user_uuid, &col_id, user.read_only, user.hide_passwords, user.manage, &conn) .await?; } - Ok(Json(collection.to_json_details(&headers.user.uuid, None, &mut conn).await)) + Ok(Json(collection.to_json_details(&headers.user.uuid, None, &conn).await)) } #[delete("/organizations//collections//user/")] @@ -691,21 +686,21 @@ async fn delete_organization_collection_member( col_id: CollectionId, member_id: MembershipId, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } - let Some(collection) = Collection::find_by_uuid_and_org(&col_id, &org_id, &mut conn).await else { + let Some(collection) = Collection::find_by_uuid_and_org(&col_id, &org_id, &conn).await else { err!("Collection not found", "Collection does not exist or does not belong to this organization") }; - match Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await { + match Membership::find_by_uuid_and_org(&member_id, &org_id, &conn).await { None => err!("User not found in organization"), Some(member) => { - match CollectionUser::find_by_collection_and_user(&collection.uuid, &member.user_uuid, &mut conn).await { + match CollectionUser::find_by_collection_and_user(&collection.uuid, &member.user_uuid, &conn).await { None => err!("User not assigned to collection"), - Some(col_user) => col_user.delete(&mut conn).await, + Some(col_user) => col_user.delete(&conn).await, } } } @@ -726,7 +721,7 @@ async fn _delete_organization_collection( org_id: 
&OrganizationId, col_id: &CollectionId, headers: &ManagerHeaders, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { if org_id != &headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -752,9 +747,9 @@ async fn delete_organization_collection( org_id: OrganizationId, col_id: CollectionId, headers: ManagerHeaders, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { - _delete_organization_collection(&org_id, &col_id, &headers, &mut conn).await + _delete_organization_collection(&org_id, &col_id, &headers, &conn).await } #[post("/organizations//collections//delete")] @@ -762,9 +757,9 @@ async fn post_organization_collection_delete( org_id: OrganizationId, col_id: CollectionId, headers: ManagerHeaders, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { - _delete_organization_collection(&org_id, &col_id, &headers, &mut conn).await + _delete_organization_collection(&org_id, &col_id, &headers, &conn).await } #[derive(Deserialize, Debug)] @@ -778,7 +773,7 @@ async fn bulk_delete_organization_collections( org_id: OrganizationId, headers: ManagerHeadersLoose, data: Json, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { if org_id != headers.membership.org_uuid { err!("Organization not found", "Organization id's do not match"); @@ -787,10 +782,10 @@ async fn bulk_delete_organization_collections( let collections = data.ids; - let headers = ManagerHeaders::from_loose(headers, &collections, &mut conn).await?; + let headers = ManagerHeaders::from_loose(headers, &collections, &conn).await?; for col_id in collections { - _delete_organization_collection(&org_id, &col_id, &headers, &mut conn).await? + _delete_organization_collection(&org_id, &col_id, &headers, &conn).await? 
} Ok(()) } @@ -800,24 +795,24 @@ async fn get_org_collection_detail( org_id: OrganizationId, col_id: CollectionId, headers: ManagerHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } - match Collection::find_by_uuid_and_user(&col_id, headers.user.uuid.clone(), &mut conn).await { + match Collection::find_by_uuid_and_user(&col_id, headers.user.uuid.clone(), &conn).await { None => err!("Collection not found"), Some(collection) => { if collection.org_uuid != org_id { err!("Collection is not owned by organization") } - let Some(member) = Membership::find_by_user_and_org(&headers.user.uuid, &org_id, &mut conn).await else { + let Some(member) = Membership::find_by_user_and_org(&headers.user.uuid, &org_id, &conn).await else { err!("User is not part of organization") }; let groups: Vec = if CONFIG.org_groups_enabled() { - CollectionGroup::find_by_collection(&collection.uuid, &mut conn) + CollectionGroup::find_by_collection(&collection.uuid, &conn) .await .iter() .map(|collection_group| collection_group.to_json_details_for_group()) @@ -830,29 +825,28 @@ async fn get_org_collection_detail( // Generate a HashMap to get the correct MembershipType per user to determine the manage permission // We use the uuid instead of the user_uuid here, since that is what is used in CollectionUser - let membership_type: HashMap = Membership::find_confirmed_by_org(&org_id, &mut conn) + let membership_type: HashMap = Membership::find_confirmed_by_org(&org_id, &conn) .await .into_iter() .map(|m| (m.uuid, m.atype)) .collect(); - let users: Vec = CollectionUser::find_by_org_and_coll_swap_user_uuid_with_member_uuid( - &org_id, - &collection.uuid, - &mut conn, - ) - .await - .iter() - .map(|collection_member| { - collection_member.to_json_details_for_member( - *membership_type.get(&collection_member.membership_uuid).unwrap_or(&(MembershipType::User as i32)), - ) - }) - .collect(); + let 
users: Vec = + CollectionUser::find_by_org_and_coll_swap_user_uuid_with_member_uuid(&org_id, &collection.uuid, &conn) + .await + .iter() + .map(|collection_member| { + collection_member.to_json_details_for_member( + *membership_type + .get(&collection_member.membership_uuid) + .unwrap_or(&(MembershipType::User as i32)), + ) + }) + .collect(); - let assigned = Collection::can_access_collection(&member, &collection.uuid, &mut conn).await; + let assigned = Collection::can_access_collection(&member, &collection.uuid, &conn).await; - let mut json_object = collection.to_json_details(&headers.user.uuid, None, &mut conn).await; + let mut json_object = collection.to_json_details(&headers.user.uuid, None, &conn).await; json_object["assigned"] = json!(assigned); json_object["users"] = json!(users); json_object["groups"] = json!(groups); @@ -868,20 +862,20 @@ async fn get_collection_users( org_id: OrganizationId, col_id: CollectionId, headers: ManagerHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } // Get org and collection, check that collection is from org - let Some(collection) = Collection::find_by_uuid_and_org(&col_id, &org_id, &mut conn).await else { + let Some(collection) = Collection::find_by_uuid_and_org(&col_id, &org_id, &conn).await else { err!("Collection not found in Organization") }; let mut member_list = Vec::new(); - for col_user in CollectionUser::find_by_collection(&collection.uuid, &mut conn).await { + for col_user in CollectionUser::find_by_collection(&collection.uuid, &conn).await { member_list.push( - Membership::find_by_user_and_org(&col_user.user_uuid, &org_id, &mut conn) + Membership::find_by_user_and_org(&col_user.user_uuid, &org_id, &conn) .await .unwrap() .to_json_user_access_restrictions(&col_user), @@ -897,22 +891,22 @@ async fn put_collection_users( col_id: CollectionId, data: Json>, headers: ManagerHeaders, - mut conn: DbConn, + conn: 
DbConn, ) -> EmptyResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } // Get org and collection, check that collection is from org - if Collection::find_by_uuid_and_org(&col_id, &org_id, &mut conn).await.is_none() { + if Collection::find_by_uuid_and_org(&col_id, &org_id, &conn).await.is_none() { err!("Collection not found in Organization") } // Delete all the user-collections - CollectionUser::delete_all_by_collection(&col_id, &mut conn).await?; + CollectionUser::delete_all_by_collection(&col_id, &conn).await?; // And then add all the received ones (except if the user has access_all) for d in data.iter() { - let Some(user) = Membership::find_by_uuid_and_org(&d.id, &org_id, &mut conn).await else { + let Some(user) = Membership::find_by_uuid_and_org(&d.id, &org_id, &conn).await else { err!("User is not part of organization") }; @@ -920,7 +914,7 @@ async fn put_collection_users( continue; } - CollectionUser::save(&user.user_uuid, &col_id, d.read_only, d.hide_passwords, d.manage, &mut conn).await?; + CollectionUser::save(&user.user_uuid, &col_id, d.read_only, d.hide_passwords, d.manage, &conn).await?; } Ok(()) @@ -933,13 +927,13 @@ struct OrgIdData { } #[get("/ciphers/organization-details?")] -async fn get_org_details(data: OrgIdData, headers: OrgMemberHeaders, mut conn: DbConn) -> JsonResult { +async fn get_org_details(data: OrgIdData, headers: OrgMemberHeaders, conn: DbConn) -> JsonResult { if data.organization_id != headers.membership.org_uuid { err_code!("Resource not found.", "Organization id's do not match", rocket::http::Status::NotFound.code); } Ok(Json(json!({ - "data": _get_org_details(&data.organization_id, &headers.host, &headers.user.uuid, &mut conn).await?, + "data": _get_org_details(&data.organization_id, &headers.host, &headers.user.uuid, &conn).await?, "object": "list", "continuationToken": null, }))) @@ -949,7 +943,7 @@ async fn _get_org_details( org_id: &OrganizationId, host: &str, user_id: 
&UserId, - conn: &mut DbConn, + conn: &DbConn, ) -> Result { let ciphers = Cipher::find_by_org(org_id, conn).await; let cipher_sync_data = CipherSyncData::new(user_id, CipherSyncType::Organization, conn).await; @@ -971,10 +965,10 @@ struct OrgDomainDetails { // So we either return an Org name associated to the user or a dummy value. // In use since `v2025.6.0`, appears to use only the first `organizationIdentifier` #[post("/organizations/domain/sso/verified", data = "")] -async fn get_org_domain_sso_verified(data: Json, mut conn: DbConn) -> JsonResult { +async fn get_org_domain_sso_verified(data: Json, conn: DbConn) -> JsonResult { let data: OrgDomainDetails = data.into_inner(); - let identifiers = match Organization::find_org_user_email(&data.email, &mut conn) + let identifiers = match Organization::find_org_user_email(&data.email, &conn) .await .into_iter() .map(|o| o.name) @@ -1007,18 +1001,18 @@ async fn get_members( data: GetOrgUserData, org_id: OrganizationId, headers: ManagerHeadersLoose, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.membership.org_uuid { err!("Organization not found", "Organization id's do not match"); } let mut users_json = Vec::new(); - for u in Membership::find_by_org(&org_id, &mut conn).await { + for u in Membership::find_by_org(&org_id, &conn).await { users_json.push( u.to_json_user_details( data.include_collections.unwrap_or(false), data.include_groups.unwrap_or(false), - &mut conn, + &conn, ) .await, ); @@ -1036,14 +1030,14 @@ async fn post_org_keys( org_id: OrganizationId, data: Json, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } let data: OrgKeyData = data.into_inner(); - let mut org = match Organization::find_by_uuid(&org_id, &mut conn).await { + let mut org = match Organization::find_by_uuid(&org_id, &conn).await { Some(organization) => { if 
organization.private_key.is_some() && organization.public_key.is_some() { err!("Organization Keys already exist") @@ -1056,7 +1050,7 @@ async fn post_org_keys( org.private_key = Some(data.encrypted_private_key); org.public_key = Some(data.public_key); - org.save(&mut conn).await?; + org.save(&conn).await?; Ok(Json(json!({ "object": "organizationKeys", @@ -1081,7 +1075,7 @@ async fn send_invite( org_id: OrganizationId, data: Json, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -1113,7 +1107,7 @@ async fn send_invite( let mut user_created: bool = false; for email in data.emails.iter() { let mut member_status = MembershipStatus::Invited as i32; - let user = match User::find_by_mail(email, &mut conn).await { + let user = match User::find_by_mail(email, &conn).await { None => { if !CONFIG.invitations_allowed() { err!(format!("User does not exist: {email}")) @@ -1124,16 +1118,16 @@ async fn send_invite( } if !CONFIG.mail_enabled() { - Invitation::new(email).save(&mut conn).await?; + Invitation::new(email).save(&conn).await?; } let mut new_user = User::new(email.clone(), None); - new_user.save(&mut conn).await?; + new_user.save(&conn).await?; user_created = true; new_user } Some(user) => { - if Membership::find_by_user_and_org(&user.uuid, &org_id, &mut conn).await.is_some() { + if Membership::find_by_user_and_org(&user.uuid, &org_id, &conn).await.is_some() { err!(format!("User already in organization: {email}")) } else { // automatically accept existing users if mail is disabled @@ -1149,10 +1143,10 @@ async fn send_invite( new_member.access_all = access_all; new_member.atype = new_type; new_member.status = member_status; - new_member.save(&mut conn).await?; + new_member.save(&conn).await?; if CONFIG.mail_enabled() { - let org_name = match Organization::find_by_uuid(&org_id, &mut conn).await { + let org_name = match 
Organization::find_by_uuid(&org_id, &conn).await { Some(org) => org.name, None => err!("Error looking up organization"), }; @@ -1168,9 +1162,9 @@ async fn send_invite( { // Upon error delete the user, invite and org member records when needed if user_created { - user.delete(&mut conn).await?; + user.delete(&conn).await?; } else { - new_member.delete(&mut conn).await?; + new_member.delete(&conn).await?; } err!(format!("Error sending invite: {e:?} ")); @@ -1184,14 +1178,14 @@ async fn send_invite( &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; // If no accessAll, add the collections received if !access_all { for col in data.collections.iter().flatten() { - match Collection::find_by_uuid_and_org(&col.id, &org_id, &mut conn).await { + match Collection::find_by_uuid_and_org(&col.id, &org_id, &conn).await { None => err!("Collection not found in Organization"), Some(collection) => { CollectionUser::save( @@ -1200,7 +1194,7 @@ async fn send_invite( col.read_only, col.hide_passwords, col.manage, - &mut conn, + &conn, ) .await?; } @@ -1210,7 +1204,7 @@ async fn send_invite( for group_id in data.groups.iter() { let mut group_entry = GroupUser::new(group_id.clone(), new_member.uuid.clone()); - group_entry.save(&mut conn).await?; + group_entry.save(&conn).await?; } } @@ -1222,7 +1216,7 @@ async fn bulk_reinvite_members( org_id: OrganizationId, data: Json, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -1231,7 +1225,7 @@ async fn bulk_reinvite_members( let mut bulk_response = Vec::new(); for member_id in data.ids { - let err_msg = match _reinvite_member(&org_id, &member_id, &headers.user.email, &mut conn).await { + let err_msg = match _reinvite_member(&org_id, &member_id, &headers.user.email, &conn).await { Ok(_) => String::new(), Err(e) => format!("{e:?}"), }; @@ -1257,19 +1251,19 @@ async fn reinvite_member( 
org_id: OrganizationId, member_id: MembershipId, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } - _reinvite_member(&org_id, &member_id, &headers.user.email, &mut conn).await + _reinvite_member(&org_id, &member_id, &headers.user.email, &conn).await } async fn _reinvite_member( org_id: &OrganizationId, member_id: &MembershipId, invited_by_email: &str, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { let Some(member) = Membership::find_by_uuid_and_org(member_id, org_id, conn).await else { err!("The user hasn't been invited to the organization.") @@ -1320,7 +1314,7 @@ async fn accept_invite( member_id: MembershipId, data: Json, headers: Headers, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { // The web-vault passes org_id and member_id in the URL, but we are just reading them from the JWT instead let data: AcceptData = data.into_inner(); @@ -1342,15 +1336,15 @@ async fn accept_invite( } let member_id = &claims.member_id; - Invitation::take(&claims.email, &mut conn).await; + Invitation::take(&claims.email, &conn).await; // skip invitation logic when we were invited via the /admin panel if **member_id != FAKE_ADMIN_UUID { - let Some(mut member) = Membership::find_by_uuid_and_org(member_id, &claims.org_id, &mut conn).await else { + let Some(mut member) = Membership::find_by_uuid_and_org(member_id, &claims.org_id, &conn).await else { err!("Error accepting the invitation") }; - let reset_password_key = match OrgPolicy::org_is_reset_password_auto_enroll(&member.org_uuid, &mut conn).await { + let reset_password_key = match OrgPolicy::org_is_reset_password_auto_enroll(&member.org_uuid, &conn).await { true if data.reset_password_key.is_none() => err!("Reset password key is required, but not provided."), true => data.reset_password_key, false => None, @@ -1359,7 +1353,7 @@ async fn accept_invite( // In case the user was invited before 
the mail was saved in db. member.invited_by_email = member.invited_by_email.or(claims.invited_by_email); - accept_org_invite(&headers.user, member, reset_password_key, &mut conn).await?; + accept_org_invite(&headers.user, member, reset_password_key, &conn).await?; } else if CONFIG.mail_enabled() { // User was invited from /admin, so they are automatically confirmed let org_name = CONFIG.invitation_org_name(); @@ -1387,7 +1381,7 @@ async fn bulk_confirm_invite( org_id: OrganizationId, data: Json, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> JsonResult { if org_id != headers.org_id { @@ -1401,7 +1395,7 @@ async fn bulk_confirm_invite( for invite in keys { let member_id = invite.id.unwrap(); let user_key = invite.key.unwrap_or_default(); - let err_msg = match _confirm_invite(&org_id, &member_id, &user_key, &headers, &mut conn, &nt).await { + let err_msg = match _confirm_invite(&org_id, &member_id, &user_key, &headers, &conn, &nt).await { Ok(_) => String::new(), Err(e) => format!("{e:?}"), }; @@ -1431,12 +1425,12 @@ async fn confirm_invite( member_id: MembershipId, data: Json, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { let data = data.into_inner(); let user_key = data.key.unwrap_or_default(); - _confirm_invite(&org_id, &member_id, &user_key, &headers, &mut conn, &nt).await + _confirm_invite(&org_id, &member_id, &user_key, &headers, &conn, &nt).await } async fn _confirm_invite( @@ -1444,7 +1438,7 @@ async fn _confirm_invite( member_id: &MembershipId, key: &str, headers: &AdminHeaders, - conn: &mut DbConn, + conn: &DbConn, nt: &Notify<'_>, ) -> EmptyResult { if org_id != &headers.org_id { @@ -1520,17 +1514,13 @@ async fn _confirm_invite( } #[get("/organizations//users/mini-details", rank = 1)] -async fn get_org_user_mini_details( - org_id: OrganizationId, - headers: ManagerHeadersLoose, - mut conn: DbConn, -) -> JsonResult { +async fn get_org_user_mini_details(org_id: OrganizationId, 
headers: ManagerHeadersLoose, conn: DbConn) -> JsonResult { if org_id != headers.membership.org_uuid { err!("Organization not found", "Organization id's do not match"); } let mut members_json = Vec::new(); - for m in Membership::find_by_org(&org_id, &mut conn).await { - members_json.push(m.to_json_mini_details(&mut conn).await); + for m in Membership::find_by_org(&org_id, &conn).await { + members_json.push(m.to_json_mini_details(&conn).await); } Ok(Json(json!({ @@ -1546,21 +1536,19 @@ async fn get_user( member_id: MembershipId, data: GetOrgUserData, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } - let Some(user) = Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await else { + let Some(user) = Membership::find_by_uuid_and_org(&member_id, &org_id, &conn).await else { err!("The specified user isn't a member of the organization") }; // In this case, when groups are requested we also need to include collections. // Else these will not be shown in the interface, and could lead to missing collections when saved. 
let include_groups = data.include_groups.unwrap_or(false); - Ok(Json( - user.to_json_user_details(data.include_collections.unwrap_or(include_groups), include_groups, &mut conn).await, - )) + Ok(Json(user.to_json_user_details(data.include_collections.unwrap_or(include_groups), include_groups, &conn).await)) } #[derive(Deserialize)] @@ -1590,7 +1578,7 @@ async fn edit_member( member_id: MembershipId, data: Json, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -1614,7 +1602,7 @@ async fn edit_member( && data.permissions.get("deleteAnyCollection") == Some(&json!(true)) && data.permissions.get("createNewCollections") == Some(&json!(true))); - let mut member_to_edit = match Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await { + let mut member_to_edit = match Membership::find_by_uuid_and_org(&member_id, &org_id, &conn).await { Some(member) => member, None => err!("The specified user isn't member of the organization"), }; @@ -1635,7 +1623,7 @@ async fn edit_member( && member_to_edit.status == MembershipStatus::Confirmed as i32 { // Removing owner permission, check that there is at least one other confirmed owner - if Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &mut conn).await <= 1 { + if Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &conn).await <= 1 { err!("Can't delete the last owner") } } @@ -1643,11 +1631,11 @@ async fn edit_member( // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type // It returns different error messages per function. 
if new_type < MembershipType::Admin { - match OrgPolicy::is_user_allowed(&member_to_edit.user_uuid, &org_id, true, &mut conn).await { + match OrgPolicy::is_user_allowed(&member_to_edit.user_uuid, &org_id, true, &conn).await { Ok(_) => {} Err(OrgPolicyErr::TwoFactorMissing) => { if CONFIG.email_2fa_auto_fallback() { - two_factor::email::find_and_activate_email_2fa(&member_to_edit.user_uuid, &mut conn).await?; + two_factor::email::find_and_activate_email_2fa(&member_to_edit.user_uuid, &conn).await?; } else { err!("You cannot modify this user to this type because they have not setup 2FA"); } @@ -1662,14 +1650,14 @@ async fn edit_member( member_to_edit.atype = new_type as i32; // Delete all the odd collections - for c in CollectionUser::find_by_organization_and_user_uuid(&org_id, &member_to_edit.user_uuid, &mut conn).await { - c.delete(&mut conn).await?; + for c in CollectionUser::find_by_organization_and_user_uuid(&org_id, &member_to_edit.user_uuid, &conn).await { + c.delete(&conn).await?; } // If no accessAll, add the collections received if !access_all { for col in data.collections.iter().flatten() { - match Collection::find_by_uuid_and_org(&col.id, &org_id, &mut conn).await { + match Collection::find_by_uuid_and_org(&col.id, &org_id, &conn).await { None => err!("Collection not found in Organization"), Some(collection) => { CollectionUser::save( @@ -1678,7 +1666,7 @@ async fn edit_member( col.read_only, col.hide_passwords, col.manage, - &mut conn, + &conn, ) .await?; } @@ -1686,11 +1674,11 @@ async fn edit_member( } } - GroupUser::delete_all_by_member(&member_to_edit.uuid, &mut conn).await?; + GroupUser::delete_all_by_member(&member_to_edit.uuid, &conn).await?; for group_id in data.groups.iter().flatten() { let mut group_entry = GroupUser::new(group_id.clone(), member_to_edit.uuid.clone()); - group_entry.save(&mut conn).await?; + group_entry.save(&conn).await?; } log_event( @@ -1700,11 +1688,11 @@ async fn edit_member( &headers.user.uuid, headers.device.atype, 
&headers.ip.ip, - &mut conn, + &conn, ) .await; - member_to_edit.save(&mut conn).await + member_to_edit.save(&conn).await } #[delete("/organizations//users", data = "")] @@ -1712,7 +1700,7 @@ async fn bulk_delete_member( org_id: OrganizationId, data: Json, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> JsonResult { if org_id != headers.org_id { @@ -1722,7 +1710,7 @@ async fn bulk_delete_member( let mut bulk_response = Vec::new(); for member_id in data.ids { - let err_msg = match _delete_member(&org_id, &member_id, &headers, &mut conn, &nt).await { + let err_msg = match _delete_member(&org_id, &member_id, &headers, &conn, &nt).await { Ok(_) => String::new(), Err(e) => format!("{e:?}"), }; @@ -1748,10 +1736,10 @@ async fn delete_member( org_id: OrganizationId, member_id: MembershipId, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { - _delete_member(&org_id, &member_id, &headers, &mut conn, &nt).await + _delete_member(&org_id, &member_id, &headers, &conn, &nt).await } #[post("/organizations//users//delete")] @@ -1759,17 +1747,17 @@ async fn post_delete_member( org_id: OrganizationId, member_id: MembershipId, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { - _delete_member(&org_id, &member_id, &headers, &mut conn, &nt).await + _delete_member(&org_id, &member_id, &headers, &conn, &nt).await } async fn _delete_member( org_id: &OrganizationId, member_id: &MembershipId, headers: &AdminHeaders, - conn: &mut DbConn, + conn: &DbConn, nt: &Notify<'_>, ) -> EmptyResult { if org_id != &headers.org_id { @@ -1814,7 +1802,7 @@ async fn bulk_public_keys( org_id: OrganizationId, data: Json, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -1826,8 +1814,8 @@ async fn bulk_public_keys( // If the user does not exists, just ignore it, and 
do not return any information regarding that Membership UUID. // The web-vault will then ignore that user for the following steps. for member_id in data.ids { - match Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await { - Some(member) => match User::find_by_uuid(&member.user_uuid, &mut conn).await { + match Membership::find_by_uuid_and_org(&member_id, &org_id, &conn).await { + Some(member) => match User::find_by_uuid(&member.user_uuid, &conn).await { Some(user) => bulk_response.push(json!( { "object": "organizationUserPublicKeyResponseModel", @@ -1875,7 +1863,7 @@ async fn post_org_import( query: OrgIdData, data: Json, headers: OrgMemberHeaders, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { let org_id = query.organization_id; @@ -1891,14 +1879,14 @@ async fn post_org_import( Cipher::validate_cipher_data(&data.ciphers)?; let existing_collections: HashSet> = - Collection::find_by_organization(&org_id, &mut conn).await.into_iter().map(|c| Some(c.uuid)).collect(); + Collection::find_by_organization(&org_id, &conn).await.into_iter().map(|c| Some(c.uuid)).collect(); let mut collections: Vec = Vec::with_capacity(data.collections.len()); for col in data.collections { let collection_uuid = if existing_collections.contains(&col.id) { let col_id = col.id.unwrap(); // When not an Owner or Admin, check if the member is allowed to access the collection. 
if headers.membership.atype < MembershipType::Admin - && !Collection::can_access_collection(&headers.membership, &col_id, &mut conn).await + && !Collection::can_access_collection(&headers.membership, &col_id, &conn).await { err!(Compact, "The current user isn't allowed to manage this collection") } @@ -1910,7 +1898,7 @@ async fn post_org_import( err!(Compact, "The current user isn't allowed to create new collections") } let new_collection = Collection::new(org_id.clone(), col.name, col.external_id); - new_collection.save(&mut conn).await?; + new_collection.save(&conn).await?; new_collection.uuid }; @@ -1936,7 +1924,7 @@ async fn post_org_import( cipher_data, &headers, Some(collections.clone()), - &mut conn, + &conn, &nt, UpdateType::None, ) @@ -1949,11 +1937,11 @@ async fn post_org_import( for (cipher_index, col_index) in relations { let cipher_id = &ciphers[cipher_index]; let col_id = &collections[col_index]; - CollectionCipher::save(cipher_id, col_id, &mut conn).await?; + CollectionCipher::save(cipher_id, col_id, &conn).await?; } let mut user = headers.user; - user.update_revision(&mut conn).await + user.update_revision(&conn).await } #[derive(Deserialize)] @@ -1969,13 +1957,13 @@ struct BulkCollectionsData { // This endpoint is only reachable via the organization view, therefore this endpoint is located here // Also Bitwarden does not send out Notifications for these changes, it only does this for individual cipher collection updates #[post("/ciphers/bulk-collections", data = "")] -async fn post_bulk_collections(data: Json, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn post_bulk_collections(data: Json, headers: Headers, conn: DbConn) -> EmptyResult { let data: BulkCollectionsData = data.into_inner(); // Get all the collection available to the user in one query // Also filter based upon the provided collections let user_collections: HashMap = - Collection::find_by_organization_and_user_uuid(&data.organization_id, &headers.user.uuid, &mut conn) + 
Collection::find_by_organization_and_user_uuid(&data.organization_id, &headers.user.uuid, &conn) .await .into_iter() .filter_map(|c| { @@ -1990,7 +1978,7 @@ async fn post_bulk_collections(data: Json, headers: Headers // Verify if all the collections requested exists and are writeable for the user, else abort for collection_uuid in &data.collection_ids { match user_collections.get(collection_uuid) { - Some(collection) if collection.is_writable_by_user(&headers.user.uuid, &mut conn).await => (), + Some(collection) if collection.is_writable_by_user(&headers.user.uuid, &conn).await => (), _ => err_code!("Resource not found", "User does not have access to a collection", 404), } } @@ -1998,17 +1986,17 @@ async fn post_bulk_collections(data: Json, headers: Headers for cipher_id in data.cipher_ids.iter() { // Only act on existing cipher uuid's // Do not abort the operation just ignore it, it could be a cipher was just deleted for example - if let Some(cipher) = Cipher::find_by_uuid_and_org(cipher_id, &data.organization_id, &mut conn).await { - if cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await { + if let Some(cipher) = Cipher::find_by_uuid_and_org(cipher_id, &data.organization_id, &conn).await { + if cipher.is_write_accessible_to_user(&headers.user.uuid, &conn).await { // When selecting a specific collection from the left filter list, and use the bulk option, you can remove an item from that collection // In these cases the client will call this endpoint twice, once for adding the new collections and a second for deleting. 
if data.remove_collections { for collection in &data.collection_ids { - CollectionCipher::delete(&cipher.uuid, collection, &mut conn).await?; + CollectionCipher::delete(&cipher.uuid, collection, &conn).await?; } } else { for collection in &data.collection_ids { - CollectionCipher::save(&cipher.uuid, collection, &mut conn).await?; + CollectionCipher::save(&cipher.uuid, collection, &conn).await?; } } } @@ -2019,11 +2007,11 @@ async fn post_bulk_collections(data: Json, headers: Headers } #[get("/organizations//policies")] -async fn list_policies(org_id: OrganizationId, headers: AdminHeaders, mut conn: DbConn) -> JsonResult { +async fn list_policies(org_id: OrganizationId, headers: AdminHeaders, conn: DbConn) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } - let policies = OrgPolicy::find_by_org(&org_id, &mut conn).await; + let policies = OrgPolicy::find_by_org(&org_id, &conn).await; let policies_json: Vec = policies.iter().map(OrgPolicy::to_json).collect(); Ok(Json(json!({ @@ -2034,7 +2022,7 @@ async fn list_policies(org_id: OrganizationId, headers: AdminHeaders, mut conn: } #[get("/organizations//policies/token?")] -async fn list_policies_token(org_id: OrganizationId, token: &str, mut conn: DbConn) -> JsonResult { +async fn list_policies_token(org_id: OrganizationId, token: &str, conn: DbConn) -> JsonResult { let invite = decode_invite(token)?; if invite.org_id != org_id { @@ -2047,7 +2035,7 @@ async fn list_policies_token(org_id: OrganizationId, token: &str, mut conn: DbCo } // TODO: We receive the invite token as ?token=<>, validate it contains the org id - let policies = OrgPolicy::find_by_org(&org_id, &mut conn).await; + let policies = OrgPolicy::find_by_org(&org_id, &conn).await; let policies_json: Vec = policies.iter().map(OrgPolicy::to_json).collect(); Ok(Json(json!({ @@ -2060,9 +2048,9 @@ async fn list_policies_token(org_id: OrganizationId, token: &str, mut conn: DbCo // Called during the SSO 
enrollment. // Return the org policy if it exists, otherwise use the default one. #[get("/organizations//policies/master-password", rank = 1)] -async fn get_master_password_policy(org_id: OrganizationId, _headers: Headers, mut conn: DbConn) -> JsonResult { +async fn get_master_password_policy(org_id: OrganizationId, _headers: Headers, conn: DbConn) -> JsonResult { let policy = - OrgPolicy::find_by_org_and_type(&org_id, OrgPolicyType::MasterPassword, &mut conn).await.unwrap_or_else(|| { + OrgPolicy::find_by_org_and_type(&org_id, OrgPolicyType::MasterPassword, &conn).await.unwrap_or_else(|| { let (enabled, data) = match CONFIG.sso_master_password_policy_value() { Some(policy) if CONFIG.sso_enabled() => (true, policy.to_string()), _ => (false, "null".to_string()), @@ -2075,7 +2063,7 @@ async fn get_master_password_policy(org_id: OrganizationId, _headers: Headers, m } #[get("/organizations//policies/", rank = 2)] -async fn get_policy(org_id: OrganizationId, pol_type: i32, headers: AdminHeaders, mut conn: DbConn) -> JsonResult { +async fn get_policy(org_id: OrganizationId, pol_type: i32, headers: AdminHeaders, conn: DbConn) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } @@ -2084,7 +2072,7 @@ async fn get_policy(org_id: OrganizationId, pol_type: i32, headers: AdminHeaders err!("Invalid or unsupported policy type") }; - let policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type_enum, &mut conn).await { + let policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type_enum, &conn).await { Some(p) => p, None => OrgPolicy::new(org_id.clone(), pol_type_enum, false, "null".to_string()), }; @@ -2106,7 +2094,7 @@ async fn put_policy( pol_type: i32, data: Json, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -2125,7 +2113,7 @@ async fn put_policy( if 
CONFIG.enforce_single_org_with_reset_pw_policy() { if pol_type_enum == OrgPolicyType::ResetPassword && data.enabled { let single_org_policy_enabled = - match OrgPolicy::find_by_org_and_type(&org_id, OrgPolicyType::SingleOrg, &mut conn).await { + match OrgPolicy::find_by_org_and_type(&org_id, OrgPolicyType::SingleOrg, &conn).await { Some(p) => p.enabled, None => false, }; @@ -2138,7 +2126,7 @@ async fn put_policy( // Also prevent the Single Org Policy to be disabled if the Reset Password policy is enabled if pol_type_enum == OrgPolicyType::SingleOrg && !data.enabled { let reset_pw_policy_enabled = - match OrgPolicy::find_by_org_and_type(&org_id, OrgPolicyType::ResetPassword, &mut conn).await { + match OrgPolicy::find_by_org_and_type(&org_id, OrgPolicyType::ResetPassword, &conn).await { Some(p) => p.enabled, None => false, }; @@ -2156,25 +2144,25 @@ async fn put_policy( &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await?; } // When enabling the SingleOrg policy, remove this org's members that are members of other orgs if pol_type_enum == OrgPolicyType::SingleOrg && data.enabled { - for member in Membership::find_by_org(&org_id, &mut conn).await.into_iter() { + for member in Membership::find_by_org(&org_id, &conn).await.into_iter() { // Policy only applies to non-Owner/non-Admin members who have accepted joining the org // Exclude invited and revoked users when checking for this policy. // Those users will not be allowed to accept or be activated because of the policy checks done there. // We check if the count is larger then 1, because it includes this organization also. 
if member.atype < MembershipType::Admin && member.status != MembershipStatus::Invited as i32 - && Membership::count_accepted_and_confirmed_by_user(&member.user_uuid, &mut conn).await > 1 + && Membership::count_accepted_and_confirmed_by_user(&member.user_uuid, &conn).await > 1 { if CONFIG.mail_enabled() { - let org = Organization::find_by_uuid(&member.org_uuid, &mut conn).await.unwrap(); - let user = User::find_by_uuid(&member.user_uuid, &mut conn).await.unwrap(); + let org = Organization::find_by_uuid(&member.org_uuid, &conn).await.unwrap(); + let user = User::find_by_uuid(&member.user_uuid, &conn).await.unwrap(); mail::send_single_org_removed_from_org(&user.email, &org.name).await?; } @@ -2186,23 +2174,23 @@ async fn put_policy( &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; - member.delete(&mut conn).await?; + member.delete(&conn).await?; } } } - let mut policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type_enum, &mut conn).await { + let mut policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type_enum, &conn).await { Some(p) => p, None => OrgPolicy::new(org_id.clone(), pol_type_enum, false, "{}".to_string()), }; policy.enabled = data.enabled; policy.data = serde_json::to_string(&data.data)?; - policy.save(&mut conn).await?; + policy.save(&conn).await?; log_event( EventType::PolicyUpdated as i32, @@ -2211,7 +2199,7 @@ async fn put_policy( &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; @@ -2314,7 +2302,7 @@ struct OrgImportData { /// It is only used with older directory connectors /// TODO: Cleanup Tech debt #[post("/organizations//import", data = "")] -async fn import(org_id: OrganizationId, data: Json, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn import(org_id: OrganizationId, data: Json, headers: Headers, conn: DbConn) -> EmptyResult { let data = data.into_inner(); // TODO: Currently we aren't storing the externalId's anywhere, so we also 
don't have a way @@ -2323,7 +2311,7 @@ async fn import(org_id: OrganizationId, data: Json, headers: Head // as opposed to upstream which only removes auto-imported users. // User needs to be admin or owner to use the Directory Connector - match Membership::find_by_user_and_org(&headers.user.uuid, &org_id, &mut conn).await { + match Membership::find_by_user_and_org(&headers.user.uuid, &org_id, &conn).await { Some(member) if member.atype >= MembershipType::Admin => { /* Okay, nothing to do */ } Some(_) => err!("User has insufficient permissions to use Directory Connector"), None => err!("User not part of organization"), @@ -2332,7 +2320,7 @@ async fn import(org_id: OrganizationId, data: Json, headers: Head for user_data in &data.users { if user_data.deleted { // If user is marked for deletion and it exists, delete it - if let Some(member) = Membership::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await { + if let Some(member) = Membership::find_by_email_and_org(&user_data.email, &org_id, &conn).await { log_event( EventType::OrganizationUserRemoved as i32, &member.uuid, @@ -2340,16 +2328,16 @@ async fn import(org_id: OrganizationId, data: Json, headers: Head &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; - member.delete(&mut conn).await?; + member.delete(&conn).await?; } // If user is not part of the organization, but it exists - } else if Membership::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await.is_none() { - if let Some(user) = User::find_by_mail(&user_data.email, &mut conn).await { + } else if Membership::find_by_email_and_org(&user_data.email, &org_id, &conn).await.is_none() { + if let Some(user) = User::find_by_mail(&user_data.email, &conn).await { let member_status = if CONFIG.mail_enabled() { MembershipStatus::Invited as i32 } else { @@ -2363,7 +2351,7 @@ async fn import(org_id: OrganizationId, data: Json, headers: Head new_member.status = member_status; if CONFIG.mail_enabled() { - 
let org_name = match Organization::find_by_uuid(&org_id, &mut conn).await { + let org_name = match Organization::find_by_uuid(&org_id, &conn).await { Some(org) => org.name, None => err!("Error looking up organization"), }; @@ -2380,7 +2368,7 @@ async fn import(org_id: OrganizationId, data: Json, headers: Head // Save the member after sending an email // If sending fails the member will not be saved to the database, and will not result in the admin needing to reinvite the users manually - new_member.save(&mut conn).await?; + new_member.save(&conn).await?; log_event( EventType::OrganizationUserInvited as i32, @@ -2389,7 +2377,7 @@ async fn import(org_id: OrganizationId, data: Json, headers: Head &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; } @@ -2398,8 +2386,8 @@ async fn import(org_id: OrganizationId, data: Json, headers: Head // If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true) if data.overwrite_existing { - for member in Membership::find_by_org_and_type(&org_id, MembershipType::User, &mut conn).await { - if let Some(user_email) = User::find_by_uuid(&member.user_uuid, &mut conn).await.map(|u| u.email) { + for member in Membership::find_by_org_and_type(&org_id, MembershipType::User, &conn).await { + if let Some(user_email) = User::find_by_uuid(&member.user_uuid, &conn).await.map(|u| u.email) { if !data.users.iter().any(|u| u.email == user_email) { log_event( EventType::OrganizationUserRemoved as i32, @@ -2408,11 +2396,11 @@ async fn import(org_id: OrganizationId, data: Json, headers: Head &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; - member.delete(&mut conn).await?; + member.delete(&conn).await?; } } } @@ -2427,9 +2415,9 @@ async fn deactivate_member( org_id: OrganizationId, member_id: MembershipId, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { 
- _revoke_member(&org_id, &member_id, &headers, &mut conn).await + _revoke_member(&org_id, &member_id, &headers, &conn).await } #[derive(Deserialize, Debug)] @@ -2454,9 +2442,9 @@ async fn revoke_member( org_id: OrganizationId, member_id: MembershipId, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { - _revoke_member(&org_id, &member_id, &headers, &mut conn).await + _revoke_member(&org_id, &member_id, &headers, &conn).await } #[put("/organizations//users/revoke", data = "")] @@ -2464,7 +2452,7 @@ async fn bulk_revoke_members( org_id: OrganizationId, data: Json, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -2475,7 +2463,7 @@ async fn bulk_revoke_members( match data.ids { Some(members) => { for member_id in members { - let err_msg = match _revoke_member(&org_id, &member_id, &headers, &mut conn).await { + let err_msg = match _revoke_member(&org_id, &member_id, &headers, &conn).await { Ok(_) => String::new(), Err(e) => format!("{e:?}"), }; @@ -2503,7 +2491,7 @@ async fn _revoke_member( org_id: &OrganizationId, member_id: &MembershipId, headers: &AdminHeaders, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { if org_id != &headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -2548,9 +2536,9 @@ async fn activate_member( org_id: OrganizationId, member_id: MembershipId, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { - _restore_member(&org_id, &member_id, &headers, &mut conn).await + _restore_member(&org_id, &member_id, &headers, &conn).await } // Pre web-vault v2022.9.x endpoint @@ -2569,9 +2557,9 @@ async fn restore_member( org_id: OrganizationId, member_id: MembershipId, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { - _restore_member(&org_id, &member_id, &headers, &mut conn).await + _restore_member(&org_id, 
&member_id, &headers, &conn).await } #[put("/organizations//users/restore", data = "")] @@ -2579,7 +2567,7 @@ async fn bulk_restore_members( org_id: OrganizationId, data: Json, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -2588,7 +2576,7 @@ async fn bulk_restore_members( let mut bulk_response = Vec::new(); for member_id in data.ids { - let err_msg = match _restore_member(&org_id, &member_id, &headers, &mut conn).await { + let err_msg = match _restore_member(&org_id, &member_id, &headers, &conn).await { Ok(_) => String::new(), Err(e) => format!("{e:?}"), }; @@ -2613,7 +2601,7 @@ async fn _restore_member( org_id: &OrganizationId, member_id: &MembershipId, headers: &AdminHeaders, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { if org_id != &headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -2669,18 +2657,18 @@ async fn get_groups_data( details: bool, org_id: OrganizationId, headers: ManagerHeadersLoose, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.membership.org_uuid { err!("Organization not found", "Organization id's do not match"); } let groups: Vec = if CONFIG.org_groups_enabled() { - let groups = Group::find_by_organization(&org_id, &mut conn).await; + let groups = Group::find_by_organization(&org_id, &conn).await; let mut groups_json = Vec::with_capacity(groups.len()); if details { for g in groups { - groups_json.push(g.to_json_details(&mut conn).await) + groups_json.push(g.to_json_details(&conn).await) } } else { for g in groups { @@ -2768,7 +2756,7 @@ async fn post_groups( org_id: OrganizationId, headers: AdminHeaders, data: Json, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -2787,11 +2775,11 @@ async fn post_groups( &headers.user.uuid, 
headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; - add_update_group(group, group_request.collections, group_request.users, org_id, &headers, &mut conn).await + add_update_group(group, group_request.collections, group_request.users, org_id, &headers, &conn).await } #[put("/organizations//groups/", data = "")] @@ -2800,7 +2788,7 @@ async fn put_group( group_id: GroupId, data: Json, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -2809,15 +2797,15 @@ async fn put_group( err!("Group support is disabled"); } - let Some(group) = Group::find_by_uuid_and_org(&group_id, &org_id, &mut conn).await else { + let Some(group) = Group::find_by_uuid_and_org(&group_id, &org_id, &conn).await else { err!("Group not found", "Group uuid is invalid or does not belong to the organization") }; let group_request = data.into_inner(); let updated_group = group_request.update_group(group); - CollectionGroup::delete_all_by_group(&group_id, &mut conn).await?; - GroupUser::delete_all_by_group(&group_id, &mut conn).await?; + CollectionGroup::delete_all_by_group(&group_id, &conn).await?; + GroupUser::delete_all_by_group(&group_id, &conn).await?; log_event( EventType::GroupUpdated as i32, @@ -2826,11 +2814,11 @@ async fn put_group( &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; - add_update_group(updated_group, group_request.collections, group_request.users, org_id, &headers, &mut conn).await + add_update_group(updated_group, group_request.collections, group_request.users, org_id, &headers, &conn).await } async fn add_update_group( @@ -2839,7 +2827,7 @@ async fn add_update_group( members: Vec, org_id: OrganizationId, headers: &AdminHeaders, - conn: &mut DbConn, + conn: &DbConn, ) -> JsonResult { group.save(conn).await?; @@ -2879,7 +2867,7 @@ async fn get_group_details( org_id: OrganizationId, group_id: GroupId, 
headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -2888,11 +2876,11 @@ async fn get_group_details( err!("Group support is disabled"); } - let Some(group) = Group::find_by_uuid_and_org(&group_id, &org_id, &mut conn).await else { + let Some(group) = Group::find_by_uuid_and_org(&group_id, &org_id, &conn).await else { err!("Group not found", "Group uuid is invalid or does not belong to the organization") }; - Ok(Json(group.to_json_details(&mut conn).await)) + Ok(Json(group.to_json_details(&conn).await)) } #[post("/organizations//groups//delete")] @@ -2900,26 +2888,21 @@ async fn post_delete_group( org_id: OrganizationId, group_id: GroupId, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { - _delete_group(&org_id, &group_id, &headers, &mut conn).await + _delete_group(&org_id, &group_id, &headers, &conn).await } #[delete("/organizations//groups/")] -async fn delete_group( - org_id: OrganizationId, - group_id: GroupId, - headers: AdminHeaders, - mut conn: DbConn, -) -> EmptyResult { - _delete_group(&org_id, &group_id, &headers, &mut conn).await +async fn delete_group(org_id: OrganizationId, group_id: GroupId, headers: AdminHeaders, conn: DbConn) -> EmptyResult { + _delete_group(&org_id, &group_id, &headers, &conn).await } async fn _delete_group( org_id: &OrganizationId, group_id: &GroupId, headers: &AdminHeaders, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { if org_id != &headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -2951,7 +2934,7 @@ async fn bulk_delete_groups( org_id: OrganizationId, data: Json, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -2963,13 +2946,13 @@ async fn bulk_delete_groups( let data: BulkGroupIds = data.into_inner(); for 
group_id in data.ids { - _delete_group(&org_id, &group_id, &headers, &mut conn).await? + _delete_group(&org_id, &group_id, &headers, &conn).await? } Ok(()) } #[get("/organizations//groups/", rank = 2)] -async fn get_group(org_id: OrganizationId, group_id: GroupId, headers: AdminHeaders, mut conn: DbConn) -> JsonResult { +async fn get_group(org_id: OrganizationId, group_id: GroupId, headers: AdminHeaders, conn: DbConn) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } @@ -2977,7 +2960,7 @@ async fn get_group(org_id: OrganizationId, group_id: GroupId, headers: AdminHead err!("Group support is disabled"); } - let Some(group) = Group::find_by_uuid_and_org(&group_id, &org_id, &mut conn).await else { + let Some(group) = Group::find_by_uuid_and_org(&group_id, &org_id, &conn).await else { err!("Group not found", "Group uuid is invalid or does not belong to the organization") }; @@ -2989,7 +2972,7 @@ async fn get_group_members( org_id: OrganizationId, group_id: GroupId, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -2998,11 +2981,11 @@ async fn get_group_members( err!("Group support is disabled"); } - if Group::find_by_uuid_and_org(&group_id, &org_id, &mut conn).await.is_none() { + if Group::find_by_uuid_and_org(&group_id, &org_id, &conn).await.is_none() { err!("Group could not be found!", "Group uuid is invalid or does not belong to the organization") }; - let group_members: Vec = GroupUser::find_by_group(&group_id, &mut conn) + let group_members: Vec = GroupUser::find_by_group(&group_id, &conn) .await .iter() .map(|entry| entry.users_organizations_uuid.clone()) @@ -3017,7 +3000,7 @@ async fn put_group_members( group_id: GroupId, headers: AdminHeaders, data: Json>, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { if org_id != headers.org_id { err!("Organization not found", 
"Organization id's do not match"); @@ -3026,16 +3009,16 @@ async fn put_group_members( err!("Group support is disabled"); } - if Group::find_by_uuid_and_org(&group_id, &org_id, &mut conn).await.is_none() { + if Group::find_by_uuid_and_org(&group_id, &org_id, &conn).await.is_none() { err!("Group could not be found!", "Group uuid is invalid or does not belong to the organization") }; - GroupUser::delete_all_by_group(&group_id, &mut conn).await?; + GroupUser::delete_all_by_group(&group_id, &conn).await?; let assigned_members = data.into_inner(); for assigned_member in assigned_members { let mut user_entry = GroupUser::new(group_id.clone(), assigned_member.clone()); - user_entry.save(&mut conn).await?; + user_entry.save(&conn).await?; log_event( EventType::OrganizationUserUpdatedGroups as i32, @@ -3044,7 +3027,7 @@ async fn put_group_members( &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; } @@ -3057,7 +3040,7 @@ async fn get_user_groups( org_id: OrganizationId, member_id: MembershipId, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -3066,12 +3049,12 @@ async fn get_user_groups( err!("Group support is disabled"); } - if Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await.is_none() { + if Membership::find_by_uuid_and_org(&member_id, &org_id, &conn).await.is_none() { err!("User could not be found!") }; let user_groups: Vec = - GroupUser::find_by_member(&member_id, &mut conn).await.iter().map(|entry| entry.groups_uuid.clone()).collect(); + GroupUser::find_by_member(&member_id, &conn).await.iter().map(|entry| entry.groups_uuid.clone()).collect(); Ok(Json(json!(user_groups))) } @@ -3099,7 +3082,7 @@ async fn put_user_groups( member_id: MembershipId, data: Json, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { if org_id != headers.org_id { err!("Organization 
not found", "Organization id's do not match"); @@ -3108,16 +3091,16 @@ async fn put_user_groups( err!("Group support is disabled"); } - if Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await.is_none() { + if Membership::find_by_uuid_and_org(&member_id, &org_id, &conn).await.is_none() { err!("User could not be found or does not belong to the organization."); } - GroupUser::delete_all_by_member(&member_id, &mut conn).await?; + GroupUser::delete_all_by_member(&member_id, &conn).await?; let assigned_group_ids = data.into_inner(); for assigned_group_id in assigned_group_ids.group_ids { let mut group_user = GroupUser::new(assigned_group_id.clone(), member_id.clone()); - group_user.save(&mut conn).await?; + group_user.save(&conn).await?; } log_event( @@ -3127,7 +3110,7 @@ async fn put_user_groups( &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; @@ -3151,7 +3134,7 @@ async fn delete_group_member( group_id: GroupId, member_id: MembershipId, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -3160,11 +3143,11 @@ async fn delete_group_member( err!("Group support is disabled"); } - if Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await.is_none() { + if Membership::find_by_uuid_and_org(&member_id, &org_id, &conn).await.is_none() { err!("User could not be found or does not belong to the organization."); } - if Group::find_by_uuid_and_org(&group_id, &org_id, &mut conn).await.is_none() { + if Group::find_by_uuid_and_org(&group_id, &org_id, &conn).await.is_none() { err!("Group could not be found or does not belong to the organization."); } @@ -3175,11 +3158,11 @@ async fn delete_group_member( &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; - GroupUser::delete_by_group_and_member(&group_id, &member_id, &mut conn).await + 
GroupUser::delete_by_group_and_member(&group_id, &member_id, &conn).await } #[derive(Deserialize)] @@ -3201,15 +3184,11 @@ struct OrganizationUserResetPasswordRequest { // But the clients do not seem to use this at all // Just add it here in case they will #[get("/organizations//public-key")] -async fn get_organization_public_key( - org_id: OrganizationId, - headers: OrgMemberHeaders, - mut conn: DbConn, -) -> JsonResult { +async fn get_organization_public_key(org_id: OrganizationId, headers: OrgMemberHeaders, conn: DbConn) -> JsonResult { if org_id != headers.membership.org_uuid { err!("Organization not found", "Organization id's do not match"); } - let Some(org) = Organization::find_by_uuid(&org_id, &mut conn).await else { + let Some(org) = Organization::find_by_uuid(&org_id, &conn).await else { err!("Organization not found") }; @@ -3232,25 +3211,25 @@ async fn put_reset_password( member_id: MembershipId, headers: AdminHeaders, data: Json, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } - let Some(org) = Organization::find_by_uuid(&org_id, &mut conn).await else { + let Some(org) = Organization::find_by_uuid(&org_id, &conn).await else { err!("Required organization not found") }; - let Some(member) = Membership::find_by_uuid_and_org(&member_id, &org.uuid, &mut conn).await else { + let Some(member) = Membership::find_by_uuid_and_org(&member_id, &org.uuid, &conn).await else { err!("User to reset isn't member of required organization") }; - let Some(user) = User::find_by_uuid(&member.user_uuid, &mut conn).await else { + let Some(user) = User::find_by_uuid(&member.user_uuid, &conn).await else { err!("User not found") }; - check_reset_password_applicable_and_permissions(&org_id, &member_id, &headers, &mut conn).await?; + check_reset_password_applicable_and_permissions(&org_id, &member_id, &headers, &conn).await?; if member.reset_password_key.is_none() 
{ err!("Password reset not or not correctly enrolled"); @@ -3269,9 +3248,9 @@ async fn put_reset_password( let mut user = user; user.set_password(reset_request.new_master_password_hash.as_str(), Some(reset_request.key), true, None); - user.save(&mut conn).await?; + user.save(&conn).await?; - nt.send_logout(&user, None, &mut conn).await; + nt.send_logout(&user, None, &conn).await; log_event( EventType::OrganizationUserAdminResetPassword as i32, @@ -3280,7 +3259,7 @@ async fn put_reset_password( &headers.user.uuid, headers.device.atype, &headers.ip.ip, - &mut conn, + &conn, ) .await; @@ -3292,24 +3271,24 @@ async fn get_reset_password_details( org_id: OrganizationId, member_id: MembershipId, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } - let Some(org) = Organization::find_by_uuid(&org_id, &mut conn).await else { + let Some(org) = Organization::find_by_uuid(&org_id, &conn).await else { err!("Required organization not found") }; - let Some(member) = Membership::find_by_uuid_and_org(&member_id, &org_id, &mut conn).await else { + let Some(member) = Membership::find_by_uuid_and_org(&member_id, &org_id, &conn).await else { err!("User to reset isn't member of required organization") }; - let Some(user) = User::find_by_uuid(&member.user_uuid, &mut conn).await else { + let Some(user) = User::find_by_uuid(&member.user_uuid, &conn).await else { err!("User not found") }; - check_reset_password_applicable_and_permissions(&org_id, &member_id, &headers, &mut conn).await?; + check_reset_password_applicable_and_permissions(&org_id, &member_id, &headers, &conn).await?; // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/AdminConsole/Models/Response/Organizations/OrganizationUserResponseModel.cs#L190 Ok(Json(json!({ @@ -3328,7 +3307,7 @@ async fn check_reset_password_applicable_and_permissions( org_id: 
&OrganizationId, member_id: &MembershipId, headers: &AdminHeaders, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { check_reset_password_applicable(org_id, conn).await?; @@ -3344,7 +3323,7 @@ async fn check_reset_password_applicable_and_permissions( } } -async fn check_reset_password_applicable(org_id: &OrganizationId, conn: &mut DbConn) -> EmptyResult { +async fn check_reset_password_applicable(org_id: &OrganizationId, conn: &DbConn) -> EmptyResult { if !CONFIG.mail_enabled() { err!("Password reset is not supported on an email-disabled instance."); } @@ -3366,13 +3345,13 @@ async fn put_reset_password_enrollment( member_id: MembershipId, headers: Headers, data: Json, - mut conn: DbConn, + conn: DbConn, ) -> EmptyResult { - let Some(mut member) = Membership::find_by_user_and_org(&headers.user.uuid, &org_id, &mut conn).await else { + let Some(mut member) = Membership::find_by_user_and_org(&headers.user.uuid, &org_id, &conn).await else { err!("User to enroll isn't member of required organization") }; - check_reset_password_applicable(&org_id, &mut conn).await?; + check_reset_password_applicable(&org_id, &conn).await?; let reset_request = data.into_inner(); @@ -3382,7 +3361,7 @@ async fn put_reset_password_enrollment( Some(key) => Some(key), }; - if reset_password_key.is_none() && OrgPolicy::org_is_reset_password_auto_enroll(&org_id, &mut conn).await { + if reset_password_key.is_none() && OrgPolicy::org_is_reset_password_auto_enroll(&org_id, &conn).await { err!("Reset password can't be withdrawn due to an enterprise policy"); } @@ -3391,12 +3370,12 @@ async fn put_reset_password_enrollment( master_password_hash: reset_request.master_password_hash, otp: reset_request.otp, } - .validate(&headers.user, true, &mut conn) + .validate(&headers.user, true, &conn) .await?; } member.reset_password_key = reset_password_key; - member.save(&mut conn).await?; + member.save(&conn).await?; let log_id = if member.reset_password_key.is_some() { 
EventType::OrganizationUserResetPasswordEnroll as i32 @@ -3404,7 +3383,7 @@ async fn put_reset_password_enrollment( EventType::OrganizationUserResetPasswordWithdraw as i32 }; - log_event(log_id, &member_id, &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await; + log_event(log_id, &member_id, &org_id, &headers.user.uuid, headers.device.atype, &headers.ip.ip, &conn).await; Ok(()) } @@ -3416,14 +3395,14 @@ async fn put_reset_password_enrollment( // Vaultwarden does not yet support exporting only managed collections! // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Tools/Controllers/OrganizationExportController.cs#L52 #[get("/organizations//export")] -async fn get_org_export(org_id: OrganizationId, headers: AdminHeaders, mut conn: DbConn) -> JsonResult { +async fn get_org_export(org_id: OrganizationId, headers: AdminHeaders, conn: DbConn) -> JsonResult { if org_id != headers.org_id { err!("Organization not found", "Organization id's do not match"); } Ok(Json(json!({ - "collections": convert_json_key_lcase_first(_get_org_collections(&org_id, &mut conn).await), - "ciphers": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await?), + "collections": convert_json_key_lcase_first(_get_org_collections(&org_id, &conn).await), + "ciphers": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &conn).await?), }))) } @@ -3432,7 +3411,7 @@ async fn _api_key( data: Json, rotate: bool, headers: AdminHeaders, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { if org_id != &headers.org_id { err!("Organization not found", "Organization id's do not match"); @@ -3441,7 +3420,7 @@ async fn _api_key( let user = headers.user; // Validate the admin users password/otp - data.validate(&user, true, &mut conn).await?; + data.validate(&user, true, &conn).await?; let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_id, 
&conn).await { Some(mut org_api_key) => { diff --git a/src/api/core/public.rs b/src/api/core/public.rs index 46b59290..f4717ee6 100644 --- a/src/api/core/public.rs +++ b/src/api/core/public.rs @@ -10,7 +10,13 @@ use std::collections::HashSet; use crate::{ api::EmptyResult, auth, - db::{models::*, DbConn}, + db::{ + models::{ + Group, GroupUser, Invitation, Membership, MembershipStatus, MembershipType, Organization, + OrganizationApiKey, OrganizationId, User, + }, + DbConn, + }, mail, CONFIG, }; @@ -44,7 +50,7 @@ struct OrgImportData { } #[post("/public/organization/import", data = "")] -async fn ldap_import(data: Json, token: PublicToken, mut conn: DbConn) -> EmptyResult { +async fn ldap_import(data: Json, token: PublicToken, conn: DbConn) -> EmptyResult { // Most of the logic for this function can be found here // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Services/Implementations/OrganizationService.cs#L1203 @@ -55,13 +61,12 @@ async fn ldap_import(data: Json, token: PublicToken, mut conn: Db let mut user_created: bool = false; if user_data.deleted { // If user is marked for deletion and it exists, revoke it - if let Some(mut member) = Membership::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await { + if let Some(mut member) = Membership::find_by_email_and_org(&user_data.email, &org_id, &conn).await { // Only revoke a user if it is not the last confirmed owner let revoked = if member.atype == MembershipType::Owner && member.status == MembershipStatus::Confirmed as i32 { - if Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &mut conn).await <= 1 - { + if Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &conn).await <= 1 { warn!("Can't revoke the last owner"); false } else { @@ -73,27 +78,27 @@ async fn ldap_import(data: Json, token: PublicToken, mut conn: Db let ext_modified = member.set_external_id(Some(user_data.external_id.clone())); 
if revoked || ext_modified { - member.save(&mut conn).await?; + member.save(&conn).await?; } } // If user is part of the organization, restore it - } else if let Some(mut member) = Membership::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await { + } else if let Some(mut member) = Membership::find_by_email_and_org(&user_data.email, &org_id, &conn).await { let restored = member.restore(); let ext_modified = member.set_external_id(Some(user_data.external_id.clone())); if restored || ext_modified { - member.save(&mut conn).await?; + member.save(&conn).await?; } } else { // If user is not part of the organization - let user = match User::find_by_mail(&user_data.email, &mut conn).await { + let user = match User::find_by_mail(&user_data.email, &conn).await { Some(user) => user, // exists in vaultwarden None => { // User does not exist yet let mut new_user = User::new(user_data.email.clone(), None); - new_user.save(&mut conn).await?; + new_user.save(&conn).await?; if !CONFIG.mail_enabled() { - Invitation::new(&new_user.email).save(&mut conn).await?; + Invitation::new(&new_user.email).save(&conn).await?; } user_created = true; new_user @@ -105,7 +110,7 @@ async fn ldap_import(data: Json, token: PublicToken, mut conn: Db MembershipStatus::Accepted as i32 // Automatically mark user as accepted if no email invites }; - let (org_name, org_email) = match Organization::find_by_uuid(&org_id, &mut conn).await { + let (org_name, org_email) = match Organization::find_by_uuid(&org_id, &conn).await { Some(org) => (org.name, org.billing_email), None => err!("Error looking up organization"), }; @@ -116,7 +121,7 @@ async fn ldap_import(data: Json, token: PublicToken, mut conn: Db new_member.atype = MembershipType::User as i32; new_member.status = member_status; - new_member.save(&mut conn).await?; + new_member.save(&conn).await?; if CONFIG.mail_enabled() { if let Err(e) = @@ -124,9 +129,9 @@ async fn ldap_import(data: Json, token: PublicToken, mut conn: Db { // Upon error 
delete the user, invite and org member records when needed if user_created { - user.delete(&mut conn).await?; + user.delete(&conn).await?; } else { - new_member.delete(&mut conn).await?; + new_member.delete(&conn).await?; } err!(format!("Error sending invite: {e:?} ")); @@ -137,8 +142,7 @@ async fn ldap_import(data: Json, token: PublicToken, mut conn: Db if CONFIG.org_groups_enabled() { for group_data in &data.groups { - let group_uuid = match Group::find_by_external_id_and_org(&group_data.external_id, &org_id, &mut conn).await - { + let group_uuid = match Group::find_by_external_id_and_org(&group_data.external_id, &org_id, &conn).await { Some(group) => group.uuid, None => { let mut group = Group::new( @@ -147,17 +151,17 @@ async fn ldap_import(data: Json, token: PublicToken, mut conn: Db false, Some(group_data.external_id.clone()), ); - group.save(&mut conn).await?; + group.save(&conn).await?; group.uuid } }; - GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?; + GroupUser::delete_all_by_group(&group_uuid, &conn).await?; for ext_id in &group_data.member_external_ids { - if let Some(member) = Membership::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await { + if let Some(member) = Membership::find_by_external_id_and_org(ext_id, &org_id, &conn).await { let mut group_user = GroupUser::new(group_uuid.clone(), member.uuid.clone()); - group_user.save(&mut conn).await?; + group_user.save(&conn).await?; } } } @@ -169,19 +173,18 @@ async fn ldap_import(data: Json, token: PublicToken, mut conn: Db if data.overwrite_existing { // Generate a HashSet to quickly verify if a member is listed or not. 
let sync_members: HashSet = data.members.into_iter().map(|m| m.external_id).collect(); - for member in Membership::find_by_org(&org_id, &mut conn).await { + for member in Membership::find_by_org(&org_id, &conn).await { if let Some(ref user_external_id) = member.external_id { if !sync_members.contains(user_external_id) { if member.atype == MembershipType::Owner && member.status == MembershipStatus::Confirmed as i32 { // Removing owner, check that there is at least one other confirmed owner - if Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &mut conn).await - <= 1 + if Membership::count_confirmed_by_org_and_type(&org_id, MembershipType::Owner, &conn).await <= 1 { warn!("Can't delete the last owner"); continue; } } - member.delete(&mut conn).await?; + member.delete(&conn).await?; } } } diff --git a/src/api/core/sends.rs b/src/api/core/sends.rs index 96bf71a0..286dac3d 100644 --- a/src/api/core/sends.rs +++ b/src/api/core/sends.rs @@ -14,7 +14,10 @@ use crate::{ api::{ApiResult, EmptyResult, JsonResult, Notify, UpdateType}, auth::{ClientIp, Headers, Host}, config::PathType, - db::{models::*, DbConn, DbPool}, + db::{ + models::{Device, OrgPolicy, OrgPolicyType, Send, SendFileId, SendId, SendType, UserId}, + DbConn, DbPool, + }, util::{save_temp_file, NumberOrString}, CONFIG, }; @@ -58,8 +61,8 @@ pub fn routes() -> Vec { pub async fn purge_sends(pool: DbPool) { debug!("Purging sends"); - if let Ok(mut conn) = pool.get().await { - Send::purge(&mut conn).await; + if let Ok(conn) = pool.get().await { + Send::purge(&conn).await; } else { error!("Failed to get DB connection while purging sends") } @@ -96,7 +99,7 @@ pub struct SendData { /// /// There is also a Vaultwarden-specific `sends_allowed` config setting that /// controls this policy globally. 
-async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> EmptyResult { +async fn enforce_disable_send_policy(headers: &Headers, conn: &DbConn) -> EmptyResult { let user_id = &headers.user.uuid; if !CONFIG.sends_allowed() || OrgPolicy::is_applicable_to_user(user_id, OrgPolicyType::DisableSend, None, conn).await @@ -112,7 +115,7 @@ async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> Em /// but is allowed to remove this option from an existing Send. /// /// Ref: https://bitwarden.com/help/article/policies/#send-options -async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &mut DbConn) -> EmptyResult { +async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &DbConn) -> EmptyResult { let user_id = &headers.user.uuid; let hide_email = data.hide_email.unwrap_or(false); if hide_email && OrgPolicy::is_hide_email_disabled(user_id, conn).await { @@ -164,8 +167,8 @@ fn create_send(data: SendData, user_id: UserId) -> ApiResult { } #[get("/sends")] -async fn get_sends(headers: Headers, mut conn: DbConn) -> Json { - let sends = Send::find_by_user(&headers.user.uuid, &mut conn); +async fn get_sends(headers: Headers, conn: DbConn) -> Json { + let sends = Send::find_by_user(&headers.user.uuid, &conn); let sends_json: Vec = sends.await.iter().map(|s| s.to_json()).collect(); Json(json!({ @@ -176,32 +179,32 @@ async fn get_sends(headers: Headers, mut conn: DbConn) -> Json { } #[get("/sends/")] -async fn get_send(send_id: SendId, headers: Headers, mut conn: DbConn) -> JsonResult { - match Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await { +async fn get_send(send_id: SendId, headers: Headers, conn: DbConn) -> JsonResult { + match Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &conn).await { Some(send) => Ok(Json(send.to_json())), None => err!("Send not found", "Invalid send uuid or does not belong to user"), } } #[post("/sends", data = "")] 
-async fn post_send(data: Json, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { - enforce_disable_send_policy(&headers, &mut conn).await?; +async fn post_send(data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { + enforce_disable_send_policy(&headers, &conn).await?; let data: SendData = data.into_inner(); - enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?; + enforce_disable_hide_email_policy(&data, &headers, &conn).await?; if data.r#type == SendType::File as i32 { err!("File sends should use /api/sends/file") } let mut send = create_send(data, headers.user.uuid)?; - send.save(&mut conn).await?; + send.save(&conn).await?; nt.send_send_update( UpdateType::SyncSendCreate, &send, - &send.update_users_revision(&mut conn).await, + &send.update_users_revision(&conn).await, &headers.device, - &mut conn, + &conn, ) .await; @@ -225,8 +228,8 @@ struct UploadDataV2<'f> { // 2025: This endpoint doesn't seem to exists anymore in the latest version // See: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Tools/Controllers/SendsController.cs #[post("/sends/file", format = "multipart/form-data", data = "")] -async fn post_send_file(data: Form>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { - enforce_disable_send_policy(&headers, &mut conn).await?; +async fn post_send_file(data: Form>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { + enforce_disable_send_policy(&headers, &conn).await?; let UploadData { model, @@ -241,12 +244,12 @@ async fn post_send_file(data: Form>, headers: Headers, mut conn: err!("Send size can't be negative") } - enforce_disable_hide_email_policy(&model, &headers, &mut conn).await?; + enforce_disable_hide_email_policy(&model, &headers, &conn).await?; let size_limit = match CONFIG.user_send_limit() { Some(0) => err!("File uploads are disabled"), Some(limit_kb) => { - let Some(already_used) = 
Send::size_by_user(&headers.user.uuid, &mut conn).await else { + let Some(already_used) = Send::size_by_user(&headers.user.uuid, &conn).await else { err!("Existing sends overflow") }; let Some(left) = limit_kb.checked_mul(1024).and_then(|l| l.checked_sub(already_used)) else { @@ -282,13 +285,13 @@ async fn post_send_file(data: Form>, headers: Headers, mut conn: send.data = serde_json::to_string(&data_value)?; // Save the changes in the database - send.save(&mut conn).await?; + send.save(&conn).await?; nt.send_send_update( UpdateType::SyncSendCreate, &send, - &send.update_users_revision(&mut conn).await, + &send.update_users_revision(&conn).await, &headers.device, - &mut conn, + &conn, ) .await; @@ -297,8 +300,8 @@ async fn post_send_file(data: Form>, headers: Headers, mut conn: // Upstream: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/Tools/Controllers/SendsController.cs#L165 #[post("/sends/file/v2", data = "")] -async fn post_send_file_v2(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { - enforce_disable_send_policy(&headers, &mut conn).await?; +async fn post_send_file_v2(data: Json, headers: Headers, conn: DbConn) -> JsonResult { + enforce_disable_send_policy(&headers, &conn).await?; let data = data.into_inner(); @@ -306,7 +309,7 @@ async fn post_send_file_v2(data: Json, headers: Headers, mut conn: DbC err!("Send content is not a file"); } - enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?; + enforce_disable_hide_email_policy(&data, &headers, &conn).await?; let file_length = match &data.file_length { Some(m) => m.into_i64()?, @@ -319,7 +322,7 @@ async fn post_send_file_v2(data: Json, headers: Headers, mut conn: DbC let size_limit = match CONFIG.user_send_limit() { Some(0) => err!("File uploads are disabled"), Some(limit_kb) => { - let Some(already_used) = Send::size_by_user(&headers.user.uuid, &mut conn).await else { + let Some(already_used) = Send::size_by_user(&headers.user.uuid, 
&conn).await else { err!("Existing sends overflow") }; let Some(left) = limit_kb.checked_mul(1024).and_then(|l| l.checked_sub(already_used)) else { @@ -348,7 +351,7 @@ async fn post_send_file_v2(data: Json, headers: Headers, mut conn: DbC o.insert(String::from("sizeName"), Value::String(crate::util::get_display_size(file_length))); } send.data = serde_json::to_string(&data_value)?; - send.save(&mut conn).await?; + send.save(&conn).await?; Ok(Json(json!({ "fileUploadType": 0, // 0 == Direct | 1 == Azure @@ -373,14 +376,14 @@ async fn post_send_file_v2_data( file_id: SendFileId, data: Form>, headers: Headers, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> EmptyResult { - enforce_disable_send_policy(&headers, &mut conn).await?; + enforce_disable_send_policy(&headers, &conn).await?; let data = data.into_inner(); - let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else { + let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &conn).await else { err!("Send not found. 
Unable to save the file.", "Invalid send uuid or does not belong to user.") }; @@ -428,9 +431,9 @@ async fn post_send_file_v2_data( nt.send_send_update( UpdateType::SyncSendCreate, &send, - &send.update_users_revision(&mut conn).await, + &send.update_users_revision(&conn).await, &headers.device, - &mut conn, + &conn, ) .await; @@ -447,11 +450,11 @@ pub struct SendAccessData { async fn post_access( access_id: &str, data: Json, - mut conn: DbConn, + conn: DbConn, ip: ClientIp, nt: Notify<'_>, ) -> JsonResult { - let Some(mut send) = Send::find_by_access_id(access_id, &mut conn).await else { + let Some(mut send) = Send::find_by_access_id(access_id, &conn).await else { err_code!(SEND_INACCESSIBLE_MSG, 404) }; @@ -488,18 +491,18 @@ async fn post_access( send.access_count += 1; } - send.save(&mut conn).await?; + send.save(&conn).await?; nt.send_send_update( UpdateType::SyncSendUpdate, &send, - &send.update_users_revision(&mut conn).await, + &send.update_users_revision(&conn).await, &ANON_PUSH_DEVICE, - &mut conn, + &conn, ) .await; - Ok(Json(send.to_json_access(&mut conn).await)) + Ok(Json(send.to_json_access(&conn).await)) } #[post("/sends//access/file/", data = "")] @@ -508,10 +511,10 @@ async fn post_access_file( file_id: SendFileId, data: Json, host: Host, - mut conn: DbConn, + conn: DbConn, nt: Notify<'_>, ) -> JsonResult { - let Some(mut send) = Send::find_by_uuid(&send_id, &mut conn).await else { + let Some(mut send) = Send::find_by_uuid(&send_id, &conn).await else { err_code!(SEND_INACCESSIBLE_MSG, 404) }; @@ -545,14 +548,14 @@ async fn post_access_file( send.access_count += 1; - send.save(&mut conn).await?; + send.save(&conn).await?; nt.send_send_update( UpdateType::SyncSendUpdate, &send, - &send.update_users_revision(&mut conn).await, + &send.update_users_revision(&conn).await, &ANON_PUSH_DEVICE, - &mut conn, + &conn, ) .await; @@ -587,23 +590,17 @@ async fn download_send(send_id: SendId, file_id: SendFileId, t: &str) -> Option< } #[put("/sends/", data = "")] 
-async fn put_send( - send_id: SendId, - data: Json, - headers: Headers, - mut conn: DbConn, - nt: Notify<'_>, -) -> JsonResult { - enforce_disable_send_policy(&headers, &mut conn).await?; +async fn put_send(send_id: SendId, data: Json, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { + enforce_disable_send_policy(&headers, &conn).await?; let data: SendData = data.into_inner(); - enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?; + enforce_disable_hide_email_policy(&data, &headers, &conn).await?; - let Some(mut send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else { + let Some(mut send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &conn).await else { err!("Send not found", "Send send_id is invalid or does not belong to user") }; - update_send_from_data(&mut send, data, &headers, &mut conn, &nt, UpdateType::SyncSendUpdate).await?; + update_send_from_data(&mut send, data, &headers, &conn, &nt, UpdateType::SyncSendUpdate).await?; Ok(Json(send.to_json())) } @@ -612,7 +609,7 @@ pub async fn update_send_from_data( send: &mut Send, data: SendData, headers: &Headers, - conn: &mut DbConn, + conn: &DbConn, nt: &Notify<'_>, ut: UpdateType, ) -> EmptyResult { @@ -667,18 +664,18 @@ pub async fn update_send_from_data( } #[delete("/sends/")] -async fn delete_send(send_id: SendId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { - let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else { +async fn delete_send(send_id: SendId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult { + let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &conn).await else { err!("Send not found", "Invalid send uuid, or does not belong to user") }; - send.delete(&mut conn).await?; + send.delete(&conn).await?; nt.send_send_update( UpdateType::SyncSendDelete, &send, - &send.update_users_revision(&mut conn).await, + 
&send.update_users_revision(&conn).await, &headers.device, - &mut conn, + &conn, ) .await; @@ -686,21 +683,21 @@ async fn delete_send(send_id: SendId, headers: Headers, mut conn: DbConn, nt: No } #[put("/sends//remove-password")] -async fn put_remove_password(send_id: SendId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { - enforce_disable_send_policy(&headers, &mut conn).await?; +async fn put_remove_password(send_id: SendId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { + enforce_disable_send_policy(&headers, &conn).await?; - let Some(mut send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else { + let Some(mut send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &conn).await else { err!("Send not found", "Invalid send uuid, or does not belong to user") }; send.set_password(None); - send.save(&mut conn).await?; + send.save(&conn).await?; nt.send_send_update( UpdateType::SyncSendUpdate, &send, - &send.update_users_revision(&mut conn).await, + &send.update_users_revision(&conn).await, &headers.device, - &mut conn, + &conn, ) .await; diff --git a/src/api/core/two_factor/authenticator.rs b/src/api/core/two_factor/authenticator.rs index e5ffeedc..d8f52995 100644 --- a/src/api/core/two_factor/authenticator.rs +++ b/src/api/core/two_factor/authenticator.rs @@ -20,14 +20,14 @@ pub fn routes() -> Vec { } #[post("/two-factor/get-authenticator", data = "")] -async fn generate_authenticator(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn generate_authenticator(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let data: PasswordOrOtpData = data.into_inner(); let user = headers.user; - data.validate(&user, false, &mut conn).await?; + data.validate(&user, false, &conn).await?; let type_ = TwoFactorType::Authenticator as i32; - let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await; + let twofactor = 
TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await; let (enabled, key) = match twofactor { Some(tf) => (true, tf.data), @@ -55,7 +55,7 @@ struct EnableAuthenticatorData { } #[post("/two-factor/authenticator", data = "")] -async fn activate_authenticator(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn activate_authenticator(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let data: EnableAuthenticatorData = data.into_inner(); let key = data.key; let token = data.token.into_string(); @@ -66,7 +66,7 @@ async fn activate_authenticator(data: Json, headers: He master_password_hash: data.master_password_hash, otp: data.otp, } - .validate(&user, true, &mut conn) + .validate(&user, true, &conn) .await?; // Validate key as base32 and 20 bytes length @@ -80,11 +80,11 @@ async fn activate_authenticator(data: Json, headers: He } // Validate the token provided with the key, and save new twofactor - validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &headers.ip, &mut conn).await?; + validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &headers.ip, &conn).await?; - _generate_recover_code(&mut user, &mut conn).await; + _generate_recover_code(&mut user, &conn).await; - log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await; + log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &conn).await; Ok(Json(json!({ "enabled": true, @@ -103,7 +103,7 @@ pub async fn validate_totp_code_str( totp_code: &str, secret: &str, ip: &ClientIp, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { if !totp_code.chars().all(char::is_numeric) { err!("TOTP code is not a number"); @@ -117,7 +117,7 @@ pub async fn validate_totp_code( totp_code: &str, secret: &str, ip: &ClientIp, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { use totp_lite::{totp_custom, Sha1}; @@ -189,7 +189,7 @@ struct DisableAuthenticatorData { } 
#[delete("/two-factor/authenticator", data = "")] -async fn disable_authenticator(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn disable_authenticator(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let user = headers.user; let type_ = data.r#type.into_i32()?; @@ -197,24 +197,18 @@ async fn disable_authenticator(data: Json, headers: He err!("Invalid password"); } - if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await { + if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await { if twofactor.data == data.key { - twofactor.delete(&mut conn).await?; - log_user_event( - EventType::UserDisabled2fa as i32, - &user.uuid, - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; + twofactor.delete(&conn).await?; + log_user_event(EventType::UserDisabled2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &conn) + .await; } else { err!(format!("TOTP key for user {} does not match recorded value, cannot deactivate", &user.email)); } } - if TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty() { - super::enforce_2fa_policy(&user, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await?; + if TwoFactor::find_by_user(&user.uuid, &conn).await.is_empty() { + super::enforce_2fa_policy(&user, &user.uuid, headers.device.atype, &headers.ip.ip, &conn).await?; } Ok(Json(json!({ diff --git a/src/api/core/two_factor/duo.rs b/src/api/core/two_factor/duo.rs index d5914608..f2de50c3 100644 --- a/src/api/core/two_factor/duo.rs +++ b/src/api/core/two_factor/duo.rs @@ -92,13 +92,13 @@ impl DuoStatus { const DISABLED_MESSAGE_DEFAULT: &str = ""; #[post("/two-factor/get-duo", data = "")] -async fn get_duo(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn get_duo(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let data: PasswordOrOtpData = data.into_inner(); let user = headers.user; - data.validate(&user, 
false, &mut conn).await?; + data.validate(&user, false, &conn).await?; - let data = get_user_duo_data(&user.uuid, &mut conn).await; + let data = get_user_duo_data(&user.uuid, &conn).await; let (enabled, data) = match data { DuoStatus::Global(_) => (true, Some(DuoData::secret())), @@ -158,7 +158,7 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool { } #[post("/two-factor/duo", data = "")] -async fn activate_duo(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn activate_duo(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let data: EnableDuoData = data.into_inner(); let mut user = headers.user; @@ -166,7 +166,7 @@ async fn activate_duo(data: Json, headers: Headers, mut conn: DbC master_password_hash: data.master_password_hash.clone(), otp: data.otp.clone(), } - .validate(&user, true, &mut conn) + .validate(&user, true, &conn) .await?; let (data, data_str) = if check_duo_fields_custom(&data) { @@ -180,11 +180,11 @@ async fn activate_duo(data: Json, headers: Headers, mut conn: DbC let type_ = TwoFactorType::Duo; let twofactor = TwoFactor::new(user.uuid.clone(), type_, data_str); - twofactor.save(&mut conn).await?; + twofactor.save(&conn).await?; - _generate_recover_code(&mut user, &mut conn).await; + _generate_recover_code(&mut user, &conn).await; - log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await; + log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &conn).await; Ok(Json(json!({ "enabled": true, @@ -231,7 +231,7 @@ const AUTH_PREFIX: &str = "AUTH"; const DUO_PREFIX: &str = "TX"; const APP_PREFIX: &str = "APP"; -async fn get_user_duo_data(user_id: &UserId, conn: &mut DbConn) -> DuoStatus { +async fn get_user_duo_data(user_id: &UserId, conn: &DbConn) -> DuoStatus { let type_ = TwoFactorType::Duo as i32; // If the user doesn't have an entry, disabled @@ -254,7 +254,7 @@ async fn get_user_duo_data(user_id: &UserId, 
conn: &mut DbConn) -> DuoStatus { } // let (ik, sk, ak, host) = get_duo_keys(); -pub(crate) async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiResult<(String, String, String, String)> { +pub(crate) async fn get_duo_keys_email(email: &str, conn: &DbConn) -> ApiResult<(String, String, String, String)> { let data = match User::find_by_mail(email, conn).await { Some(u) => get_user_duo_data(&u.uuid, conn).await.data(), _ => DuoData::global(), @@ -264,7 +264,7 @@ pub(crate) async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiRes Ok((data.ik, data.sk, CONFIG.get_duo_akey().await, data.host)) } -pub async fn generate_duo_signature(email: &str, conn: &mut DbConn) -> ApiResult<(String, String)> { +pub async fn generate_duo_signature(email: &str, conn: &DbConn) -> ApiResult<(String, String)> { let now = Utc::now().timestamp(); let (ik, sk, ak, host) = get_duo_keys_email(email, conn).await?; @@ -282,7 +282,7 @@ fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64 format!("{cookie}|{}", crypto::hmac_sign(key, &cookie)) } -pub async fn validate_duo_login(email: &str, response: &str, conn: &mut DbConn) -> EmptyResult { +pub async fn validate_duo_login(email: &str, response: &str, conn: &DbConn) -> EmptyResult { let split: Vec<&str> = response.split(':').collect(); if split.len() != 2 { err!( diff --git a/src/api/core/two_factor/duo_oidc.rs b/src/api/core/two_factor/duo_oidc.rs index ad948a75..144ffe84 100644 --- a/src/api/core/two_factor/duo_oidc.rs +++ b/src/api/core/two_factor/duo_oidc.rs @@ -317,7 +317,7 @@ struct DuoAuthContext { // Given a state string, retrieve the associated Duo auth context and // delete the retrieved state from the database. 
-async fn extract_context(state: &str, conn: &mut DbConn) -> Option { +async fn extract_context(state: &str, conn: &DbConn) -> Option { let ctx: TwoFactorDuoContext = match TwoFactorDuoContext::find_by_state(state, conn).await { Some(c) => c, None => return None, @@ -344,8 +344,8 @@ async fn extract_context(state: &str, conn: &mut DbConn) -> Option Result { let (ik, sk, _, host) = get_duo_keys_email(email, conn).await?; @@ -418,7 +418,7 @@ pub async fn validate_duo_login( two_factor_token: &str, client_id: &str, device_identifier: &DeviceId, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { // Result supplied to us by clients in the form "|" let split: Vec<&str> = two_factor_token.split('|').collect(); diff --git a/src/api/core/two_factor/email.rs b/src/api/core/two_factor/email.rs index f895efa1..460caa7b 100644 --- a/src/api/core/two_factor/email.rs +++ b/src/api/core/two_factor/email.rs @@ -39,13 +39,13 @@ struct SendEmailLoginData { /// User is trying to login and wants to use email 2FA. /// Does not require Bearer token #[post("/two-factor/send-email-login", data = "")] // JsonResult -async fn send_email_login(data: Json, mut conn: DbConn) -> EmptyResult { +async fn send_email_login(data: Json, conn: DbConn) -> EmptyResult { let data: SendEmailLoginData = data.into_inner(); use crate::db::models::User; // Get the user - let Some(user) = User::find_by_device_id(&data.device_identifier, &mut conn).await else { + let Some(user) = User::find_by_device_id(&data.device_identifier, &conn).await else { err!("Cannot find user. 
Try again.") }; @@ -53,13 +53,13 @@ async fn send_email_login(data: Json, mut conn: DbConn) -> E err!("Email 2FA is disabled") } - send_token(&user.uuid, &mut conn).await?; + send_token(&user.uuid, &conn).await?; Ok(()) } /// Generate the token, save the data for later verification and send email to user -pub async fn send_token(user_id: &UserId, conn: &mut DbConn) -> EmptyResult { +pub async fn send_token(user_id: &UserId, conn: &DbConn) -> EmptyResult { let type_ = TwoFactorType::Email as i32; let mut twofactor = TwoFactor::find_by_user_and_type(user_id, type_, conn).await.map_res("Two factor not found")?; @@ -77,14 +77,14 @@ pub async fn send_token(user_id: &UserId, conn: &mut DbConn) -> EmptyResult { /// When user clicks on Manage email 2FA show the user the related information #[post("/two-factor/get-email", data = "")] -async fn get_email(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn get_email(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let data: PasswordOrOtpData = data.into_inner(); let user = headers.user; - data.validate(&user, false, &mut conn).await?; + data.validate(&user, false, &conn).await?; let (enabled, mfa_email) = - match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &mut conn).await { + match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &conn).await { Some(x) => { let twofactor_data = EmailTokenData::from_json(&x.data)?; (true, json!(twofactor_data.email)) @@ -110,7 +110,7 @@ struct SendEmailData { /// Send a verification email to the specified email address to check whether it exists/belongs to user. 
#[post("/two-factor/send-email", data = "")] -async fn send_email(data: Json, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn send_email(data: Json, headers: Headers, conn: DbConn) -> EmptyResult { let data: SendEmailData = data.into_inner(); let user = headers.user; @@ -118,7 +118,7 @@ async fn send_email(data: Json, headers: Headers, mut conn: DbCon master_password_hash: data.master_password_hash, otp: data.otp, } - .validate(&user, false, &mut conn) + .validate(&user, false, &conn) .await?; if !CONFIG._enable_email_2fa() { @@ -127,8 +127,8 @@ async fn send_email(data: Json, headers: Headers, mut conn: DbCon let type_ = TwoFactorType::Email as i32; - if let Some(tf) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await { - tf.delete(&mut conn).await?; + if let Some(tf) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await { + tf.delete(&conn).await?; } let generated_token = crypto::generate_email_token(CONFIG.email_token_size()); @@ -136,7 +136,7 @@ async fn send_email(data: Json, headers: Headers, mut conn: DbCon // Uses EmailVerificationChallenge as type to show that it's not verified yet. let twofactor = TwoFactor::new(user.uuid, TwoFactorType::EmailVerificationChallenge, twofactor_data.to_json()); - twofactor.save(&mut conn).await?; + twofactor.save(&conn).await?; mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?).await?; @@ -154,7 +154,7 @@ struct EmailData { /// Verify email belongs to user and can be used for 2FA email codes. 
#[put("/two-factor/email", data = "")] -async fn email(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn email(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let data: EmailData = data.into_inner(); let mut user = headers.user; @@ -163,12 +163,12 @@ async fn email(data: Json, headers: Headers, mut conn: DbConn) -> Jso master_password_hash: data.master_password_hash, otp: data.otp, } - .validate(&user, true, &mut conn) + .validate(&user, true, &conn) .await?; let type_ = TwoFactorType::EmailVerificationChallenge as i32; let mut twofactor = - TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await.map_res("Two factor not found")?; + TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await.map_res("Two factor not found")?; let mut email_data = EmailTokenData::from_json(&twofactor.data)?; @@ -183,11 +183,11 @@ async fn email(data: Json, headers: Headers, mut conn: DbConn) -> Jso email_data.reset_token(); twofactor.atype = TwoFactorType::Email as i32; twofactor.data = email_data.to_json(); - twofactor.save(&mut conn).await?; + twofactor.save(&conn).await?; - _generate_recover_code(&mut user, &mut conn).await; + _generate_recover_code(&mut user, &conn).await; - log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await; + log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &conn).await; Ok(Json(json!({ "email": email_data.email, @@ -202,7 +202,7 @@ pub async fn validate_email_code_str( token: &str, data: &str, ip: &std::net::IpAddr, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { let mut email_data = EmailTokenData::from_json(data)?; let mut twofactor = TwoFactor::find_by_user_and_type(user_id, TwoFactorType::Email as i32, conn) @@ -302,7 +302,7 @@ impl EmailTokenData { } } -pub async fn activate_email_2fa(user: &User, conn: &mut DbConn) -> EmptyResult { +pub async fn activate_email_2fa(user: &User, 
conn: &DbConn) -> EmptyResult { if user.verified_at.is_none() { err!("Auto-enabling of email 2FA failed because the users email address has not been verified!"); } @@ -332,7 +332,7 @@ pub fn obscure_email(email: &str) -> String { format!("{new_name}@{domain}") } -pub async fn find_and_activate_email_2fa(user_id: &UserId, conn: &mut DbConn) -> EmptyResult { +pub async fn find_and_activate_email_2fa(user_id: &UserId, conn: &DbConn) -> EmptyResult { if let Some(user) = User::find_by_uuid(user_id, conn).await { activate_email_2fa(&user, conn).await } else { diff --git a/src/api/core/two_factor/mod.rs b/src/api/core/two_factor/mod.rs index cfe0be86..416219a8 100644 --- a/src/api/core/two_factor/mod.rs +++ b/src/api/core/two_factor/mod.rs @@ -11,7 +11,13 @@ use crate::{ }, auth::{ClientHeaders, Headers}, crypto, - db::{models::*, DbConn, DbPool}, + db::{ + models::{ + DeviceType, EventType, Membership, MembershipType, OrgPolicyType, Organization, OrganizationId, TwoFactor, + TwoFactorIncomplete, User, UserId, + }, + DbConn, DbPool, + }, mail, util::NumberOrString, CONFIG, @@ -46,8 +52,8 @@ pub fn routes() -> Vec { } #[get("/two-factor")] -async fn get_twofactor(headers: Headers, mut conn: DbConn) -> Json { - let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &mut conn).await; +async fn get_twofactor(headers: Headers, conn: DbConn) -> Json { + let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn).await; let twofactors_json: Vec = twofactors.iter().map(TwoFactor::to_json_provider).collect(); Json(json!({ @@ -58,11 +64,11 @@ async fn get_twofactor(headers: Headers, mut conn: DbConn) -> Json { } #[post("/two-factor/get-recover", data = "")] -async fn get_recover(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn get_recover(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let data: PasswordOrOtpData = data.into_inner(); let user = headers.user; - data.validate(&user, true, &mut conn).await?; + data.validate(&user, 
true, &conn).await?; Ok(Json(json!({ "code": user.totp_recover, @@ -79,13 +85,13 @@ struct RecoverTwoFactor { } #[post("/two-factor/recover", data = "")] -async fn recover(data: Json, client_headers: ClientHeaders, mut conn: DbConn) -> JsonResult { +async fn recover(data: Json, client_headers: ClientHeaders, conn: DbConn) -> JsonResult { let data: RecoverTwoFactor = data.into_inner(); use crate::db::models::User; // Get the user - let Some(mut user) = User::find_by_mail(&data.email, &mut conn).await else { + let Some(mut user) = User::find_by_mail(&data.email, &conn).await else { err!("Username or password is incorrect. Try again.") }; @@ -100,25 +106,25 @@ async fn recover(data: Json, client_headers: ClientHeaders, mu } // Remove all twofactors from the user - TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?; - enforce_2fa_policy(&user, &user.uuid, client_headers.device_type, &client_headers.ip.ip, &mut conn).await?; + TwoFactor::delete_all_by_user(&user.uuid, &conn).await?; + enforce_2fa_policy(&user, &user.uuid, client_headers.device_type, &client_headers.ip.ip, &conn).await?; log_user_event( EventType::UserRecovered2fa as i32, &user.uuid, client_headers.device_type, &client_headers.ip.ip, - &mut conn, + &conn, ) .await; // Remove the recovery code, not needed without twofactors user.totp_recover = None; - user.save(&mut conn).await?; + user.save(&conn).await?; Ok(Json(Value::Object(serde_json::Map::new()))) } -async fn _generate_recover_code(user: &mut User, conn: &mut DbConn) { +async fn _generate_recover_code(user: &mut User, conn: &DbConn) { if user.totp_recover.is_none() { let totp_recover = crypto::encode_random_bytes::<20>(BASE32); user.totp_recover = Some(totp_recover); @@ -135,7 +141,7 @@ struct DisableTwoFactorData { } #[post("/two-factor/disable", data = "")] -async fn disable_twofactor(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn disable_twofactor(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let 
data: DisableTwoFactorData = data.into_inner(); let user = headers.user; @@ -144,19 +150,19 @@ async fn disable_twofactor(data: Json, headers: Headers, m master_password_hash: data.master_password_hash, otp: data.otp, } - .validate(&user, true, &mut conn) + .validate(&user, true, &conn) .await?; let type_ = data.r#type.into_i32()?; - if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await { - twofactor.delete(&mut conn).await?; - log_user_event(EventType::UserDisabled2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn) + if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await { + twofactor.delete(&conn).await?; + log_user_event(EventType::UserDisabled2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &conn) .await; } - if TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty() { - enforce_2fa_policy(&user, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await?; + if TwoFactor::find_by_user(&user.uuid, &conn).await.is_empty() { + enforce_2fa_policy(&user, &user.uuid, headers.device.atype, &headers.ip.ip, &conn).await?; } Ok(Json(json!({ @@ -176,7 +182,7 @@ pub async fn enforce_2fa_policy( act_user_id: &UserId, device_type: i32, ip: &std::net::IpAddr, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { for member in Membership::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, conn).await.into_iter() @@ -212,7 +218,7 @@ pub async fn enforce_2fa_policy_for_org( act_user_id: &UserId, device_type: i32, ip: &std::net::IpAddr, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { let org = Organization::find_by_uuid(org_id, conn).await.unwrap(); for member in Membership::find_confirmed_by_org(org_id, conn).await.into_iter() { @@ -249,7 +255,7 @@ pub async fn send_incomplete_2fa_notifications(pool: DbPool) { return; } - let mut conn = match pool.get().await { + let conn = match pool.get().await { Ok(conn) => conn, _ 
=> { error!("Failed to get DB connection in send_incomplete_2fa_notifications()"); @@ -260,9 +266,9 @@ pub async fn send_incomplete_2fa_notifications(pool: DbPool) { let now = Utc::now().naive_utc(); let time_limit = TimeDelta::try_minutes(CONFIG.incomplete_2fa_time_limit()).unwrap(); let time_before = now - time_limit; - let incomplete_logins = TwoFactorIncomplete::find_logins_before(&time_before, &mut conn).await; + let incomplete_logins = TwoFactorIncomplete::find_logins_before(&time_before, &conn).await; for login in incomplete_logins { - let user = User::find_by_uuid(&login.user_uuid, &mut conn).await.expect("User not found"); + let user = User::find_by_uuid(&login.user_uuid, &conn).await.expect("User not found"); info!( "User {} did not complete a 2FA login within the configured time limit. IP: {}", user.email, login.ip_address @@ -277,7 +283,7 @@ pub async fn send_incomplete_2fa_notifications(pool: DbPool) { .await { Ok(_) => { - if let Err(e) = login.delete(&mut conn).await { + if let Err(e) = login.delete(&conn).await { error!("Error deleting incomplete 2FA record: {e:#?}"); } } diff --git a/src/api/core/two_factor/protected_actions.rs b/src/api/core/two_factor/protected_actions.rs index 5e4a65be..bf40c350 100644 --- a/src/api/core/two_factor/protected_actions.rs +++ b/src/api/core/two_factor/protected_actions.rs @@ -55,7 +55,7 @@ impl ProtectedActionData { } #[post("/accounts/request-otp")] -async fn request_otp(headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn request_otp(headers: Headers, conn: DbConn) -> EmptyResult { if !CONFIG.mail_enabled() { err!("Email is disabled for this server. 
Either enable email or login using your master password instead of login via device."); } @@ -63,10 +63,9 @@ async fn request_otp(headers: Headers, mut conn: DbConn) -> EmptyResult { let user = headers.user; // Only one Protected Action per user is allowed to take place, delete the previous one - if let Some(pa) = - TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::ProtectedActions as i32, &mut conn).await + if let Some(pa) = TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::ProtectedActions as i32, &conn).await { - pa.delete(&mut conn).await?; + pa.delete(&conn).await?; } let generated_token = crypto::generate_email_token(CONFIG.email_token_size()); @@ -74,7 +73,7 @@ async fn request_otp(headers: Headers, mut conn: DbConn) -> EmptyResult { // Uses EmailVerificationChallenge as type to show that it's not verified yet. let twofactor = TwoFactor::new(user.uuid, TwoFactorType::ProtectedActions, pa_data.to_json()); - twofactor.save(&mut conn).await?; + twofactor.save(&conn).await?; mail::send_protected_action_token(&user.email, &pa_data.token).await?; @@ -89,7 +88,7 @@ struct ProtectedActionVerify { } #[post("/accounts/verify-otp", data = "")] -async fn verify_otp(data: Json, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn verify_otp(data: Json, headers: Headers, conn: DbConn) -> EmptyResult { if !CONFIG.mail_enabled() { err!("Email is disabled for this server. 
Either enable email or login using your master password instead of login via device."); } @@ -99,14 +98,14 @@ async fn verify_otp(data: Json, headers: Headers, mut con // Delete the token after one validation attempt // This endpoint only gets called for the vault export, and doesn't need a second attempt - validate_protected_action_otp(&data.otp, &user.uuid, true, &mut conn).await + validate_protected_action_otp(&data.otp, &user.uuid, true, &conn).await } pub async fn validate_protected_action_otp( otp: &str, user_id: &UserId, delete_if_valid: bool, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { let pa = TwoFactor::find_by_user_and_type(user_id, TwoFactorType::ProtectedActions as i32, conn) .await diff --git a/src/api/core/two_factor/webauthn.rs b/src/api/core/two_factor/webauthn.rs index 2a992dbe..3b88302c 100644 --- a/src/api/core/two_factor/webauthn.rs +++ b/src/api/core/two_factor/webauthn.rs @@ -107,7 +107,7 @@ impl WebauthnRegistration { } #[post("/two-factor/get-webauthn", data = "")] -async fn get_webauthn(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn get_webauthn(data: Json, headers: Headers, conn: DbConn) -> JsonResult { if !CONFIG.domain_set() { err!("`DOMAIN` environment variable is not set. 
Webauthn disabled") } @@ -115,9 +115,9 @@ async fn get_webauthn(data: Json, headers: Headers, mut conn: let data: PasswordOrOtpData = data.into_inner(); let user = headers.user; - data.validate(&user, false, &mut conn).await?; + data.validate(&user, false, &conn).await?; - let (enabled, registrations) = get_webauthn_registrations(&user.uuid, &mut conn).await?; + let (enabled, registrations) = get_webauthn_registrations(&user.uuid, &conn).await?; let registrations_json: Vec = registrations.iter().map(WebauthnRegistration::to_json).collect(); Ok(Json(json!({ @@ -128,13 +128,13 @@ async fn get_webauthn(data: Json, headers: Headers, mut conn: } #[post("/two-factor/get-webauthn-challenge", data = "")] -async fn generate_webauthn_challenge(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn generate_webauthn_challenge(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let data: PasswordOrOtpData = data.into_inner(); let user = headers.user; - data.validate(&user, false, &mut conn).await?; + data.validate(&user, false, &conn).await?; - let registrations = get_webauthn_registrations(&user.uuid, &mut conn) + let registrations = get_webauthn_registrations(&user.uuid, &conn) .await? .1 .into_iter() @@ -153,7 +153,7 @@ async fn generate_webauthn_challenge(data: Json, headers: Hea state["rs"]["extensions"].as_object_mut().unwrap().clear(); let type_ = TwoFactorType::WebauthnRegisterChallenge; - TwoFactor::new(user.uuid.clone(), type_, serde_json::to_string(&state)?).save(&mut conn).await?; + TwoFactor::new(user.uuid.clone(), type_, serde_json::to_string(&state)?).save(&conn).await?; // Because for this flow we abuse the passkeys as 2FA, and use it more like a securitykey // we need to modify some of the default settings defined by `start_passkey_registration()`. 
@@ -252,7 +252,7 @@ impl From for PublicKeyCredential { } #[post("/two-factor/webauthn", data = "")] -async fn activate_webauthn(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn activate_webauthn(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let data: EnableWebauthnData = data.into_inner(); let mut user = headers.user; @@ -260,15 +260,15 @@ async fn activate_webauthn(data: Json, headers: Headers, mut master_password_hash: data.master_password_hash, otp: data.otp, } - .validate(&user, true, &mut conn) + .validate(&user, true, &conn) .await?; // Retrieve and delete the saved challenge state let type_ = TwoFactorType::WebauthnRegisterChallenge as i32; - let state = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await { + let state = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await { Some(tf) => { let state: PasskeyRegistration = serde_json::from_str(&tf.data)?; - tf.delete(&mut conn).await?; + tf.delete(&conn).await?; state } None => err!("Can't recover challenge"), @@ -277,7 +277,7 @@ async fn activate_webauthn(data: Json, headers: Headers, mut // Verify the credentials with the saved state let credential = WEBAUTHN.finish_passkey_registration(&data.device_response.into(), &state)?; - let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &mut conn).await?.1; + let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &conn).await?.1; // TODO: Check for repeated ID's registrations.push(WebauthnRegistration { id: data.id.into_i32()?, @@ -289,11 +289,11 @@ async fn activate_webauthn(data: Json, headers: Headers, mut // Save the registrations and return them TwoFactor::new(user.uuid.clone(), TwoFactorType::Webauthn, serde_json::to_string(®istrations)?) 
- .save(&mut conn) + .save(&conn) .await?; - _generate_recover_code(&mut user, &mut conn).await; + _generate_recover_code(&mut user, &conn).await; - log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await; + log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &conn).await; let keys_json: Vec = registrations.iter().map(WebauthnRegistration::to_json).collect(); Ok(Json(json!({ @@ -316,14 +316,14 @@ struct DeleteU2FData { } #[delete("/two-factor/webauthn", data = "")] -async fn delete_webauthn(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn delete_webauthn(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let id = data.id.into_i32()?; if !headers.user.check_valid_password(&data.master_password_hash) { err!("Invalid password"); } let Some(mut tf) = - TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &mut conn).await + TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &conn).await else { err!("Webauthn data not found!") }; @@ -336,12 +336,11 @@ async fn delete_webauthn(data: Json, headers: Headers, mut conn: let removed_item = data.remove(item_pos); tf.data = serde_json::to_string(&data)?; - tf.save(&mut conn).await?; + tf.save(&conn).await?; drop(tf); // If entry is migrated from u2f, delete the u2f entry as well - if let Some(mut u2f) = - TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::U2f as i32, &mut conn).await + if let Some(mut u2f) = TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::U2f as i32, &conn).await { let mut data: Vec = match serde_json::from_str(&u2f.data) { Ok(d) => d, @@ -352,7 +351,7 @@ async fn delete_webauthn(data: Json, headers: Headers, mut conn: let new_data_str = serde_json::to_string(&data)?; u2f.data = new_data_str; - u2f.save(&mut conn).await?; + u2f.save(&conn).await?; } let keys_json: Vec = 
data.iter().map(WebauthnRegistration::to_json).collect(); @@ -366,7 +365,7 @@ async fn delete_webauthn(data: Json, headers: Headers, mut conn: pub async fn get_webauthn_registrations( user_id: &UserId, - conn: &mut DbConn, + conn: &DbConn, ) -> Result<(bool, Vec), Error> { let type_ = TwoFactorType::Webauthn as i32; match TwoFactor::find_by_user_and_type(user_id, type_, conn).await { @@ -375,7 +374,7 @@ pub async fn get_webauthn_registrations( } } -pub async fn generate_webauthn_login(user_id: &UserId, conn: &mut DbConn) -> JsonResult { +pub async fn generate_webauthn_login(user_id: &UserId, conn: &DbConn) -> JsonResult { // Load saved credentials let creds: Vec = get_webauthn_registrations(user_id, conn).await?.1.into_iter().map(|r| r.credential).collect(); @@ -415,7 +414,7 @@ pub async fn generate_webauthn_login(user_id: &UserId, conn: &mut DbConn) -> Jso Ok(Json(serde_json::to_value(response.public_key)?)) } -pub async fn validate_webauthn_login(user_id: &UserId, response: &str, conn: &mut DbConn) -> EmptyResult { +pub async fn validate_webauthn_login(user_id: &UserId, response: &str, conn: &DbConn) -> EmptyResult { let type_ = TwoFactorType::WebauthnLoginChallenge as i32; let mut state = match TwoFactor::find_by_user_and_type(user_id, type_, conn).await { Some(tf) => { @@ -469,7 +468,7 @@ async fn check_and_update_backup_eligible( rsp: &PublicKeyCredential, registrations: &mut Vec, state: &mut PasskeyAuthentication, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { // The feature flags from the response // For details see: https://www.w3.org/TR/webauthn-3/#sctn-authenticator-data diff --git a/src/api/core/two_factor/yubikey.rs b/src/api/core/two_factor/yubikey.rs index 293b211d..1cf11255 100644 --- a/src/api/core/two_factor/yubikey.rs +++ b/src/api/core/two_factor/yubikey.rs @@ -83,19 +83,19 @@ async fn verify_yubikey_otp(otp: String) -> EmptyResult { } #[post("/two-factor/get-yubikey", data = "")] -async fn generate_yubikey(data: Json, headers: Headers, 
mut conn: DbConn) -> JsonResult { +async fn generate_yubikey(data: Json, headers: Headers, conn: DbConn) -> JsonResult { // Make sure the credentials are set get_yubico_credentials()?; let data: PasswordOrOtpData = data.into_inner(); let user = headers.user; - data.validate(&user, false, &mut conn).await?; + data.validate(&user, false, &conn).await?; let user_id = &user.uuid; let yubikey_type = TwoFactorType::YubiKey as i32; - let r = TwoFactor::find_by_user_and_type(user_id, yubikey_type, &mut conn).await; + let r = TwoFactor::find_by_user_and_type(user_id, yubikey_type, &conn).await; if let Some(r) = r { let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?; @@ -116,7 +116,7 @@ async fn generate_yubikey(data: Json, headers: Headers, mut c } #[post("/two-factor/yubikey", data = "")] -async fn activate_yubikey(data: Json, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn activate_yubikey(data: Json, headers: Headers, conn: DbConn) -> JsonResult { let data: EnableYubikeyData = data.into_inner(); let mut user = headers.user; @@ -124,12 +124,12 @@ async fn activate_yubikey(data: Json, headers: Headers, mut c master_password_hash: data.master_password_hash.clone(), otp: data.otp.clone(), } - .validate(&user, true, &mut conn) + .validate(&user, true, &conn) .await?; // Check if we already have some data let mut yubikey_data = - match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::YubiKey as i32, &mut conn).await { + match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::YubiKey as i32, &conn).await { Some(data) => data, None => TwoFactor::new(user.uuid.clone(), TwoFactorType::YubiKey, String::new()), }; @@ -160,11 +160,11 @@ async fn activate_yubikey(data: Json, headers: Headers, mut c }; yubikey_data.data = serde_json::to_string(&yubikey_metadata).unwrap(); - yubikey_data.save(&mut conn).await?; + yubikey_data.save(&conn).await?; - _generate_recover_code(&mut user, &mut conn).await; + _generate_recover_code(&mut 
user, &conn).await; - log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await; + log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &conn).await; let mut result = jsonify_yubikeys(yubikey_metadata.keys); diff --git a/src/api/identity.rs b/src/api/identity.rs index 04863b58..516c5182 100644 --- a/src/api/identity.rs +++ b/src/api/identity.rs @@ -22,7 +22,13 @@ use crate::{ }, auth, auth::{generate_organization_api_key_login_claims, AuthMethod, ClientHeaders, ClientIp, ClientVersion}, - db::{models::*, DbConn}, + db::{ + models::{ + AuthRequest, AuthRequestId, Device, DeviceId, EventType, Invitation, OrganizationApiKey, OrganizationId, + SsoNonce, SsoUser, TwoFactor, TwoFactorIncomplete, TwoFactorType, User, UserId, + }, + DbConn, + }, error::MapResult, mail, sso, sso::{OIDCCode, OIDCState}, @@ -48,7 +54,7 @@ async fn login( data: Form, client_header: ClientHeaders, client_version: Option, - mut conn: DbConn, + conn: DbConn, ) -> JsonResult { let data: ConnectData = data.into_inner(); @@ -57,7 +63,7 @@ async fn login( let login_result = match data.grant_type.as_ref() { "refresh_token" => { _check_is_some(&data.refresh_token, "refresh_token cannot be blank")?; - _refresh_login(data, &mut conn, &client_header.ip).await + _refresh_login(data, &conn, &client_header.ip).await } "password" if CONFIG.sso_enabled() && CONFIG.sso_only() => err!("SSO sign-in is required"), "password" => { @@ -70,7 +76,7 @@ async fn login( _check_is_some(&data.device_name, "device_name cannot be blank")?; _check_is_some(&data.device_type, "device_type cannot be blank")?; - _password_login(data, &mut user_id, &mut conn, &client_header.ip, &client_version).await + _password_login(data, &mut user_id, &conn, &client_header.ip, &client_version).await } "client_credentials" => { _check_is_some(&data.client_id, "client_id cannot be blank")?; @@ -81,7 +87,7 @@ async fn login( 
_check_is_some(&data.device_name, "device_name cannot be blank")?; _check_is_some(&data.device_type, "device_type cannot be blank")?; - _api_key_login(data, &mut user_id, &mut conn, &client_header.ip).await + _api_key_login(data, &mut user_id, &conn, &client_header.ip).await } "authorization_code" if CONFIG.sso_enabled() => { _check_is_some(&data.client_id, "client_id cannot be blank")?; @@ -91,7 +97,7 @@ async fn login( _check_is_some(&data.device_name, "device_name cannot be blank")?; _check_is_some(&data.device_type, "device_type cannot be blank")?; - _sso_login(data, &mut user_id, &mut conn, &client_header.ip, &client_version).await + _sso_login(data, &mut user_id, &conn, &client_header.ip, &client_version).await } "authorization_code" => err!("SSO sign-in is not available"), t => err!("Invalid type", t), @@ -105,20 +111,14 @@ async fn login( &user_id, client_header.device_type, &client_header.ip.ip, - &mut conn, + &conn, ) .await; } Err(e) => { if let Some(ev) = e.get_event() { - log_user_event( - ev.event as i32, - &user_id, - client_header.device_type, - &client_header.ip.ip, - &mut conn, - ) - .await + log_user_event(ev.event as i32, &user_id, client_header.device_type, &client_header.ip.ip, &conn) + .await } } } @@ -128,7 +128,7 @@ async fn login( } // Return Status::Unauthorized to trigger logout -async fn _refresh_login(data: ConnectData, conn: &mut DbConn, ip: &ClientIp) -> JsonResult { +async fn _refresh_login(data: ConnectData, conn: &DbConn, ip: &ClientIp) -> JsonResult { // Extract token let refresh_token = match data.refresh_token { Some(token) => token, @@ -166,7 +166,7 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn, ip: &ClientIp) -> async fn _sso_login( data: ConnectData, user_id: &mut Option, - conn: &mut DbConn, + conn: &DbConn, ip: &ClientIp, client_version: &Option, ) -> JsonResult { @@ -319,7 +319,7 @@ async fn _sso_login( async fn _password_login( data: ConnectData, user_id: &mut Option, - conn: &mut DbConn, + conn: 
&DbConn, ip: &ClientIp, client_version: &Option, ) -> JsonResult { @@ -444,7 +444,7 @@ async fn authenticated_response( auth_tokens: auth::AuthTokens, twofactor_token: Option, now: &NaiveDateTime, - conn: &mut DbConn, + conn: &DbConn, ip: &ClientIp, ) -> JsonResult { if CONFIG.mail_enabled() && device.is_new() { @@ -504,12 +504,7 @@ async fn authenticated_response( Ok(Json(result)) } -async fn _api_key_login( - data: ConnectData, - user_id: &mut Option, - conn: &mut DbConn, - ip: &ClientIp, -) -> JsonResult { +async fn _api_key_login(data: ConnectData, user_id: &mut Option, conn: &DbConn, ip: &ClientIp) -> JsonResult { // Ratelimit the login crate::ratelimit::check_limit_login(&ip.ip)?; @@ -524,7 +519,7 @@ async fn _api_key_login( async fn _user_api_key_login( data: ConnectData, user_id: &mut Option, - conn: &mut DbConn, + conn: &DbConn, ip: &ClientIp, ) -> JsonResult { // Get the user via the client_id @@ -614,7 +609,7 @@ async fn _user_api_key_login( Ok(Json(result)) } -async fn _organization_api_key_login(data: ConnectData, conn: &mut DbConn, ip: &ClientIp) -> JsonResult { +async fn _organization_api_key_login(data: ConnectData, conn: &DbConn, ip: &ClientIp) -> JsonResult { // Get the org via the client_id let client_id = data.client_id.as_ref().unwrap(); let Some(org_id) = client_id.strip_prefix("organization.") else { @@ -643,7 +638,7 @@ async fn _organization_api_key_login(data: ConnectData, conn: &mut DbConn, ip: & } /// Retrieves an existing device or creates a new device from ConnectData and the User -async fn get_device(data: &ConnectData, conn: &mut DbConn, user: &User) -> ApiResult { +async fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> ApiResult { // On iOS, device_type sends "iOS", on others it sends a number // When unknown or unable to parse, return 14, which is 'Unknown Browser' let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(14); @@ -663,7 +658,7 @@ async fn twofactor_auth( device: &mut Device, ip: 
&ClientIp, client_version: &Option, - conn: &mut DbConn, + conn: &DbConn, ) -> ApiResult> { let twofactors = TwoFactor::find_by_user(&user.uuid, conn).await; @@ -780,7 +775,7 @@ async fn _json_err_twofactor( user_id: &UserId, data: &ConnectData, client_version: &Option, - conn: &mut DbConn, + conn: &DbConn, ) -> ApiResult { let mut result = json!({ "error" : "invalid_grant", @@ -905,13 +900,13 @@ enum RegisterVerificationResponse { #[post("/accounts/register/send-verification-email", data = "")] async fn register_verification_email( data: Json, - mut conn: DbConn, + conn: DbConn, ) -> ApiResult { let data = data.into_inner(); // the registration can only continue if signup is allowed or there exists an invitation if !(CONFIG.is_signup_allowed(&data.email) - || (!CONFIG.mail_enabled() && Invitation::find_by_mail(&data.email, &mut conn).await.is_some())) + || (!CONFIG.mail_enabled() && Invitation::find_by_mail(&data.email, &conn).await.is_some())) { err!("Registration not allowed or user already exists") } @@ -922,7 +917,7 @@ async fn register_verification_email( let token = auth::encode_jwt(&token_claims); if should_send_mail { - let user = User::find_by_mail(&data.email, &mut conn).await; + let user = User::find_by_mail(&data.email, &conn).await; if user.filter(|u| u.private_key.is_some()).is_some() { // There is still a timing side channel here in that the code // paths that send mail take noticeably longer than ones that diff --git a/src/api/mod.rs b/src/api/mod.rs index 6227b56f..b988f053 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -55,7 +55,7 @@ impl PasswordOrOtpData { /// Tokens used via this struct can be used multiple times during the process /// First for the validation to continue, after that to enable or validate the following actions /// This is different per caller, so it can be adjusted to delete the token or not - pub async fn validate(&self, user: &User, delete_if_valid: bool, conn: &mut DbConn) -> EmptyResult { + pub async fn 
validate(&self, user: &User, delete_if_valid: bool, conn: &DbConn) -> EmptyResult { use crate::api::core::two_factor::protected_actions::validate_protected_action_otp; match (self.master_password_hash.as_deref(), self.otp.as_deref()) { diff --git a/src/api/notifications.rs b/src/api/notifications.rs index a885e9b4..ff0c04c2 100644 --- a/src/api/notifications.rs +++ b/src/api/notifications.rs @@ -339,7 +339,7 @@ impl WebSocketUsers { } // NOTE: The last modified date needs to be updated before calling these methods - pub async fn send_user_update(&self, ut: UpdateType, user: &User, push_uuid: &Option, conn: &mut DbConn) { + pub async fn send_user_update(&self, ut: UpdateType, user: &User, push_uuid: &Option, conn: &DbConn) { // Skip any processing if both WebSockets and Push are not active if *NOTIFICATIONS_DISABLED { return; @@ -359,7 +359,7 @@ impl WebSocketUsers { } } - pub async fn send_logout(&self, user: &User, acting_device_id: Option, conn: &mut DbConn) { + pub async fn send_logout(&self, user: &User, acting_device_id: Option, conn: &DbConn) { // Skip any processing if both WebSockets and Push are not active if *NOTIFICATIONS_DISABLED { return; @@ -379,7 +379,7 @@ impl WebSocketUsers { } } - pub async fn send_folder_update(&self, ut: UpdateType, folder: &Folder, device: &Device, conn: &mut DbConn) { + pub async fn send_folder_update(&self, ut: UpdateType, folder: &Folder, device: &Device, conn: &DbConn) { // Skip any processing if both WebSockets and Push are not active if *NOTIFICATIONS_DISABLED { return; @@ -410,7 +410,7 @@ impl WebSocketUsers { user_ids: &[UserId], device: &Device, collection_uuids: Option>, - conn: &mut DbConn, + conn: &DbConn, ) { // Skip any processing if both WebSockets and Push are not active if *NOTIFICATIONS_DISABLED { @@ -458,7 +458,7 @@ impl WebSocketUsers { send: &DbSend, user_ids: &[UserId], device: &Device, - conn: &mut DbConn, + conn: &DbConn, ) { // Skip any processing if both WebSockets and Push are not active if 
*NOTIFICATIONS_DISABLED { @@ -486,13 +486,7 @@ impl WebSocketUsers { } } - pub async fn send_auth_request( - &self, - user_id: &UserId, - auth_request_uuid: &str, - device: &Device, - conn: &mut DbConn, - ) { + pub async fn send_auth_request(&self, user_id: &UserId, auth_request_uuid: &str, device: &Device, conn: &DbConn) { // Skip any processing if both WebSockets and Push are not active if *NOTIFICATIONS_DISABLED { return; @@ -516,7 +510,7 @@ impl WebSocketUsers { user_id: &UserId, auth_request_id: &AuthRequestId, device: &Device, - conn: &mut DbConn, + conn: &DbConn, ) { // Skip any processing if both WebSockets and Push are not active if *NOTIFICATIONS_DISABLED { diff --git a/src/api/push.rs b/src/api/push.rs index f3ade9b0..7fa89e82 100644 --- a/src/api/push.rs +++ b/src/api/push.rs @@ -7,7 +7,10 @@ use tokio::sync::RwLock; use crate::{ api::{ApiResult, EmptyResult, UpdateType}, - db::models::{AuthRequestId, Cipher, Device, DeviceId, Folder, PushId, Send, User, UserId}, + db::{ + models::{AuthRequestId, Cipher, Device, DeviceId, Folder, PushId, Send, User, UserId}, + DbConn, + }, http_client::make_http_request, util::{format_date, get_uuid}, CONFIG, @@ -79,7 +82,7 @@ async fn get_auth_api_token() -> ApiResult { Ok(api_token.access_token.clone()) } -pub async fn register_push_device(device: &mut Device, conn: &mut crate::db::DbConn) -> EmptyResult { +pub async fn register_push_device(device: &mut Device, conn: &DbConn) -> EmptyResult { if !CONFIG.push_enabled() || !device.is_push_device() { return Ok(()); } @@ -152,7 +155,7 @@ pub async fn unregister_push_device(push_id: &Option) -> EmptyResult { Ok(()) } -pub async fn push_cipher_update(ut: UpdateType, cipher: &Cipher, device: &Device, conn: &mut crate::db::DbConn) { +pub async fn push_cipher_update(ut: UpdateType, cipher: &Cipher, device: &Device, conn: &DbConn) { // We shouldn't send a push notification on cipher update if the cipher belongs to an organization, this isn't implemented in the upstream server 
too. if cipher.organization_uuid.is_some() { return; @@ -183,7 +186,7 @@ pub async fn push_cipher_update(ut: UpdateType, cipher: &Cipher, device: &Device } } -pub async fn push_logout(user: &User, acting_device_id: Option, conn: &mut crate::db::DbConn) { +pub async fn push_logout(user: &User, acting_device_id: Option, conn: &DbConn) { let acting_device_id: Value = acting_device_id.map(|v| v.to_string().into()).unwrap_or_else(|| Value::Null); if Device::check_user_has_push_device(&user.uuid, conn).await { @@ -203,7 +206,7 @@ pub async fn push_logout(user: &User, acting_device_id: Option, conn: } } -pub async fn push_user_update(ut: UpdateType, user: &User, push_uuid: &Option, conn: &mut crate::db::DbConn) { +pub async fn push_user_update(ut: UpdateType, user: &User, push_uuid: &Option, conn: &DbConn) { if Device::check_user_has_push_device(&user.uuid, conn).await { tokio::task::spawn(send_to_push_relay(json!({ "userId": user.uuid, @@ -221,7 +224,7 @@ pub async fn push_user_update(ut: UpdateType, user: &User, push_uuid: &Option FromRequest<'r> for Headers { let device_id = claims.device; let user_id = claims.sub; - let mut conn = match DbConn::from_request(request).await { + let conn = match DbConn::from_request(request).await { Outcome::Success(conn) => conn, _ => err_handler!("Error getting DB"), }; - let Some(device) = Device::find_by_uuid_and_user(&device_id, &user_id, &mut conn).await else { + let Some(device) = Device::find_by_uuid_and_user(&device_id, &user_id, &conn).await else { err_handler!("Invalid device id") }; - let Some(user) = User::find_by_uuid(&user_id, &mut conn).await else { + let Some(user) = User::find_by_uuid(&user_id, &conn).await else { err_handler!("Device has no user associated") }; @@ -633,7 +633,7 @@ impl<'r> FromRequest<'r> for Headers { // This prevents checking this stamp exception for new requests. 
let mut user = user; user.reset_stamp_exception(); - if let Err(e) = user.save(&mut conn).await { + if let Err(e) = user.save(&conn).await { error!("Error updating user: {e:#?}"); } err_handler!("Stamp exception is expired") @@ -706,13 +706,13 @@ impl<'r> FromRequest<'r> for OrgHeaders { match url_org_id { Some(org_id) if uuid::Uuid::parse_str(&org_id).is_ok() => { - let mut conn = match DbConn::from_request(request).await { + let conn = match DbConn::from_request(request).await { Outcome::Success(conn) => conn, _ => err_handler!("Error getting DB"), }; let user = headers.user; - let Some(membership) = Membership::find_by_user_and_org(&user.uuid, &org_id, &mut conn).await else { + let Some(membership) = Membership::find_by_user_and_org(&user.uuid, &org_id, &conn).await else { err_handler!("The current user isn't member of the organization"); }; @@ -815,12 +815,12 @@ impl<'r> FromRequest<'r> for ManagerHeaders { if headers.is_confirmed_and_manager() { match get_col_id(request) { Some(col_id) => { - let mut conn = match DbConn::from_request(request).await { + let conn = match DbConn::from_request(request).await { Outcome::Success(conn) => conn, _ => err_handler!("Error getting DB"), }; - if !Collection::can_access_collection(&headers.membership, &col_id, &mut conn).await { + if !Collection::can_access_collection(&headers.membership, &col_id, &conn).await { err_handler!("The current user isn't a manager for this collection") } } @@ -896,7 +896,7 @@ impl ManagerHeaders { pub async fn from_loose( h: ManagerHeadersLoose, collections: &Vec, - conn: &mut DbConn, + conn: &DbConn, ) -> Result { for col_id in collections { if uuid::Uuid::parse_str(col_id.as_ref()).is_err() { @@ -1200,7 +1200,7 @@ pub async fn refresh_tokens( ip: &ClientIp, refresh_token: &str, client_id: Option, - conn: &mut DbConn, + conn: &DbConn, ) -> ApiResult<(Device, AuthTokens)> { let refresh_claims = match decode_refresh(refresh_token) { Err(err) => { diff --git a/src/config.rs b/src/config.rs index 
116c9096..7d370264 100644 --- a/src/config.rs +++ b/src/config.rs @@ -12,7 +12,6 @@ use once_cell::sync::Lazy; use reqwest::Url; use crate::{ - db::DbConnType, error::Error, util::{get_env, get_env_bool, get_web_vault_version, is_valid_email, parse_experimental_client_feature_flags}, }; @@ -175,7 +174,7 @@ macro_rules! make_config { let mut config = ConfigItems::default(); let _domain_set = self.domain.is_some(); $($( - config.$name = make_config!{ @build self.$name.clone(), &config, $none_action, $($default)? }; + config.$name = make_config! { @build self.$name.clone(), &config, $none_action, $($default)? }; )+)+ config.domain_set = _domain_set; @@ -195,13 +194,13 @@ macro_rules! make_config { } #[derive(Clone, Default)] - struct ConfigItems { $($( $name: make_config!{@type $ty, $none_action}, )+)+ } + struct ConfigItems { $($( $name: make_config! {@type $ty, $none_action}, )+)+ } #[allow(unused)] impl Config { $($( $(#[doc = $doc])+ - pub fn $name(&self) -> make_config!{@type $ty, $none_action} { + pub fn $name(&self) -> make_config! {@type $ty, $none_action} { self.inner.read().unwrap().config.$name.clone() } )+)+ @@ -242,7 +241,7 @@ macro_rules! make_config { let mut group = serde_json::Map::new(); group.insert("group".into(), (stringify!($group)).into()); group.insert("grouptoggle".into(), (stringify!($($group_enabled)?)).into()); - group.insert("groupdoc".into(), (make_config!{ @show $($groupdoc)? }).into()); + group.insert("groupdoc".into(), (make_config! { @show $($groupdoc)? }).into()); group.insert("elements".into(), serde_json::Value::Array(<[_]>::into_vec(Box::new([ $( @@ -318,7 +317,7 @@ macro_rules! make_config { serde_json::Value::Object({ let mut json = serde_json::Map::new(); $($( - json.insert(stringify!($name).into(), make_config!{ @supportstr $name, cfg.$name, $ty, $none_action }); + json.insert(stringify!($name).into(), make_config! { @supportstr $name, cfg.$name, $ty, $none_action }); )+)+; json }) @@ -815,12 +814,19 @@ make_config! 
{ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> { // Validate connection URL is valid and DB feature is enabled - let url = &cfg.database_url; - if DbConnType::from_url(url)? == DbConnType::sqlite && url.contains('/') { - let path = std::path::Path::new(&url); - if let Some(parent) = path.parent() { - if !parent.is_dir() { - err!(format!("SQLite database directory `{}` does not exist or is not a directory", parent.display())); + #[cfg(sqlite)] + { + use crate::db::DbConnType; + let url = &cfg.database_url; + if DbConnType::from_url(url)? == DbConnType::Sqlite && url.contains('/') { + let path = std::path::Path::new(&url); + if let Some(parent) = path.parent() { + if !parent.is_dir() { + err!(format!( + "SQLite database directory `{}` does not exist or is not a directory", + parent.display() + )); + } } } } diff --git a/src/db/mod.rs b/src/db/mod.rs index 9f5bb150..4f17b080 100644 --- a/src/db/mod.rs +++ b/src/db/mod.rs @@ -1,8 +1,14 @@ -use std::{sync::Arc, time::Duration}; +mod query_logger; + +use std::{ + sync::{Arc, OnceLock}, + time::Duration, +}; use diesel::{ connection::SimpleConnection, r2d2::{ConnectionManager, CustomizeConnection, Pool, PooledConnection}, + Connection, RunQueryDsl, }; use rocket::{ @@ -21,20 +27,7 @@ use crate::{ CONFIG, }; -#[cfg(sqlite)] -#[path = "schemas/sqlite/schema.rs"] -pub mod __sqlite_schema; - -#[cfg(mysql)] -#[path = "schemas/mysql/schema.rs"] -pub mod __mysql_schema; - -#[cfg(postgresql)] -#[path = "schemas/postgresql/schema.rs"] -pub mod __postgresql_schema; - // These changes are based on Rocket 0.5-rc wrapper of Diesel: https://github.com/SergioBenitez/Rocket/blob/v0.5-rc/contrib/sync_db_pools - // A wrapper around spawn_blocking that propagates panics to the calling code. pub async fn run_blocking(job: F) -> R where @@ -51,162 +44,174 @@ where } // This is used to generate the main DbConn and DbPool enums, which contain one variant for each database supported -macro_rules! 
generate_connections { - ( $( $name:ident: $ty:ty ),+ ) => { - #[allow(non_camel_case_types, dead_code)] - #[derive(Eq, PartialEq)] - pub enum DbConnType { $( $name, )+ } - - pub struct DbConn { - conn: Arc>>, - permit: Option, - } +#[derive(diesel::MultiConnection)] +pub enum DbConnInner { + #[cfg(mysql)] + Mysql(diesel::mysql::MysqlConnection), + #[cfg(postgresql)] + Postgresql(diesel::pg::PgConnection), + #[cfg(sqlite)] + Sqlite(diesel::sqlite::SqliteConnection), +} + +#[derive(Eq, PartialEq)] +pub enum DbConnType { + #[cfg(mysql)] + Mysql, + #[cfg(postgresql)] + Postgresql, + #[cfg(sqlite)] + Sqlite, +} - #[allow(non_camel_case_types)] - pub enum DbConnInner { $( #[cfg($name)] $name(PooledConnection>), )+ } +pub static ACTIVE_DB_TYPE: OnceLock = OnceLock::new(); - #[derive(Debug)] - pub struct DbConnOptions { - pub init_stmts: String, - } +pub struct DbConn { + conn: Arc>>>>, + permit: Option, +} - $( // Based on . - #[cfg($name)] - impl CustomizeConnection<$ty, diesel::r2d2::Error> for DbConnOptions { - fn on_acquire(&self, conn: &mut $ty) -> Result<(), diesel::r2d2::Error> { - if !self.init_stmts.is_empty() { - conn.batch_execute(&self.init_stmts).map_err(diesel::r2d2::Error::QueryError)?; - } - Ok(()) - } - })+ +#[derive(Debug)] +pub struct DbConnOptions { + pub init_stmts: String, +} - #[derive(Clone)] - pub struct DbPool { - // This is an 'Option' so that we can drop the pool in a 'spawn_blocking'. 
- pool: Option, - semaphore: Arc +impl CustomizeConnection for DbConnOptions { + fn on_acquire(&self, conn: &mut DbConnInner) -> Result<(), diesel::r2d2::Error> { + if !self.init_stmts.is_empty() { + conn.batch_execute(&self.init_stmts).map_err(diesel::r2d2::Error::QueryError)?; } + Ok(()) + } +} - #[allow(non_camel_case_types)] - #[derive(Clone)] - pub enum DbPoolInner { $( #[cfg($name)] $name(Pool>), )+ } - - impl Drop for DbConn { - fn drop(&mut self) { - let conn = Arc::clone(&self.conn); - let permit = self.permit.take(); +#[derive(Clone)] +pub struct DbPool { + // This is an 'Option' so that we can drop the pool in a 'spawn_blocking'. + pool: Option>>, + semaphore: Arc, +} - // Since connection can't be on the stack in an async fn during an - // await, we have to spawn a new blocking-safe thread... - tokio::task::spawn_blocking(move || { - // And then re-enter the runtime to wait on the async mutex, but in a blocking fashion. - let mut conn = tokio::runtime::Handle::current().block_on(conn.lock_owned()); +impl Drop for DbConn { + fn drop(&mut self) { + let conn = Arc::clone(&self.conn); + let permit = self.permit.take(); - if let Some(conn) = conn.take() { - drop(conn); - } + // Since connection can't be on the stack in an async fn during an + // await, we have to spawn a new blocking-safe thread... + tokio::task::spawn_blocking(move || { + // And then re-enter the runtime to wait on the async mutex, but in a blocking fashion. 
+ let mut conn = tokio::runtime::Handle::current().block_on(conn.lock_owned()); - // Drop permit after the connection is dropped - drop(permit); - }); + if let Some(conn) = conn.take() { + drop(conn); } + + // Drop permit after the connection is dropped + drop(permit); + }); + } +} + +impl Drop for DbPool { + fn drop(&mut self) { + let pool = self.pool.take(); + // Only use spawn_blocking if the Tokio runtime is still available + // Otherwise the pool will be dropped on the current thread + if let Ok(handle) = tokio::runtime::Handle::try_current() { + handle.spawn_blocking(move || drop(pool)); } + } +} - impl Drop for DbPool { - fn drop(&mut self) { - let pool = self.pool.take(); - tokio::task::spawn_blocking(move || drop(pool)); - } +impl DbPool { + // For the given database URL, guess its type, run migrations, create pool, and return it + pub fn from_config() -> Result { + let url = CONFIG.database_url(); + let conn_type = DbConnType::from_url(&url)?; + + // Only set the default instrumentation if the log level is specifically set to either warn, info or debug + if log_enabled!(target: "vaultwarden::db::query_logger", log::Level::Warn) + || log_enabled!(target: "vaultwarden::db::query_logger", log::Level::Info) + || log_enabled!(target: "vaultwarden::db::query_logger", log::Level::Debug) + { + drop(diesel::connection::set_default_instrumentation(query_logger::simple_logger)); } - impl DbPool { - // For the given database URL, guess its type, run migrations, create pool, and return it - pub fn from_config() -> Result { - let url = CONFIG.database_url(); - let conn_type = DbConnType::from_url(&url)?; - - match conn_type { $( - DbConnType::$name => { - #[cfg($name)] - { - pastey::paste!{ [< $name _migrations >]::run_migrations()?; } - let manager = ConnectionManager::new(&url); - let pool = Pool::builder() - .max_size(CONFIG.database_max_conns()) - .min_idle(Some(CONFIG.database_min_conns())) - .idle_timeout(Some(Duration::from_secs(CONFIG.database_idle_timeout()))) 
- .connection_timeout(Duration::from_secs(CONFIG.database_timeout())) - .connection_customizer(Box::new(DbConnOptions{ - init_stmts: conn_type.get_init_stmts() - })) - .build(manager) - .map_res("Failed to create pool")?; - Ok(DbPool { - pool: Some(DbPoolInner::$name(pool)), - semaphore: Arc::new(Semaphore::new(CONFIG.database_max_conns() as usize)), - }) - } - #[cfg(not($name))] - unreachable!("Trying to use a DB backend when it's feature is disabled") - }, - )+ } + match conn_type { + #[cfg(mysql)] + DbConnType::Mysql => { + mysql_migrations::run_migrations(&url)?; } - // Get a connection from the pool - pub async fn get(&self) -> Result { - let duration = Duration::from_secs(CONFIG.database_timeout()); - let permit = match timeout(duration, Arc::clone(&self.semaphore).acquire_owned()).await { - Ok(p) => p.expect("Semaphore should be open"), - Err(_) => { - err!("Timeout waiting for database connection"); - } - }; - - match self.pool.as_ref().expect("DbPool.pool should always be Some()") { $( - #[cfg($name)] - DbPoolInner::$name(p) => { - let pool = p.clone(); - let c = run_blocking(move || pool.get_timeout(duration)).await.map_res("Error retrieving connection from pool")?; - - Ok(DbConn { - conn: Arc::new(Mutex::new(Some(DbConnInner::$name(c)))), - permit: Some(permit) - }) - }, - )+ } + #[cfg(postgresql)] + DbConnType::Postgresql => { + postgresql_migrations::run_migrations(&url)?; + } + #[cfg(sqlite)] + DbConnType::Sqlite => { + sqlite_migrations::run_migrations(&url)?; } } - }; -} -#[cfg(not(query_logger))] -generate_connections! 
{ - sqlite: diesel::sqlite::SqliteConnection, - mysql: diesel::mysql::MysqlConnection, - postgresql: diesel::pg::PgConnection -} + let max_conns = CONFIG.database_max_conns(); + let manager = ConnectionManager::::new(&url); + let pool = Pool::builder() + .max_size(max_conns) + .min_idle(Some(CONFIG.database_min_conns())) + .idle_timeout(Some(Duration::from_secs(CONFIG.database_idle_timeout()))) + .connection_timeout(Duration::from_secs(CONFIG.database_timeout())) + .connection_customizer(Box::new(DbConnOptions { + init_stmts: conn_type.get_init_stmts(), + })) + .build(manager) + .map_res("Failed to create pool")?; + + // Set a global to determine the database more easily throughout the rest of the code + if ACTIVE_DB_TYPE.set(conn_type).is_err() { + error!("Tried to set the active database connection type more than once.") + } + + Ok(DbPool { + pool: Some(pool), + semaphore: Arc::new(Semaphore::new(max_conns as usize)), + }) + } -#[cfg(query_logger)] -generate_connections! { - sqlite: diesel_logger::LoggingConnection, - mysql: diesel_logger::LoggingConnection, - postgresql: diesel_logger::LoggingConnection + // Get a connection from the pool + pub async fn get(&self) -> Result { + let duration = Duration::from_secs(CONFIG.database_timeout()); + let permit = match timeout(duration, Arc::clone(&self.semaphore).acquire_owned()).await { + Ok(p) => p.expect("Semaphore should be open"), + Err(_) => { + err!("Timeout waiting for database connection"); + } + }; + + let p = self.pool.as_ref().expect("DbPool.pool should always be Some()"); + let pool = p.clone(); + let c = + run_blocking(move || pool.get_timeout(duration)).await.map_res("Error retrieving connection from pool")?; + Ok(DbConn { + conn: Arc::new(Mutex::new(Some(c))), + permit: Some(permit), + }) + } } impl DbConnType { - pub fn from_url(url: &str) -> Result { + pub fn from_url(url: &str) -> Result { // Mysql - if url.starts_with("mysql:") { + if url.len() > 6 && &url[..6] == "mysql:" { #[cfg(mysql)] - return 
Ok(DbConnType::mysql); + return Ok(DbConnType::Mysql); #[cfg(not(mysql))] err!("`DATABASE_URL` is a MySQL URL, but the 'mysql' feature is not enabled") - // Postgres - } else if url.starts_with("postgresql:") || url.starts_with("postgres:") { + // Postgresql + } else if url.len() > 11 && (&url[..11] == "postgresql:" || &url[..9] == "postgres:") { #[cfg(postgresql)] - return Ok(DbConnType::postgresql); + return Ok(DbConnType::Postgresql); #[cfg(not(postgresql))] err!("`DATABASE_URL` is a PostgreSQL URL, but the 'postgresql' feature is not enabled") @@ -214,7 +219,7 @@ impl DbConnType { //Sqlite } else { #[cfg(sqlite)] - return Ok(DbConnType::sqlite); + return Ok(DbConnType::Sqlite); #[cfg(not(sqlite))] err!("`DATABASE_URL` looks like a SQLite URL, but 'sqlite' feature is not enabled") @@ -232,175 +237,102 @@ impl DbConnType { pub fn default_init_stmts(&self) -> String { match self { - Self::sqlite => "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;".to_string(), - Self::mysql => String::new(), - Self::postgresql => String::new(), + #[cfg(mysql)] + Self::Mysql => String::new(), + #[cfg(postgresql)] + Self::Postgresql => String::new(), + #[cfg(sqlite)] + Self::Sqlite => "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;".to_string(), } } } #[macro_export] -macro_rules! db_run { - // Same for all dbs - ( $conn:ident: $body:block ) => { - db_run! { $conn: sqlite, mysql, postgresql $body } - }; - - ( @raw $conn:ident: $body:block ) => { - db_run! { @raw $conn: sqlite, mysql, postgresql $body } +macro_rules! 
db_run_base { + ( $conn:ident ) => { + let conn = std::sync::Arc::clone(&$conn.conn); + let mut conn = conn.lock_owned().await; + let $conn = conn.as_mut().expect("internal invariant broken: self.conn is Some"); }; +} - // Different code for each db - ( $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{ - #[allow(unused)] use diesel::prelude::*; - #[allow(unused)] use $crate::db::FromDb; - - let conn = $conn.conn.clone(); - let mut conn = conn.lock_owned().await; - match conn.as_mut().expect("internal invariant broken: self.connection is Some") { - $($( - #[cfg($db)] - $crate::db::DbConnInner::$db($conn) => { - pastey::paste! { - #[allow(unused)] use $crate::db::[<__ $db _schema>]::{self as schema, *}; - #[allow(unused)] use [<__ $db _model>]::*; - } - - tokio::task::block_in_place(move || { $body }) // Run blocking can't be used due to the 'static limitation, use block_in_place instead - }, - )+)+ - } +#[macro_export] +macro_rules! db_run { + ( $conn:ident: $body:block ) => {{ + db_run_base!($conn); + // Run blocking can't be used due to the 'static limitation, use block_in_place instead + tokio::task::block_in_place(move || $body ) }}; - ( @raw $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{ - #[allow(unused)] use diesel::prelude::*; - #[allow(unused)] use $crate::db::FromDb; - - let conn = $conn.conn.clone(); - let mut conn = conn.lock_owned().await; - match conn.as_mut().expect("internal invariant broken: self.connection is Some") { - $($( - #[cfg($db)] - $crate::db::DbConnInner::$db($conn) => { - pastey::paste! 
{ - #[allow(unused)] use $crate::db::[<__ $db _schema>]::{self as schema, *}; - // @ RAW: #[allow(unused)] use [<__ $db _model>]::*; - } - - tokio::task::block_in_place(move || { $body }) // Run blocking can't be used due to the 'static limitation, use block_in_place instead - }, - )+)+ - } + ( $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{ + db_run_base!($conn); + match std::ops::DerefMut::deref_mut($conn) { + $($( + #[cfg($db)] + pastey::paste!(&mut $crate::db::DbConnInner::[<$db:camel>](ref mut $conn)) => { + // Run blocking can't be used due to the 'static limitation, use block_in_place instead + tokio::task::block_in_place(move || $body ) + }, + )+)+} }}; } -pub trait FromDb { - type Output; - #[allow(clippy::wrong_self_convention)] - fn from_db(self) -> Self::Output; -} - -impl FromDb for Vec { - type Output = Vec; - #[inline(always)] - fn from_db(self) -> Self::Output { - self.into_iter().map(FromDb::from_db).collect() - } -} - -impl FromDb for Option { - type Output = Option; - #[inline(always)] - fn from_db(self) -> Self::Output { - self.map(FromDb::from_db) - } -} - -// For each struct eg. Cipher, we create a CipherDb inside a module named __$db_model (where $db is sqlite, mysql or postgresql), -// to implement the Diesel traits. We also provide methods to convert between them and the basic structs. Later, that module will be auto imported when using db_run! -#[macro_export] -macro_rules! db_object { - ( $( - $( #[$attr:meta] )* - pub struct $name:ident { - $( $( #[$field_attr:meta] )* $vis:vis $field:ident : $typ:ty ),+ - $(,)? - } - )+ ) => { - // Create the normal struct, without attributes - $( pub struct $name { $( /*$( #[$field_attr] )**/ $vis $field : $typ, )+ } )+ - - #[cfg(sqlite)] - pub mod __sqlite_model { $( db_object! { @db sqlite | $( #[$attr] )* | $name | $( $( #[$field_attr] )* $field : $typ ),+ } )+ } - #[cfg(mysql)] - pub mod __mysql_model { $( db_object! 
{ @db mysql | $( #[$attr] )* | $name | $( $( #[$field_attr] )* $field : $typ ),+ } )+ } - #[cfg(postgresql)] - pub mod __postgresql_model { $( db_object! { @db postgresql | $( #[$attr] )* | $name | $( $( #[$field_attr] )* $field : $typ ),+ } )+ } - }; - - ( @db $db:ident | $( #[$attr:meta] )* | $name:ident | $( $( #[$field_attr:meta] )* $vis:vis $field:ident : $typ:ty),+) => { - pastey::paste! { - #[allow(unused)] use super::*; - #[allow(unused)] use diesel::prelude::*; - #[allow(unused)] use $crate::db::[<__ $db _schema>]::*; - - $( #[$attr] )* - pub struct [<$name Db>] { $( - $( #[$field_attr] )* $vis $field : $typ, - )+ } - - impl [<$name Db>] { - #[allow(clippy::wrong_self_convention)] - #[inline(always)] pub fn to_db(x: &super::$name) -> Self { Self { $( $field: x.$field.clone(), )+ } } - } - - impl $crate::db::FromDb for [<$name Db>] { - type Output = super::$name; - #[allow(clippy::wrong_self_convention)] - #[inline(always)] fn from_db(self) -> Self::Output { super::$name { $( $field: self.$field, )+ } } - } - } - }; -} +pub mod schema; // Reexport the models, needs to be after the macros are defined so it can access them pub mod models; /// Creates a back-up of the sqlite database /// MySQL/MariaDB and PostgreSQL are not supported. -pub async fn backup_database(conn: &mut DbConn) -> Result { - db_run! 
{@raw conn: - postgresql, mysql { - let _ = conn; - err!("PostgreSQL and MySQL/MariaDB do not support this backup feature"); - } - sqlite { - let db_url = CONFIG.database_url(); - let db_path = std::path::Path::new(&db_url).parent().unwrap(); - let backup_file = db_path - .join(format!("db_{}.sqlite3", chrono::Utc::now().format("%Y%m%d_%H%M%S"))) - .to_string_lossy() - .into_owned(); - diesel::sql_query(format!("VACUUM INTO '{backup_file}'")).execute(conn)?; - Ok(backup_file) +#[cfg(sqlite)] +pub fn backup_sqlite() -> Result { + use diesel::Connection; + use std::{fs::File, io::Write}; + + let db_url = CONFIG.database_url(); + if DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::Sqlite).unwrap_or(false) { + // Since we do not allow any schema for sqlite database_url's like `file:` or `sqlite:` to be set, we can assume here it isn't + // This way we can set a readonly flag on the opening mode without issues. + let mut conn = diesel::sqlite::SqliteConnection::establish(&format!("sqlite://{db_url}?mode=ro"))?; + + let db_path = std::path::Path::new(&db_url).parent().unwrap(); + let backup_file = db_path + .join(format!("db_{}.sqlite3", chrono::Utc::now().format("%Y%m%d_%H%M%S"))) + .to_string_lossy() + .into_owned(); + + match File::create(backup_file.clone()) { + Ok(mut f) => { + let serialized_db = conn.serialize_database_to_buffer(); + f.write_all(serialized_db.as_slice()).expect("Error writing SQLite backup"); + Ok(backup_file) + } + Err(e) => { + err_silent!(format!("Unable to save SQLite backup: {e:?}")) + } } + } else { + err_silent!("The database type is not SQLite. Backups only works for SQLite databases") } } +#[cfg(not(sqlite))] +pub fn backup_sqlite() -> Result { + err_silent!("The database type is not SQLite. Backups only works for SQLite databases") +} + /// Get the SQL Server version -pub async fn get_sql_server_version(conn: &mut DbConn) -> String { - db_run! 
{@raw conn: - postgresql, mysql { - define_sql_function!{ - fn version() -> diesel::sql_types::Text; - } - diesel::select(version()).get_result::(conn).unwrap_or_else(|_| "Unknown".to_string()) +pub async fn get_sql_server_version(conn: &DbConn) -> String { + db_run! { conn: + postgresql,mysql { + diesel::select(diesel::dsl::sql::("version();")) + .get_result::(conn) + .unwrap_or_else(|_| "Unknown".to_string()) } sqlite { - define_sql_function!{ - fn sqlite_version() -> diesel::sql_types::Text; - } - diesel::select(sqlite_version()).get_result::(conn).unwrap_or_else(|_| "Unknown".to_string()) + diesel::select(diesel::dsl::sql::("sqlite_version();")) + .get_result::(conn) + .unwrap_or_else(|_| "Unknown".to_string()) } } } @@ -428,16 +360,14 @@ impl<'r> FromRequest<'r> for DbConn { // https://docs.rs/diesel_migrations/*/diesel_migrations/macro.embed_migrations.html #[cfg(sqlite)] mod sqlite_migrations { + use diesel::{Connection, RunQueryDsl}; use diesel_migrations::{EmbeddedMigrations, MigrationHarness}; pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/sqlite"); - pub fn run_migrations() -> Result<(), super::Error> { - use diesel::{Connection, RunQueryDsl}; - let url = crate::CONFIG.database_url(); - + pub fn run_migrations(url: &str) -> Result<(), super::Error> { // Establish a connection to the sqlite database (this will create a new one, if it does // not exist, and exit if there is an error). 
- let mut connection = diesel::sqlite::SqliteConnection::establish(&url)?; + let mut connection = diesel::sqlite::SqliteConnection::establish(url)?; // Run the migrations after successfully establishing a connection // Disable Foreign Key Checks during migration @@ -458,15 +388,15 @@ mod sqlite_migrations { #[cfg(mysql)] mod mysql_migrations { + use diesel::{Connection, RunQueryDsl}; use diesel_migrations::{EmbeddedMigrations, MigrationHarness}; pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/mysql"); - pub fn run_migrations() -> Result<(), super::Error> { - use diesel::{Connection, RunQueryDsl}; + pub fn run_migrations(url: &str) -> Result<(), super::Error> { // Make sure the database is up to date (create if it doesn't exist, or run the migrations) - let mut connection = diesel::mysql::MysqlConnection::establish(&crate::CONFIG.database_url())?; - // Disable Foreign Key Checks during migration + let mut connection = diesel::mysql::MysqlConnection::establish(url)?; + // Disable Foreign Key Checks during migration // Scoped to a connection/session. 
diesel::sql_query("SET FOREIGN_KEY_CHECKS = 0") .execute(&mut connection) @@ -479,13 +409,14 @@ mod mysql_migrations { #[cfg(postgresql)] mod postgresql_migrations { + use diesel::Connection; use diesel_migrations::{EmbeddedMigrations, MigrationHarness}; pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/postgresql"); - pub fn run_migrations() -> Result<(), super::Error> { - use diesel::Connection; + pub fn run_migrations(url: &str) -> Result<(), super::Error> { // Make sure the database is up to date (create if it doesn't exist, or run the migrations) - let mut connection = diesel::pg::PgConnection::establish(&crate::CONFIG.database_url())?; + let mut connection = diesel::pg::PgConnection::establish(url)?; + connection.run_pending_migrations(MIGRATIONS).expect("Error running migrations"); Ok(()) } diff --git a/src/db/models/attachment.rs b/src/db/models/attachment.rs index aafb8766..7fc8664c 100644 --- a/src/db/models/attachment.rs +++ b/src/db/models/attachment.rs @@ -1,25 +1,24 @@ -use std::time::Duration; - use bigdecimal::{BigDecimal, ToPrimitive}; use derive_more::{AsRef, Deref, Display}; +use diesel::prelude::*; use serde_json::Value; +use std::time::Duration; use super::{CipherId, OrganizationId, UserId}; +use crate::db::schema::{attachments, ciphers}; use crate::{config::PathType, CONFIG}; use macros::IdFromParam; -db_object! 
{ - #[derive(Identifiable, Queryable, Insertable, AsChangeset)] - #[diesel(table_name = attachments)] - #[diesel(treat_none_as_null = true)] - #[diesel(primary_key(id))] - pub struct Attachment { - pub id: AttachmentId, - pub cipher_uuid: CipherId, - pub file_name: String, // encrypted - pub file_size: i64, - pub akey: Option, - } +#[derive(Identifiable, Queryable, Insertable, AsChangeset)] +#[diesel(table_name = attachments)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(id))] +pub struct Attachment { + pub id: AttachmentId, + pub cipher_uuid: CipherId, + pub file_name: String, // encrypted + pub file_size: i64, + pub akey: Option, } /// Local methods @@ -76,11 +75,11 @@ use crate::error::MapResult; /// Database methods impl Attachment { - pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&self, conn: &DbConn) -> EmptyResult { db_run! { conn: sqlite, mysql { match diesel::replace_into(attachments::table) - .values(AttachmentDb::to_db(self)) + .values(self) .execute(conn) { Ok(_) => Ok(()), @@ -88,7 +87,7 @@ impl Attachment { Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => { diesel::update(attachments::table) .filter(attachments::id.eq(&self.id)) - .set(AttachmentDb::to_db(self)) + .set(self) .execute(conn) .map_res("Error saving attachment") } @@ -96,22 +95,22 @@ impl Attachment { }.map_res("Error saving attachment") } postgresql { - let value = AttachmentDb::to_db(self); diesel::insert_into(attachments::table) - .values(&value) + .values(self) .on_conflict(attachments::id) .do_update() - .set(&value) + .set(self) .execute(conn) .map_res("Error saving attachment") } } } - pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(&self, conn: &DbConn) -> EmptyResult { db_run! 
{ conn: { - crate::util::retry( - || diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn), + crate::util::retry(|| + diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))) + .execute(conn), 10, ) .map(|_| ()) @@ -132,34 +131,32 @@ impl Attachment { Ok(()) } - pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &DbConn) -> EmptyResult { for attachment in Attachment::find_by_cipher(cipher_uuid, conn).await { attachment.delete(conn).await?; } Ok(()) } - pub async fn find_by_id(id: &AttachmentId, conn: &mut DbConn) -> Option { + pub async fn find_by_id(id: &AttachmentId, conn: &DbConn) -> Option { db_run! { conn: { attachments::table .filter(attachments::id.eq(id.to_lowercase())) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn find_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> Vec { + pub async fn find_by_cipher(cipher_uuid: &CipherId, conn: &DbConn) -> Vec { db_run! { conn: { attachments::table .filter(attachments::cipher_uuid.eq(cipher_uuid)) - .load::(conn) + .load::(conn) .expect("Error loading attachments") - .from_db() }} } - pub async fn size_by_user(user_uuid: &UserId, conn: &mut DbConn) -> i64 { + pub async fn size_by_user(user_uuid: &UserId, conn: &DbConn) -> i64 { db_run! { conn: { let result: Option = attachments::table .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) @@ -176,7 +173,7 @@ impl Attachment { }} } - pub async fn count_by_user(user_uuid: &UserId, conn: &mut DbConn) -> i64 { + pub async fn count_by_user(user_uuid: &UserId, conn: &DbConn) -> i64 { db_run! 
{ conn: { attachments::table .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) @@ -187,7 +184,7 @@ impl Attachment { }} } - pub async fn size_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 { + pub async fn size_by_org(org_uuid: &OrganizationId, conn: &DbConn) -> i64 { db_run! { conn: { let result: Option = attachments::table .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) @@ -204,7 +201,7 @@ impl Attachment { }} } - pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 { + pub async fn count_by_org(org_uuid: &OrganizationId, conn: &DbConn) -> i64 { db_run! { conn: { attachments::table .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid))) @@ -221,7 +218,7 @@ impl Attachment { pub async fn find_all_by_user_and_orgs( user_uuid: &UserId, org_uuids: &Vec, - conn: &mut DbConn, + conn: &DbConn, ) -> Vec { db_run! { conn: { attachments::table @@ -229,9 +226,8 @@ impl Attachment { .filter(ciphers::user_uuid.eq(user_uuid)) .or_filter(ciphers::organization_uuid.eq_any(org_uuids)) .select(attachments::all_columns) - .load::(conn) + .load::(conn) .expect("Error loading attachments") - .from_db() }} } } diff --git a/src/db/models/auth_request.rs b/src/db/models/auth_request.rs index 31e7e66e..c2af8d74 100644 --- a/src/db/models/auth_request.rs +++ b/src/db/models/auth_request.rs @@ -1,38 +1,38 @@ use super::{DeviceId, OrganizationId, UserId}; +use crate::db::schema::auth_requests; use crate::{crypto::ct_eq, util::format_date}; use chrono::{NaiveDateTime, Utc}; use derive_more::{AsRef, Deref, Display, From}; +use diesel::prelude::*; use macros::UuidFromParam; use serde_json::Value; -db_object! 
{ - #[derive(Identifiable, Queryable, Insertable, AsChangeset, Deserialize, Serialize)] - #[diesel(table_name = auth_requests)] - #[diesel(treat_none_as_null = true)] - #[diesel(primary_key(uuid))] - pub struct AuthRequest { - pub uuid: AuthRequestId, - pub user_uuid: UserId, - pub organization_uuid: Option, +#[derive(Identifiable, Queryable, Insertable, AsChangeset, Deserialize, Serialize)] +#[diesel(table_name = auth_requests)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(uuid))] +pub struct AuthRequest { + pub uuid: AuthRequestId, + pub user_uuid: UserId, + pub organization_uuid: Option, - pub request_device_identifier: DeviceId, - pub device_type: i32, // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Enums/DeviceType.cs + pub request_device_identifier: DeviceId, + pub device_type: i32, // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Enums/DeviceType.cs - pub request_ip: String, - pub response_device_id: Option, + pub request_ip: String, + pub response_device_id: Option, - pub access_code: String, - pub public_key: String, + pub access_code: String, + pub public_key: String, - pub enc_key: Option, + pub enc_key: Option, - pub master_password_hash: Option, - pub approved: Option, - pub creation_date: NaiveDateTime, - pub response_date: Option, + pub master_password_hash: Option, + pub approved: Option, + pub creation_date: NaiveDateTime, + pub response_date: Option, - pub authentication_date: Option, - } + pub authentication_date: Option, } impl AuthRequest { @@ -80,11 +80,11 @@ use crate::api::EmptyResult; use crate::error::MapResult; impl AuthRequest { - pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&mut self, conn: &DbConn) -> EmptyResult { db_run! 
{ conn: sqlite, mysql { match diesel::replace_into(auth_requests::table) - .values(AuthRequestDb::to_db(self)) + .values(&*self) .execute(conn) { Ok(_) => Ok(()), @@ -92,7 +92,7 @@ impl AuthRequest { Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => { diesel::update(auth_requests::table) .filter(auth_requests::uuid.eq(&self.uuid)) - .set(AuthRequestDb::to_db(self)) + .set(&*self) .execute(conn) .map_res("Error auth_request") } @@ -100,71 +100,71 @@ impl AuthRequest { }.map_res("Error auth_request") } postgresql { - let value = AuthRequestDb::to_db(self); diesel::insert_into(auth_requests::table) - .values(&value) + .values(&*self) .on_conflict(auth_requests::uuid) .do_update() - .set(&value) + .set(&*self) .execute(conn) .map_res("Error saving auth_request") } } } - pub async fn find_by_uuid(uuid: &AuthRequestId, conn: &mut DbConn) -> Option { - db_run! {conn: { + pub async fn find_by_uuid(uuid: &AuthRequestId, conn: &DbConn) -> Option { + db_run! { conn: { auth_requests::table .filter(auth_requests::uuid.eq(uuid)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn find_by_uuid_and_user(uuid: &AuthRequestId, user_uuid: &UserId, conn: &mut DbConn) -> Option { - db_run! {conn: { + pub async fn find_by_uuid_and_user(uuid: &AuthRequestId, user_uuid: &UserId, conn: &DbConn) -> Option { + db_run! { conn: { auth_requests::table .filter(auth_requests::uuid.eq(uuid)) .filter(auth_requests::user_uuid.eq(user_uuid)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { - db_run! {conn: { + pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { + db_run! 
{ conn: { auth_requests::table .filter(auth_requests::user_uuid.eq(user_uuid)) - .load::(conn).expect("Error loading auth_requests").from_db() + .load::(conn) + .expect("Error loading auth_requests") }} } pub async fn find_by_user_and_requested_device( user_uuid: &UserId, device_uuid: &DeviceId, - conn: &mut DbConn, + conn: &DbConn, ) -> Option { - db_run! {conn: { + db_run! { conn: { auth_requests::table .filter(auth_requests::user_uuid.eq(user_uuid)) .filter(auth_requests::request_device_identifier.eq(device_uuid)) .filter(auth_requests::approved.is_null()) .order_by(auth_requests::creation_date.desc()) - .first::(conn).ok().from_db() + .first::(conn) + .ok() }} } - pub async fn find_created_before(dt: &NaiveDateTime, conn: &mut DbConn) -> Vec { - db_run! {conn: { + pub async fn find_created_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec { + db_run! { conn: { auth_requests::table .filter(auth_requests::creation_date.lt(dt)) - .load::(conn).expect("Error loading auth_requests").from_db() + .load::(conn) + .expect("Error loading auth_requests") }} } - pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(&self, conn: &DbConn) -> EmptyResult { db_run! 
{ conn: { diesel::delete(auth_requests::table.filter(auth_requests::uuid.eq(&self.uuid))) .execute(conn) @@ -176,7 +176,7 @@ impl AuthRequest { ct_eq(&self.access_code, access_code) } - pub async fn purge_expired_auth_requests(conn: &mut DbConn) { + pub async fn purge_expired_auth_requests(conn: &DbConn) { let expiry_time = Utc::now().naive_utc() - chrono::TimeDelta::try_minutes(5).unwrap(); //after 5 minutes, clients reject the request for auth_request in Self::find_created_before(&expiry_time, conn).await { auth_request.delete(conn).await.ok(); diff --git a/src/db/models/cipher.rs b/src/db/models/cipher.rs index 8cbad4b7..d09d25be 100644 --- a/src/db/models/cipher.rs +++ b/src/db/models/cipher.rs @@ -1,7 +1,12 @@ +use crate::db::schema::{ + ciphers, ciphers_collections, collections, collections_groups, folders, folders_ciphers, groups, groups_users, + users_collections, users_organizations, +}; use crate::util::LowerCase; use crate::CONFIG; use chrono::{NaiveDateTime, TimeDelta, Utc}; use derive_more::{AsRef, Deref, Display, From}; +use diesel::prelude::*; use serde_json::Value; use super::{ @@ -13,39 +18,37 @@ use macros::UuidFromParam; use std::borrow::Cow; -db_object! 
{ - #[derive(Identifiable, Queryable, Insertable, AsChangeset)] - #[diesel(table_name = ciphers)] - #[diesel(treat_none_as_null = true)] - #[diesel(primary_key(uuid))] - pub struct Cipher { - pub uuid: CipherId, - pub created_at: NaiveDateTime, - pub updated_at: NaiveDateTime, - - pub user_uuid: Option, - pub organization_uuid: Option, - - pub key: Option, - - /* - Login = 1, - SecureNote = 2, - Card = 3, - Identity = 4, - SshKey = 5 - */ - pub atype: i32, - pub name: String, - pub notes: Option, - pub fields: Option, - - pub data: String, - - pub password_history: Option, - pub deleted_at: Option, - pub reprompt: Option, - } +#[derive(Identifiable, Queryable, Insertable, AsChangeset)] +#[diesel(table_name = ciphers)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(uuid))] +pub struct Cipher { + pub uuid: CipherId, + pub created_at: NaiveDateTime, + pub updated_at: NaiveDateTime, + + pub user_uuid: Option, + pub organization_uuid: Option, + + pub key: Option, + + /* + Login = 1, + SecureNote = 2, + Card = 3, + Identity = 4, + SshKey = 5 + */ + pub atype: i32, + pub name: String, + pub notes: Option, + pub fields: Option, + + pub data: String, + + pub password_history: Option, + pub deleted_at: Option, + pub reprompt: Option, } pub enum RepromptType { @@ -140,7 +143,7 @@ impl Cipher { user_uuid: &UserId, cipher_sync_data: Option<&CipherSyncData>, sync_type: CipherSyncType, - conn: &mut DbConn, + conn: &DbConn, ) -> Result { use crate::util::{format_date, validate_and_format_date}; @@ -402,7 +405,7 @@ impl Cipher { Ok(json_object) } - pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec { + pub async fn update_users_revision(&self, conn: &DbConn) -> Vec { let mut user_uuids = Vec::new(); match self.user_uuid { Some(ref user_uuid) => { @@ -430,14 +433,14 @@ impl Cipher { user_uuids } - pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&mut self, conn: &DbConn) -> EmptyResult { 
self.update_users_revision(conn).await; self.updated_at = Utc::now().naive_utc(); db_run! { conn: sqlite, mysql { match diesel::replace_into(ciphers::table) - .values(CipherDb::to_db(self)) + .values(&*self) .execute(conn) { Ok(_) => Ok(()), @@ -445,7 +448,7 @@ impl Cipher { Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => { diesel::update(ciphers::table) .filter(ciphers::uuid.eq(&self.uuid)) - .set(CipherDb::to_db(self)) + .set(&*self) .execute(conn) .map_res("Error saving cipher") } @@ -453,19 +456,18 @@ impl Cipher { }.map_res("Error saving cipher") } postgresql { - let value = CipherDb::to_db(self); diesel::insert_into(ciphers::table) - .values(&value) + .values(&*self) .on_conflict(ciphers::uuid) .do_update() - .set(&value) + .set(&*self) .execute(conn) .map_res("Error saving cipher") } } } - pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(&self, conn: &DbConn) -> EmptyResult { self.update_users_revision(conn).await; FolderCipher::delete_all_by_cipher(&self.uuid, conn).await?; @@ -480,7 +482,7 @@ impl Cipher { }} } - pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult { // TODO: Optimize this by executing a DELETE directly on the database, instead of first fetching. for cipher in Self::find_by_org(org_uuid, conn).await { cipher.delete(conn).await?; @@ -488,7 +490,7 @@ impl Cipher { Ok(()) } - pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &DbConn) -> EmptyResult { for cipher in Self::find_owned_by_user(user_uuid, conn).await { cipher.delete(conn).await?; } @@ -496,7 +498,7 @@ impl Cipher { } /// Purge all ciphers that are old enough to be auto-deleted. 
- pub async fn purge_trash(conn: &mut DbConn) { + pub async fn purge_trash(conn: &DbConn) { if let Some(auto_delete_days) = CONFIG.trash_auto_delete_days() { let now = Utc::now().naive_utc(); let dt = now - TimeDelta::try_days(auto_delete_days).unwrap(); @@ -510,7 +512,7 @@ impl Cipher { &self, folder_uuid: Option, user_uuid: &UserId, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { User::update_uuid_revision(user_uuid, conn).await; @@ -550,7 +552,7 @@ impl Cipher { &self, user_uuid: &UserId, cipher_sync_data: Option<&CipherSyncData>, - conn: &mut DbConn, + conn: &DbConn, ) -> bool { if let Some(ref org_uuid) = self.organization_uuid { if let Some(cipher_sync_data) = cipher_sync_data { @@ -569,7 +571,7 @@ impl Cipher { &self, user_uuid: &UserId, cipher_sync_data: Option<&CipherSyncData>, - conn: &mut DbConn, + conn: &DbConn, ) -> bool { if !CONFIG.org_groups_enabled() { return false; @@ -593,7 +595,7 @@ impl Cipher { &self, user_uuid: &UserId, cipher_sync_data: Option<&CipherSyncData>, - conn: &mut DbConn, + conn: &DbConn, ) -> Option<(bool, bool, bool)> { // Check whether this cipher is directly owned by the user, or is in // a collection that the user has full access to. If so, there are no @@ -659,12 +661,8 @@ impl Cipher { Some((read_only, hide_passwords, manage)) } - async fn get_user_collections_access_flags( - &self, - user_uuid: &UserId, - conn: &mut DbConn, - ) -> Vec<(bool, bool, bool)> { - db_run! {conn: { + async fn get_user_collections_access_flags(&self, user_uuid: &UserId, conn: &DbConn) -> Vec<(bool, bool, bool)> { + db_run! { conn: { // Check whether this cipher is in any collections accessible to the // user. If so, retrieve the access flags for each collection. 
ciphers::table @@ -680,15 +678,11 @@ impl Cipher { }} } - async fn get_group_collections_access_flags( - &self, - user_uuid: &UserId, - conn: &mut DbConn, - ) -> Vec<(bool, bool, bool)> { + async fn get_group_collections_access_flags(&self, user_uuid: &UserId, conn: &DbConn) -> Vec<(bool, bool, bool)> { if !CONFIG.org_groups_enabled() { return Vec::new(); } - db_run! {conn: { + db_run! { conn: { ciphers::table .filter(ciphers::uuid.eq(&self.uuid)) .inner_join(ciphers_collections::table.on( @@ -710,32 +704,32 @@ impl Cipher { }} } - pub async fn is_write_accessible_to_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool { + pub async fn is_write_accessible_to_user(&self, user_uuid: &UserId, conn: &DbConn) -> bool { match self.get_access_restrictions(user_uuid, None, conn).await { Some((read_only, _hide_passwords, manage)) => !read_only || manage, None => false, } } - pub async fn is_accessible_to_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool { + pub async fn is_accessible_to_user(&self, user_uuid: &UserId, conn: &DbConn) -> bool { self.get_access_restrictions(user_uuid, None, conn).await.is_some() } // Returns whether this cipher is a favorite of the specified user. - pub async fn is_favorite(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool { + pub async fn is_favorite(&self, user_uuid: &UserId, conn: &DbConn) -> bool { Favorite::is_favorite(&self.uuid, user_uuid, conn).await } // Sets whether this cipher is a favorite of the specified user. - pub async fn set_favorite(&self, favorite: Option, user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { + pub async fn set_favorite(&self, favorite: Option, user_uuid: &UserId, conn: &DbConn) -> EmptyResult { match favorite { None => Ok(()), // No change requested. Some(status) => Favorite::set_favorite(status, &self.uuid, user_uuid, conn).await, } } - pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &mut DbConn) -> Option { - db_run! 
{conn: { + pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &DbConn) -> Option { + db_run! { conn: { folders_ciphers::table .inner_join(folders::table) .filter(folders::user_uuid.eq(&user_uuid)) @@ -746,28 +740,26 @@ impl Cipher { }} } - pub async fn find_by_uuid(uuid: &CipherId, conn: &mut DbConn) -> Option { - db_run! {conn: { + pub async fn find_by_uuid(uuid: &CipherId, conn: &DbConn) -> Option { + db_run! { conn: { ciphers::table .filter(ciphers::uuid.eq(uuid)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } pub async fn find_by_uuid_and_org( cipher_uuid: &CipherId, org_uuid: &OrganizationId, - conn: &mut DbConn, + conn: &DbConn, ) -> Option { - db_run! {conn: { + db_run! { conn: { ciphers::table .filter(ciphers::uuid.eq(cipher_uuid)) .filter(ciphers::organization_uuid.eq(org_uuid)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } @@ -787,10 +779,10 @@ impl Cipher { user_uuid: &UserId, visible_only: bool, cipher_uuids: &Vec, - conn: &mut DbConn, + conn: &DbConn, ) -> Vec { if CONFIG.org_groups_enabled() { - db_run! {conn: { + db_run! { conn: { let mut query = ciphers::table .left_join(ciphers_collections::table.on( ciphers::uuid.eq(ciphers_collections::cipher_uuid) @@ -839,10 +831,11 @@ impl Cipher { query .select(ciphers::all_columns) .distinct() - .load::(conn).expect("Error loading ciphers").from_db() + .load::(conn) + .expect("Error loading ciphers") }} } else { - db_run! {conn: { + db_run! { conn: { let mut query = ciphers::table .left_join(ciphers_collections::table.on( ciphers::uuid.eq(ciphers_collections::cipher_uuid) @@ -878,46 +871,44 @@ impl Cipher { query .select(ciphers::all_columns) .distinct() - .load::(conn).expect("Error loading ciphers").from_db() + .load::(conn) + .expect("Error loading ciphers") }} } } // Find all ciphers visible to the specified user. 
- pub async fn find_by_user_visible(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn find_by_user_visible(user_uuid: &UserId, conn: &DbConn) -> Vec { Self::find_by_user(user_uuid, true, &vec![], conn).await } pub async fn find_by_user_and_ciphers( user_uuid: &UserId, cipher_uuids: &Vec, - conn: &mut DbConn, + conn: &DbConn, ) -> Vec { Self::find_by_user(user_uuid, true, cipher_uuids, conn).await } - pub async fn find_by_user_and_cipher( - user_uuid: &UserId, - cipher_uuid: &CipherId, - conn: &mut DbConn, - ) -> Option { + pub async fn find_by_user_and_cipher(user_uuid: &UserId, cipher_uuid: &CipherId, conn: &DbConn) -> Option { Self::find_by_user(user_uuid, true, &vec![cipher_uuid.clone()], conn).await.pop() } // Find all ciphers directly owned by the specified user. - pub async fn find_owned_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { - db_run! {conn: { + pub async fn find_owned_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { + db_run! { conn: { ciphers::table .filter( ciphers::user_uuid.eq(user_uuid) .and(ciphers::organization_uuid.is_null()) ) - .load::(conn).expect("Error loading ciphers").from_db() + .load::(conn) + .expect("Error loading ciphers") }} } - pub async fn count_owned_by_user(user_uuid: &UserId, conn: &mut DbConn) -> i64 { - db_run! {conn: { + pub async fn count_owned_by_user(user_uuid: &UserId, conn: &DbConn) -> i64 { + db_run! { conn: { ciphers::table .filter(ciphers::user_uuid.eq(user_uuid)) .count() @@ -927,16 +918,17 @@ impl Cipher { }} } - pub async fn find_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec { - db_run! {conn: { + pub async fn find_by_org(org_uuid: &OrganizationId, conn: &DbConn) -> Vec { + db_run! { conn: { ciphers::table .filter(ciphers::organization_uuid.eq(org_uuid)) - .load::(conn).expect("Error loading ciphers").from_db() + .load::(conn) + .expect("Error loading ciphers") }} } - pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 { - db_run! 
{conn: { + pub async fn count_by_org(org_uuid: &OrganizationId, conn: &DbConn) -> i64 { + db_run! { conn: { ciphers::table .filter(ciphers::organization_uuid.eq(org_uuid)) .count() @@ -946,27 +938,29 @@ impl Cipher { }} } - pub async fn find_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> Vec { - db_run! {conn: { + pub async fn find_by_folder(folder_uuid: &FolderId, conn: &DbConn) -> Vec { + db_run! { conn: { folders_ciphers::table.inner_join(ciphers::table) .filter(folders_ciphers::folder_uuid.eq(folder_uuid)) .select(ciphers::all_columns) - .load::(conn).expect("Error loading ciphers").from_db() + .load::(conn) + .expect("Error loading ciphers") }} } /// Find all ciphers that were deleted before the specified datetime. - pub async fn find_deleted_before(dt: &NaiveDateTime, conn: &mut DbConn) -> Vec { - db_run! {conn: { + pub async fn find_deleted_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec { + db_run! { conn: { ciphers::table .filter(ciphers::deleted_at.lt(dt)) - .load::(conn).expect("Error loading ciphers").from_db() + .load::(conn) + .expect("Error loading ciphers") }} } - pub async fn get_collections(&self, user_uuid: UserId, conn: &mut DbConn) -> Vec { + pub async fn get_collections(&self, user_uuid: UserId, conn: &DbConn) -> Vec { if CONFIG.org_groups_enabled() { - db_run! {conn: { + db_run! { conn: { ciphers_collections::table .filter(ciphers_collections::cipher_uuid.eq(&self.uuid)) .inner_join(collections::table.on( @@ -996,10 +990,11 @@ impl Cipher { .and(collections_groups::read_only.eq(false))) ) .select(ciphers_collections::collection_uuid) - .load::(conn).unwrap_or_default() + .load::(conn) + .unwrap_or_default() }} } else { - db_run! {conn: { + db_run! 
{ conn: { ciphers_collections::table .filter(ciphers_collections::cipher_uuid.eq(&self.uuid)) .inner_join(collections::table.on( @@ -1018,14 +1013,15 @@ impl Cipher { .and(users_collections::read_only.eq(false))) ) .select(ciphers_collections::collection_uuid) - .load::(conn).unwrap_or_default() + .load::(conn) + .unwrap_or_default() }} } } - pub async fn get_admin_collections(&self, user_uuid: UserId, conn: &mut DbConn) -> Vec { + pub async fn get_admin_collections(&self, user_uuid: UserId, conn: &DbConn) -> Vec { if CONFIG.org_groups_enabled() { - db_run! {conn: { + db_run! { conn: { ciphers_collections::table .filter(ciphers_collections::cipher_uuid.eq(&self.uuid)) .inner_join(collections::table.on( @@ -1056,10 +1052,11 @@ impl Cipher { .or(users_organizations::atype.le(MembershipType::Admin as i32)) // User is admin or owner ) .select(ciphers_collections::collection_uuid) - .load::(conn).unwrap_or_default() + .load::(conn) + .unwrap_or_default() }} } else { - db_run! {conn: { + db_run! { conn: { ciphers_collections::table .filter(ciphers_collections::cipher_uuid.eq(&self.uuid)) .inner_join(collections::table.on( @@ -1079,7 +1076,8 @@ impl Cipher { .or(users_organizations::atype.le(MembershipType::Admin as i32)) // User is admin or owner ) .select(ciphers_collections::collection_uuid) - .load::(conn).unwrap_or_default() + .load::(conn) + .unwrap_or_default() }} } } @@ -1088,9 +1086,9 @@ impl Cipher { /// This is used during a full sync so we only need one query for all collections accessible. pub async fn get_collections_with_cipher_by_user( user_uuid: UserId, - conn: &mut DbConn, + conn: &DbConn, ) -> Vec<(CipherId, CollectionId)> { - db_run! {conn: { + db_run! 
{ conn: { ciphers_collections::table .inner_join(collections::table.on( collections::uuid.eq(ciphers_collections::collection_uuid) @@ -1123,7 +1121,8 @@ impl Cipher { .or_filter(collections_groups::collections_uuid.is_not_null()) //Access via group .select(ciphers_collections::all_columns) .distinct() - .load::<(CipherId, CollectionId)>(conn).unwrap_or_default() + .load::<(CipherId, CollectionId)>(conn) + .unwrap_or_default() }} } } diff --git a/src/db/models/collection.rs b/src/db/models/collection.rs index c14c5946..52ded966 100644 --- a/src/db/models/collection.rs +++ b/src/db/models/collection.rs @@ -5,39 +5,41 @@ use super::{ CipherId, CollectionGroup, GroupUser, Membership, MembershipId, MembershipStatus, MembershipType, OrganizationId, User, UserId, }; +use crate::db::schema::{ + ciphers_collections, collections, collections_groups, groups, groups_users, users_collections, users_organizations, +}; use crate::CONFIG; +use diesel::prelude::*; use macros::UuidFromParam; -db_object! { - #[derive(Identifiable, Queryable, Insertable, AsChangeset)] - #[diesel(table_name = collections)] - #[diesel(treat_none_as_null = true)] - #[diesel(primary_key(uuid))] - pub struct Collection { - pub uuid: CollectionId, - pub org_uuid: OrganizationId, - pub name: String, - pub external_id: Option, - } - - #[derive(Identifiable, Queryable, Insertable)] - #[diesel(table_name = users_collections)] - #[diesel(primary_key(user_uuid, collection_uuid))] - pub struct CollectionUser { - pub user_uuid: UserId, - pub collection_uuid: CollectionId, - pub read_only: bool, - pub hide_passwords: bool, - pub manage: bool, - } - - #[derive(Identifiable, Queryable, Insertable)] - #[diesel(table_name = ciphers_collections)] - #[diesel(primary_key(cipher_uuid, collection_uuid))] - pub struct CollectionCipher { - pub cipher_uuid: CipherId, - pub collection_uuid: CollectionId, - } +#[derive(Identifiable, Queryable, Insertable, AsChangeset)] +#[diesel(table_name = collections)] 
+#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(uuid))] +pub struct Collection { + pub uuid: CollectionId, + pub org_uuid: OrganizationId, + pub name: String, + pub external_id: Option, +} + +#[derive(Identifiable, Queryable, Insertable)] +#[diesel(table_name = users_collections)] +#[diesel(primary_key(user_uuid, collection_uuid))] +pub struct CollectionUser { + pub user_uuid: UserId, + pub collection_uuid: CollectionId, + pub read_only: bool, + pub hide_passwords: bool, + pub manage: bool, +} + +#[derive(Identifiable, Queryable, Insertable)] +#[diesel(table_name = ciphers_collections)] +#[diesel(primary_key(cipher_uuid, collection_uuid))] +pub struct CollectionCipher { + pub cipher_uuid: CipherId, + pub collection_uuid: CollectionId, } /// Local methods @@ -83,7 +85,7 @@ impl Collection { &self, user_uuid: &UserId, cipher_sync_data: Option<&crate::api::core::CipherSyncData>, - conn: &mut DbConn, + conn: &DbConn, ) -> Value { let (read_only, hide_passwords, manage) = if let Some(cipher_sync_data) = cipher_sync_data { match cipher_sync_data.members.get(&self.org_uuid) { @@ -135,7 +137,7 @@ impl Collection { json_object } - pub async fn can_access_collection(member: &Membership, col_id: &CollectionId, conn: &mut DbConn) -> bool { + pub async fn can_access_collection(member: &Membership, col_id: &CollectionId, conn: &DbConn) -> bool { member.has_status(MembershipStatus::Confirmed) && (member.has_full_access() || CollectionUser::has_access_to_collection_by_user(col_id, &member.user_uuid, conn).await @@ -152,13 +154,13 @@ use crate::error::MapResult; /// Database methods impl Collection { - pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&self, conn: &DbConn) -> EmptyResult { self.update_users_revision(conn).await; db_run! 
{ conn: sqlite, mysql { match diesel::replace_into(collections::table) - .values(CollectionDb::to_db(self)) + .values(self) .execute(conn) { Ok(_) => Ok(()), @@ -166,7 +168,7 @@ impl Collection { Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => { diesel::update(collections::table) .filter(collections::uuid.eq(&self.uuid)) - .set(CollectionDb::to_db(self)) + .set(self) .execute(conn) .map_res("Error saving collection") } @@ -174,19 +176,18 @@ impl Collection { }.map_res("Error saving collection") } postgresql { - let value = CollectionDb::to_db(self); diesel::insert_into(collections::table) - .values(&value) + .values(self) .on_conflict(collections::uuid) .do_update() - .set(&value) + .set(self) .execute(conn) .map_res("Error saving collection") } } } - pub async fn delete(self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { self.update_users_revision(conn).await; CollectionCipher::delete_all_by_collection(&self.uuid, conn).await?; CollectionUser::delete_all_by_collection(&self.uuid, conn).await?; @@ -199,30 +200,29 @@ impl Collection { }} } - pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult { for collection in Self::find_by_organization(org_uuid, conn).await { collection.delete(conn).await?; } Ok(()) } - pub async fn update_users_revision(&self, conn: &mut DbConn) { + pub async fn update_users_revision(&self, conn: &DbConn) { for member in Membership::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).await.iter() { User::update_uuid_revision(&member.user_uuid, conn).await; } } - pub async fn find_by_uuid(uuid: &CollectionId, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid(uuid: &CollectionId, conn: &DbConn) -> Option { db_run! 
{ conn: { collections::table .filter(collections::uuid.eq(uuid)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn find_by_user_uuid(user_uuid: UserId, conn: &mut DbConn) -> Vec { + pub async fn find_by_user_uuid(user_uuid: UserId, conn: &DbConn) -> Vec { if CONFIG.org_groups_enabled() { db_run! { conn: { collections::table @@ -263,7 +263,8 @@ impl Collection { ) .select(collections::all_columns) .distinct() - .load::(conn).expect("Error loading collections").from_db() + .load::(conn) + .expect("Error loading collections") }} } else { db_run! { conn: { @@ -288,7 +289,8 @@ impl Collection { ) .select(collections::all_columns) .distinct() - .load::(conn).expect("Error loading collections").from_db() + .load::(conn) + .expect("Error loading collections") }} } } @@ -296,7 +298,7 @@ impl Collection { pub async fn find_by_organization_and_user_uuid( org_uuid: &OrganizationId, user_uuid: &UserId, - conn: &mut DbConn, + conn: &DbConn, ) -> Vec { Self::find_by_user_uuid(user_uuid.to_owned(), conn) .await @@ -305,17 +307,16 @@ impl Collection { .collect() } - pub async fn find_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec { + pub async fn find_by_organization(org_uuid: &OrganizationId, conn: &DbConn) -> Vec { db_run! { conn: { collections::table .filter(collections::org_uuid.eq(org_uuid)) - .load::(conn) + .load::(conn) .expect("Error loading collections") - .from_db() }} } - pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 { + pub async fn count_by_org(org_uuid: &OrganizationId, conn: &DbConn) -> i64 { db_run! { conn: { collections::table .filter(collections::org_uuid.eq(org_uuid)) @@ -326,23 +327,18 @@ impl Collection { }} } - pub async fn find_by_uuid_and_org( - uuid: &CollectionId, - org_uuid: &OrganizationId, - conn: &mut DbConn, - ) -> Option { + pub async fn find_by_uuid_and_org(uuid: &CollectionId, org_uuid: &OrganizationId, conn: &DbConn) -> Option { db_run! 
{ conn: { collections::table .filter(collections::uuid.eq(uuid)) .filter(collections::org_uuid.eq(org_uuid)) .select(collections::all_columns) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn find_by_uuid_and_user(uuid: &CollectionId, user_uuid: UserId, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid_and_user(uuid: &CollectionId, user_uuid: UserId, conn: &DbConn) -> Option { if CONFIG.org_groups_enabled() { db_run! { conn: { collections::table @@ -380,8 +376,8 @@ impl Collection { ) ) ).select(collections::all_columns) - .first::(conn).ok() - .from_db() + .first::(conn) + .ok() }} } else { db_run! { conn: { @@ -403,13 +399,13 @@ impl Collection { users_organizations::atype.le(MembershipType::Admin as i32) // Org admin or owner )) ).select(collections::all_columns) - .first::(conn).ok() - .from_db() + .first::(conn) + .ok() }} } } - pub async fn is_writable_by_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool { + pub async fn is_writable_by_user(&self, user_uuid: &UserId, conn: &DbConn) -> bool { let user_uuid = user_uuid.to_string(); if CONFIG.org_groups_enabled() { db_run! { conn: { @@ -471,7 +467,7 @@ impl Collection { } } - pub async fn hide_passwords_for_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool { + pub async fn hide_passwords_for_user(&self, user_uuid: &UserId, conn: &DbConn) -> bool { let user_uuid = user_uuid.to_string(); db_run! { conn: { collections::table @@ -517,7 +513,7 @@ impl Collection { }} } - pub async fn is_manageable_by_user(&self, user_uuid: &UserId, conn: &mut DbConn) -> bool { + pub async fn is_manageable_by_user(&self, user_uuid: &UserId, conn: &DbConn) -> bool { let user_uuid = user_uuid.to_string(); db_run! { conn: { collections::table @@ -569,7 +565,7 @@ impl CollectionUser { pub async fn find_by_organization_and_user_uuid( org_uuid: &OrganizationId, user_uuid: &UserId, - conn: &mut DbConn, + conn: &DbConn, ) -> Vec { db_run! 
{ conn: { users_collections::table @@ -577,15 +573,14 @@ impl CollectionUser { .inner_join(collections::table.on(collections::uuid.eq(users_collections::collection_uuid))) .filter(collections::org_uuid.eq(org_uuid)) .select(users_collections::all_columns) - .load::(conn) + .load::(conn) .expect("Error loading users_collections") - .from_db() }} } pub async fn find_by_organization_swap_user_uuid_with_member_uuid( org_uuid: &OrganizationId, - conn: &mut DbConn, + conn: &DbConn, ) -> Vec { let col_users = db_run! { conn: { users_collections::table @@ -594,9 +589,8 @@ impl CollectionUser { .inner_join(users_organizations::table.on(users_organizations::user_uuid.eq(users_collections::user_uuid))) .filter(users_organizations::org_uuid.eq(org_uuid)) .select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords, users_collections::manage)) - .load::(conn) + .load::(conn) .expect("Error loading users_collections") - .from_db() }}; col_users.into_iter().map(|c| c.into()).collect() } @@ -607,7 +601,7 @@ impl CollectionUser { read_only: bool, hide_passwords: bool, manage: bool, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { User::update_uuid_revision(user_uuid, conn).await; @@ -664,7 +658,7 @@ impl CollectionUser { } } - pub async fn delete(self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { User::update_uuid_revision(&self.user_uuid, conn).await; db_run! { conn: { @@ -678,21 +672,20 @@ impl CollectionUser { }} } - pub async fn find_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> Vec { + pub async fn find_by_collection(collection_uuid: &CollectionId, conn: &DbConn) -> Vec { db_run! 
{ conn: { users_collections::table .filter(users_collections::collection_uuid.eq(collection_uuid)) .select(users_collections::all_columns) - .load::(conn) + .load::(conn) .expect("Error loading users_collections") - .from_db() }} } pub async fn find_by_org_and_coll_swap_user_uuid_with_member_uuid( org_uuid: &OrganizationId, collection_uuid: &CollectionId, - conn: &mut DbConn, + conn: &DbConn, ) -> Vec { let col_users = db_run! { conn: { users_collections::table @@ -700,9 +693,8 @@ impl CollectionUser { .filter(users_organizations::org_uuid.eq(org_uuid)) .inner_join(users_organizations::table.on(users_organizations::user_uuid.eq(users_collections::user_uuid))) .select((users_organizations::uuid, users_collections::collection_uuid, users_collections::read_only, users_collections::hide_passwords, users_collections::manage)) - .load::(conn) + .load::(conn) .expect("Error loading users_collections") - .from_db() }}; col_users.into_iter().map(|c| c.into()).collect() } @@ -710,31 +702,29 @@ impl CollectionUser { pub async fn find_by_collection_and_user( collection_uuid: &CollectionId, user_uuid: &UserId, - conn: &mut DbConn, + conn: &DbConn, ) -> Option { db_run! { conn: { users_collections::table .filter(users_collections::collection_uuid.eq(collection_uuid)) .filter(users_collections::user_uuid.eq(user_uuid)) .select(users_collections::all_columns) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { db_run! 
{ conn: { users_collections::table .filter(users_collections::user_uuid.eq(user_uuid)) .select(users_collections::all_columns) - .load::(conn) + .load::(conn) .expect("Error loading users_collections") - .from_db() }} } - pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &DbConn) -> EmptyResult { for collection in CollectionUser::find_by_collection(collection_uuid, conn).await.iter() { User::update_uuid_revision(&collection.user_uuid, conn).await; } @@ -749,7 +739,7 @@ impl CollectionUser { pub async fn delete_all_by_user_and_org( user_uuid: &UserId, org_uuid: &OrganizationId, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { let collectionusers = Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn).await; @@ -766,18 +756,14 @@ impl CollectionUser { }} } - pub async fn has_access_to_collection_by_user( - col_id: &CollectionId, - user_uuid: &UserId, - conn: &mut DbConn, - ) -> bool { + pub async fn has_access_to_collection_by_user(col_id: &CollectionId, user_uuid: &UserId, conn: &DbConn) -> bool { Self::find_by_collection_and_user(col_id, user_uuid, conn).await.is_some() } } /// Database methods impl CollectionCipher { - pub async fn save(cipher_uuid: &CipherId, collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult { + pub async fn save(cipher_uuid: &CipherId, collection_uuid: &CollectionId, conn: &DbConn) -> EmptyResult { Self::update_users_revision(collection_uuid, conn).await; db_run! { conn: @@ -807,7 +793,7 @@ impl CollectionCipher { } } - pub async fn delete(cipher_uuid: &CipherId, collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(cipher_uuid: &CipherId, collection_uuid: &CollectionId, conn: &DbConn) -> EmptyResult { Self::update_users_revision(collection_uuid, conn).await; db_run! 
{ conn: { @@ -821,7 +807,7 @@ impl CollectionCipher { }} } - pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(ciphers_collections::table.filter(ciphers_collections::cipher_uuid.eq(cipher_uuid))) .execute(conn) @@ -829,7 +815,7 @@ impl CollectionCipher { }} } - pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(ciphers_collections::table.filter(ciphers_collections::collection_uuid.eq(collection_uuid))) .execute(conn) @@ -837,7 +823,7 @@ impl CollectionCipher { }} } - pub async fn update_users_revision(collection_uuid: &CollectionId, conn: &mut DbConn) { + pub async fn update_users_revision(collection_uuid: &CollectionId, conn: &DbConn) { if let Some(collection) = Collection::find_by_uuid(collection_uuid, conn).await { collection.update_users_revision(conn).await; } diff --git a/src/db/models/device.rs b/src/db/models/device.rs index 005d942d..b5e394ee 100644 --- a/src/db/models/device.rs +++ b/src/db/models/device.rs @@ -5,32 +5,32 @@ use derive_more::{Display, From}; use serde_json::Value; use super::{AuthRequest, UserId}; +use crate::db::schema::devices; use crate::{ crypto, util::{format_date, get_uuid}, }; +use diesel::prelude::*; use macros::{IdFromParam, UuidFromParam}; -db_object! 
{ - #[derive(Identifiable, Queryable, Insertable, AsChangeset)] - #[diesel(table_name = devices)] - #[diesel(treat_none_as_null = true)] - #[diesel(primary_key(uuid, user_uuid))] - pub struct Device { - pub uuid: DeviceId, - pub created_at: NaiveDateTime, - pub updated_at: NaiveDateTime, - - pub user_uuid: UserId, - - pub name: String, - pub atype: i32, // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Enums/DeviceType.cs - pub push_uuid: Option, - pub push_token: Option, - - pub refresh_token: String, - pub twofactor_remember: Option, - } +#[derive(Identifiable, Queryable, Insertable, AsChangeset)] +#[diesel(table_name = devices)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(uuid, user_uuid))] +pub struct Device { + pub uuid: DeviceId, + pub created_at: NaiveDateTime, + pub updated_at: NaiveDateTime, + + pub user_uuid: UserId, + + pub name: String, + pub atype: i32, // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Enums/DeviceType.cs + pub push_uuid: Option, + pub push_token: Option, + + pub refresh_token: String, + pub twofactor_remember: Option, } /// Local methods @@ -115,13 +115,7 @@ use crate::error::MapResult; /// Database methods impl Device { - pub async fn new( - uuid: DeviceId, - user_uuid: UserId, - name: String, - atype: i32, - conn: &mut DbConn, - ) -> ApiResult { + pub async fn new(uuid: DeviceId, user_uuid: UserId, name: String, atype: i32, conn: &DbConn) -> ApiResult { let now = Utc::now().naive_utc(); let device = Self { @@ -142,18 +136,24 @@ impl Device { device.inner_save(conn).await.map(|()| device) } - async fn inner_save(&self, conn: &mut DbConn) -> EmptyResult { + async fn inner_save(&self, conn: &DbConn) -> EmptyResult { db_run! 
{ conn: sqlite, mysql { - crate::util::retry( - || diesel::replace_into(devices::table).values(DeviceDb::to_db(self)).execute(conn), + crate::util::retry(|| + diesel::replace_into(devices::table) + .values(self) + .execute(conn), 10, ).map_res("Error saving device") } postgresql { - let value = DeviceDb::to_db(self); - crate::util::retry( - || diesel::insert_into(devices::table).values(&value).on_conflict((devices::uuid, devices::user_uuid)).do_update().set(&value).execute(conn), + crate::util::retry(|| + diesel::insert_into(devices::table) + .values(self) + .on_conflict((devices::uuid, devices::user_uuid)) + .do_update() + .set(self) + .execute(conn), 10, ).map_res("Error saving device") } @@ -161,12 +161,12 @@ impl Device { } // Should only be called after user has passed authentication - pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&mut self, conn: &DbConn) -> EmptyResult { self.updated_at = Utc::now().naive_utc(); self.inner_save(conn).await } - pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(devices::table.filter(devices::user_uuid.eq(user_uuid))) .execute(conn) @@ -174,18 +174,17 @@ impl Device { }} } - pub async fn find_by_uuid_and_user(uuid: &DeviceId, user_uuid: &UserId, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid_and_user(uuid: &DeviceId, user_uuid: &UserId, conn: &DbConn) -> Option { db_run! 
{ conn: { devices::table .filter(devices::uuid.eq(uuid)) .filter(devices::user_uuid.eq(user_uuid)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn find_with_auth_request_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn find_with_auth_request_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { let devices = Self::find_by_user(user_uuid, conn).await; let mut result = Vec::new(); for device in devices { @@ -195,27 +194,25 @@ impl Device { result } - pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { db_run! { conn: { devices::table .filter(devices::user_uuid.eq(user_uuid)) - .load::(conn) + .load::(conn) .expect("Error loading devices") - .from_db() }} } - pub async fn find_by_uuid(uuid: &DeviceId, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid(uuid: &DeviceId, conn: &DbConn) -> Option { db_run! { conn: { devices::table .filter(devices::uuid.eq(uuid)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn clear_push_token_by_uuid(uuid: &DeviceId, conn: &mut DbConn) -> EmptyResult { + pub async fn clear_push_token_by_uuid(uuid: &DeviceId, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::update(devices::table) .filter(devices::uuid.eq(uuid)) @@ -224,39 +221,36 @@ impl Device { .map_res("Error removing push token") }} } - pub async fn find_by_refresh_token(refresh_token: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_refresh_token(refresh_token: &str, conn: &DbConn) -> Option { db_run! { conn: { devices::table .filter(devices::refresh_token.eq(refresh_token)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn find_latest_active_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Option { + pub async fn find_latest_active_by_user(user_uuid: &UserId, conn: &DbConn) -> Option { db_run! 
{ conn: { devices::table .filter(devices::user_uuid.eq(user_uuid)) .order(devices::updated_at.desc()) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn find_push_devices_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn find_push_devices_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { db_run! { conn: { devices::table .filter(devices::user_uuid.eq(user_uuid)) .filter(devices::push_token.is_not_null()) - .load::(conn) + .load::(conn) .expect("Error loading push devices") - .from_db() }} } - pub async fn check_user_has_push_device(user_uuid: &UserId, conn: &mut DbConn) -> bool { + pub async fn check_user_has_push_device(user_uuid: &UserId, conn: &DbConn) -> bool { db_run! { conn: { devices::table .filter(devices::user_uuid.eq(user_uuid)) diff --git a/src/db/models/emergency_access.rs b/src/db/models/emergency_access.rs index e3803b1a..aa1eed7c 100644 --- a/src/db/models/emergency_access.rs +++ b/src/db/models/emergency_access.rs @@ -3,32 +3,31 @@ use derive_more::{AsRef, Deref, Display, From}; use serde_json::Value; use super::{User, UserId}; +use crate::db::schema::emergency_access; use crate::{api::EmptyResult, db::DbConn, error::MapResult}; +use diesel::prelude::*; use macros::UuidFromParam; -db_object! 
{ - #[derive(Identifiable, Queryable, Insertable, AsChangeset)] - #[diesel(table_name = emergency_access)] - #[diesel(treat_none_as_null = true)] - #[diesel(primary_key(uuid))] - pub struct EmergencyAccess { - pub uuid: EmergencyAccessId, - pub grantor_uuid: UserId, - pub grantee_uuid: Option, - pub email: Option, - pub key_encrypted: Option, - pub atype: i32, //EmergencyAccessType - pub status: i32, //EmergencyAccessStatus - pub wait_time_days: i32, - pub recovery_initiated_at: Option, - pub last_notification_at: Option, - pub updated_at: NaiveDateTime, - pub created_at: NaiveDateTime, - } +#[derive(Identifiable, Queryable, Insertable, AsChangeset)] +#[diesel(table_name = emergency_access)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(uuid))] +pub struct EmergencyAccess { + pub uuid: EmergencyAccessId, + pub grantor_uuid: UserId, + pub grantee_uuid: Option, + pub email: Option, + pub key_encrypted: Option, + pub atype: i32, //EmergencyAccessType + pub status: i32, //EmergencyAccessStatus + pub wait_time_days: i32, + pub recovery_initiated_at: Option, + pub last_notification_at: Option, + pub updated_at: NaiveDateTime, + pub created_at: NaiveDateTime, } // Local methods - impl EmergencyAccess { pub fn new(grantor_uuid: UserId, email: String, status: i32, atype: i32, wait_time_days: i32) -> Self { let now = Utc::now().naive_utc(); @@ -67,7 +66,7 @@ impl EmergencyAccess { }) } - pub async fn to_json_grantor_details(&self, conn: &mut DbConn) -> Value { + pub async fn to_json_grantor_details(&self, conn: &DbConn) -> Value { let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn).await.expect("Grantor user not found."); json!({ @@ -83,7 +82,7 @@ impl EmergencyAccess { }) } - pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Option { + pub async fn to_json_grantee_details(&self, conn: &DbConn) -> Option { let grantee_user = if let Some(grantee_uuid) = &self.grantee_uuid { User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee 
user not found.") } else if let Some(email) = self.email.as_deref() { @@ -140,14 +139,14 @@ pub enum EmergencyAccessStatus { // region Database methods impl EmergencyAccess { - pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&mut self, conn: &DbConn) -> EmptyResult { User::update_uuid_revision(&self.grantor_uuid, conn).await; self.updated_at = Utc::now().naive_utc(); db_run! { conn: sqlite, mysql { match diesel::replace_into(emergency_access::table) - .values(EmergencyAccessDb::to_db(self)) + .values(&*self) .execute(conn) { Ok(_) => Ok(()), @@ -155,7 +154,7 @@ impl EmergencyAccess { Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => { diesel::update(emergency_access::table) .filter(emergency_access::uuid.eq(&self.uuid)) - .set(EmergencyAccessDb::to_db(self)) + .set(&*self) .execute(conn) .map_res("Error updating emergency access") } @@ -163,12 +162,11 @@ impl EmergencyAccess { }.map_res("Error saving emergency access") } postgresql { - let value = EmergencyAccessDb::to_db(self); diesel::insert_into(emergency_access::table) - .values(&value) + .values(&*self) .on_conflict(emergency_access::uuid) .do_update() - .set(&value) + .set(&*self) .execute(conn) .map_res("Error saving emergency access") } @@ -179,14 +177,14 @@ impl EmergencyAccess { &mut self, status: i32, date: &NaiveDateTime, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { // Update the grantee so that it will refresh it's status. User::update_uuid_revision(self.grantee_uuid.as_ref().expect("Error getting grantee"), conn).await; self.status = status; date.clone_into(&mut self.updated_at); - db_run! {conn: { + db_run! 
{ conn: { crate::util::retry(|| { diesel::update(emergency_access::table.filter(emergency_access::uuid.eq(&self.uuid))) .set((emergency_access::status.eq(status), emergency_access::updated_at.eq(date))) @@ -196,15 +194,11 @@ impl EmergencyAccess { }} } - pub async fn update_last_notification_date_and_save( - &mut self, - date: &NaiveDateTime, - conn: &mut DbConn, - ) -> EmptyResult { + pub async fn update_last_notification_date_and_save(&mut self, date: &NaiveDateTime, conn: &DbConn) -> EmptyResult { self.last_notification_at = Some(date.to_owned()); date.clone_into(&mut self.updated_at); - db_run! {conn: { + db_run! { conn: { crate::util::retry(|| { diesel::update(emergency_access::table.filter(emergency_access::uuid.eq(&self.uuid))) .set((emergency_access::last_notification_at.eq(date), emergency_access::updated_at.eq(date))) @@ -214,7 +208,7 @@ impl EmergencyAccess { }} } - pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &DbConn) -> EmptyResult { for ea in Self::find_all_by_grantor_uuid(user_uuid, conn).await { ea.delete(conn).await?; } @@ -224,14 +218,14 @@ impl EmergencyAccess { Ok(()) } - pub async fn delete_all_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_grantee_email(grantee_email: &str, conn: &DbConn) -> EmptyResult { for ea in Self::find_all_invited_by_grantee_email(grantee_email, conn).await { ea.delete(conn).await?; } Ok(()) } - pub async fn delete(self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { User::update_uuid_revision(&self.grantor_uuid, conn).await; db_run! { conn: { @@ -245,109 +239,108 @@ impl EmergencyAccess { grantor_uuid: &UserId, grantee_uuid: &UserId, email: &str, - conn: &mut DbConn, + conn: &DbConn, ) -> Option { db_run! 
{ conn: { emergency_access::table .filter(emergency_access::grantor_uuid.eq(grantor_uuid)) .filter(emergency_access::grantee_uuid.eq(grantee_uuid).or(emergency_access::email.eq(email))) - .first::(conn) - .ok().from_db() + .first::(conn) + .ok() }} } - pub async fn find_all_recoveries_initiated(conn: &mut DbConn) -> Vec { + pub async fn find_all_recoveries_initiated(conn: &DbConn) -> Vec { db_run! { conn: { emergency_access::table .filter(emergency_access::status.eq(EmergencyAccessStatus::RecoveryInitiated as i32)) .filter(emergency_access::recovery_initiated_at.is_not_null()) - .load::(conn).expect("Error loading emergency_access").from_db() + .load::(conn) + .expect("Error loading emergency_access") }} } pub async fn find_by_uuid_and_grantor_uuid( uuid: &EmergencyAccessId, grantor_uuid: &UserId, - conn: &mut DbConn, + conn: &DbConn, ) -> Option { db_run! { conn: { emergency_access::table .filter(emergency_access::uuid.eq(uuid)) .filter(emergency_access::grantor_uuid.eq(grantor_uuid)) - .first::(conn) - .ok().from_db() + .first::(conn) + .ok() }} } pub async fn find_by_uuid_and_grantee_uuid( uuid: &EmergencyAccessId, grantee_uuid: &UserId, - conn: &mut DbConn, + conn: &DbConn, ) -> Option { db_run! { conn: { emergency_access::table .filter(emergency_access::uuid.eq(uuid)) .filter(emergency_access::grantee_uuid.eq(grantee_uuid)) - .first::(conn) - .ok().from_db() + .first::(conn) + .ok() }} } pub async fn find_by_uuid_and_grantee_email( uuid: &EmergencyAccessId, grantee_email: &str, - conn: &mut DbConn, + conn: &DbConn, ) -> Option { db_run! { conn: { emergency_access::table .filter(emergency_access::uuid.eq(uuid)) .filter(emergency_access::email.eq(grantee_email)) - .first::(conn) - .ok().from_db() + .first::(conn) + .ok() }} } - pub async fn find_all_by_grantee_uuid(grantee_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn find_all_by_grantee_uuid(grantee_uuid: &UserId, conn: &DbConn) -> Vec { db_run! 
{ conn: { emergency_access::table .filter(emergency_access::grantee_uuid.eq(grantee_uuid)) - .load::(conn).expect("Error loading emergency_access").from_db() + .load::(conn) + .expect("Error loading emergency_access") }} } - pub async fn find_invited_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> Option { + pub async fn find_invited_by_grantee_email(grantee_email: &str, conn: &DbConn) -> Option { db_run! { conn: { emergency_access::table .filter(emergency_access::email.eq(grantee_email)) .filter(emergency_access::status.eq(EmergencyAccessStatus::Invited as i32)) - .first::(conn) - .ok().from_db() + .first::(conn) + .ok() }} } - pub async fn find_all_invited_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> Vec { + pub async fn find_all_invited_by_grantee_email(grantee_email: &str, conn: &DbConn) -> Vec { db_run! { conn: { emergency_access::table .filter(emergency_access::email.eq(grantee_email)) .filter(emergency_access::status.eq(EmergencyAccessStatus::Invited as i32)) - .load::(conn).expect("Error loading emergency_access").from_db() + .load::(conn) + .expect("Error loading emergency_access") }} } - pub async fn find_all_by_grantor_uuid(grantor_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn find_all_by_grantor_uuid(grantor_uuid: &UserId, conn: &DbConn) -> Vec { db_run! 
{ conn: { emergency_access::table .filter(emergency_access::grantor_uuid.eq(grantor_uuid)) - .load::(conn).expect("Error loading emergency_access").from_db() + .load::(conn) + .expect("Error loading emergency_access") }} } - pub async fn accept_invite( - &mut self, - grantee_uuid: &UserId, - grantee_email: &str, - conn: &mut DbConn, - ) -> EmptyResult { + pub async fn accept_invite(&mut self, grantee_uuid: &UserId, grantee_email: &str, conn: &DbConn) -> EmptyResult { if self.email.is_none() || self.email.as_ref().unwrap() != grantee_email { err!("User email does not match invite."); } diff --git a/src/db/models/event.rs b/src/db/models/event.rs index 7e6bdf34..bd4b2310 100644 --- a/src/db/models/event.rs +++ b/src/db/models/event.rs @@ -3,37 +3,37 @@ use chrono::{NaiveDateTime, TimeDelta, Utc}; use serde_json::Value; use super::{CipherId, CollectionId, GroupId, MembershipId, OrgPolicyId, OrganizationId, UserId}; +use crate::db::schema::{event, users_organizations}; use crate::{api::EmptyResult, db::DbConn, error::MapResult, CONFIG}; +use diesel::prelude::*; // https://bitwarden.com/help/event-logs/ -db_object! 
{ - // Upstream: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Services/Implementations/EventService.cs - // Upstream: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/AdminConsole/Public/Models/Response/EventResponseModel.cs - // Upstream SQL: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Sql/dbo/Tables/Event.sql - #[derive(Identifiable, Queryable, Insertable, AsChangeset)] - #[diesel(table_name = event)] - #[diesel(treat_none_as_null = true)] - #[diesel(primary_key(uuid))] - pub struct Event { - pub uuid: EventId, - pub event_type: i32, // EventType - pub user_uuid: Option, - pub org_uuid: Option, - pub cipher_uuid: Option, - pub collection_uuid: Option, - pub group_uuid: Option, - pub org_user_uuid: Option, - pub act_user_uuid: Option, - // Upstream enum: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Enums/DeviceType.cs - pub device_type: Option, - pub ip_address: Option, - pub event_date: NaiveDateTime, - pub policy_uuid: Option, - pub provider_uuid: Option, - pub provider_user_uuid: Option, - pub provider_org_uuid: Option, - } +// Upstream: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Services/Implementations/EventService.cs +// Upstream: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Api/AdminConsole/Public/Models/Response/EventResponseModel.cs +// Upstream SQL: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Sql/dbo/Tables/Event.sql +#[derive(Identifiable, Queryable, Insertable, AsChangeset)] +#[diesel(table_name = event)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(uuid))] +pub struct Event { + pub uuid: EventId, + pub event_type: i32, // EventType + pub user_uuid: Option, + pub org_uuid: Option, + pub cipher_uuid: Option, + 
pub collection_uuid: Option, + pub group_uuid: Option, + pub org_user_uuid: Option, + pub act_user_uuid: Option, + // Upstream enum: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/Enums/DeviceType.cs + pub device_type: Option, + pub ip_address: Option, + pub event_date: NaiveDateTime, + pub policy_uuid: Option, + pub provider_uuid: Option, + pub provider_user_uuid: Option, + pub provider_org_uuid: Option, } // Upstream enum: https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Enums/EventType.cs @@ -193,27 +193,27 @@ impl Event { /// ############# /// Basic Queries - pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&self, conn: &DbConn) -> EmptyResult { db_run! { conn: sqlite, mysql { diesel::replace_into(event::table) - .values(EventDb::to_db(self)) + .values(self) .execute(conn) .map_res("Error saving event") } postgresql { diesel::insert_into(event::table) - .values(EventDb::to_db(self)) + .values(self) .on_conflict(event::uuid) .do_update() - .set(EventDb::to_db(self)) + .set(self) .execute(conn) .map_res("Error saving event") } } } - pub async fn save_user_event(events: Vec, conn: &mut DbConn) -> EmptyResult { + pub async fn save_user_event(events: Vec, conn: &DbConn) -> EmptyResult { // Special save function which is able to handle multiple events. // SQLite doesn't support the DEFAULT argument, and does not support inserting multiple values at the same time. // MySQL and PostgreSQL do. 
@@ -224,14 +224,13 @@ impl Event { sqlite { for event in events { diesel::insert_or_ignore_into(event::table) - .values(EventDb::to_db(&event)) + .values(&event) .execute(conn) .unwrap_or_default(); } Ok(()) } mysql { - let events: Vec = events.iter().map(EventDb::to_db).collect(); diesel::insert_or_ignore_into(event::table) .values(&events) .execute(conn) @@ -239,7 +238,6 @@ impl Event { Ok(()) } postgresql { - let events: Vec = events.iter().map(EventDb::to_db).collect(); diesel::insert_into(event::table) .values(&events) .on_conflict_do_nothing() @@ -250,7 +248,7 @@ impl Event { } } - pub async fn delete(self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(event::table.filter(event::uuid.eq(self.uuid))) .execute(conn) @@ -264,7 +262,7 @@ impl Event { org_uuid: &OrganizationId, start: &NaiveDateTime, end: &NaiveDateTime, - conn: &mut DbConn, + conn: &DbConn, ) -> Vec { db_run! { conn: { event::table @@ -272,13 +270,12 @@ impl Event { .filter(event::event_date.between(start, end)) .order_by(event::event_date.desc()) .limit(Self::PAGE_SIZE) - .load::(conn) + .load::(conn) .expect("Error filtering events") - .from_db() }} } - pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 { + pub async fn count_by_org(org_uuid: &OrganizationId, conn: &DbConn) -> i64 { db_run! { conn: { event::table .filter(event::org_uuid.eq(org_uuid)) @@ -294,7 +291,7 @@ impl Event { member_uuid: &MembershipId, start: &NaiveDateTime, end: &NaiveDateTime, - conn: &mut DbConn, + conn: &DbConn, ) -> Vec { db_run! 
{ conn: { event::table @@ -305,9 +302,8 @@ impl Event { .select(event::all_columns) .order_by(event::event_date.desc()) .limit(Self::PAGE_SIZE) - .load::(conn) + .load::(conn) .expect("Error filtering events") - .from_db() }} } @@ -315,7 +311,7 @@ impl Event { cipher_uuid: &CipherId, start: &NaiveDateTime, end: &NaiveDateTime, - conn: &mut DbConn, + conn: &DbConn, ) -> Vec { db_run! { conn: { event::table @@ -323,13 +319,12 @@ impl Event { .filter(event::event_date.between(start, end)) .order_by(event::event_date.desc()) .limit(Self::PAGE_SIZE) - .load::(conn) + .load::(conn) .expect("Error filtering events") - .from_db() }} } - pub async fn clean_events(conn: &mut DbConn) -> EmptyResult { + pub async fn clean_events(conn: &DbConn) -> EmptyResult { if let Some(days_to_retain) = CONFIG.events_days_retain() { let dt = Utc::now().naive_utc() - TimeDelta::try_days(days_to_retain).unwrap(); db_run! { conn: { diff --git a/src/db/models/favorite.rs b/src/db/models/favorite.rs index de2e0feb..d7aa74bb 100644 --- a/src/db/models/favorite.rs +++ b/src/db/models/favorite.rs @@ -1,13 +1,13 @@ use super::{CipherId, User, UserId}; +use crate::db::schema::favorites; +use diesel::prelude::*; -db_object! { - #[derive(Identifiable, Queryable, Insertable)] - #[diesel(table_name = favorites)] - #[diesel(primary_key(user_uuid, cipher_uuid))] - pub struct Favorite { - pub user_uuid: UserId, - pub cipher_uuid: CipherId, - } +#[derive(Identifiable, Queryable, Insertable)] +#[diesel(table_name = favorites)] +#[diesel(primary_key(user_uuid, cipher_uuid))] +pub struct Favorite { + pub user_uuid: UserId, + pub cipher_uuid: CipherId, } use crate::db::DbConn; @@ -17,14 +17,16 @@ use crate::error::MapResult; impl Favorite { // Returns whether the specified cipher is a favorite of the specified user. 
- pub async fn is_favorite(cipher_uuid: &CipherId, user_uuid: &UserId, conn: &mut DbConn) -> bool { + pub async fn is_favorite(cipher_uuid: &CipherId, user_uuid: &UserId, conn: &DbConn) -> bool { db_run! { conn: { let query = favorites::table .filter(favorites::cipher_uuid.eq(cipher_uuid)) .filter(favorites::user_uuid.eq(user_uuid)) .count(); - query.first::(conn).ok().unwrap_or(0) != 0 + query.first::(conn) + .ok() + .unwrap_or(0) != 0 }} } @@ -33,7 +35,7 @@ impl Favorite { favorite: bool, cipher_uuid: &CipherId, user_uuid: &UserId, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { let (old, new) = (Self::is_favorite(cipher_uuid, user_uuid, conn).await, favorite); match (old, new) { @@ -67,7 +69,7 @@ impl Favorite { } // Delete all favorite entries associated with the specified cipher. - pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(favorites::table.filter(favorites::cipher_uuid.eq(cipher_uuid))) .execute(conn) @@ -76,7 +78,7 @@ impl Favorite { } // Delete all favorite entries associated with the specified user. - pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(favorites::table.filter(favorites::user_uuid.eq(user_uuid))) .execute(conn) @@ -86,7 +88,7 @@ impl Favorite { /// Return a vec with (cipher_uuid) this will only contain favorite flagged ciphers /// This is used during a full sync so we only need one query for all favorite cipher matches. - pub async fn get_all_cipher_uuid_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn get_all_cipher_uuid_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { db_run! 
{ conn: { favorites::table .filter(favorites::user_uuid.eq(user_uuid)) diff --git a/src/db/models/folder.rs b/src/db/models/folder.rs index 654ccd6d..b4cbc7ff 100644 --- a/src/db/models/folder.rs +++ b/src/db/models/folder.rs @@ -3,27 +3,27 @@ use derive_more::{AsRef, Deref, Display, From}; use serde_json::Value; use super::{CipherId, User, UserId}; +use crate::db::schema::{folders, folders_ciphers}; +use diesel::prelude::*; use macros::UuidFromParam; -db_object! { - #[derive(Identifiable, Queryable, Insertable, AsChangeset)] - #[diesel(table_name = folders)] - #[diesel(primary_key(uuid))] - pub struct Folder { - pub uuid: FolderId, - pub created_at: NaiveDateTime, - pub updated_at: NaiveDateTime, - pub user_uuid: UserId, - pub name: String, - } +#[derive(Identifiable, Queryable, Insertable, AsChangeset)] +#[diesel(table_name = folders)] +#[diesel(primary_key(uuid))] +pub struct Folder { + pub uuid: FolderId, + pub created_at: NaiveDateTime, + pub updated_at: NaiveDateTime, + pub user_uuid: UserId, + pub name: String, +} - #[derive(Identifiable, Queryable, Insertable)] - #[diesel(table_name = folders_ciphers)] - #[diesel(primary_key(cipher_uuid, folder_uuid))] - pub struct FolderCipher { - pub cipher_uuid: CipherId, - pub folder_uuid: FolderId, - } +#[derive(Identifiable, Queryable, Insertable)] +#[diesel(table_name = folders_ciphers)] +#[diesel(primary_key(cipher_uuid, folder_uuid))] +pub struct FolderCipher { + pub cipher_uuid: CipherId, + pub folder_uuid: FolderId, } /// Local methods @@ -69,14 +69,14 @@ use crate::error::MapResult; /// Database methods impl Folder { - pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&mut self, conn: &DbConn) -> EmptyResult { User::update_uuid_revision(&self.user_uuid, conn).await; self.updated_at = Utc::now().naive_utc(); db_run! 
{ conn: sqlite, mysql { match diesel::replace_into(folders::table) - .values(FolderDb::to_db(self)) + .values(&*self) .execute(conn) { Ok(_) => Ok(()), @@ -84,7 +84,7 @@ impl Folder { Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => { diesel::update(folders::table) .filter(folders::uuid.eq(&self.uuid)) - .set(FolderDb::to_db(self)) + .set(&*self) .execute(conn) .map_res("Error saving folder") } @@ -92,19 +92,18 @@ impl Folder { }.map_res("Error saving folder") } postgresql { - let value = FolderDb::to_db(self); diesel::insert_into(folders::table) - .values(&value) + .values(&*self) .on_conflict(folders::uuid) .do_update() - .set(&value) + .set(&*self) .execute(conn) .map_res("Error saving folder") } } } - pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(&self, conn: &DbConn) -> EmptyResult { User::update_uuid_revision(&self.user_uuid, conn).await; FolderCipher::delete_all_by_folder(&self.uuid, conn).await?; @@ -115,50 +114,48 @@ impl Folder { }} } - pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &DbConn) -> EmptyResult { for folder in Self::find_by_user(user_uuid, conn).await { folder.delete(conn).await?; } Ok(()) } - pub async fn find_by_uuid_and_user(uuid: &FolderId, user_uuid: &UserId, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid_and_user(uuid: &FolderId, user_uuid: &UserId, conn: &DbConn) -> Option { db_run! { conn: { folders::table .filter(folders::uuid.eq(uuid)) .filter(folders::user_uuid.eq(user_uuid)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { db_run! 
{ conn: { folders::table .filter(folders::user_uuid.eq(user_uuid)) - .load::(conn) + .load::(conn) .expect("Error loading folders") - .from_db() }} } } impl FolderCipher { - pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&self, conn: &DbConn) -> EmptyResult { db_run! { conn: sqlite, mysql { // Not checking for ForeignKey Constraints here. // Table folders_ciphers does not have ForeignKey Constraints which would cause conflicts. // This table has no constraints pointing to itself, but only to others. diesel::replace_into(folders_ciphers::table) - .values(FolderCipherDb::to_db(self)) + .values(self) .execute(conn) .map_res("Error adding cipher to folder") } postgresql { diesel::insert_into(folders_ciphers::table) - .values(FolderCipherDb::to_db(self)) + .values(self) .on_conflict((folders_ciphers::cipher_uuid, folders_ciphers::folder_uuid)) .do_nothing() .execute(conn) @@ -167,7 +164,7 @@ impl FolderCipher { } } - pub async fn delete(self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete( folders_ciphers::table @@ -179,7 +176,7 @@ impl FolderCipher { }} } - pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(folders_ciphers::table.filter(folders_ciphers::cipher_uuid.eq(cipher_uuid))) .execute(conn) @@ -187,7 +184,7 @@ impl FolderCipher { }} } - pub async fn delete_all_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_folder(folder_uuid: &FolderId, conn: &DbConn) -> EmptyResult { db_run! 
{ conn: { diesel::delete(folders_ciphers::table.filter(folders_ciphers::folder_uuid.eq(folder_uuid))) .execute(conn) @@ -198,31 +195,29 @@ impl FolderCipher { pub async fn find_by_folder_and_cipher( folder_uuid: &FolderId, cipher_uuid: &CipherId, - conn: &mut DbConn, + conn: &DbConn, ) -> Option { db_run! { conn: { folders_ciphers::table .filter(folders_ciphers::folder_uuid.eq(folder_uuid)) .filter(folders_ciphers::cipher_uuid.eq(cipher_uuid)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn find_by_folder(folder_uuid: &FolderId, conn: &mut DbConn) -> Vec { + pub async fn find_by_folder(folder_uuid: &FolderId, conn: &DbConn) -> Vec { db_run! { conn: { folders_ciphers::table .filter(folders_ciphers::folder_uuid.eq(folder_uuid)) - .load::(conn) + .load::(conn) .expect("Error loading folders") - .from_db() }} } /// Return a vec with (cipher_uuid, folder_uuid) /// This is used during a full sync so we only need one query for all folder matches. - pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec<(CipherId, FolderId)> { + pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec<(CipherId, FolderId)> { db_run! { conn: { folders_ciphers::table .inner_join(folders::table) diff --git a/src/db/models/group.rs b/src/db/models/group.rs index 310576c4..a24b5325 100644 --- a/src/db/models/group.rs +++ b/src/db/models/group.rs @@ -1,45 +1,45 @@ use super::{CollectionId, Membership, MembershipId, OrganizationId, User, UserId}; use crate::api::EmptyResult; +use crate::db::schema::{collections_groups, groups, groups_users, users_organizations}; use crate::db::DbConn; use crate::error::MapResult; use chrono::{NaiveDateTime, Utc}; use derive_more::{AsRef, Deref, Display, From}; +use diesel::prelude::*; use macros::UuidFromParam; use serde_json::Value; -db_object! 
{ - #[derive(Identifiable, Queryable, Insertable, AsChangeset)] - #[diesel(table_name = groups)] - #[diesel(treat_none_as_null = true)] - #[diesel(primary_key(uuid))] - pub struct Group { - pub uuid: GroupId, - pub organizations_uuid: OrganizationId, - pub name: String, - pub access_all: bool, - pub external_id: Option, - pub creation_date: NaiveDateTime, - pub revision_date: NaiveDateTime, - } - - #[derive(Identifiable, Queryable, Insertable)] - #[diesel(table_name = collections_groups)] - #[diesel(primary_key(collections_uuid, groups_uuid))] - pub struct CollectionGroup { - pub collections_uuid: CollectionId, - pub groups_uuid: GroupId, - pub read_only: bool, - pub hide_passwords: bool, - pub manage: bool, - } - - #[derive(Identifiable, Queryable, Insertable)] - #[diesel(table_name = groups_users)] - #[diesel(primary_key(groups_uuid, users_organizations_uuid))] - pub struct GroupUser { - pub groups_uuid: GroupId, - pub users_organizations_uuid: MembershipId - } +#[derive(Identifiable, Queryable, Insertable, AsChangeset)] +#[diesel(table_name = groups)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(uuid))] +pub struct Group { + pub uuid: GroupId, + pub organizations_uuid: OrganizationId, + pub name: String, + pub access_all: bool, + pub external_id: Option, + pub creation_date: NaiveDateTime, + pub revision_date: NaiveDateTime, +} + +#[derive(Identifiable, Queryable, Insertable)] +#[diesel(table_name = collections_groups)] +#[diesel(primary_key(collections_uuid, groups_uuid))] +pub struct CollectionGroup { + pub collections_uuid: CollectionId, + pub groups_uuid: GroupId, + pub read_only: bool, + pub hide_passwords: bool, + pub manage: bool, +} + +#[derive(Identifiable, Queryable, Insertable)] +#[diesel(table_name = groups_users)] +#[diesel(primary_key(groups_uuid, users_organizations_uuid))] +pub struct GroupUser { + pub groups_uuid: GroupId, + pub users_organizations_uuid: MembershipId, } /// Local methods @@ -77,7 +77,7 @@ impl Group { }) } - pub 
async fn to_json_details(&self, conn: &mut DbConn) -> Value { + pub async fn to_json_details(&self, conn: &DbConn) -> Value { // If both read_only and hide_passwords are false, then manage should be true // You can't have an entry with read_only and manage, or hide_passwords and manage // Or an entry with everything to false @@ -156,13 +156,13 @@ impl GroupUser { /// Database methods impl Group { - pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&mut self, conn: &DbConn) -> EmptyResult { self.revision_date = Utc::now().naive_utc(); db_run! { conn: sqlite, mysql { match diesel::replace_into(groups::table) - .values(GroupDb::to_db(self)) + .values(&*self) .execute(conn) { Ok(_) => Ok(()), @@ -170,7 +170,7 @@ impl Group { Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => { diesel::update(groups::table) .filter(groups::uuid.eq(&self.uuid)) - .set(GroupDb::to_db(self)) + .set(&*self) .execute(conn) .map_res("Error saving group") } @@ -178,36 +178,34 @@ impl Group { }.map_res("Error saving group") } postgresql { - let value = GroupDb::to_db(self); diesel::insert_into(groups::table) - .values(&value) + .values(&*self) .on_conflict(groups::uuid) .do_update() - .set(&value) + .set(&*self) .execute(conn) .map_res("Error saving group") } } } - pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult { for group in Self::find_by_organization(org_uuid, conn).await { group.delete(conn).await?; } Ok(()) } - pub async fn find_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec { + pub async fn find_by_organization(org_uuid: &OrganizationId, conn: &DbConn) -> Vec { db_run! 
{ conn: { groups::table .filter(groups::organizations_uuid.eq(org_uuid)) - .load::(conn) + .load::(conn) .expect("Error loading groups") - .from_db() }} } - pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 { + pub async fn count_by_org(org_uuid: &OrganizationId, conn: &DbConn) -> i64 { db_run! { conn: { groups::table .filter(groups::organizations_uuid.eq(org_uuid)) @@ -218,33 +216,31 @@ impl Group { }} } - pub async fn find_by_uuid_and_org(uuid: &GroupId, org_uuid: &OrganizationId, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid_and_org(uuid: &GroupId, org_uuid: &OrganizationId, conn: &DbConn) -> Option { db_run! { conn: { groups::table .filter(groups::uuid.eq(uuid)) .filter(groups::organizations_uuid.eq(org_uuid)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } pub async fn find_by_external_id_and_org( external_id: &str, org_uuid: &OrganizationId, - conn: &mut DbConn, + conn: &DbConn, ) -> Option { db_run! { conn: { groups::table .filter(groups::external_id.eq(external_id)) .filter(groups::organizations_uuid.eq(org_uuid)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } //Returns all organizations the user has full access to - pub async fn get_orgs_by_user_with_full_access(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn get_orgs_by_user_with_full_access(user_uuid: &UserId, conn: &DbConn) -> Vec { db_run! { conn: { groups_users::table .inner_join(users_organizations::table.on( @@ -262,7 +258,7 @@ impl Group { }} } - pub async fn is_in_full_access_group(user_uuid: &UserId, org_uuid: &OrganizationId, conn: &mut DbConn) -> bool { + pub async fn is_in_full_access_group(user_uuid: &UserId, org_uuid: &OrganizationId, conn: &DbConn) -> bool { db_run! 
{ conn: { groups::table .inner_join(groups_users::table.on( @@ -280,7 +276,7 @@ impl Group { }} } - pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(&self, conn: &DbConn) -> EmptyResult { CollectionGroup::delete_all_by_group(&self.uuid, conn).await?; GroupUser::delete_all_by_group(&self.uuid, conn).await?; @@ -291,14 +287,14 @@ impl Group { }} } - pub async fn update_revision(uuid: &GroupId, conn: &mut DbConn) { + pub async fn update_revision(uuid: &GroupId, conn: &DbConn) { if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await { warn!("Failed to update revision for {uuid}: {e:#?}"); } } - async fn _update_revision(uuid: &GroupId, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult { - db_run! {conn: { + async fn _update_revision(uuid: &GroupId, date: &NaiveDateTime, conn: &DbConn) -> EmptyResult { + db_run! { conn: { crate::util::retry(|| { diesel::update(groups::table.filter(groups::uuid.eq(uuid))) .set(groups::revision_date.eq(date)) @@ -310,7 +306,7 @@ impl Group { } impl CollectionGroup { - pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&mut self, conn: &DbConn) -> EmptyResult { let group_users = GroupUser::find_by_group(&self.groups_uuid, conn).await; for group_user in group_users { group_user.update_user_revision(conn).await; @@ -369,17 +365,16 @@ impl CollectionGroup { } } - pub async fn find_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> Vec { + pub async fn find_by_group(group_uuid: &GroupId, conn: &DbConn) -> Vec { db_run! { conn: { collections_groups::table .filter(collections_groups::groups_uuid.eq(group_uuid)) - .load::(conn) + .load::(conn) .expect("Error loading collection groups") - .from_db() }} } - pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { db_run! 
{ conn: { collections_groups::table .inner_join(groups_users::table.on( @@ -390,24 +385,22 @@ impl CollectionGroup { )) .filter(users_organizations::user_uuid.eq(user_uuid)) .select(collections_groups::all_columns) - .load::(conn) + .load::(conn) .expect("Error loading user collection groups") - .from_db() }} } - pub async fn find_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> Vec { + pub async fn find_by_collection(collection_uuid: &CollectionId, conn: &DbConn) -> Vec { db_run! { conn: { collections_groups::table .filter(collections_groups::collections_uuid.eq(collection_uuid)) .select(collections_groups::all_columns) - .load::(conn) + .load::(conn) .expect("Error loading collection groups") - .from_db() }} } - pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(&self, conn: &DbConn) -> EmptyResult { let group_users = GroupUser::find_by_group(&self.groups_uuid, conn).await; for group_user in group_users { group_user.update_user_revision(conn).await; @@ -422,7 +415,7 @@ impl CollectionGroup { }} } - pub async fn delete_all_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_group(group_uuid: &GroupId, conn: &DbConn) -> EmptyResult { let group_users = GroupUser::find_by_group(group_uuid, conn).await; for group_user in group_users { group_user.update_user_revision(conn).await; @@ -436,7 +429,7 @@ impl CollectionGroup { }} } - pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_collection(collection_uuid: &CollectionId, conn: &DbConn) -> EmptyResult { let collection_assigned_to_groups = CollectionGroup::find_by_collection(collection_uuid, conn).await; for collection_assigned_to_group in collection_assigned_to_groups { let group_users = GroupUser::find_by_group(&collection_assigned_to_group.groups_uuid, conn).await; @@ -455,7 +448,7 @@ impl CollectionGroup { } impl GroupUser { - pub async fn 
save(&mut self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&mut self, conn: &DbConn) -> EmptyResult { self.update_user_revision(conn).await; db_run! { conn: @@ -501,30 +494,28 @@ impl GroupUser { } } - pub async fn find_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> Vec { + pub async fn find_by_group(group_uuid: &GroupId, conn: &DbConn) -> Vec { db_run! { conn: { groups_users::table .filter(groups_users::groups_uuid.eq(group_uuid)) - .load::(conn) + .load::(conn) .expect("Error loading group users") - .from_db() }} } - pub async fn find_by_member(member_uuid: &MembershipId, conn: &mut DbConn) -> Vec { + pub async fn find_by_member(member_uuid: &MembershipId, conn: &DbConn) -> Vec { db_run! { conn: { groups_users::table .filter(groups_users::users_organizations_uuid.eq(member_uuid)) - .load::(conn) + .load::(conn) .expect("Error loading groups for user") - .from_db() }} } pub async fn has_access_to_collection_by_member( collection_uuid: &CollectionId, member_uuid: &MembershipId, - conn: &mut DbConn, + conn: &DbConn, ) -> bool { db_run! { conn: { groups_users::table @@ -542,7 +533,7 @@ impl GroupUser { pub async fn has_full_access_by_member( org_uuid: &OrganizationId, member_uuid: &MembershipId, - conn: &mut DbConn, + conn: &DbConn, ) -> bool { db_run! 
{ conn: { groups_users::table @@ -558,7 +549,7 @@ impl GroupUser { }} } - pub async fn update_user_revision(&self, conn: &mut DbConn) { + pub async fn update_user_revision(&self, conn: &DbConn) { match Membership::find_by_uuid(&self.users_organizations_uuid, conn).await { Some(member) => User::update_uuid_revision(&member.user_uuid, conn).await, None => warn!("Member could not be found!"), @@ -568,7 +559,7 @@ impl GroupUser { pub async fn delete_by_group_and_member( group_uuid: &GroupId, member_uuid: &MembershipId, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { match Membership::find_by_uuid(member_uuid, conn).await { Some(member) => User::update_uuid_revision(&member.user_uuid, conn).await, @@ -584,7 +575,7 @@ impl GroupUser { }} } - pub async fn delete_all_by_group(group_uuid: &GroupId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_group(group_uuid: &GroupId, conn: &DbConn) -> EmptyResult { let group_users = GroupUser::find_by_group(group_uuid, conn).await; for group_user in group_users { group_user.update_user_revision(conn).await; @@ -598,7 +589,7 @@ impl GroupUser { }} } - pub async fn delete_all_by_member(member_uuid: &MembershipId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_member(member_uuid: &MembershipId, conn: &DbConn) -> EmptyResult { match Membership::find_by_uuid(member_uuid, conn).await { Some(member) => User::update_uuid_revision(&member.user_uuid, conn).await, None => warn!("Member could not be found!"), diff --git a/src/db/models/org_policy.rs b/src/db/models/org_policy.rs index aac145cb..15e3a5ae 100644 --- a/src/db/models/org_policy.rs +++ b/src/db/models/org_policy.rs @@ -3,22 +3,22 @@ use serde::Deserialize; use serde_json::Value; use crate::api::EmptyResult; +use crate::db::schema::{org_policies, users_organizations}; use crate::db::DbConn; use crate::error::MapResult; +use diesel::prelude::*; use super::{Membership, MembershipId, MembershipStatus, MembershipType, OrganizationId, TwoFactor, 
UserId}; -db_object! { - #[derive(Identifiable, Queryable, Insertable, AsChangeset)] - #[diesel(table_name = org_policies)] - #[diesel(primary_key(uuid))] - pub struct OrgPolicy { - pub uuid: OrgPolicyId, - pub org_uuid: OrganizationId, - pub atype: i32, - pub enabled: bool, - pub data: String, - } +#[derive(Identifiable, Queryable, Insertable, AsChangeset)] +#[diesel(table_name = org_policies)] +#[diesel(primary_key(uuid))] +pub struct OrgPolicy { + pub uuid: OrgPolicyId, + pub org_uuid: OrganizationId, + pub atype: i32, + pub enabled: bool, + pub data: String, } // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Enums/PolicyType.cs @@ -106,11 +106,11 @@ impl OrgPolicy { /// Database methods impl OrgPolicy { - pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&self, conn: &DbConn) -> EmptyResult { db_run! { conn: sqlite, mysql { match diesel::replace_into(org_policies::table) - .values(OrgPolicyDb::to_db(self)) + .values(self) .execute(conn) { Ok(_) => Ok(()), @@ -118,7 +118,7 @@ impl OrgPolicy { Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => { diesel::update(org_policies::table) .filter(org_policies::uuid.eq(&self.uuid)) - .set(OrgPolicyDb::to_db(self)) + .set(self) .execute(conn) .map_res("Error saving org_policy") } @@ -126,7 +126,6 @@ impl OrgPolicy { }.map_res("Error saving org_policy") } postgresql { - let value = OrgPolicyDb::to_db(self); // We need to make sure we're not going to violate the unique constraint on org_uuid and atype. // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does // not support multiple constraints on ON CONFLICT clauses. 
@@ -139,17 +138,17 @@ impl OrgPolicy { .map_res("Error deleting org_policy for insert")?; diesel::insert_into(org_policies::table) - .values(&value) + .values(self) .on_conflict(org_policies::uuid) .do_update() - .set(&value) + .set(self) .execute(conn) .map_res("Error saving org_policy") } } } - pub async fn delete(self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(org_policies::table.filter(org_policies::uuid.eq(self.uuid))) .execute(conn) @@ -157,17 +156,16 @@ impl OrgPolicy { }} } - pub async fn find_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec { + pub async fn find_by_org(org_uuid: &OrganizationId, conn: &DbConn) -> Vec { db_run! { conn: { org_policies::table .filter(org_policies::org_uuid.eq(org_uuid)) - .load::(conn) + .load::(conn) .expect("Error loading org_policy") - .from_db() }} } - pub async fn find_confirmed_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn find_confirmed_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { db_run! { conn: { org_policies::table .inner_join( @@ -179,28 +177,26 @@ impl OrgPolicy { users_organizations::status.eq(MembershipStatus::Confirmed as i32) ) .select(org_policies::all_columns) - .load::(conn) + .load::(conn) .expect("Error loading org_policy") - .from_db() }} } pub async fn find_by_org_and_type( org_uuid: &OrganizationId, policy_type: OrgPolicyType, - conn: &mut DbConn, + conn: &DbConn, ) -> Option { db_run! { conn: { org_policies::table .filter(org_policies::org_uuid.eq(org_uuid)) .filter(org_policies::atype.eq(policy_type as i32)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult { db_run! 
{ conn: { diesel::delete(org_policies::table.filter(org_policies::org_uuid.eq(org_uuid))) .execute(conn) @@ -229,16 +225,15 @@ impl OrgPolicy { .filter(org_policies::atype.eq(policy_type as i32)) .filter(org_policies::enabled.eq(true)) .select(org_policies::all_columns) - .load::(conn) + .load::(conn) .expect("Error loading org_policy") - .from_db() }} } pub async fn find_confirmed_by_user_and_active_policy( user_uuid: &UserId, policy_type: OrgPolicyType, - conn: &mut DbConn, + conn: &DbConn, ) -> Vec { db_run! { conn: { org_policies::table @@ -253,9 +248,8 @@ impl OrgPolicy { .filter(org_policies::atype.eq(policy_type as i32)) .filter(org_policies::enabled.eq(true)) .select(org_policies::all_columns) - .load::(conn) + .load::(conn) .expect("Error loading org_policy") - .from_db() }} } @@ -266,7 +260,7 @@ impl OrgPolicy { user_uuid: &UserId, policy_type: OrgPolicyType, exclude_org_uuid: Option<&OrganizationId>, - conn: &mut DbConn, + conn: &DbConn, ) -> bool { for policy in OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy(user_uuid, policy_type, conn).await @@ -289,7 +283,7 @@ impl OrgPolicy { user_uuid: &UserId, org_uuid: &OrganizationId, exclude_current_org: bool, - conn: &mut DbConn, + conn: &DbConn, ) -> OrgPolicyResult { // Enforce TwoFactor/TwoStep login if TwoFactor::find_by_user(user_uuid, conn).await.is_empty() { @@ -315,7 +309,7 @@ impl OrgPolicy { Ok(()) } - pub async fn org_is_reset_password_auto_enroll(org_uuid: &OrganizationId, conn: &mut DbConn) -> bool { + pub async fn org_is_reset_password_auto_enroll(org_uuid: &OrganizationId, conn: &DbConn) -> bool { match OrgPolicy::find_by_org_and_type(org_uuid, OrgPolicyType::ResetPassword, conn).await { Some(policy) => match serde_json::from_str::(&policy.data) { Ok(opts) => { @@ -331,7 +325,7 @@ impl OrgPolicy { /// Returns true if the user belongs to an org that has enabled the `DisableHideEmail` /// option of the `Send Options` policy, and the user is not an owner or admin of that org. 
- pub async fn is_hide_email_disabled(user_uuid: &UserId, conn: &mut DbConn) -> bool { + pub async fn is_hide_email_disabled(user_uuid: &UserId, conn: &DbConn) -> bool { for policy in OrgPolicy::find_confirmed_by_user_and_active_policy(user_uuid, OrgPolicyType::SendOptions, conn).await { @@ -351,11 +345,7 @@ impl OrgPolicy { false } - pub async fn is_enabled_for_member( - member_uuid: &MembershipId, - policy_type: OrgPolicyType, - conn: &mut DbConn, - ) -> bool { + pub async fn is_enabled_for_member(member_uuid: &MembershipId, policy_type: OrgPolicyType, conn: &DbConn) -> bool { if let Some(member) = Membership::find_by_uuid(member_uuid, conn).await { if let Some(policy) = OrgPolicy::find_by_org_and_type(&member.org_uuid, policy_type, conn).await { return policy.enabled; diff --git a/src/db/models/organization.rs b/src/db/models/organization.rs index 77cf91c0..386e3660 100644 --- a/src/db/models/organization.rs +++ b/src/db/models/organization.rs @@ -1,5 +1,6 @@ use chrono::{NaiveDateTime, Utc}; use derive_more::{AsRef, Deref, Display, From}; +use diesel::prelude::*; use num_traits::FromPrimitive; use serde_json::Value; use std::{ @@ -11,51 +12,53 @@ use super::{ CipherId, Collection, CollectionGroup, CollectionId, CollectionUser, Group, GroupId, GroupUser, OrgPolicy, OrgPolicyType, TwoFactor, User, UserId, }; +use crate::db::schema::{ + ciphers, ciphers_collections, collections_groups, groups, groups_users, org_policies, organization_api_key, + organizations, users, users_collections, users_organizations, +}; use crate::CONFIG; use macros::UuidFromParam; -db_object! 
{ - #[derive(Identifiable, Queryable, Insertable, AsChangeset)] - #[diesel(table_name = organizations)] - #[diesel(treat_none_as_null = true)] - #[diesel(primary_key(uuid))] - pub struct Organization { - pub uuid: OrganizationId, - pub name: String, - pub billing_email: String, - pub private_key: Option, - pub public_key: Option, - } - - #[derive(Identifiable, Queryable, Insertable, AsChangeset)] - #[diesel(table_name = users_organizations)] - #[diesel(treat_none_as_null = true)] - #[diesel(primary_key(uuid))] - pub struct Membership { - pub uuid: MembershipId, - pub user_uuid: UserId, - pub org_uuid: OrganizationId, - - pub invited_by_email: Option, - - pub access_all: bool, - pub akey: String, - pub status: i32, - pub atype: i32, - pub reset_password_key: Option, - pub external_id: Option, - } - - #[derive(Identifiable, Queryable, Insertable, AsChangeset)] - #[diesel(table_name = organization_api_key)] - #[diesel(primary_key(uuid, org_uuid))] - pub struct OrganizationApiKey { - pub uuid: OrgApiKeyId, - pub org_uuid: OrganizationId, - pub atype: i32, - pub api_key: String, - pub revision_date: NaiveDateTime, - } +#[derive(Identifiable, Queryable, Insertable, AsChangeset)] +#[diesel(table_name = organizations)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(uuid))] +pub struct Organization { + pub uuid: OrganizationId, + pub name: String, + pub billing_email: String, + pub private_key: Option, + pub public_key: Option, +} + +#[derive(Identifiable, Queryable, Insertable, AsChangeset)] +#[diesel(table_name = users_organizations)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(uuid))] +pub struct Membership { + pub uuid: MembershipId, + pub user_uuid: UserId, + pub org_uuid: OrganizationId, + + pub invited_by_email: Option, + + pub access_all: bool, + pub akey: String, + pub status: i32, + pub atype: i32, + pub reset_password_key: Option, + pub external_id: Option, +} + +#[derive(Identifiable, Queryable, Insertable, AsChangeset)] 
+#[diesel(table_name = organization_api_key)] +#[diesel(primary_key(uuid, org_uuid))] +pub struct OrganizationApiKey { + pub uuid: OrgApiKeyId, + pub org_uuid: OrganizationId, + pub atype: i32, + pub api_key: String, + pub revision_date: NaiveDateTime, } // https://github.com/bitwarden/server/blob/9ebe16587175b1c0e9208f84397bb75d0d595510/src/Core/AdminConsole/Enums/OrganizationUserStatusType.cs @@ -325,7 +328,7 @@ use crate::error::MapResult; /// Database methods impl Organization { - pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&self, conn: &DbConn) -> EmptyResult { if !crate::util::is_valid_email(&self.billing_email) { err!(format!("BillingEmail {} is not a valid email address", self.billing_email)) } @@ -337,7 +340,7 @@ impl Organization { db_run! { conn: sqlite, mysql { match diesel::replace_into(organizations::table) - .values(OrganizationDb::to_db(self)) + .values(self) .execute(conn) { Ok(_) => Ok(()), @@ -345,7 +348,7 @@ impl Organization { Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => { diesel::update(organizations::table) .filter(organizations::uuid.eq(&self.uuid)) - .set(OrganizationDb::to_db(self)) + .set(self) .execute(conn) .map_res("Error saving organization") } @@ -354,19 +357,18 @@ impl Organization { } postgresql { - let value = OrganizationDb::to_db(self); diesel::insert_into(organizations::table) - .values(&value) + .values(self) .on_conflict(organizations::uuid) .do_update() - .set(&value) + .set(self) .execute(conn) .map_res("Error saving organization") } } } - pub async fn delete(self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { use super::{Cipher, Collection}; Cipher::delete_all_by_organization(&self.uuid, conn).await?; @@ -383,31 +385,33 @@ impl Organization { }} } - pub async fn find_by_uuid(uuid: &OrganizationId, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid(uuid: &OrganizationId, 
conn: &DbConn) -> Option { db_run! { conn: { organizations::table .filter(organizations::uuid.eq(uuid)) - .first::(conn) - .ok().from_db() + .first::(conn) + .ok() }} } - pub async fn find_by_name(name: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_name(name: &str, conn: &DbConn) -> Option { db_run! { conn: { organizations::table .filter(organizations::name.eq(name)) - .first::(conn) - .ok().from_db() + .first::(conn) + .ok() }} } - pub async fn get_all(conn: &mut DbConn) -> Vec { + pub async fn get_all(conn: &DbConn) -> Vec { db_run! { conn: { - organizations::table.load::(conn).expect("Error loading organizations").from_db() + organizations::table + .load::(conn) + .expect("Error loading organizations") }} } - pub async fn find_main_org_user_email(user_email: &str, conn: &mut DbConn) -> Option { + pub async fn find_main_org_user_email(user_email: &str, conn: &DbConn) -> Option { let lower_mail = user_email.to_lowercase(); db_run! { conn: { @@ -418,12 +422,12 @@ impl Organization { .filter(users_organizations::status.ne(MembershipStatus::Revoked as i32)) .order(users_organizations::atype.asc()) .select(organizations::all_columns) - .first::(conn) - .ok().from_db() + .first::(conn) + .ok() }} } - pub async fn find_org_user_email(user_email: &str, conn: &mut DbConn) -> Vec { + pub async fn find_org_user_email(user_email: &str, conn: &DbConn) -> Vec { let lower_mail = user_email.to_lowercase(); db_run! 
{ conn: { @@ -434,15 +438,14 @@ impl Organization { .filter(users_organizations::status.ne(MembershipStatus::Revoked as i32)) .order(users_organizations::atype.asc()) .select(organizations::all_columns) - .load::(conn) + .load::(conn) .expect("Error loading user orgs") - .from_db() }} } } impl Membership { - pub async fn to_json(&self, conn: &mut DbConn) -> Value { + pub async fn to_json(&self, conn: &DbConn) -> Value { let org = Organization::find_by_uuid(&self.org_uuid, conn).await.unwrap(); // HACK: Convert the manager type to a custom type @@ -533,12 +536,7 @@ impl Membership { }) } - pub async fn to_json_user_details( - &self, - include_collections: bool, - include_groups: bool, - conn: &mut DbConn, - ) -> Value { + pub async fn to_json_user_details(&self, include_collections: bool, include_groups: bool, conn: &DbConn) -> Value { let user = User::find_by_uuid(&self.user_uuid, conn).await.unwrap(); // Because BitWarden want the status to be -1 for revoked users we need to catch that here. @@ -680,7 +678,7 @@ impl Membership { }) } - pub async fn to_json_details(&self, conn: &mut DbConn) -> Value { + pub async fn to_json_details(&self, conn: &DbConn) -> Value { let coll_uuids = if self.access_all { vec![] // If we have complete access, no need to fill the array } else { @@ -720,7 +718,7 @@ impl Membership { }) } - pub async fn to_json_mini_details(&self, conn: &mut DbConn) -> Value { + pub async fn to_json_mini_details(&self, conn: &DbConn) -> Value { let user = User::find_by_uuid(&self.user_uuid, conn).await.unwrap(); // Because Bitwarden wants the status to be -1 for revoked users we need to catch that here. @@ -742,13 +740,13 @@ impl Membership { }) } - pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&self, conn: &DbConn) -> EmptyResult { User::update_uuid_revision(&self.user_uuid, conn).await; db_run! 
{ conn: sqlite, mysql { match diesel::replace_into(users_organizations::table) - .values(MembershipDb::to_db(self)) + .values(self) .execute(conn) { Ok(_) => Ok(()), @@ -756,7 +754,7 @@ impl Membership { Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => { diesel::update(users_organizations::table) .filter(users_organizations::uuid.eq(&self.uuid)) - .set(MembershipDb::to_db(self)) + .set(self) .execute(conn) .map_res("Error adding user to organization") }, @@ -764,19 +762,18 @@ impl Membership { }.map_res("Error adding user to organization") } postgresql { - let value = MembershipDb::to_db(self); diesel::insert_into(users_organizations::table) - .values(&value) + .values(self) .on_conflict(users_organizations::uuid) .do_update() - .set(&value) + .set(self) .execute(conn) .map_res("Error adding user to organization") } } } - pub async fn delete(self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { User::update_uuid_revision(&self.user_uuid, conn).await; CollectionUser::delete_all_by_user_and_org(&self.user_uuid, &self.org_uuid, conn).await?; @@ -789,25 +786,21 @@ impl Membership { }} } - pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult { for member in Self::find_by_org(org_uuid, conn).await { member.delete(conn).await?; } Ok(()) } - pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &DbConn) -> EmptyResult { for member in Self::find_any_state_by_user(user_uuid, conn).await { member.delete(conn).await?; } Ok(()) } - pub async fn find_by_email_and_org( - email: &str, - org_uuid: &OrganizationId, - conn: &mut DbConn, - ) -> Option { + pub async fn find_by_email_and_org(email: &str, org_uuid: &OrganizationId, conn: &DbConn) 
-> Option { if let Some(user) = User::find_by_mail(email, conn).await { if let Some(member) = Membership::find_by_user_and_org(&user.uuid, org_uuid, conn).await { return Some(member); @@ -829,52 +822,48 @@ impl Membership { (self.access_all || self.atype >= MembershipType::Admin) && self.has_status(MembershipStatus::Confirmed) } - pub async fn find_by_uuid(uuid: &MembershipId, conn: &mut DbConn) -> Option { + pub async fn find_by_uuid(uuid: &MembershipId, conn: &DbConn) -> Option { db_run! { conn: { users_organizations::table .filter(users_organizations::uuid.eq(uuid)) - .first::(conn) - .ok().from_db() + .first::(conn) + .ok() }} } - pub async fn find_by_uuid_and_org( - uuid: &MembershipId, - org_uuid: &OrganizationId, - conn: &mut DbConn, - ) -> Option { + pub async fn find_by_uuid_and_org(uuid: &MembershipId, org_uuid: &OrganizationId, conn: &DbConn) -> Option { db_run! { conn: { users_organizations::table .filter(users_organizations::uuid.eq(uuid)) .filter(users_organizations::org_uuid.eq(org_uuid)) - .first::(conn) - .ok().from_db() + .first::(conn) + .ok() }} } - pub async fn find_confirmed_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn find_confirmed_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) .filter(users_organizations::status.eq(MembershipStatus::Confirmed as i32)) - .load::(conn) - .unwrap_or_default().from_db() + .load::(conn) + .unwrap_or_default() }} } - pub async fn find_invited_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn find_invited_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) .filter(users_organizations::status.eq(MembershipStatus::Invited as i32)) - .load::(conn) - .unwrap_or_default().from_db() + .load::(conn) + .unwrap_or_default() }} } // Should be used only when email are disabled. 
// In Organizations::send_invite status is set to Accepted only if the user has a password. - pub async fn accept_user_invitations(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { + pub async fn accept_user_invitations(user_uuid: &UserId, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::update(users_organizations::table) .filter(users_organizations::user_uuid.eq(user_uuid)) @@ -885,16 +874,16 @@ impl Membership { }} } - pub async fn find_any_state_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn find_any_state_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) - .load::(conn) - .unwrap_or_default().from_db() + .load::(conn) + .unwrap_or_default() }} } - pub async fn count_accepted_and_confirmed_by_user(user_uuid: &UserId, conn: &mut DbConn) -> i64 { + pub async fn count_accepted_and_confirmed_by_user(user_uuid: &UserId, conn: &DbConn) -> i64 { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) @@ -905,27 +894,27 @@ impl Membership { }} } - pub async fn find_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec { + pub async fn find_by_org(org_uuid: &OrganizationId, conn: &DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) - .load::(conn) - .expect("Error loading user organizations").from_db() + .load::(conn) + .expect("Error loading user organizations") }} } - pub async fn find_confirmed_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec { + pub async fn find_confirmed_by_org(org_uuid: &OrganizationId, conn: &DbConn) -> Vec { db_run! 
{ conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) .filter(users_organizations::status.eq(MembershipStatus::Confirmed as i32)) - .load::(conn) - .unwrap_or_default().from_db() + .load::(conn) + .unwrap_or_default() }} } // Get all users which are either owner or admin, or a manager which can manage/access all - pub async fn find_confirmed_and_manage_all_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec { + pub async fn find_confirmed_and_manage_all_by_org(org_uuid: &OrganizationId, conn: &DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) @@ -934,12 +923,12 @@ impl Membership { users_organizations::atype.eq_any(vec![MembershipType::Owner as i32, MembershipType::Admin as i32]) .or(users_organizations::atype.eq(MembershipType::Manager as i32).and(users_organizations::access_all.eq(true))) ) - .load::(conn) - .unwrap_or_default().from_db() + .load::(conn) + .unwrap_or_default() }} } - pub async fn count_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> i64 { + pub async fn count_by_org(org_uuid: &OrganizationId, conn: &DbConn) -> i64 { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) @@ -950,24 +939,20 @@ impl Membership { }} } - pub async fn find_by_org_and_type( - org_uuid: &OrganizationId, - atype: MembershipType, - conn: &mut DbConn, - ) -> Vec { + pub async fn find_by_org_and_type(org_uuid: &OrganizationId, atype: MembershipType, conn: &DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) .filter(users_organizations::atype.eq(atype as i32)) - .load::(conn) - .expect("Error loading user organizations").from_db() + .load::(conn) + .expect("Error loading user organizations") }} } pub async fn count_confirmed_by_org_and_type( org_uuid: &OrganizationId, atype: MembershipType, - conn: &mut DbConn, + conn: &DbConn, ) -> i64 { db_run! 
{ conn: { users_organizations::table @@ -980,24 +965,20 @@ impl Membership { }} } - pub async fn find_by_user_and_org( - user_uuid: &UserId, - org_uuid: &OrganizationId, - conn: &mut DbConn, - ) -> Option { + pub async fn find_by_user_and_org(user_uuid: &UserId, org_uuid: &OrganizationId, conn: &DbConn) -> Option { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) .filter(users_organizations::org_uuid.eq(org_uuid)) - .first::(conn) - .ok().from_db() + .first::(conn) + .ok() }} } pub async fn find_confirmed_by_user_and_org( user_uuid: &UserId, org_uuid: &OrganizationId, - conn: &mut DbConn, + conn: &DbConn, ) -> Option { db_run! { conn: { users_organizations::table @@ -1006,21 +987,21 @@ impl Membership { .filter( users_organizations::status.eq(MembershipStatus::Confirmed as i32) ) - .first::(conn) - .ok().from_db() + .first::(conn) + .ok() }} } - pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) - .load::(conn) - .expect("Error loading user organizations").from_db() + .load::(conn) + .expect("Error loading user organizations") }} } - pub async fn get_orgs_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn get_orgs_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) @@ -1030,11 +1011,7 @@ impl Membership { }} } - pub async fn find_by_user_and_policy( - user_uuid: &UserId, - policy_type: OrgPolicyType, - conn: &mut DbConn, - ) -> Vec { + pub async fn find_by_user_and_policy(user_uuid: &UserId, policy_type: OrgPolicyType, conn: &DbConn) -> Vec { db_run! 
{ conn: { users_organizations::table .inner_join( @@ -1048,16 +1025,12 @@ impl Membership { users_organizations::status.eq(MembershipStatus::Confirmed as i32) ) .select(users_organizations::all_columns) - .load::(conn) - .unwrap_or_default().from_db() + .load::(conn) + .unwrap_or_default() }} } - pub async fn find_by_cipher_and_org( - cipher_uuid: &CipherId, - org_uuid: &OrganizationId, - conn: &mut DbConn, - ) -> Vec { + pub async fn find_by_cipher_and_org(cipher_uuid: &CipherId, org_uuid: &OrganizationId, conn: &DbConn) -> Vec { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) @@ -1076,14 +1049,15 @@ impl Membership { ) .select(users_organizations::all_columns) .distinct() - .load::(conn).expect("Error loading user organizations").from_db() + .load::(conn) + .expect("Error loading user organizations") }} } pub async fn find_by_cipher_and_org_with_group( cipher_uuid: &CipherId, org_uuid: &OrganizationId, - conn: &mut DbConn, + conn: &DbConn, ) -> Vec { db_run! { conn: { users_organizations::table @@ -1106,15 +1080,12 @@ impl Membership { ) .select(users_organizations::all_columns) .distinct() - .load::(conn).expect("Error loading user organizations with groups").from_db() + .load::(conn) + .expect("Error loading user organizations with groups") }} } - pub async fn user_has_ge_admin_access_to_cipher( - user_uuid: &UserId, - cipher_uuid: &CipherId, - conn: &mut DbConn, - ) -> bool { + pub async fn user_has_ge_admin_access_to_cipher(user_uuid: &UserId, cipher_uuid: &CipherId, conn: &DbConn) -> bool { db_run! 
{ conn: { users_organizations::table .inner_join(ciphers::table.on(ciphers::uuid.eq(cipher_uuid).and(ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())))) @@ -1122,14 +1093,15 @@ impl Membership { .filter(users_organizations::atype.eq_any(vec![MembershipType::Owner as i32, MembershipType::Admin as i32])) .count() .first::(conn) - .ok().unwrap_or(0) != 0 + .ok() + .unwrap_or(0) != 0 }} } pub async fn find_by_collection_and_org( collection_uuid: &CollectionId, org_uuid: &OrganizationId, - conn: &mut DbConn, + conn: &DbConn, ) -> Vec { db_run! { conn: { users_organizations::table @@ -1143,33 +1115,31 @@ impl Membership { ) ) .select(users_organizations::all_columns) - .load::(conn).expect("Error loading user organizations").from_db() + .load::(conn) + .expect("Error loading user organizations") }} } - pub async fn find_by_external_id_and_org( - ext_id: &str, - org_uuid: &OrganizationId, - conn: &mut DbConn, - ) -> Option { - db_run! {conn: { + pub async fn find_by_external_id_and_org(ext_id: &str, org_uuid: &OrganizationId, conn: &DbConn) -> Option { + db_run! { conn: { users_organizations::table .filter( users_organizations::external_id.eq(ext_id) .and(users_organizations::org_uuid.eq(org_uuid)) ) - .first::(conn).ok().from_db() + .first::(conn) + .ok() }} } - pub async fn find_main_user_org(user_uuid: &str, conn: &mut DbConn) -> Option { + pub async fn find_main_user_org(user_uuid: &str, conn: &DbConn) -> Option { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) .filter(users_organizations::status.ne(MembershipStatus::Revoked as i32)) .order(users_organizations::atype.asc()) - .first::(conn) - .ok().from_db() + .first::(conn) + .ok() }} } } @@ -1179,7 +1149,7 @@ impl OrganizationApiKey { db_run! 
{ conn: sqlite, mysql { match diesel::replace_into(organization_api_key::table) - .values(OrganizationApiKeyDb::to_db(self)) + .values(self) .execute(conn) { Ok(_) => Ok(()), @@ -1187,7 +1157,7 @@ impl OrganizationApiKey { Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => { diesel::update(organization_api_key::table) .filter(organization_api_key::uuid.eq(&self.uuid)) - .set(OrganizationApiKeyDb::to_db(self)) + .set(self) .execute(conn) .map_res("Error saving organization") } @@ -1196,12 +1166,11 @@ impl OrganizationApiKey { } postgresql { - let value = OrganizationApiKeyDb::to_db(self); diesel::insert_into(organization_api_key::table) - .values(&value) + .values(self) .on_conflict((organization_api_key::uuid, organization_api_key::org_uuid)) .do_update() - .set(&value) + .set(self) .execute(conn) .map_res("Error saving organization") } @@ -1212,12 +1181,12 @@ impl OrganizationApiKey { db_run! { conn: { organization_api_key::table .filter(organization_api_key::org_uuid.eq(org_uuid)) - .first::(conn) - .ok().from_db() + .first::(conn) + .ok() }} } - pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_organization(org_uuid: &OrganizationId, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(organization_api_key::table.filter(organization_api_key::org_uuid.eq(org_uuid))) .execute(conn) diff --git a/src/db/models/send.rs b/src/db/models/send.rs index bf82c181..368c9272 100644 --- a/src/db/models/send.rs +++ b/src/db/models/send.rs @@ -4,40 +4,40 @@ use serde_json::Value; use crate::{config::PathType, util::LowerCase, CONFIG}; use super::{OrganizationId, User, UserId}; +use crate::db::schema::sends; +use diesel::prelude::*; use id::SendId; -db_object! 
{ - #[derive(Identifiable, Queryable, Insertable, AsChangeset)] - #[diesel(table_name = sends)] - #[diesel(treat_none_as_null = true)] - #[diesel(primary_key(uuid))] - pub struct Send { - pub uuid: SendId, - - pub user_uuid: Option, - pub organization_uuid: Option, - - pub name: String, - pub notes: Option, - - pub atype: i32, - pub data: String, - pub akey: String, - pub password_hash: Option>, - password_salt: Option>, - password_iter: Option, - - pub max_access_count: Option, - pub access_count: i32, - - pub creation_date: NaiveDateTime, - pub revision_date: NaiveDateTime, - pub expiration_date: Option, - pub deletion_date: NaiveDateTime, - - pub disabled: bool, - pub hide_email: Option, - } +#[derive(Identifiable, Queryable, Insertable, AsChangeset)] +#[diesel(table_name = sends)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(uuid))] +pub struct Send { + pub uuid: SendId, + + pub user_uuid: Option, + pub organization_uuid: Option, + + pub name: String, + pub notes: Option, + + pub atype: i32, + pub data: String, + pub akey: String, + pub password_hash: Option>, + password_salt: Option>, + password_iter: Option, + + pub max_access_count: Option, + pub access_count: i32, + + pub creation_date: NaiveDateTime, + pub revision_date: NaiveDateTime, + pub expiration_date: Option, + pub deletion_date: NaiveDateTime, + + pub disabled: bool, + pub hide_email: Option, } #[derive(Copy, Clone, PartialEq, Eq, num_derive::FromPrimitive)] @@ -103,7 +103,7 @@ impl Send { } } - pub async fn creator_identifier(&self, conn: &mut DbConn) -> Option { + pub async fn creator_identifier(&self, conn: &DbConn) -> Option { if let Some(hide_email) = self.hide_email { if hide_email { return None; @@ -155,7 +155,7 @@ impl Send { }) } - pub async fn to_json_access(&self, conn: &mut DbConn) -> Value { + pub async fn to_json_access(&self, conn: &DbConn) -> Value { use crate::util::format_date; let mut data = serde_json::from_str::>(&self.data).map(|d| d.data).unwrap_or_default(); 
@@ -187,14 +187,14 @@ use crate::error::MapResult; use crate::util::NumberOrString; impl Send { - pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&mut self, conn: &DbConn) -> EmptyResult { self.update_users_revision(conn).await; self.revision_date = Utc::now().naive_utc(); db_run! { conn: sqlite, mysql { match diesel::replace_into(sends::table) - .values(SendDb::to_db(self)) + .values(&*self) .execute(conn) { Ok(_) => Ok(()), @@ -202,7 +202,7 @@ impl Send { Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => { diesel::update(sends::table) .filter(sends::uuid.eq(&self.uuid)) - .set(SendDb::to_db(self)) + .set(&*self) .execute(conn) .map_res("Error saving send") } @@ -210,19 +210,18 @@ impl Send { }.map_res("Error saving send") } postgresql { - let value = SendDb::to_db(self); diesel::insert_into(sends::table) - .values(&value) + .values(&*self) .on_conflict(sends::uuid) .do_update() - .set(&value) + .set(&*self) .execute(conn) .map_res("Error saving send") } } } - pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(&self, conn: &DbConn) -> EmptyResult { self.update_users_revision(conn).await; if self.atype == SendType::File as i32 { @@ -238,13 +237,13 @@ impl Send { } /// Purge all sends that are past their deletion date. 
- pub async fn purge(conn: &mut DbConn) { + pub async fn purge(conn: &DbConn) { for send in Self::find_by_past_deletion_date(conn).await { send.delete(conn).await.ok(); } } - pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec { + pub async fn update_users_revision(&self, conn: &DbConn) -> Vec { let mut user_uuids = Vec::new(); match &self.user_uuid { Some(user_uuid) => { @@ -258,14 +257,14 @@ impl Send { user_uuids } - pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &DbConn) -> EmptyResult { for send in Self::find_by_user(user_uuid, conn).await { send.delete(conn).await?; } Ok(()) } - pub async fn find_by_access_id(access_id: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_access_id(access_id: &str, conn: &DbConn) -> Option { use data_encoding::BASE64URL_NOPAD; use uuid::Uuid; @@ -281,36 +280,35 @@ impl Send { Self::find_by_uuid(&uuid, conn).await } - pub async fn find_by_uuid(uuid: &SendId, conn: &mut DbConn) -> Option { - db_run! {conn: { + pub async fn find_by_uuid(uuid: &SendId, conn: &DbConn) -> Option { + db_run! { conn: { sends::table .filter(sends::uuid.eq(uuid)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn find_by_uuid_and_user(uuid: &SendId, user_uuid: &UserId, conn: &mut DbConn) -> Option { - db_run! {conn: { + pub async fn find_by_uuid_and_user(uuid: &SendId, user_uuid: &UserId, conn: &DbConn) -> Option { + db_run! { conn: { sends::table .filter(sends::uuid.eq(uuid)) .filter(sends::user_uuid.eq(user_uuid)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { - db_run! {conn: { + pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { + db_run! 
{ conn: { sends::table .filter(sends::user_uuid.eq(user_uuid)) - .load::(conn).expect("Error loading sends").from_db() + .load::(conn) + .expect("Error loading sends") }} } - pub async fn size_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Option { + pub async fn size_by_user(user_uuid: &UserId, conn: &DbConn) -> Option { let sends = Self::find_by_user(user_uuid, conn).await; #[derive(serde::Deserialize)] @@ -333,20 +331,22 @@ impl Send { Some(total) } - pub async fn find_by_org(org_uuid: &OrganizationId, conn: &mut DbConn) -> Vec { - db_run! {conn: { + pub async fn find_by_org(org_uuid: &OrganizationId, conn: &DbConn) -> Vec { + db_run! { conn: { sends::table .filter(sends::organization_uuid.eq(org_uuid)) - .load::(conn).expect("Error loading sends").from_db() + .load::(conn) + .expect("Error loading sends") }} } - pub async fn find_by_past_deletion_date(conn: &mut DbConn) -> Vec { + pub async fn find_by_past_deletion_date(conn: &DbConn) -> Vec { let now = Utc::now().naive_utc(); - db_run! {conn: { + db_run! { conn: { sends::table .filter(sends::deletion_date.lt(now)) - .load::(conn).expect("Error loading sends").from_db() + .load::(conn) + .expect("Error loading sends") }} } } diff --git a/src/db/models/sso_nonce.rs b/src/db/models/sso_nonce.rs index 2246a437..c0e16076 100644 --- a/src/db/models/sso_nonce.rs +++ b/src/db/models/sso_nonce.rs @@ -1,21 +1,21 @@ use chrono::{NaiveDateTime, Utc}; use crate::api::EmptyResult; +use crate::db::schema::sso_nonce; use crate::db::{DbConn, DbPool}; use crate::error::MapResult; use crate::sso::{OIDCState, NONCE_EXPIRATION}; +use diesel::prelude::*; -db_object! 
{ - #[derive(Identifiable, Queryable, Insertable)] - #[diesel(table_name = sso_nonce)] - #[diesel(primary_key(state))] - pub struct SsoNonce { - pub state: OIDCState, - pub nonce: String, - pub verifier: Option, - pub redirect_uri: String, - pub created_at: NaiveDateTime, - } +#[derive(Identifiable, Queryable, Insertable)] +#[diesel(table_name = sso_nonce)] +#[diesel(primary_key(state))] +pub struct SsoNonce { + pub state: OIDCState, + pub nonce: String, + pub verifier: Option, + pub redirect_uri: String, + pub created_at: NaiveDateTime, } /// Local methods @@ -35,25 +35,24 @@ impl SsoNonce { /// Database methods impl SsoNonce { - pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&self, conn: &DbConn) -> EmptyResult { db_run! { conn: sqlite, mysql { diesel::replace_into(sso_nonce::table) - .values(SsoNonceDb::to_db(self)) + .values(self) .execute(conn) .map_res("Error saving SSO nonce") } postgresql { - let value = SsoNonceDb::to_db(self); diesel::insert_into(sso_nonce::table) - .values(&value) + .values(self) .execute(conn) .map_res("Error saving SSO nonce") } } } - pub async fn delete(state: &OIDCState, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(state: &OIDCState, conn: &DbConn) -> EmptyResult { db_run! 
{ conn: { diesel::delete(sso_nonce::table.filter(sso_nonce::state.eq(state))) .execute(conn) @@ -67,9 +66,8 @@ impl SsoNonce { sso_nonce::table .filter(sso_nonce::state.eq(state)) .filter(sso_nonce::created_at.ge(oldest)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } diff --git a/src/db/models/two_factor.rs b/src/db/models/two_factor.rs index 46b097bb..f0a1e663 100644 --- a/src/db/models/two_factor.rs +++ b/src/db/models/two_factor.rs @@ -1,23 +1,23 @@ use super::UserId; use crate::api::core::two_factor::webauthn::WebauthnRegistration; +use crate::db::schema::twofactor; use crate::{api::EmptyResult, db::DbConn, error::MapResult}; +use diesel::prelude::*; use serde_json::Value; use webauthn_rs::prelude::{Credential, ParsedAttestation}; use webauthn_rs_core::proto::CredentialV3; use webauthn_rs_proto::{AttestationFormat, RegisteredExtensions}; -db_object! { - #[derive(Identifiable, Queryable, Insertable, AsChangeset)] - #[diesel(table_name = twofactor)] - #[diesel(primary_key(uuid))] - pub struct TwoFactor { - pub uuid: TwoFactorId, - pub user_uuid: UserId, - pub atype: i32, - pub enabled: bool, - pub data: String, - pub last_used: i64, - } +#[derive(Identifiable, Queryable, Insertable, AsChangeset)] +#[diesel(table_name = twofactor)] +#[diesel(primary_key(uuid))] +pub struct TwoFactor { + pub uuid: TwoFactorId, + pub user_uuid: UserId, + pub atype: i32, + pub enabled: bool, + pub data: String, + pub last_used: i64, } #[allow(dead_code)] @@ -76,11 +76,11 @@ impl TwoFactor { /// Database methods impl TwoFactor { - pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&self, conn: &DbConn) -> EmptyResult { db_run! 
{ conn: sqlite, mysql { match diesel::replace_into(twofactor::table) - .values(TwoFactorDb::to_db(self)) + .values(self) .execute(conn) { Ok(_) => Ok(()), @@ -88,7 +88,7 @@ impl TwoFactor { Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => { diesel::update(twofactor::table) .filter(twofactor::uuid.eq(&self.uuid)) - .set(TwoFactorDb::to_db(self)) + .set(self) .execute(conn) .map_res("Error saving twofactor") } @@ -96,7 +96,6 @@ impl TwoFactor { }.map_res("Error saving twofactor") } postgresql { - let value = TwoFactorDb::to_db(self); // We need to make sure we're not going to violate the unique constraint on user_uuid and atype. // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does // not support multiple constraints on ON CONFLICT clauses. @@ -105,17 +104,17 @@ impl TwoFactor { .map_res("Error deleting twofactor for insert")?; diesel::insert_into(twofactor::table) - .values(&value) + .values(self) .on_conflict(twofactor::uuid) .do_update() - .set(&value) + .set(self) .execute(conn) .map_res("Error saving twofactor") } } } - pub async fn delete(self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(twofactor::table.filter(twofactor::uuid.eq(self.uuid))) .execute(conn) @@ -123,29 +122,27 @@ impl TwoFactor { }} } - pub async fn find_by_user(user_uuid: &UserId, conn: &mut DbConn) -> Vec { + pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec { db_run! { conn: { twofactor::table .filter(twofactor::user_uuid.eq(user_uuid)) .filter(twofactor::atype.lt(1000)) // Filter implementation types - .load::(conn) + .load::(conn) .expect("Error loading twofactor") - .from_db() }} } - pub async fn find_by_user_and_type(user_uuid: &UserId, atype: i32, conn: &mut DbConn) -> Option { + pub async fn find_by_user_and_type(user_uuid: &UserId, atype: i32, conn: &DbConn) -> Option { db_run! 
{ conn: { twofactor::table .filter(twofactor::user_uuid.eq(user_uuid)) .filter(twofactor::atype.eq(atype)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid))) .execute(conn) @@ -153,13 +150,12 @@ impl TwoFactor { }} } - pub async fn migrate_u2f_to_webauthn(conn: &mut DbConn) -> EmptyResult { + pub async fn migrate_u2f_to_webauthn(conn: &DbConn) -> EmptyResult { let u2f_factors = db_run! { conn: { twofactor::table .filter(twofactor::atype.eq(TwoFactorType::U2f as i32)) - .load::(conn) + .load::(conn) .expect("Error loading twofactor") - .from_db() }}; use crate::api::core::two_factor::webauthn::U2FRegistration; @@ -231,13 +227,12 @@ impl TwoFactor { Ok(()) } - pub async fn migrate_credential_to_passkey(conn: &mut DbConn) -> EmptyResult { + pub async fn migrate_credential_to_passkey(conn: &DbConn) -> EmptyResult { let webauthn_factors = db_run! { conn: { twofactor::table .filter(twofactor::atype.eq(TwoFactorType::Webauthn as i32)) - .load::(conn) + .load::(conn) .expect("Error loading twofactor") - .from_db() }}; for webauthn_factor in webauthn_factors { diff --git a/src/db/models/two_factor_duo_context.rs b/src/db/models/two_factor_duo_context.rs index 3e742d35..205a57d8 100644 --- a/src/db/models/two_factor_duo_context.rs +++ b/src/db/models/two_factor_duo_context.rs @@ -1,33 +1,30 @@ use chrono::Utc; +use crate::db::schema::twofactor_duo_ctx; use crate::{api::EmptyResult, db::DbConn, error::MapResult}; +use diesel::prelude::*; -db_object! 
{ - #[derive(Identifiable, Queryable, Insertable, AsChangeset)] - #[diesel(table_name = twofactor_duo_ctx)] - #[diesel(primary_key(state))] - pub struct TwoFactorDuoContext { - pub state: String, - pub user_email: String, - pub nonce: String, - pub exp: i64, - } +#[derive(Identifiable, Queryable, Insertable, AsChangeset)] +#[diesel(table_name = twofactor_duo_ctx)] +#[diesel(primary_key(state))] +pub struct TwoFactorDuoContext { + pub state: String, + pub user_email: String, + pub nonce: String, + pub exp: i64, } impl TwoFactorDuoContext { - pub async fn find_by_state(state: &str, conn: &mut DbConn) -> Option { - db_run! { - conn: { - twofactor_duo_ctx::table - .filter(twofactor_duo_ctx::state.eq(state)) - .first::(conn) - .ok() - .from_db() - } - } + pub async fn find_by_state(state: &str, conn: &DbConn) -> Option { + db_run! { conn: { + twofactor_duo_ctx::table + .filter(twofactor_duo_ctx::state.eq(state)) + .first::(conn) + .ok() + }} } - pub async fn save(state: &str, user_email: &str, nonce: &str, ttl: i64, conn: &mut DbConn) -> EmptyResult { + pub async fn save(state: &str, user_email: &str, nonce: &str, ttl: i64, conn: &DbConn) -> EmptyResult { // A saved context should never be changed, only created or deleted. let exists = Self::find_by_state(state, conn).await; if exists.is_some() { @@ -36,47 +33,40 @@ impl TwoFactorDuoContext { let exp = Utc::now().timestamp() + ttl; - db_run! { - conn: { - diesel::insert_into(twofactor_duo_ctx::table) - .values(( - twofactor_duo_ctx::state.eq(state), - twofactor_duo_ctx::user_email.eq(user_email), - twofactor_duo_ctx::nonce.eq(nonce), - twofactor_duo_ctx::exp.eq(exp) - )) - .execute(conn) - .map_res("Error saving context to twofactor_duo_ctx") - } - } + db_run! 
{ conn: { + diesel::insert_into(twofactor_duo_ctx::table) + .values(( + twofactor_duo_ctx::state.eq(state), + twofactor_duo_ctx::user_email.eq(user_email), + twofactor_duo_ctx::nonce.eq(nonce), + twofactor_duo_ctx::exp.eq(exp) + )) + .execute(conn) + .map_res("Error saving context to twofactor_duo_ctx") + }} } - pub async fn find_expired(conn: &mut DbConn) -> Vec { + pub async fn find_expired(conn: &DbConn) -> Vec { let now = Utc::now().timestamp(); - db_run! { - conn: { - twofactor_duo_ctx::table - .filter(twofactor_duo_ctx::exp.lt(now)) - .load::(conn) - .expect("Error finding expired contexts in twofactor_duo_ctx") - .from_db() - } - } + db_run! { conn: { + twofactor_duo_ctx::table + .filter(twofactor_duo_ctx::exp.lt(now)) + .load::(conn) + .expect("Error finding expired contexts in twofactor_duo_ctx") + }} } - pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult { - db_run! { - conn: { - diesel::delete( - twofactor_duo_ctx::table - .filter(twofactor_duo_ctx::state.eq(&self.state))) - .execute(conn) - .map_res("Error deleting from twofactor_duo_ctx") - } - } + pub async fn delete(&self, conn: &DbConn) -> EmptyResult { + db_run! 
{ conn: { + diesel::delete( + twofactor_duo_ctx::table + .filter(twofactor_duo_ctx::state.eq(&self.state))) + .execute(conn) + .map_res("Error deleting from twofactor_duo_ctx") + }} } - pub async fn purge_expired_duo_contexts(conn: &mut DbConn) { + pub async fn purge_expired_duo_contexts(conn: &DbConn) { for context in Self::find_expired(conn).await { context.delete(conn).await.ok(); } diff --git a/src/db/models/two_factor_incomplete.rs b/src/db/models/two_factor_incomplete.rs index b8dc4ad7..2f7e4779 100644 --- a/src/db/models/two_factor_incomplete.rs +++ b/src/db/models/two_factor_incomplete.rs @@ -1,5 +1,6 @@ use chrono::{NaiveDateTime, Utc}; +use crate::db::schema::twofactor_incomplete; use crate::{ api::EmptyResult, auth::ClientIp, @@ -10,22 +11,21 @@ use crate::{ error::MapResult, CONFIG, }; +use diesel::prelude::*; -db_object! { - #[derive(Identifiable, Queryable, Insertable, AsChangeset)] - #[diesel(table_name = twofactor_incomplete)] - #[diesel(primary_key(user_uuid, device_uuid))] - pub struct TwoFactorIncomplete { - pub user_uuid: UserId, - // This device UUID is simply what's claimed by the device. It doesn't - // necessarily correspond to any UUID in the devices table, since a device - // must complete 2FA login before being added into the devices table. - pub device_uuid: DeviceId, - pub device_name: String, - pub device_type: i32, - pub login_time: NaiveDateTime, - pub ip_address: String, - } +#[derive(Identifiable, Queryable, Insertable, AsChangeset)] +#[diesel(table_name = twofactor_incomplete)] +#[diesel(primary_key(user_uuid, device_uuid))] +pub struct TwoFactorIncomplete { + pub user_uuid: UserId, + // This device UUID is simply what's claimed by the device. It doesn't + // necessarily correspond to any UUID in the devices table, since a device + // must complete 2FA login before being added into the devices table. 
+ pub device_uuid: DeviceId, + pub device_name: String, + pub device_type: i32, + pub login_time: NaiveDateTime, + pub ip_address: String, } impl TwoFactorIncomplete { @@ -35,7 +35,7 @@ impl TwoFactorIncomplete { device_name: &str, device_type: i32, ip: &ClientIp, - conn: &mut DbConn, + conn: &DbConn, ) -> EmptyResult { if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() { return Ok(()); @@ -64,7 +64,7 @@ impl TwoFactorIncomplete { }} } - pub async fn mark_complete(user_uuid: &UserId, device_uuid: &DeviceId, conn: &mut DbConn) -> EmptyResult { + pub async fn mark_complete(user_uuid: &UserId, device_uuid: &DeviceId, conn: &DbConn) -> EmptyResult { if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() { return Ok(()); } @@ -72,40 +72,30 @@ impl TwoFactorIncomplete { Self::delete_by_user_and_device(user_uuid, device_uuid, conn).await } - pub async fn find_by_user_and_device( - user_uuid: &UserId, - device_uuid: &DeviceId, - conn: &mut DbConn, - ) -> Option { + pub async fn find_by_user_and_device(user_uuid: &UserId, device_uuid: &DeviceId, conn: &DbConn) -> Option { db_run! { conn: { twofactor_incomplete::table .filter(twofactor_incomplete::user_uuid.eq(user_uuid)) .filter(twofactor_incomplete::device_uuid.eq(device_uuid)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn find_logins_before(dt: &NaiveDateTime, conn: &mut DbConn) -> Vec { - db_run! {conn: { + pub async fn find_logins_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec { + db_run! 
{ conn: { twofactor_incomplete::table .filter(twofactor_incomplete::login_time.lt(dt)) - .load::(conn) + .load::(conn) .expect("Error loading twofactor_incomplete") - .from_db() }} } - pub async fn delete(self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { Self::delete_by_user_and_device(&self.user_uuid, &self.device_uuid, conn).await } - pub async fn delete_by_user_and_device( - user_uuid: &UserId, - device_uuid: &DeviceId, - conn: &mut DbConn, - ) -> EmptyResult { + pub async fn delete_by_user_and_device(user_uuid: &UserId, device_uuid: &DeviceId, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(twofactor_incomplete::table .filter(twofactor_incomplete::user_uuid.eq(user_uuid)) @@ -115,7 +105,7 @@ impl TwoFactorIncomplete { }} } - pub async fn delete_all_by_user(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { + pub async fn delete_all_by_user(user_uuid: &UserId, conn: &DbConn) -> EmptyResult { db_run! { conn: { diesel::delete(twofactor_incomplete::table.filter(twofactor_incomplete::user_uuid.eq(user_uuid))) .execute(conn) diff --git a/src/db/models/user.rs b/src/db/models/user.rs index 3a3b5157..b8b94682 100644 --- a/src/db/models/user.rs +++ b/src/db/models/user.rs @@ -1,5 +1,7 @@ +use crate::db::schema::{devices, invitations, sso_users, users}; use chrono::{NaiveDateTime, TimeDelta, Utc}; use derive_more::{AsRef, Deref, Display, From}; +use diesel::prelude::*; use serde_json::Value; use super::{ @@ -17,70 +19,68 @@ use crate::{ }; use macros::UuidFromParam; -db_object! 
{ - #[derive(Identifiable, Queryable, Insertable, AsChangeset, Selectable)] - #[diesel(table_name = users)] - #[diesel(treat_none_as_null = true)] - #[diesel(primary_key(uuid))] - pub struct User { - pub uuid: UserId, - pub enabled: bool, - pub created_at: NaiveDateTime, - pub updated_at: NaiveDateTime, - pub verified_at: Option, - pub last_verifying_at: Option, - pub login_verify_count: i32, - - pub email: String, - pub email_new: Option, - pub email_new_token: Option, - pub name: String, - - pub password_hash: Vec, - pub salt: Vec, - pub password_iterations: i32, - pub password_hint: Option, - - pub akey: String, - pub private_key: Option, - pub public_key: Option, - - #[diesel(column_name = "totp_secret")] // Note, this is only added to the UserDb structs, not to User - _totp_secret: Option, - pub totp_recover: Option, - - pub security_stamp: String, - pub stamp_exception: Option, - - pub equivalent_domains: String, - pub excluded_globals: String, - - pub client_kdf_type: i32, - pub client_kdf_iter: i32, - pub client_kdf_memory: Option, - pub client_kdf_parallelism: Option, - - pub api_key: Option, - - pub avatar_color: Option, - - pub external_id: Option, // Todo: Needs to be removed in the future, this is not used anymore. 
- } +#[derive(Identifiable, Queryable, Insertable, AsChangeset, Selectable)] +#[diesel(table_name = users)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(uuid))] +pub struct User { + pub uuid: UserId, + pub enabled: bool, + pub created_at: NaiveDateTime, + pub updated_at: NaiveDateTime, + pub verified_at: Option, + pub last_verifying_at: Option, + pub login_verify_count: i32, + + pub email: String, + pub email_new: Option, + pub email_new_token: Option, + pub name: String, + + pub password_hash: Vec, + pub salt: Vec, + pub password_iterations: i32, + pub password_hint: Option, + + pub akey: String, + pub private_key: Option, + pub public_key: Option, + + #[diesel(column_name = "totp_secret")] // Note, this is only added to the UserDb structs, not to User + _totp_secret: Option, + pub totp_recover: Option, - #[derive(Identifiable, Queryable, Insertable)] - #[diesel(table_name = invitations)] - #[diesel(primary_key(email))] - pub struct Invitation { - pub email: String, - } + pub security_stamp: String, + pub stamp_exception: Option, - #[derive(Identifiable, Queryable, Insertable, Selectable)] - #[diesel(table_name = sso_users)] - #[diesel(primary_key(user_uuid))] - pub struct SsoUser { - pub user_uuid: UserId, - pub identifier: OIDCIdentifier, - } + pub equivalent_domains: String, + pub excluded_globals: String, + + pub client_kdf_type: i32, + pub client_kdf_iter: i32, + pub client_kdf_memory: Option, + pub client_kdf_parallelism: Option, + + pub api_key: Option, + + pub avatar_color: Option, + + pub external_id: Option, // Todo: Needs to be removed in the future, this is not used anymore. 
+} + +#[derive(Identifiable, Queryable, Insertable)] +#[diesel(table_name = invitations)] +#[diesel(primary_key(email))] +pub struct Invitation { + pub email: String, +} + +#[derive(Identifiable, Queryable, Insertable, Selectable)] +#[diesel(table_name = sso_users)] +#[diesel(primary_key(user_uuid))] +pub struct SsoUser { + pub user_uuid: UserId, + pub identifier: OIDCIdentifier, } pub enum UserKdfType { @@ -236,7 +236,7 @@ impl User { /// Database methods impl User { - pub async fn to_json(&self, conn: &mut DbConn) -> Value { + pub async fn to_json(&self, conn: &DbConn) -> Value { let mut orgs_json = Vec::new(); for c in Membership::find_confirmed_by_user(&self.uuid, conn).await { orgs_json.push(c.to_json(conn).await); @@ -275,17 +275,17 @@ impl User { }) } - pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&mut self, conn: &DbConn) -> EmptyResult { if !crate::util::is_valid_email(&self.email) { err!(format!("User email {} is not a valid email address", self.email)) } self.updated_at = Utc::now().naive_utc(); - db_run! {conn: + db_run! 
{ conn: sqlite, mysql { match diesel::replace_into(users::table) - .values(UserDb::to_db(self)) + .values(&*self) .execute(conn) { Ok(_) => Ok(()), @@ -293,7 +293,7 @@ impl User { Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => { diesel::update(users::table) .filter(users::uuid.eq(&self.uuid)) - .set(UserDb::to_db(self)) + .set(&*self) .execute(conn) .map_res("Error saving user") } @@ -301,19 +301,18 @@ impl User { }.map_res("Error saving user") } postgresql { - let value = UserDb::to_db(self); diesel::insert_into(users::table) // Insert or update - .values(&value) + .values(&*self) .on_conflict(users::uuid) .do_update() - .set(&value) + .set(&*self) .execute(conn) .map_res("Error saving user") } } } - pub async fn delete(self, conn: &mut DbConn) -> EmptyResult { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { for member in Membership::find_confirmed_by_user(&self.uuid, conn).await { if member.atype == MembershipType::Owner && Membership::count_confirmed_by_org_and_type(&member.org_uuid, MembershipType::Owner, conn).await <= 1 @@ -334,23 +333,23 @@ impl User { TwoFactorIncomplete::delete_all_by_user(&self.uuid, conn).await?; Invitation::take(&self.email, conn).await; // Delete invitation if any - db_run! {conn: { + db_run! { conn: { diesel::delete(users::table.filter(users::uuid.eq(self.uuid))) .execute(conn) .map_res("Error deleting user") }} } - pub async fn update_uuid_revision(uuid: &UserId, conn: &mut DbConn) { + pub async fn update_uuid_revision(uuid: &UserId, conn: &DbConn) { if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await { warn!("Failed to update revision for {uuid}: {e:#?}"); } } - pub async fn update_all_revisions(conn: &mut DbConn) -> EmptyResult { + pub async fn update_all_revisions(conn: &DbConn) -> EmptyResult { let updated_at = Utc::now().naive_utc(); - db_run! {conn: { + db_run! 
{ conn: { retry(|| { diesel::update(users::table) .set(users::updated_at.eq(updated_at)) @@ -360,14 +359,14 @@ impl User { }} } - pub async fn update_revision(&mut self, conn: &mut DbConn) -> EmptyResult { + pub async fn update_revision(&mut self, conn: &DbConn) -> EmptyResult { self.updated_at = Utc::now().naive_utc(); Self::_update_revision(&self.uuid, &self.updated_at, conn).await } - async fn _update_revision(uuid: &UserId, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult { - db_run! {conn: { + async fn _update_revision(uuid: &UserId, date: &NaiveDateTime, conn: &DbConn) -> EmptyResult { + db_run! { conn: { retry(|| { diesel::update(users::table.filter(users::uuid.eq(uuid))) .set(users::updated_at.eq(date)) @@ -377,49 +376,49 @@ impl User { }} } - pub async fn find_by_mail(mail: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_mail(mail: &str, conn: &DbConn) -> Option { let lower_mail = mail.to_lowercase(); - db_run! {conn: { + db_run! { conn: { users::table .filter(users::email.eq(lower_mail)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn find_by_uuid(uuid: &UserId, conn: &mut DbConn) -> Option { - db_run! {conn: { - users::table.filter(users::uuid.eq(uuid)).first::(conn).ok().from_db() + pub async fn find_by_uuid(uuid: &UserId, conn: &DbConn) -> Option { + db_run! { conn: { + users::table + .filter(users::uuid.eq(uuid)) + .first::(conn) + .ok() }} } - pub async fn find_by_device_id(device_uuid: &DeviceId, conn: &mut DbConn) -> Option { + pub async fn find_by_device_id(device_uuid: &DeviceId, conn: &DbConn) -> Option { db_run! { conn: { users::table .inner_join(devices::table.on(devices::user_uuid.eq(users::uuid))) .filter(devices::uuid.eq(device_uuid)) .select(users::all_columns) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn get_all(conn: &mut DbConn) -> Vec<(User, Option)> { - db_run! {conn: { + pub async fn get_all(conn: &DbConn) -> Vec<(Self, Option)> { + db_run! 
{ conn: { users::table .left_join(sso_users::table) - .select(<(UserDb, Option)>::as_select()) + .select(<(Self, Option)>::as_select()) .load(conn) .expect("Error loading groups for user") .into_iter() - .map(|(user, sso_user)| { (user.from_db(), sso_user.from_db()) }) .collect() }} } - pub async fn last_active(&self, conn: &mut DbConn) -> Option { + pub async fn last_active(&self, conn: &DbConn) -> Option { match Device::find_latest_active_by_user(&self.uuid, conn).await { Some(device) => Some(device.updated_at), None => None, @@ -435,23 +434,23 @@ impl Invitation { } } - pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&self, conn: &DbConn) -> EmptyResult { if !crate::util::is_valid_email(&self.email) { err!(format!("Invitation email {} is not a valid email address", self.email)) } - db_run! {conn: + db_run! { conn: sqlite, mysql { // Not checking for ForeignKey Constraints here // Table invitations does not have any ForeignKey Constraints. diesel::replace_into(invitations::table) - .values(InvitationDb::to_db(self)) + .values(self) .execute(conn) .map_res("Error saving invitation") } postgresql { diesel::insert_into(invitations::table) - .values(InvitationDb::to_db(self)) + .values(self) .on_conflict(invitations::email) .do_nothing() .execute(conn) @@ -460,26 +459,25 @@ impl Invitation { } } - pub async fn delete(self, conn: &mut DbConn) -> EmptyResult { - db_run! {conn: { + pub async fn delete(self, conn: &DbConn) -> EmptyResult { + db_run! { conn: { diesel::delete(invitations::table.filter(invitations::email.eq(self.email))) .execute(conn) .map_res("Error deleting invitation") }} } - pub async fn find_by_mail(mail: &str, conn: &mut DbConn) -> Option { + pub async fn find_by_mail(mail: &str, conn: &DbConn) -> Option { let lower_mail = mail.to_lowercase(); - db_run! {conn: { + db_run! 
{ conn: { invitations::table .filter(invitations::email.eq(lower_mail)) - .first::(conn) + .first::(conn) .ok() - .from_db() }} } - pub async fn take(mail: &str, conn: &mut DbConn) -> bool { + pub async fn take(mail: &str, conn: &DbConn) -> bool { match Self::find_by_mail(mail, conn).await { Some(invitation) => invitation.delete(conn).await.is_ok(), None => false, @@ -508,52 +506,49 @@ impl Invitation { pub struct UserId(String); impl SsoUser { - pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { + pub async fn save(&self, conn: &DbConn) -> EmptyResult { db_run! { conn: sqlite, mysql { diesel::replace_into(sso_users::table) - .values(SsoUserDb::to_db(self)) + .values(self) .execute(conn) .map_res("Error saving SSO user") } postgresql { - let value = SsoUserDb::to_db(self); diesel::insert_into(sso_users::table) - .values(&value) + .values(self) .execute(conn) .map_res("Error saving SSO user") } } } - pub async fn find_by_identifier(identifier: &str, conn: &DbConn) -> Option<(User, SsoUser)> { - db_run! {conn: { + pub async fn find_by_identifier(identifier: &str, conn: &DbConn) -> Option<(User, Self)> { + db_run! { conn: { users::table .inner_join(sso_users::table) - .select(<(UserDb, SsoUserDb)>::as_select()) + .select(<(User, Self)>::as_select()) .filter(sso_users::identifier.eq(identifier)) - .first::<(UserDb, SsoUserDb)>(conn) + .first::<(User, Self)>(conn) .ok() - .map(|(user, sso_user)| { (user.from_db(), sso_user.from_db()) }) }} } - pub async fn find_by_mail(mail: &str, conn: &DbConn) -> Option<(User, Option)> { + pub async fn find_by_mail(mail: &str, conn: &DbConn) -> Option<(User, Option)> { let lower_mail = mail.to_lowercase(); - db_run! {conn: { + db_run! 
{ conn: { users::table .left_join(sso_users::table) - .select(<(UserDb, Option)>::as_select()) + .select(<(User, Option)>::as_select()) .filter(users::email.eq(lower_mail)) - .first::<(UserDb, Option)>(conn) + .first::<(User, Option)>(conn) .ok() - .map(|(user, sso_user)| { (user.from_db(), sso_user.from_db()) }) }} } - pub async fn delete(user_uuid: &UserId, conn: &mut DbConn) -> EmptyResult { - db_run! {conn: { + pub async fn delete(user_uuid: &UserId, conn: &DbConn) -> EmptyResult { + db_run! { conn: { diesel::delete(sso_users::table.filter(sso_users::user_uuid.eq(user_uuid))) .execute(conn) .map_res("Error deleting sso user") diff --git a/src/db/query_logger.rs b/src/db/query_logger.rs new file mode 100644 index 00000000..693054d8 --- /dev/null +++ b/src/db/query_logger.rs @@ -0,0 +1,57 @@ +use dashmap::DashMap; +use diesel::connection::{Instrumentation, InstrumentationEvent}; +use std::{ + sync::{Arc, LazyLock}, + thread, + time::Instant, +}; + +pub static QUERY_PERF_TRACKER: LazyLock>> = + LazyLock::new(|| Arc::new(DashMap::new())); + +pub fn simple_logger() -> Option> { + Some(Box::new(|event: InstrumentationEvent<'_>| match event { + InstrumentationEvent::StartEstablishConnection { + url, + .. + } => { + debug!("Establishing connection: {url}") + } + InstrumentationEvent::FinishEstablishConnection { + url, + error, + .. + } => { + if let Some(e) = error { + error!("Error during establishing a connection with {url}: {e:?}") + } else { + debug!("Connection established: {url}") + } + } + InstrumentationEvent::StartQuery { + query, + .. + } => { + let query_string = format!("{query:?}"); + let start = Instant::now(); + QUERY_PERF_TRACKER.insert((thread::current().id(), query_string), start); + } + InstrumentationEvent::FinishQuery { + query, + .. 
+ } => { + let query_string = format!("{query:?}"); + if let Some((_, start)) = QUERY_PERF_TRACKER.remove(&(thread::current().id(), query_string.clone())) { + let duration = start.elapsed(); + if duration.as_secs() >= 5 { + warn!("SLOW QUERY [{:.2}s]: {}", duration.as_secs_f32(), query_string); + } else if duration.as_secs() >= 1 { + info!("SLOW QUERY [{:.2}s]: {}", duration.as_secs_f32(), query_string); + } else { + debug!("QUERY [{:?}]: {}", duration, query_string); + } + } + } + _ => {} + })) +} diff --git a/src/db/schemas/postgresql/schema.rs b/src/db/schema.rs similarity index 100% rename from src/db/schemas/postgresql/schema.rs rename to src/db/schema.rs diff --git a/src/db/schemas/mysql/schema.rs b/src/db/schemas/mysql/schema.rs deleted file mode 100644 index 001e43b4..00000000 --- a/src/db/schemas/mysql/schema.rs +++ /dev/null @@ -1,395 +0,0 @@ -table! { - attachments (id) { - id -> Text, - cipher_uuid -> Text, - file_name -> Text, - file_size -> BigInt, - akey -> Nullable, - } -} - -table! { - ciphers (uuid) { - uuid -> Text, - created_at -> Datetime, - updated_at -> Datetime, - user_uuid -> Nullable, - organization_uuid -> Nullable, - key -> Nullable, - atype -> Integer, - name -> Text, - notes -> Nullable, - fields -> Nullable, - data -> Text, - password_history -> Nullable, - deleted_at -> Nullable, - reprompt -> Nullable, - } -} - -table! { - ciphers_collections (cipher_uuid, collection_uuid) { - cipher_uuid -> Text, - collection_uuid -> Text, - } -} - -table! { - collections (uuid) { - uuid -> Text, - org_uuid -> Text, - name -> Text, - external_id -> Nullable, - } -} - -table! { - devices (uuid, user_uuid) { - uuid -> Text, - created_at -> Datetime, - updated_at -> Datetime, - user_uuid -> Text, - name -> Text, - atype -> Integer, - push_uuid -> Nullable, - push_token -> Nullable, - refresh_token -> Text, - twofactor_remember -> Nullable, - } -} - -table! 
{ - event (uuid) { - uuid -> Varchar, - event_type -> Integer, - user_uuid -> Nullable, - org_uuid -> Nullable, - cipher_uuid -> Nullable, - collection_uuid -> Nullable, - group_uuid -> Nullable, - org_user_uuid -> Nullable, - act_user_uuid -> Nullable, - device_type -> Nullable, - ip_address -> Nullable, - event_date -> Timestamp, - policy_uuid -> Nullable, - provider_uuid -> Nullable, - provider_user_uuid -> Nullable, - provider_org_uuid -> Nullable, - } -} - -table! { - favorites (user_uuid, cipher_uuid) { - user_uuid -> Text, - cipher_uuid -> Text, - } -} - -table! { - folders (uuid) { - uuid -> Text, - created_at -> Datetime, - updated_at -> Datetime, - user_uuid -> Text, - name -> Text, - } -} - -table! { - folders_ciphers (cipher_uuid, folder_uuid) { - cipher_uuid -> Text, - folder_uuid -> Text, - } -} - -table! { - invitations (email) { - email -> Text, - } -} - -table! { - org_policies (uuid) { - uuid -> Text, - org_uuid -> Text, - atype -> Integer, - enabled -> Bool, - data -> Text, - } -} - -table! { - organizations (uuid) { - uuid -> Text, - name -> Text, - billing_email -> Text, - private_key -> Nullable, - public_key -> Nullable, - } -} - -table! { - sends (uuid) { - uuid -> Text, - user_uuid -> Nullable, - organization_uuid -> Nullable, - name -> Text, - notes -> Nullable, - atype -> Integer, - data -> Text, - akey -> Text, - password_hash -> Nullable, - password_salt -> Nullable, - password_iter -> Nullable, - max_access_count -> Nullable, - access_count -> Integer, - creation_date -> Datetime, - revision_date -> Datetime, - expiration_date -> Nullable, - deletion_date -> Datetime, - disabled -> Bool, - hide_email -> Nullable, - } -} - -table! { - twofactor (uuid) { - uuid -> Text, - user_uuid -> Text, - atype -> Integer, - enabled -> Bool, - data -> Text, - last_used -> BigInt, - } -} - -table! 
{ - twofactor_incomplete (user_uuid, device_uuid) { - user_uuid -> Text, - device_uuid -> Text, - device_name -> Text, - device_type -> Integer, - login_time -> Timestamp, - ip_address -> Text, - } -} - -table! { - twofactor_duo_ctx (state) { - state -> Text, - user_email -> Text, - nonce -> Text, - exp -> BigInt, - } -} - -table! { - users (uuid) { - uuid -> Text, - enabled -> Bool, - created_at -> Datetime, - updated_at -> Datetime, - verified_at -> Nullable, - last_verifying_at -> Nullable, - login_verify_count -> Integer, - email -> Text, - email_new -> Nullable, - email_new_token -> Nullable, - name -> Text, - password_hash -> Binary, - salt -> Binary, - password_iterations -> Integer, - password_hint -> Nullable, - akey -> Text, - private_key -> Nullable, - public_key -> Nullable, - totp_secret -> Nullable, - totp_recover -> Nullable, - security_stamp -> Text, - stamp_exception -> Nullable, - equivalent_domains -> Text, - excluded_globals -> Text, - client_kdf_type -> Integer, - client_kdf_iter -> Integer, - client_kdf_memory -> Nullable, - client_kdf_parallelism -> Nullable, - api_key -> Nullable, - avatar_color -> Nullable, - external_id -> Nullable, - } -} - -table! { - users_collections (user_uuid, collection_uuid) { - user_uuid -> Text, - collection_uuid -> Text, - read_only -> Bool, - hide_passwords -> Bool, - manage -> Bool, - } -} - -table! { - users_organizations (uuid) { - uuid -> Text, - user_uuid -> Text, - org_uuid -> Text, - invited_by_email -> Nullable, - access_all -> Bool, - akey -> Text, - status -> Integer, - atype -> Integer, - reset_password_key -> Nullable, - external_id -> Nullable, - } -} - -table! { - organization_api_key (uuid, org_uuid) { - uuid -> Text, - org_uuid -> Text, - atype -> Integer, - api_key -> Text, - revision_date -> Timestamp, - } -} - -table! { - sso_nonce (state) { - state -> Text, - nonce -> Text, - verifier -> Nullable, - redirect_uri -> Text, - created_at -> Timestamp, - } -} - -table! 
{ - sso_users (user_uuid) { - user_uuid -> Text, - identifier -> Text, - } -} - -table! { - emergency_access (uuid) { - uuid -> Text, - grantor_uuid -> Text, - grantee_uuid -> Nullable, - email -> Nullable, - key_encrypted -> Nullable, - atype -> Integer, - status -> Integer, - wait_time_days -> Integer, - recovery_initiated_at -> Nullable, - last_notification_at -> Nullable, - updated_at -> Timestamp, - created_at -> Timestamp, - } -} - -table! { - groups (uuid) { - uuid -> Text, - organizations_uuid -> Text, - name -> Text, - access_all -> Bool, - external_id -> Nullable, - creation_date -> Timestamp, - revision_date -> Timestamp, - } -} - -table! { - groups_users (groups_uuid, users_organizations_uuid) { - groups_uuid -> Text, - users_organizations_uuid -> Text, - } -} - -table! { - collections_groups (collections_uuid, groups_uuid) { - collections_uuid -> Text, - groups_uuid -> Text, - read_only -> Bool, - hide_passwords -> Bool, - manage -> Bool, - } -} - -table! { - auth_requests (uuid) { - uuid -> Text, - user_uuid -> Text, - organization_uuid -> Nullable, - request_device_identifier -> Text, - device_type -> Integer, - request_ip -> Text, - response_device_id -> Nullable, - access_code -> Text, - public_key -> Text, - enc_key -> Nullable, - master_password_hash -> Nullable, - approved -> Nullable, - creation_date -> Timestamp, - response_date -> Nullable, - authentication_date -> Nullable, - } -} - -joinable!(attachments -> ciphers (cipher_uuid)); -joinable!(ciphers -> organizations (organization_uuid)); -joinable!(ciphers -> users (user_uuid)); -joinable!(ciphers_collections -> ciphers (cipher_uuid)); -joinable!(ciphers_collections -> collections (collection_uuid)); -joinable!(collections -> organizations (org_uuid)); -joinable!(devices -> users (user_uuid)); -joinable!(folders -> users (user_uuid)); -joinable!(folders_ciphers -> ciphers (cipher_uuid)); -joinable!(folders_ciphers -> folders (folder_uuid)); -joinable!(org_policies -> organizations 
(org_uuid)); -joinable!(sends -> organizations (organization_uuid)); -joinable!(sends -> users (user_uuid)); -joinable!(twofactor -> users (user_uuid)); -joinable!(users_collections -> collections (collection_uuid)); -joinable!(users_collections -> users (user_uuid)); -joinable!(users_organizations -> organizations (org_uuid)); -joinable!(users_organizations -> users (user_uuid)); -joinable!(users_organizations -> ciphers (org_uuid)); -joinable!(organization_api_key -> organizations (org_uuid)); -joinable!(emergency_access -> users (grantor_uuid)); -joinable!(groups -> organizations (organizations_uuid)); -joinable!(groups_users -> users_organizations (users_organizations_uuid)); -joinable!(groups_users -> groups (groups_uuid)); -joinable!(collections_groups -> collections (collections_uuid)); -joinable!(collections_groups -> groups (groups_uuid)); -joinable!(event -> users_organizations (uuid)); -joinable!(auth_requests -> users (user_uuid)); -joinable!(sso_users -> users (user_uuid)); - -allow_tables_to_appear_in_same_query!( - attachments, - ciphers, - ciphers_collections, - collections, - devices, - folders, - folders_ciphers, - invitations, - org_policies, - organizations, - sends, - sso_users, - twofactor, - users, - users_collections, - users_organizations, - organization_api_key, - emergency_access, - groups, - groups_users, - collections_groups, - event, - auth_requests, -); diff --git a/src/db/schemas/sqlite/schema.rs b/src/db/schemas/sqlite/schema.rs deleted file mode 100644 index a0f31f1e..00000000 --- a/src/db/schemas/sqlite/schema.rs +++ /dev/null @@ -1,395 +0,0 @@ -table! { - attachments (id) { - id -> Text, - cipher_uuid -> Text, - file_name -> Text, - file_size -> BigInt, - akey -> Nullable, - } -} - -table! 
{ - ciphers (uuid) { - uuid -> Text, - created_at -> Timestamp, - updated_at -> Timestamp, - user_uuid -> Nullable, - organization_uuid -> Nullable, - key -> Nullable, - atype -> Integer, - name -> Text, - notes -> Nullable, - fields -> Nullable, - data -> Text, - password_history -> Nullable, - deleted_at -> Nullable, - reprompt -> Nullable, - } -} - -table! { - ciphers_collections (cipher_uuid, collection_uuid) { - cipher_uuid -> Text, - collection_uuid -> Text, - } -} - -table! { - collections (uuid) { - uuid -> Text, - org_uuid -> Text, - name -> Text, - external_id -> Nullable, - } -} - -table! { - devices (uuid, user_uuid) { - uuid -> Text, - created_at -> Timestamp, - updated_at -> Timestamp, - user_uuid -> Text, - name -> Text, - atype -> Integer, - push_uuid -> Nullable, - push_token -> Nullable, - refresh_token -> Text, - twofactor_remember -> Nullable, - } -} - -table! { - event (uuid) { - uuid -> Text, - event_type -> Integer, - user_uuid -> Nullable, - org_uuid -> Nullable, - cipher_uuid -> Nullable, - collection_uuid -> Nullable, - group_uuid -> Nullable, - org_user_uuid -> Nullable, - act_user_uuid -> Nullable, - device_type -> Nullable, - ip_address -> Nullable, - event_date -> Timestamp, - policy_uuid -> Nullable, - provider_uuid -> Nullable, - provider_user_uuid -> Nullable, - provider_org_uuid -> Nullable, - } -} - -table! { - favorites (user_uuid, cipher_uuid) { - user_uuid -> Text, - cipher_uuid -> Text, - } -} - -table! { - folders (uuid) { - uuid -> Text, - created_at -> Timestamp, - updated_at -> Timestamp, - user_uuid -> Text, - name -> Text, - } -} - -table! { - folders_ciphers (cipher_uuid, folder_uuid) { - cipher_uuid -> Text, - folder_uuid -> Text, - } -} - -table! { - invitations (email) { - email -> Text, - } -} - -table! { - org_policies (uuid) { - uuid -> Text, - org_uuid -> Text, - atype -> Integer, - enabled -> Bool, - data -> Text, - } -} - -table! 
{ - organizations (uuid) { - uuid -> Text, - name -> Text, - billing_email -> Text, - private_key -> Nullable, - public_key -> Nullable, - } -} - -table! { - sends (uuid) { - uuid -> Text, - user_uuid -> Nullable, - organization_uuid -> Nullable, - name -> Text, - notes -> Nullable, - atype -> Integer, - data -> Text, - akey -> Text, - password_hash -> Nullable, - password_salt -> Nullable, - password_iter -> Nullable, - max_access_count -> Nullable, - access_count -> Integer, - creation_date -> Timestamp, - revision_date -> Timestamp, - expiration_date -> Nullable, - deletion_date -> Timestamp, - disabled -> Bool, - hide_email -> Nullable, - } -} - -table! { - twofactor (uuid) { - uuid -> Text, - user_uuid -> Text, - atype -> Integer, - enabled -> Bool, - data -> Text, - last_used -> BigInt, - } -} - -table! { - twofactor_incomplete (user_uuid, device_uuid) { - user_uuid -> Text, - device_uuid -> Text, - device_name -> Text, - device_type -> Integer, - login_time -> Timestamp, - ip_address -> Text, - } -} - -table! { - twofactor_duo_ctx (state) { - state -> Text, - user_email -> Text, - nonce -> Text, - exp -> BigInt, - } -} - -table! 
{ - users (uuid) { - uuid -> Text, - enabled -> Bool, - created_at -> Timestamp, - updated_at -> Timestamp, - verified_at -> Nullable, - last_verifying_at -> Nullable, - login_verify_count -> Integer, - email -> Text, - email_new -> Nullable, - email_new_token -> Nullable, - name -> Text, - password_hash -> Binary, - salt -> Binary, - password_iterations -> Integer, - password_hint -> Nullable, - akey -> Text, - private_key -> Nullable, - public_key -> Nullable, - totp_secret -> Nullable, - totp_recover -> Nullable, - security_stamp -> Text, - stamp_exception -> Nullable, - equivalent_domains -> Text, - excluded_globals -> Text, - client_kdf_type -> Integer, - client_kdf_iter -> Integer, - client_kdf_memory -> Nullable, - client_kdf_parallelism -> Nullable, - api_key -> Nullable, - avatar_color -> Nullable, - external_id -> Nullable, - } -} - -table! { - users_collections (user_uuid, collection_uuid) { - user_uuid -> Text, - collection_uuid -> Text, - read_only -> Bool, - hide_passwords -> Bool, - manage -> Bool, - } -} - -table! { - users_organizations (uuid) { - uuid -> Text, - user_uuid -> Text, - org_uuid -> Text, - invited_by_email -> Nullable, - access_all -> Bool, - akey -> Text, - status -> Integer, - atype -> Integer, - reset_password_key -> Nullable, - external_id -> Nullable, - } -} - -table! { - organization_api_key (uuid, org_uuid) { - uuid -> Text, - org_uuid -> Text, - atype -> Integer, - api_key -> Text, - revision_date -> Timestamp, - } -} - -table! { - sso_nonce (state) { - state -> Text, - nonce -> Text, - verifier -> Nullable, - redirect_uri -> Text, - created_at -> Timestamp, - } -} - -table! { - sso_users (user_uuid) { - user_uuid -> Text, - identifier -> Text, - } -} - -table! 
{ - emergency_access (uuid) { - uuid -> Text, - grantor_uuid -> Text, - grantee_uuid -> Nullable, - email -> Nullable, - key_encrypted -> Nullable, - atype -> Integer, - status -> Integer, - wait_time_days -> Integer, - recovery_initiated_at -> Nullable, - last_notification_at -> Nullable, - updated_at -> Timestamp, - created_at -> Timestamp, - } -} - -table! { - groups (uuid) { - uuid -> Text, - organizations_uuid -> Text, - name -> Text, - access_all -> Bool, - external_id -> Nullable, - creation_date -> Timestamp, - revision_date -> Timestamp, - } -} - -table! { - groups_users (groups_uuid, users_organizations_uuid) { - groups_uuid -> Text, - users_organizations_uuid -> Text, - } -} - -table! { - collections_groups (collections_uuid, groups_uuid) { - collections_uuid -> Text, - groups_uuid -> Text, - read_only -> Bool, - hide_passwords -> Bool, - manage -> Bool, - } -} - -table! { - auth_requests (uuid) { - uuid -> Text, - user_uuid -> Text, - organization_uuid -> Nullable, - request_device_identifier -> Text, - device_type -> Integer, - request_ip -> Text, - response_device_id -> Nullable, - access_code -> Text, - public_key -> Text, - enc_key -> Nullable, - master_password_hash -> Nullable, - approved -> Nullable, - creation_date -> Timestamp, - response_date -> Nullable, - authentication_date -> Nullable, - } -} - -joinable!(attachments -> ciphers (cipher_uuid)); -joinable!(ciphers -> organizations (organization_uuid)); -joinable!(ciphers -> users (user_uuid)); -joinable!(ciphers_collections -> ciphers (cipher_uuid)); -joinable!(ciphers_collections -> collections (collection_uuid)); -joinable!(collections -> organizations (org_uuid)); -joinable!(devices -> users (user_uuid)); -joinable!(folders -> users (user_uuid)); -joinable!(folders_ciphers -> ciphers (cipher_uuid)); -joinable!(folders_ciphers -> folders (folder_uuid)); -joinable!(org_policies -> organizations (org_uuid)); -joinable!(sends -> organizations (organization_uuid)); -joinable!(sends -> users 
(user_uuid)); -joinable!(twofactor -> users (user_uuid)); -joinable!(users_collections -> collections (collection_uuid)); -joinable!(users_collections -> users (user_uuid)); -joinable!(users_organizations -> organizations (org_uuid)); -joinable!(users_organizations -> users (user_uuid)); -joinable!(users_organizations -> ciphers (org_uuid)); -joinable!(organization_api_key -> organizations (org_uuid)); -joinable!(emergency_access -> users (grantor_uuid)); -joinable!(groups -> organizations (organizations_uuid)); -joinable!(groups_users -> users_organizations (users_organizations_uuid)); -joinable!(groups_users -> groups (groups_uuid)); -joinable!(collections_groups -> collections (collections_uuid)); -joinable!(collections_groups -> groups (groups_uuid)); -joinable!(event -> users_organizations (uuid)); -joinable!(auth_requests -> users (user_uuid)); -joinable!(sso_users -> users (user_uuid)); - -allow_tables_to_appear_in_same_query!( - attachments, - ciphers, - ciphers_collections, - collections, - devices, - folders, - folders_ciphers, - invitations, - org_policies, - organizations, - sends, - sso_users, - twofactor, - users, - users_collections, - users_organizations, - organization_api_key, - emergency_access, - groups, - groups_users, - collections_groups, - event, - auth_requests, -); diff --git a/src/main.rs b/src/main.rs index 3195300b..b681437a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -71,7 +71,7 @@ pub use util::is_running_in_container; #[rocket::main] async fn main() -> Result<(), Error> { - parse_args().await; + parse_args(); launch_info(); let level = init_logging()?; @@ -87,8 +87,8 @@ async fn main() -> Result<(), Error> { let pool = create_db_pool().await; schedule_jobs(pool.clone()); - db::models::TwoFactor::migrate_u2f_to_webauthn(&mut pool.get().await.unwrap()).await.unwrap(); - db::models::TwoFactor::migrate_credential_to_passkey(&mut pool.get().await.unwrap()).await.unwrap(); + 
db::models::TwoFactor::migrate_u2f_to_webauthn(&pool.get().await.unwrap()).await.unwrap(); + db::models::TwoFactor::migrate_credential_to_passkey(&pool.get().await.unwrap()).await.unwrap(); let extra_debug = matches!(level, log::LevelFilter::Trace | log::LevelFilter::Debug); launch_rocket(pool, extra_debug).await // Blocks until program termination. @@ -117,7 +117,7 @@ PRESETS: m= t= p= pub const VERSION: Option<&str> = option_env!("VW_VERSION"); -async fn parse_args() { +fn parse_args() { let mut pargs = pico_args::Arguments::from_env(); let version = VERSION.unwrap_or("(Version info from Git not present)"); @@ -188,7 +188,7 @@ async fn parse_args() { exit(1); } } else if command == "backup" { - match backup_sqlite().await { + match db::backup_sqlite() { Ok(f) => { println!("Backup to '{f}' was successful"); exit(0); @@ -203,23 +203,6 @@ async fn parse_args() { } } -async fn backup_sqlite() -> Result { - use crate::db::{backup_database, DbConnType}; - if DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false) { - // Establish a connection to the sqlite database - let mut conn = db::DbPool::from_config() - .expect("SQLite database connection failed") - .get() - .await - .expect("Unable to get SQLite db pool"); - - let backup_file = backup_database(&mut conn).await?; - Ok(backup_file) - } else { - err_silent!("The database type is not SQLite. Backups only works for SQLite databases") - } -} - fn launch_info() { println!( "\ @@ -285,13 +268,6 @@ fn init_logging() -> Result { log::LevelFilter::Off }; - let diesel_logger_level: log::LevelFilter = - if cfg!(feature = "query_logger") && std::env::var("QUERY_LOGGER").is_ok() { - log::LevelFilter::Debug - } else { - log::LevelFilter::Off - }; - // Only show Rocket underscore `_` logs when the level is Debug or higher // Else this will bloat the log output with useless messages. 
let rocket_underscore_level = if level >= log::LevelFilter::Debug { @@ -342,9 +318,15 @@ fn init_logging() -> Result { // Variable level for hickory used by reqwest ("hickory_resolver::name_server::name_server", hickory_level), ("hickory_proto::xfer", hickory_level), - ("diesel_logger", diesel_logger_level), // SMTP ("lettre::transport::smtp", smtp_log_level), + // Set query_logger default to Off, but can be overwritten manually + // You can set LOG_LEVEL=info,vaultwarden::db::query_logger= to overwrite it. + // This makes it possible to do the following: + // warn = Print slow queries only, 5 seconds or longer + // info = Print slow queries only, 1 second or longer + // debug = Print all queries + ("vaultwarden::db::query_logger", log::LevelFilter::Off), ]); for (path, level) in levels_override.into_iter() { @@ -614,20 +596,24 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error> CONFIG.shutdown(); }); - #[cfg(unix)] + #[cfg(all(unix, sqlite))] { - tokio::spawn(async move { - let mut signal_user1 = tokio::signal::unix::signal(SignalKind::user_defined1()).unwrap(); - loop { - // If we need more signals to act upon, we might want to use select! here. - // With only one item to listen for this is enough. - let _ = signal_user1.recv().await; - match backup_sqlite().await { - Ok(f) => info!("Backup to '{f}' was successful"), - Err(e) => error!("Backup failed. {e:?}"), + if db::ACTIVE_DB_TYPE.get() != Some(&db::DbConnType::Sqlite) { + debug!("PostgreSQL and MySQL/MariaDB do not support this backup feature, skip adding USR1 signal."); + } else { + tokio::spawn(async move { + let mut signal_user1 = tokio::signal::unix::signal(SignalKind::user_defined1()).unwrap(); + loop { + // If we need more signals to act upon, we might want to use select! here. + // With only one item to listen for this is enough. 
+ let _ = signal_user1.recv().await; + match db::backup_sqlite() { + Ok(f) => info!("Backup to '{f}' was successful"), + Err(e) => error!("Backup failed. {e:?}"), + } } - } - }); + }); + } } instance.launch().await?; diff --git a/src/sso.rs b/src/sso.rs index 8e746114..a7730576 100644 --- a/src/sso.rs +++ b/src/sso.rs @@ -165,12 +165,7 @@ pub fn decode_state(base64_state: String) -> ApiResult { // The `nonce` allow to protect against replay attacks // redirect_uri from: https://github.com/bitwarden/server/blob/main/src/Identity/IdentityServer/ApiClient.cs -pub async fn authorize_url( - state: OIDCState, - client_id: &str, - raw_redirect_uri: &str, - mut conn: DbConn, -) -> ApiResult { +pub async fn authorize_url(state: OIDCState, client_id: &str, raw_redirect_uri: &str, conn: DbConn) -> ApiResult { let redirect_uri = match client_id { "web" | "browser" => format!("{}/sso-connector.html", CONFIG.domain()), "desktop" | "mobile" => "bitwarden://sso-callback".to_string(), @@ -185,7 +180,7 @@ pub async fn authorize_url( }; let (auth_url, nonce) = Client::authorize_url(state, redirect_uri).await?; - nonce.save(&mut conn).await?; + nonce.save(&conn).await?; Ok(auth_url) } @@ -235,7 +230,7 @@ pub struct UserInformation { pub user_name: Option, } -async fn decode_code_claims(code: &str, conn: &mut DbConn) -> ApiResult<(OIDCCode, OIDCState)> { +async fn decode_code_claims(code: &str, conn: &DbConn) -> ApiResult<(OIDCCode, OIDCState)> { match auth::decode_jwt::(code, SSO_JWT_ISSUER.to_string()) { Ok(code_claims) => match code_claims.code { OIDCCodeWrapper::Ok { @@ -265,7 +260,7 @@ async fn decode_code_claims(code: &str, conn: &mut DbConn) -> ApiResult<(OIDCCod // - second time we will rely on the `AC_CACHE` since the `code` has already been exchanged. // The `nonce` will ensure that the user is authorized only once. // We return only the `UserInformation` to force calling `redeem` to obtain the `refresh_token`. 
-pub async fn exchange_code(wrapped_code: &str, conn: &mut DbConn) -> ApiResult<UserInformation> { +pub async fn exchange_code(wrapped_code: &str, conn: &DbConn) -> ApiResult<UserInformation> { use openidconnect::OAuth2TokenResponse; let (code, state) = decode_code_claims(wrapped_code, conn).await?; @@ -330,7 +325,7 @@ pub async fn exchange_code(wrapped_code: &str, conn: &mut DbConn) -> ApiResult ApiResult { +pub async fn redeem(state: &OIDCState, conn: &DbConn) -> ApiResult { if let Err(err) = SsoNonce::delete(state, conn).await { error!("Failed to delete database sso_nonce using {state}: {err}") }