From bb41f64c0ad45a142c1cac7b83422ab30c7955d7 Mon Sep 17 00:00:00 2001 From: Daniel Date: Sun, 23 Nov 2025 22:48:23 +0200 Subject: [PATCH 1/4] Switch to multiple runners per arch (#6472) - now uses arm64 native runners for faster compilation --- .github/workflows/release.yml | 300 +++++++++++++++++++++++----------- docker/DockerSettings.yaml | 1 - docker/Dockerfile.alpine | 12 +- docker/Dockerfile.j2 | 6 +- 4 files changed, 215 insertions(+), 104 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 35c6d3ac..95220f42 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -16,6 +16,23 @@ concurrency: # Don't cancel other runs when creating a tag cancel-in-progress: ${{ github.ref_type == 'branch' }} +defaults: + run: + shell: bash + +env: + # The *_REPO variables need to be configured as repository variables + # Append `/settings/variables/actions` to your repo url + # DOCKERHUB_REPO needs to be 'index.docker.io//' + # Check for Docker hub credentials in secrets + HAVE_DOCKERHUB_LOGIN: ${{ vars.DOCKERHUB_REPO != '' && secrets.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }} + # GHCR_REPO needs to be 'ghcr.io//' + # Check for Github credentials in secrets + HAVE_GHCR_LOGIN: ${{ vars.GHCR_REPO != '' && github.repository_owner != '' && secrets.GITHUB_TOKEN != '' }} + # QUAY_REPO needs to be 'quay.io//' + # Check for Quay.io credentials in secrets + HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }} + jobs: docker-build: name: Build Vaultwarden containers @@ -25,7 +42,7 @@ jobs: contents: read attestations: write # Needed to generate an artifact attestation for a build id-token: write # Needed to mint the OIDC token necessary to request a Sigstore signing certificate - runs-on: ubuntu-24.04 + runs-on: ${{ contains(matrix.arch, 'arm') && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }} timeout-minutes: 120 # Start a local docker registry to extract 
the compiled binaries to upload as artifacts and attest them services: @@ -36,20 +53,12 @@ jobs: env: SOURCE_COMMIT: ${{ github.sha }} SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}" - # The *_REPO variables need to be configured as repository variables - # Append `/settings/variables/actions` to your repo url - # DOCKERHUB_REPO needs to be 'index.docker.io//' - # Check for Docker hub credentials in secrets - HAVE_DOCKERHUB_LOGIN: ${{ vars.DOCKERHUB_REPO != '' && secrets.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }} - # GHCR_REPO needs to be 'ghcr.io//' - # Check for Github credentials in secrets - HAVE_GHCR_LOGIN: ${{ vars.GHCR_REPO != '' && github.repository_owner != '' && secrets.GITHUB_TOKEN != '' }} - # QUAY_REPO needs to be 'quay.io//' - # Check for Quay.io credentials in secrets - HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }} strategy: matrix: + arch: ["amd64", "arm64", "arm/v7", "arm/v6"] base_image: ["debian","alpine"] + outputs: + base-tags: ${{ steps.determine-version.outputs.BASE_TAGS }} steps: - name: Initialize QEMU binfmt support @@ -78,17 +87,26 @@ jobs: persist-credentials: false fetch-depth: 0 + # Normalize the architecture string for use in paths and cache keys + - name: Normalize architecture string + env: + MATRIX_ARCH: ${{ matrix.arch }} + run: | + # Replace slashes with nothing to create a safe string for paths/cache keys + NORMALIZED_ARCH="${MATRIX_ARCH//\/}" + echo "NORMALIZED_ARCH=${NORMALIZED_ARCH}" | tee -a "${GITHUB_ENV}" + # Determine Base Tags and Source Version - name: Determine Base Tags and Source Version - shell: bash + id: determine-version env: REF_TYPE: ${{ github.ref_type }} run: | # Check which main tag we are going to build determined by ref_type if [[ "${REF_TYPE}" == "tag" ]]; then - echo "BASE_TAGS=latest,${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_ENV}" + echo "BASE_TAGS=latest,${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_OUTPUT}" 
elif [[ "${REF_TYPE}" == "branch" ]]; then - echo "BASE_TAGS=testing" | tee -a "${GITHUB_ENV}" + echo "BASE_TAGS=testing" | tee -a "${GITHUB_OUTPUT}" fi # Get the Source Version for this release @@ -111,7 +129,6 @@ jobs: - name: Add registry for DockerHub if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }} - shell: bash env: DOCKERHUB_REPO: ${{ vars.DOCKERHUB_REPO }} run: | @@ -128,7 +145,6 @@ jobs: - name: Add registry for ghcr.io if: ${{ env.HAVE_GHCR_LOGIN == 'true' }} - shell: bash env: GHCR_REPO: ${{ vars.GHCR_REPO }} run: | @@ -145,23 +161,22 @@ jobs: - name: Add registry for Quay.io if: ${{ env.HAVE_QUAY_LOGIN == 'true' }} - shell: bash env: QUAY_REPO: ${{ vars.QUAY_REPO }} run: | echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${QUAY_REPO}" | tee -a "${GITHUB_ENV}" - name: Configure build cache from/to - shell: bash env: GHCR_REPO: ${{ vars.GHCR_REPO }} BASE_IMAGE: ${{ matrix.base_image }} + NORMALIZED_ARCH: ${{ env.NORMALIZED_ARCH }} run: | # # Check if there is a GitHub Container Registry Login and use it for caching if [[ -n "${HAVE_GHCR_LOGIN}" ]]; then - echo "BAKE_CACHE_FROM=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE}" | tee -a "${GITHUB_ENV}" - echo "BAKE_CACHE_TO=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE},compression=zstd,mode=max" | tee -a "${GITHUB_ENV}" + echo "BAKE_CACHE_FROM=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE}-${NORMALIZED_ARCH}" | tee -a "${GITHUB_ENV}" + echo "BAKE_CACHE_TO=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE}-${NORMALIZED_ARCH},compression=zstd,mode=max" | tee -a "${GITHUB_ENV}" else echo "BAKE_CACHE_FROM=" echo "BAKE_CACHE_TO=" @@ -169,31 +184,45 @@ jobs: # - name: Add localhost registry - shell: bash run: | echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}" + - name: Generate tags + id: tags + env: + CONTAINER_REGISTRIES: "${{ env.CONTAINER_REGISTRIES }}" + run: | + # 
Convert comma-separated list to newline-separated set commands + TAGS=$(echo "${CONTAINER_REGISTRIES}" | tr ',' '\n' | sed "s|.*|*.tags=&|") + + # Output for use in next step + { + echo "TAGS<<EOF" + echo "${TAGS}" + echo "EOF" + } >> "$GITHUB_ENV" + - name: Bake ${{ matrix.base_image }} containers id: bake_vw uses: docker/bake-action@3acf805d94d93a86cce4ca44798a76464a75b88c # v6.9.0 env: - BASE_TAGS: "${{ env.BASE_TAGS }}" + BASE_TAGS: "${{ steps.determine-version.outputs.BASE_TAGS }}" SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}" SOURCE_VERSION: "${{ env.SOURCE_VERSION }}" SOURCE_REPOSITORY_URL: "${{ env.SOURCE_REPOSITORY_URL }}" - CONTAINER_REGISTRIES: "${{ env.CONTAINER_REGISTRIES }}" with: pull: true - push: true source: . files: docker/docker-bake.hcl targets: "${{ matrix.base_image }}-multi" set: | *.cache-from=${{ env.BAKE_CACHE_FROM }} *.cache-to=${{ env.BAKE_CACHE_TO }} + *.platform=linux/${{ matrix.arch }} + ${{ env.TAGS }} + *.output=type=image,push-by-digest=true,name-canonical=true,push=true,compression=zstd - name: Extract digest SHA - shell: bash env: BAKE_METADATA: ${{ steps.bake_vw.outputs.metadata }} BASE_IMAGE: ${{ matrix.base_image }}
subject-digest: ${{ env.DIGEST_SHA }} - push-to-registry: true + - name: Export digest + env: + DIGEST_SHA: ${{ env.DIGEST_SHA }} + RUNNER_TEMP: ${{ runner.temp }} + run: | + mkdir -p "${RUNNER_TEMP}"/digests + digest="${DIGEST_SHA}" + touch "${RUNNER_TEMP}/digests/${digest#sha256:}" - - name: Attest - quay.io - ${{ matrix.base_image }} - if: ${{ env.HAVE_QUAY_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}} - uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0 + - name: Upload digest + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: - subject-name: ${{ vars.QUAY_REPO }} - subject-digest: ${{ env.DIGEST_SHA }} - push-to-registry: true - + name: digests-${{ env.NORMALIZED_ARCH }}-${{ matrix.base_image }} + path: ${{ runner.temp }}/digests/* + if-no-files-found: error + retention-days: 1 # Extract the Alpine binaries from the containers - name: Extract binaries - shell: bash env: REF_TYPE: ${{ github.ref_type }} BASE_IMAGE: ${{ matrix.base_image }} + DIGEST_SHA: ${{ env.DIGEST_SHA }} + NORMALIZED_ARCH: ${{ env.NORMALIZED_ARCH }} run: | # Check which main tag we are going to build determined by ref_type if [[ "${REF_TYPE}" == "tag" ]]; then @@ -246,60 +267,151 @@ jobs: EXTRACT_TAG="${EXTRACT_TAG}-alpine" fi - # After each extraction the image is removed. 
- # This is needed because using different platforms doesn't trigger a new pull/download - - # Extract amd64 binary - docker create --name amd64 --platform=linux/amd64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}" - docker cp amd64:/vaultwarden vaultwarden-amd64-${BASE_IMAGE} - docker rm --force amd64 - docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}" - - # Extract arm64 binary - docker create --name arm64 --platform=linux/arm64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}" - docker cp arm64:/vaultwarden vaultwarden-arm64-${BASE_IMAGE} - docker rm --force arm64 - docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}" - - # Extract armv7 binary - docker create --name armv7 --platform=linux/arm/v7 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}" - docker cp armv7:/vaultwarden vaultwarden-armv7-${BASE_IMAGE} - docker rm --force armv7 - docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}" - - # Extract armv6 binary - docker create --name armv6 --platform=linux/arm/v6 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}" - docker cp armv6:/vaultwarden vaultwarden-armv6-${BASE_IMAGE} - docker rm --force armv6 - docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}" + CONTAINER_ID="$(docker create "localhost:5000/vaultwarden/server:${EXTRACT_TAG}@${DIGEST_SHA}")" + + # Copy the binary + docker cp "$CONTAINER_ID":/vaultwarden vaultwarden-"${NORMALIZED_ARCH}" + + # Clean up + docker rm "$CONTAINER_ID" # Upload artifacts to Github Actions and Attest the binaries - - name: "Upload amd64 artifact ${{ matrix.base_image }}" - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + - name: Attest binaries + uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0 with: - name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64-${{ matrix.base_image }} - path: vaultwarden-amd64-${{ matrix.base_image }} + subject-path: vaultwarden-${{ 
env.NORMALIZED_ARCH }} - - name: "Upload arm64 artifact ${{ matrix.base_image }}" + - name: Upload binaries as artifacts uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: - name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64-${{ matrix.base_image }} - path: vaultwarden-arm64-${{ matrix.base_image }} + name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-${{ env.NORMALIZED_ARCH }}-${{ matrix.base_image }} + path: vaultwarden-${{ env.NORMALIZED_ARCH }} - - name: "Upload armv7 artifact ${{ matrix.base_image }}" - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + merge-manifests: + name: Merge manifests + runs-on: ubuntu-latest + needs: docker-build + + env: + BASE_TAGS: ${{ needs.docker-build.outputs.base-tags }} + + permissions: + packages: write # Needed to upload packages and artifacts + attestations: write # Needed to generate an artifact attestation for a build + id-token: write # Needed to mint the OIDC token necessary to request a Sigstore signing certificate + + strategy: + matrix: + base_image: ["debian","alpine"] + + steps: + - name: Download digests + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: - name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7-${{ matrix.base_image }} - path: vaultwarden-armv7-${{ matrix.base_image }} + path: ${{ runner.temp }}/digests + pattern: digests-*-${{ matrix.base_image }} + merge-multiple: true - - name: "Upload armv6 artifact ${{ matrix.base_image }}" - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + # Login to Docker Hub + - name: Login to Docker Hub + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 with: - name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6-${{ matrix.base_image }} - path: vaultwarden-armv6-${{ matrix.base_image }} + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + if: ${{ 
env.HAVE_DOCKERHUB_LOGIN == 'true' }} - - name: "Attest artifacts ${{ matrix.base_image }}" + - name: Add registry for DockerHub + if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }} + env: + DOCKERHUB_REPO: ${{ vars.DOCKERHUB_REPO }} + run: | + echo "CONTAINER_REGISTRIES=${DOCKERHUB_REPO}" | tee -a "${GITHUB_ENV}" + + # Login to GitHub Container Registry + - name: Login to GitHub Container Registry + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + if: ${{ env.HAVE_GHCR_LOGIN == 'true' }} + + - name: Add registry for ghcr.io + if: ${{ env.HAVE_GHCR_LOGIN == 'true' }} + env: + GHCR_REPO: ${{ vars.GHCR_REPO }} + run: | + echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${GHCR_REPO}" | tee -a "${GITHUB_ENV}" + + # Login to Quay.io + - name: Login to Quay.io + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 + with: + registry: quay.io + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_TOKEN }} + if: ${{ env.HAVE_QUAY_LOGIN == 'true' }} + + - name: Add registry for Quay.io + if: ${{ env.HAVE_QUAY_LOGIN == 'true' }} + env: + QUAY_REPO: ${{ vars.QUAY_REPO }} + run: | + echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${QUAY_REPO}" | tee -a "${GITHUB_ENV}" + + - name: Create manifest list, push it and extract digest SHA + working-directory: ${{ runner.temp }}/digests + env: + BASE_IMAGE: "${{ matrix.base_image }}" + BASE_TAGS: "${{ env.BASE_TAGS }}" + CONTAINER_REGISTRIES: "${{ env.CONTAINER_REGISTRIES }}" + run: | + set +e + IFS=',' read -ra IMAGES <<< "${CONTAINER_REGISTRIES}" + for img in "${IMAGES[@]}"; do + echo "Creating manifest for $img:${BASE_TAGS}-${BASE_IMAGE}" + + OUTPUT=$(docker buildx imagetools create \ + -t "$img:${BASE_TAGS}-${BASE_IMAGE}" \ + $(printf "$img:${BASE_TAGS}-${BASE_IMAGE}@sha256:%s " *) 2>&1) + STATUS=$? 
+ + if [ $STATUS -ne 0 ]; then + echo "Manifest creation failed for $img" + echo "$OUTPUT" + exit $STATUS + fi + + echo "Manifest created for $img" + echo "$OUTPUT" + done + set -e + + # Extract digest SHA for subsequent steps + GET_DIGEST_SHA="$(echo "$OUTPUT" | grep -oE 'sha256:[a-f0-9]{64}' | tail -1)" + echo "DIGEST_SHA=${GET_DIGEST_SHA}" | tee -a "${GITHUB_ENV}" + + # Attest container images + - name: Attest - docker.io - ${{ matrix.base_image }} + if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' && env.DIGEST_SHA != ''}} + uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0 + with: + subject-name: ${{ vars.DOCKERHUB_REPO }} + subject-digest: ${{ env.DIGEST_SHA }} + push-to-registry: true + + - name: Attest - ghcr.io - ${{ matrix.base_image }} + if: ${{ env.HAVE_GHCR_LOGIN == 'true' && env.DIGEST_SHA != ''}} uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0 with: - subject-path: vaultwarden-* - # End Upload artifacts to Github Actions + subject-name: ${{ vars.GHCR_REPO }} + subject-digest: ${{ env.DIGEST_SHA }} + push-to-registry: true + + - name: Attest - quay.io - ${{ matrix.base_image }} + if: ${{ env.HAVE_QUAY_LOGIN == 'true' && env.DIGEST_SHA != ''}} + uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0 + with: + subject-name: ${{ vars.QUAY_REPO }} + subject-digest: ${{ env.DIGEST_SHA }} + push-to-registry: true diff --git a/docker/DockerSettings.yaml b/docker/DockerSettings.yaml index 1707affe..119dbf9f 100644 --- a/docker/DockerSettings.yaml +++ b/docker/DockerSettings.yaml @@ -17,7 +17,6 @@ build_stage_image: platform: "$BUILDPLATFORM" alpine: image: "build_${TARGETARCH}${TARGETVARIANT}" - platform: "linux/amd64" # The Alpine build images only have linux/amd64 images arch_image: amd64: "ghcr.io/blackdex/rust-musl:x86_64-musl-stable-{{rust_version}}" arm64: "ghcr.io/blackdex/rust-musl:aarch64-musl-stable-{{rust_version}}" diff --git 
a/docker/Dockerfile.alpine b/docker/Dockerfile.alpine index 0b0a7c10..f593e279 100644 --- a/docker/Dockerfile.alpine +++ b/docker/Dockerfile.alpine @@ -30,16 +30,16 @@ FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:50662dccf4908ac2128cd44981c52fcb4e3e8dd56f21823c8d5e91267ff741fa AS vault ########################## ALPINE BUILD IMAGES ########################## -## NOTE: The Alpine Base Images do not support other platforms then linux/amd64 +## NOTE: The Alpine Base Images do not support other platforms then linux/amd64 and linux/arm64 ## And for Alpine we define all build images here, they will only be loaded when actually used -FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.91.0 AS build_amd64 -FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.91.0 AS build_arm64 -FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.91.0 AS build_armv7 -FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.91.0 AS build_armv6 +FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.91.0 AS build_amd64 +FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.91.0 AS build_arm64 +FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.91.0 AS build_armv7 +FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.91.0 AS build_armv6 ########################## BUILD IMAGE ########################## # hadolint ignore=DL3006 -FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} AS build +FROM --platform=$BUILDPLATFORM build_${TARGETARCH}${TARGETVARIANT} AS build ARG TARGETARCH ARG TARGETVARIANT ARG TARGETPLATFORM diff --git a/docker/Dockerfile.j2 b/docker/Dockerfile.j2 index 4816dacb..0501b3ff 100644 --- a/docker/Dockerfile.j2 +++ b/docker/Dockerfile.j2 @@ -36,16 +36,16 @@ FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_diges 
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@{{ xx_image_digest }} AS xx {% elif base == "alpine" %} ########################## ALPINE BUILD IMAGES ########################## -## NOTE: The Alpine Base Images do not support other platforms then linux/amd64 +## NOTE: The Alpine Base Images do not support other platforms than linux/amd64 and linux/arm64 ## And for Alpine we define all build images here, they will only be loaded when actually used {% for arch in build_stage_image[base].arch_image %} -FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} AS build_{{ arch }} +FROM --platform=$BUILDPLATFORM {{ build_stage_image[base].arch_image[arch] }} AS build_{{ arch }} {% endfor %} {% endif %} ########################## BUILD IMAGE ########################## # hadolint ignore=DL3006 -FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} AS build +FROM --platform=$BUILDPLATFORM {{ build_stage_image[base].image }} AS build {% if base == "debian" %} COPY --from=xx / / {% endif %} From 7f7b412220822d1e6a396e0174d1f577cfce839f Mon Sep 17 00:00:00 2001 From: Mathijs van Veluw Date: Sun, 23 Nov 2025 21:50:31 +0100 Subject: [PATCH 2/4] Fix icon redirect caching (#6487) As reported in #6477, redirection of favicons didn't allow caching. This commit fixes this by adding the `Cached` wrapper around the response. It will use the same TTLs used for downloading icons locally. Also removed `_` as a valid domain character; underscores should not be used in FQDNs at all. Those only serve as special chars used in domain labels, mostly used in SRV or TXT records.
Fixes #6477 Signed-off-by: BlackDex --- src/api/icons.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/api/icons.rs b/src/api/icons.rs index 4e2aef1c..5003a421 100644 --- a/src/api/icons.rs +++ b/src/api/icons.rs @@ -82,19 +82,19 @@ static ICON_SIZE_REGEX: LazyLock = LazyLock::new(|| Regex::new(r"(?x)(\d+ // It is used to prevent sending a specific header which breaks icon downloads. // If this function needs to be renamed, also adjust the code in `util.rs` #[get("//icon.png")] -fn icon_external(domain: &str) -> Option { +fn icon_external(domain: &str) -> Cached> { if !is_valid_domain(domain) { warn!("Invalid domain: {domain}"); - return None; + return Cached::ttl(None, CONFIG.icon_cache_negttl(), true); } if should_block_address(domain) { warn!("Blocked address: {domain}"); - return None; + return Cached::ttl(None, CONFIG.icon_cache_negttl(), true); } let url = CONFIG._icon_service_url().replace("{}", domain); - match CONFIG.icon_redirect_code() { + let redir = match CONFIG.icon_redirect_code() { 301 => Some(Redirect::moved(url)), // legacy permanent redirect 302 => Some(Redirect::found(url)), // legacy temporary redirect 307 => Some(Redirect::temporary(url)), @@ -103,7 +103,8 @@ fn icon_external(domain: &str) -> Option { error!("Unexpected redirect code {}", CONFIG.icon_redirect_code()); None } - } + }; + Cached::ttl(redir, CONFIG.icon_cache_ttl(), true) } #[get("//icon.png")] @@ -141,7 +142,7 @@ async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec)> { /// This does some manual checks and makes use of Url to do some basic checking. /// domains can't be larger then 63 characters (not counting multiple subdomains) according to the RFC's, but we limit the total size to 255. fn is_valid_domain(domain: &str) -> bool { - const ALLOWED_CHARS: &str = "_-."; + const ALLOWED_CHARS: &str = "-."; // If parsing the domain fails using Url, it will not work with reqwest. 
if let Err(parse_error) = url::Url::parse(format!("https://{domain}").as_str()) { From 35e1a306f3399e3caf33791a759f16e47d3339e1 Mon Sep 17 00:00:00 2001 From: Timshel Date: Sun, 23 Nov 2025 21:54:37 +0100 Subject: [PATCH 3/4] Fix around singleorg policy (#6247) Co-authored-by: Timshel --- src/api/admin.rs | 21 +----- src/api/core/mod.rs | 23 +----- src/api/core/organizations.rs | 75 ++++--------------- src/db/models/mod.rs | 2 +- src/db/models/org_policy.rs | 60 +++++++-------- src/db/models/organization.rs | 7 +- .../send_single_org_removed_from_org.hbs | 4 +- .../send_single_org_removed_from_org.html.hbs | 4 +- 8 files changed, 63 insertions(+), 133 deletions(-) diff --git a/src/api/admin.rs b/src/api/admin.rs index 8b6101fb..d36da8f9 100644 --- a/src/api/admin.rs +++ b/src/api/admin.rs @@ -23,7 +23,7 @@ use crate::{ backup_sqlite, get_sql_server_version, models::{ Attachment, Cipher, Collection, Device, Event, EventType, Group, Invitation, Membership, MembershipId, - MembershipType, OrgPolicy, OrgPolicyErr, Organization, OrganizationId, SsoUser, TwoFactor, User, UserId, + MembershipType, OrgPolicy, Organization, OrganizationId, SsoUser, TwoFactor, User, UserId, }, DbConn, DbConnType, ACTIVE_DB_TYPE, }, @@ -556,23 +556,9 @@ async fn update_membership_type(data: Json, token: AdminToke } } + member_to_edit.atype = new_type; // This check is also done at api::organizations::{accept_invite, _confirm_invite, _activate_member, edit_member}, update_membership_type - // It returns different error messages per function. 
- if new_type < MembershipType::Admin { - match OrgPolicy::is_user_allowed(&member_to_edit.user_uuid, &member_to_edit.org_uuid, true, &conn).await { - Ok(_) => {} - Err(OrgPolicyErr::TwoFactorMissing) => { - if CONFIG.email_2fa_auto_fallback() { - two_factor::email::find_and_activate_email_2fa(&member_to_edit.user_uuid, &conn).await?; - } else { - err!("You cannot modify this user to this type because they have not setup 2FA"); - } - } - Err(OrgPolicyErr::SingleOrgEnforced) => { - err!("You cannot modify this user to this type because it is a member of an organization which forbids it"); - } - } - } + OrgPolicy::check_user_allowed(&member_to_edit, "modify", &conn).await?; log_event( EventType::OrganizationUserUpdated as i32, @@ -585,7 +571,6 @@ async fn update_membership_type(data: Json, token: AdminToke ) .await; - member_to_edit.atype = new_type; member_to_edit.save(&conn).await } diff --git a/src/api/core/mod.rs b/src/api/core/mod.rs index d5ca0cc9..173a06b6 100644 --- a/src/api/core/mod.rs +++ b/src/api/core/mod.rs @@ -53,7 +53,7 @@ use crate::{ api::{EmptyResult, JsonResult, Notify, UpdateType}, auth::Headers, db::{ - models::{Membership, MembershipStatus, MembershipType, OrgPolicy, OrgPolicyErr, Organization, User}, + models::{Membership, MembershipStatus, OrgPolicy, Organization, User}, DbConn, }, error::Error, @@ -269,27 +269,12 @@ async fn accept_org_invite( err!("User already accepted the invitation"); } - // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type - // It returns different error messages per function. 
- if member.atype < MembershipType::Admin { - match OrgPolicy::is_user_allowed(&member.user_uuid, &member.org_uuid, false, conn).await { - Ok(_) => {} - Err(OrgPolicyErr::TwoFactorMissing) => { - if crate::CONFIG.email_2fa_auto_fallback() { - two_factor::email::activate_email_2fa(user, conn).await?; - } else { - err!("You cannot join this organization until you enable two-step login on your user account"); - } - } - Err(OrgPolicyErr::SingleOrgEnforced) => { - err!("You cannot join this organization because you are a member of an organization which forbids it"); - } - } - } - member.status = MembershipStatus::Accepted as i32; member.reset_password_key = reset_password_key; + // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type + OrgPolicy::check_user_allowed(&member, "join", conn).await?; + member.save(conn).await?; if crate::CONFIG.mail_enabled() { diff --git a/src/api/core/organizations.rs b/src/api/core/organizations.rs index b8715ab7..e8cca467 100644 --- a/src/api/core/organizations.rs +++ b/src/api/core/organizations.rs @@ -15,7 +15,7 @@ use crate::{ models::{ Cipher, CipherId, Collection, CollectionCipher, CollectionGroup, CollectionId, CollectionUser, EventType, Group, GroupId, GroupUser, Invitation, Membership, MembershipId, MembershipStatus, MembershipType, - OrgPolicy, OrgPolicyErr, OrgPolicyType, Organization, OrganizationApiKey, OrganizationId, User, UserId, + OrgPolicy, OrgPolicyType, Organization, OrganizationApiKey, OrganizationId, User, UserId, }, DbConn, }, @@ -1463,27 +1463,12 @@ async fn _confirm_invite( err!("User in invalid state") } - // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type - // It returns different error messages per function. 
- if member_to_confirm.atype < MembershipType::Admin { - match OrgPolicy::is_user_allowed(&member_to_confirm.user_uuid, org_id, true, conn).await { - Ok(_) => {} - Err(OrgPolicyErr::TwoFactorMissing) => { - if CONFIG.email_2fa_auto_fallback() { - two_factor::email::find_and_activate_email_2fa(&member_to_confirm.user_uuid, conn).await?; - } else { - err!("You cannot confirm this user because they have not setup 2FA"); - } - } - Err(OrgPolicyErr::SingleOrgEnforced) => { - err!("You cannot confirm this user because they are a member of an organization which forbids it"); - } - } - } - member_to_confirm.status = MembershipStatus::Confirmed as i32; member_to_confirm.akey = key.to_string(); + // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type + OrgPolicy::check_user_allowed(&member_to_confirm, "confirm", conn).await?; + log_event( EventType::OrganizationUserConfirmed as i32, &member_to_confirm.uuid, @@ -1631,27 +1616,13 @@ async fn edit_member( } } - // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type - // It returns different error messages per function. 
- if new_type < MembershipType::Admin { - match OrgPolicy::is_user_allowed(&member_to_edit.user_uuid, &org_id, true, &conn).await { - Ok(_) => {} - Err(OrgPolicyErr::TwoFactorMissing) => { - if CONFIG.email_2fa_auto_fallback() { - two_factor::email::find_and_activate_email_2fa(&member_to_edit.user_uuid, &conn).await?; - } else { - err!("You cannot modify this user to this type because they have not setup 2FA"); - } - } - Err(OrgPolicyErr::SingleOrgEnforced) => { - err!("You cannot modify this user to this type because they are a member of an organization which forbids it"); - } - } - } - member_to_edit.access_all = access_all; member_to_edit.atype = new_type as i32; + // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type + // We need to perform the check after changing the type since `admin` is exempt. + OrgPolicy::check_user_allowed(&member_to_edit, "modify", &conn).await?; + // Delete all the odd collections for c in CollectionUser::find_by_organization_and_user_uuid(&org_id, &member_to_edit.user_uuid, &conn).await { c.delete(&conn).await?; @@ -2154,14 +2125,14 @@ async fn put_policy( // When enabling the SingleOrg policy, remove this org's members that are members of other orgs if pol_type_enum == OrgPolicyType::SingleOrg && data.enabled { - for member in Membership::find_by_org(&org_id, &conn).await.into_iter() { + for mut member in Membership::find_by_org(&org_id, &conn).await.into_iter() { // Policy only applies to non-Owner/non-Admin members who have accepted joining the org // Exclude invited and revoked users when checking for this policy. // Those users will not be allowed to accept or be activated because of the policy checks done there. - // We check if the count is larger then 1, because it includes this organization also. 
if member.atype < MembershipType::Admin && member.status != MembershipStatus::Invited as i32 - && Membership::count_accepted_and_confirmed_by_user(&member.user_uuid, &conn).await > 1 + && Membership::count_accepted_and_confirmed_by_user(&member.user_uuid, &member.org_uuid, &conn).await + > 0 { if CONFIG.mail_enabled() { let org = Organization::find_by_uuid(&member.org_uuid, &conn).await.unwrap(); @@ -2181,7 +2152,8 @@ async fn put_policy( ) .await; - member.delete(&conn).await?; + member.revoke(); + member.save(&conn).await?; } } } @@ -2628,25 +2600,10 @@ async fn _restore_member( err!("Only owners can restore other owners") } - // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type - // It returns different error messages per function. - if member.atype < MembershipType::Admin { - match OrgPolicy::is_user_allowed(&member.user_uuid, org_id, false, conn).await { - Ok(_) => {} - Err(OrgPolicyErr::TwoFactorMissing) => { - if CONFIG.email_2fa_auto_fallback() { - two_factor::email::find_and_activate_email_2fa(&member.user_uuid, conn).await?; - } else { - err!("You cannot restore this user because they have not setup 2FA"); - } - } - Err(OrgPolicyErr::SingleOrgEnforced) => { - err!("You cannot restore this user because they are a member of an organization which forbids it"); - } - } - } - member.restore(); + // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type + // This check need to be done after restoring to work with the correct status + OrgPolicy::check_user_allowed(&member, "restore", conn).await?; member.save(conn).await?; log_event( diff --git a/src/db/models/mod.rs b/src/db/models/mod.rs index a9406ed0..75c58626 100644 --- a/src/db/models/mod.rs +++ b/src/db/models/mod.rs @@ -27,7 +27,7 @@ pub use self::event::{Event, EventType}; pub use self::favorite::Favorite; pub use self::folder::{Folder, FolderCipher, FolderId}; pub use 
self::group::{CollectionGroup, Group, GroupId, GroupUser}; -pub use self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyId, OrgPolicyType}; +pub use self::org_policy::{OrgPolicy, OrgPolicyId, OrgPolicyType}; pub use self::organization::{ Membership, MembershipId, MembershipStatus, MembershipType, OrgApiKeyId, Organization, OrganizationApiKey, OrganizationId, diff --git a/src/db/models/org_policy.rs b/src/db/models/org_policy.rs index 92665574..9b4c8b34 100644 --- a/src/db/models/org_policy.rs +++ b/src/db/models/org_policy.rs @@ -2,10 +2,12 @@ use derive_more::{AsRef, From}; use serde::Deserialize; use serde_json::Value; +use crate::api::core::two_factor; use crate::api::EmptyResult; use crate::db::schema::{org_policies, users_organizations}; use crate::db::DbConn; use crate::error::MapResult; +use crate::CONFIG; use diesel::prelude::*; use super::{Membership, MembershipId, MembershipStatus, MembershipType, OrganizationId, TwoFactor, UserId}; @@ -58,14 +60,6 @@ pub struct ResetPasswordDataModel { pub auto_enroll_enabled: bool, } -pub type OrgPolicyResult = Result<(), OrgPolicyErr>; - -#[derive(Debug)] -pub enum OrgPolicyErr { - TwoFactorMissing, - SingleOrgEnforced, -} - /// Local methods impl OrgPolicy { pub fn new(org_uuid: OrganizationId, atype: OrgPolicyType, enabled: bool, data: String) -> Self { @@ -280,31 +274,35 @@ impl OrgPolicy { false } - pub async fn is_user_allowed( - user_uuid: &UserId, - org_uuid: &OrganizationId, - exclude_current_org: bool, - conn: &DbConn, - ) -> OrgPolicyResult { - // Enforce TwoFactor/TwoStep login - if TwoFactor::find_by_user(user_uuid, conn).await.is_empty() { - match Self::find_by_org_and_type(org_uuid, OrgPolicyType::TwoFactorAuthentication, conn).await { - Some(p) if p.enabled => { - return Err(OrgPolicyErr::TwoFactorMissing); + pub async fn check_user_allowed(m: &Membership, action: &str, conn: &DbConn) -> EmptyResult { + if m.atype < MembershipType::Admin && m.status > (MembershipStatus::Invited as i32) { + // Enforce 
TwoFactor/TwoStep login + if let Some(p) = Self::find_by_org_and_type(&m.org_uuid, OrgPolicyType::TwoFactorAuthentication, conn).await + { + if p.enabled && TwoFactor::find_by_user(&m.user_uuid, conn).await.is_empty() { + if CONFIG.email_2fa_auto_fallback() { + two_factor::email::find_and_activate_email_2fa(&m.user_uuid, conn).await?; + } else { + err!(format!("Cannot {} because 2FA is required (membership {})", action, m.uuid)); + } } - _ => {} - }; - } + } + + // Check if the user is part of another Organization with SingleOrg activated + if Self::is_applicable_to_user(&m.user_uuid, OrgPolicyType::SingleOrg, Some(&m.org_uuid), conn).await { + err!(format!( + "Cannot {} because another organization policy forbids it (membership {})", + action, m.uuid + )); + } - // Enforce Single Organization Policy of other organizations user is a member of - // This check here needs to exclude this current org-id, else an accepted user can not be confirmed. - let exclude_org = if exclude_current_org { - Some(org_uuid) - } else { - None - }; - if Self::is_applicable_to_user(user_uuid, OrgPolicyType::SingleOrg, exclude_org, conn).await { - return Err(OrgPolicyErr::SingleOrgEnforced); + if let Some(p) = Self::find_by_org_and_type(&m.org_uuid, OrgPolicyType::SingleOrg, conn).await { + if p.enabled + && Membership::count_accepted_and_confirmed_by_user(&m.user_uuid, &m.org_uuid, conn).await > 0 + { + err!(format!("Cannot {} because the organization policy forbids being part of other organization (membership {})", action, m.uuid)); + } + } } Ok(()) diff --git a/src/db/models/organization.rs b/src/db/models/organization.rs index 640e47e7..0b722ef6 100644 --- a/src/db/models/organization.rs +++ b/src/db/models/organization.rs @@ -883,10 +883,15 @@ impl Membership { }} } - pub async fn count_accepted_and_confirmed_by_user(user_uuid: &UserId, conn: &DbConn) -> i64 { + pub async fn count_accepted_and_confirmed_by_user( + user_uuid: &UserId, + excluded_org: &OrganizationId, + conn: &DbConn, 
+ ) -> i64 { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) + .filter(users_organizations::org_uuid.ne(excluded_org)) .filter(users_organizations::status.eq(MembershipStatus::Accepted as i32).or(users_organizations::status.eq(MembershipStatus::Confirmed as i32))) .count() .first::(conn) diff --git a/src/static/templates/email/send_single_org_removed_from_org.hbs b/src/static/templates/email/send_single_org_removed_from_org.hbs index ec77cf63..5fe93902 100644 --- a/src/static/templates/email/send_single_org_removed_from_org.hbs +++ b/src/static/templates/email/send_single_org_removed_from_org.hbs @@ -1,4 +1,4 @@ -You have been removed from {{{org_name}}} +Your access to {{{org_name}}} has been revoked -Your user account has been removed from the *{{org_name}}* organization because you are a part of another organization. The {{org_name}} organization has enabled a policy that prevents users from being a part of multiple organizations. Before you can re-join this organization you need to leave all other organizations or join with a different account. +Your access to the *{{org_name}}* organization has been revoked because you are a part of another organization. The {{org_name}} organization has enabled a policy that prevents users from being a part of multiple organizations. Before your access can be restored you need to leave all other organizations or join with a different account. {{> email/email_footer_text }} diff --git a/src/static/templates/email/send_single_org_removed_from_org.html.hbs b/src/static/templates/email/send_single_org_removed_from_org.html.hbs index e4026628..39527f4e 100644 --- a/src/static/templates/email/send_single_org_removed_from_org.html.hbs +++ b/src/static/templates/email/send_single_org_removed_from_org.html.hbs @@ -1,10 +1,10 @@ -You have been removed from {{{org_name}}} +Your access to {{{org_name}}} has been revoked {{> email/email_header }}
- Your user account has been removed from the {{org_name}} organization because you are a part of another organization. The {{org_name}} organization has enabled a policy that prevents users from being a part of multiple organizations. Before you can re-join this organization you need to leave all other organizations or join with a different account. + Your access to the {{org_name}} organization has been revoked because you are a part of another organization. The {{org_name}} organization has enabled a policy that prevents users from being a part of multiple organizations. Before your access can be restored you need to leave all other organizations or join with a different account.
From aad1f19b45073d7f670a8310689c3245940d667a Mon Sep 17 00:00:00 2001 From: Stefan Melmuk <509385+stefan0xC@users.noreply.github.com> Date: Sun, 23 Nov 2025 21:55:20 +0100 Subject: [PATCH 4/4] fix email as 2fa provider (#6473) --- src/api/core/two_factor/email.rs | 21 ++++++++++----------- src/db/models/user.rs | 14 +------------- 2 files changed, 11 insertions(+), 24 deletions(-) diff --git a/src/api/core/two_factor/email.rs b/src/api/core/two_factor/email.rs index 63e4508b..cc6909af 100644 --- a/src/api/core/two_factor/email.rs +++ b/src/api/core/two_factor/email.rs @@ -10,7 +10,7 @@ use crate::{ auth::Headers, crypto, db::{ - models::{DeviceId, EventType, TwoFactor, TwoFactorType, User, UserId}, + models::{EventType, TwoFactor, TwoFactorType, User, UserId}, DbConn, }, error::{Error, MapResult}, @@ -24,16 +24,10 @@ pub fn routes() -> Vec { #[derive(Deserialize)] #[serde(rename_all = "camelCase")] struct SendEmailLoginData { - #[serde(alias = "DeviceIdentifier")] - device_identifier: DeviceId, - - #[allow(unused)] #[serde(alias = "Email")] - email: Option, - - #[allow(unused)] + email: String, #[serde(alias = "MasterPasswordHash")] - master_password_hash: Option, + master_password_hash: String, } /// User is trying to login and wants to use email 2FA. @@ -45,14 +39,19 @@ async fn send_email_login(data: Json, conn: DbConn) -> Empty use crate::db::models::User; // Get the user - let Some(user) = User::find_by_device_id(&data.device_identifier, &conn).await else { - err!("Cannot find user. Try again.") + let Some(user) = User::find_by_mail(&data.email, &conn).await else { + err!("Username or password is incorrect. Try again.") }; if !CONFIG._enable_email_2fa() { err!("Email 2FA is disabled") } + // Check password + if !user.check_valid_password(&data.master_password_hash) { + err!("Username or password is incorrect. 
Try again.") + } + send_token(&user.uuid, &conn).await?; Ok(()) diff --git a/src/db/models/user.rs b/src/db/models/user.rs index e14c4218..c7f4e1bc 100644 --- a/src/db/models/user.rs +++ b/src/db/models/user.rs @@ -1,4 +1,4 @@ -use crate::db::schema::{devices, invitations, sso_users, users}; +use crate::db::schema::{invitations, sso_users, users}; use chrono::{NaiveDateTime, TimeDelta, Utc}; use derive_more::{AsRef, Deref, Display, From}; use diesel::prelude::*; @@ -10,7 +10,6 @@ use super::{ use crate::{ api::EmptyResult, crypto, - db::models::DeviceId, db::DbConn, error::MapResult, sso::OIDCIdentifier, @@ -387,17 +386,6 @@ impl User { }} } - pub async fn find_by_device_id(device_uuid: &DeviceId, conn: &DbConn) -> Option { - db_run! { conn: { - users::table - .inner_join(devices::table.on(devices::user_uuid.eq(users::uuid))) - .filter(devices::uuid.eq(device_uuid)) - .select(users::all_columns) - .first::(conn) - .ok() - }} - } - pub async fn get_all(conn: &DbConn) -> Vec<(Self, Option)> { db_run! { conn: { users::table