Merge branch 'main' into update-crates

pull/6485/head
Daniel García authored 1 week ago, committed by GitHub
commit 98bf0e3c72
No known key found for this signature in database. GPG Key ID: B5690EEEBB952194
  1. .github/workflows/release.yml (300 changed lines)
  2. docker/DockerSettings.yaml (1 changed line)
  3. docker/Dockerfile.alpine (12 changed lines)
  4. docker/Dockerfile.j2 (6 changed lines)
  5. src/api/admin.rs (21 changed lines)
  6. src/api/core/mod.rs (23 changed lines)
  7. src/api/core/organizations.rs (75 changed lines)
  8. src/api/core/two_factor/email.rs (21 changed lines)
  9. src/api/icons.rs (13 changed lines)
  10. src/db/models/mod.rs (2 changed lines)
  11. src/db/models/org_policy.rs (56 changed lines)
  12. src/db/models/organization.rs (7 changed lines)
  13. src/db/models/user.rs (14 changed lines)
  14. src/static/templates/email/send_single_org_removed_from_org.hbs (4 changed lines)
  15. src/static/templates/email/send_single_org_removed_from_org.html.hbs (4 changed lines)

.github/workflows/release.yml (300 changed lines)

@@ -16,6 +16,23 @@ concurrency:
   # Don't cancel other runs when creating a tag
   cancel-in-progress: ${{ github.ref_type == 'branch' }}

+defaults:
+  run:
+    shell: bash
+
+env:
+  # The *_REPO variables need to be configured as repository variables
+  # Append `/settings/variables/actions` to your repo url
+  # DOCKERHUB_REPO needs to be 'index.docker.io/<user>/<repo>'
+  # Check for Docker hub credentials in secrets
+  HAVE_DOCKERHUB_LOGIN: ${{ vars.DOCKERHUB_REPO != '' && secrets.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }}
+  # GHCR_REPO needs to be 'ghcr.io/<user>/<repo>'
+  # Check for Github credentials in secrets
+  HAVE_GHCR_LOGIN: ${{ vars.GHCR_REPO != '' && github.repository_owner != '' && secrets.GITHUB_TOKEN != '' }}
+  # QUAY_REPO needs to be 'quay.io/<user>/<repo>'
+  # Check for Quay.io credentials in secrets
+  HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }}
+
 jobs:
   docker-build:
     name: Build Vaultwarden containers

@@ -25,7 +42,7 @@ jobs:
       contents: read
       attestations: write # Needed to generate an artifact attestation for a build
       id-token: write # Needed to mint the OIDC token necessary to request a Sigstore signing certificate
-    runs-on: ubuntu-24.04
+    runs-on: ${{ contains(matrix.arch, 'arm') && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }}
     timeout-minutes: 120
     # Start a local docker registry to extract the compiled binaries to upload as artifacts and attest them
     services:

@@ -36,20 +53,12 @@ jobs:
     env:
       SOURCE_COMMIT: ${{ github.sha }}
       SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
-      # The *_REPO variables need to be configured as repository variables
-      # Append `/settings/variables/actions` to your repo url
-      # DOCKERHUB_REPO needs to be 'index.docker.io/<user>/<repo>'
-      # Check for Docker hub credentials in secrets
-      HAVE_DOCKERHUB_LOGIN: ${{ vars.DOCKERHUB_REPO != '' && secrets.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }}
-      # GHCR_REPO needs to be 'ghcr.io/<user>/<repo>'
-      # Check for Github credentials in secrets
-      HAVE_GHCR_LOGIN: ${{ vars.GHCR_REPO != '' && github.repository_owner != '' && secrets.GITHUB_TOKEN != '' }}
-      # QUAY_REPO needs to be 'quay.io/<user>/<repo>'
-      # Check for Quay.io credentials in secrets
-      HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }}
     strategy:
       matrix:
+        arch: ["amd64", "arm64", "arm/v7", "arm/v6"]
         base_image: ["debian","alpine"]
+    outputs:
+      base-tags: ${{ steps.determine-version.outputs.BASE_TAGS }}
     steps:
       - name: Initialize QEMU binfmt support

@@ -78,17 +87,26 @@ jobs:
          persist-credentials: false
          fetch-depth: 0

+      # Normalize the architecture string for use in paths and cache keys
+      - name: Normalize architecture string
+        env:
+          MATRIX_ARCH: ${{ matrix.arch }}
+        run: |
+          # Replace slashes with nothing to create a safe string for paths/cache keys
+          NORMALIZED_ARCH="${MATRIX_ARCH//\/}"
+          echo "NORMALIZED_ARCH=${NORMALIZED_ARCH}" | tee -a "${GITHUB_ENV}"
+
       # Determine Base Tags and Source Version
       - name: Determine Base Tags and Source Version
-        shell: bash
+        id: determine-version
         env:
           REF_TYPE: ${{ github.ref_type }}
         run: |
           # Check which main tag we are going to build determined by ref_type
           if [[ "${REF_TYPE}" == "tag" ]]; then
-            echo "BASE_TAGS=latest,${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_ENV}"
+            echo "BASE_TAGS=latest,${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_OUTPUT}"
           elif [[ "${REF_TYPE}" == "branch" ]]; then
-            echo "BASE_TAGS=testing" | tee -a "${GITHUB_ENV}"
+            echo "BASE_TAGS=testing" | tee -a "${GITHUB_OUTPUT}"
           fi

       # Get the Source Version for this release

@@ -111,7 +129,6 @@ jobs:
       - name: Add registry for DockerHub
         if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
-        shell: bash
         env:
           DOCKERHUB_REPO: ${{ vars.DOCKERHUB_REPO }}
         run: |

@@ -128,7 +145,6 @@ jobs:
       - name: Add registry for ghcr.io
         if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
-        shell: bash
         env:
           GHCR_REPO: ${{ vars.GHCR_REPO }}
         run: |

@@ -145,23 +161,22 @@ jobs:
       - name: Add registry for Quay.io
         if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
-        shell: bash
         env:
           QUAY_REPO: ${{ vars.QUAY_REPO }}
         run: |
           echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${QUAY_REPO}" | tee -a "${GITHUB_ENV}"

       - name: Configure build cache from/to
-        shell: bash
         env:
           GHCR_REPO: ${{ vars.GHCR_REPO }}
           BASE_IMAGE: ${{ matrix.base_image }}
+          NORMALIZED_ARCH: ${{ env.NORMALIZED_ARCH }}
         run: |
           #
           # Check if there is a GitHub Container Registry Login and use it for caching
           if [[ -n "${HAVE_GHCR_LOGIN}" ]]; then
-            echo "BAKE_CACHE_FROM=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE}" | tee -a "${GITHUB_ENV}"
-            echo "BAKE_CACHE_TO=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE},compression=zstd,mode=max" | tee -a "${GITHUB_ENV}"
+            echo "BAKE_CACHE_FROM=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE}-${NORMALIZED_ARCH}" | tee -a "${GITHUB_ENV}"
+            echo "BAKE_CACHE_TO=type=registry,ref=${GHCR_REPO}-buildcache:${BASE_IMAGE}-${NORMALIZED_ARCH},compression=zstd,mode=max" | tee -a "${GITHUB_ENV}"
           else
             echo "BAKE_CACHE_FROM="
             echo "BAKE_CACHE_TO="

@@ -169,31 +184,45 @@ jobs:
           #
       - name: Add localhost registry
-        shell: bash
         run: |
           echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"

+      - name: Generate tags
+        id: tags
+        env:
+          CONTAINER_REGISTRIES: "${{ env.CONTAINER_REGISTRIES }}"
+        run: |
+          # Convert comma-separated list to newline-separated set commands
+          TAGS=$(echo "${CONTAINER_REGISTRIES}" | tr ',' '\n' | sed "s|.*|*.tags=&|")
+          # Output for use in next step
+          {
+            echo "TAGS<<EOF"
+            echo "$TAGS"
+            echo "EOF"
+          } >> "$GITHUB_ENV"
+
       - name: Bake ${{ matrix.base_image }} containers
         id: bake_vw
         uses: docker/bake-action@3acf805d94d93a86cce4ca44798a76464a75b88c # v6.9.0
         env:
-          BASE_TAGS: "${{ env.BASE_TAGS }}"
+          BASE_TAGS: "${{ steps.determine-version.outputs.BASE_TAGS }}"
           SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
           SOURCE_VERSION: "${{ env.SOURCE_VERSION }}"
           SOURCE_REPOSITORY_URL: "${{ env.SOURCE_REPOSITORY_URL }}"
-          CONTAINER_REGISTRIES: "${{ env.CONTAINER_REGISTRIES }}"
         with:
           pull: true
-          push: true
           source: .
           files: docker/docker-bake.hcl
           targets: "${{ matrix.base_image }}-multi"
           set: |
             *.cache-from=${{ env.BAKE_CACHE_FROM }}
             *.cache-to=${{ env.BAKE_CACHE_TO }}
+            *.platform=linux/${{ matrix.arch }}
+            ${{ env.TAGS }}
+            *.output=type=image,push-by-digest=true,name-canonical=true,push=true,compression=zstd

       - name: Extract digest SHA
-        shell: bash
         env:
           BAKE_METADATA: ${{ steps.bake_vw.outputs.metadata }}
           BASE_IMAGE: ${{ matrix.base_image }}

@@ -201,38 +230,30 @@ jobs:
           GET_DIGEST_SHA="$(jq -r --arg base "$BASE_IMAGE" '.[$base + "-multi"]."containerimage.digest"' <<< "${BAKE_METADATA}")"
           echo "DIGEST_SHA=${GET_DIGEST_SHA}" | tee -a "${GITHUB_ENV}"

-      # Attest container images
-      - name: Attest - docker.io - ${{ matrix.base_image }}
-        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}}
-        uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0
-        with:
-          subject-name: ${{ vars.DOCKERHUB_REPO }}
-          subject-digest: ${{ env.DIGEST_SHA }}
-          push-to-registry: true
-
-      - name: Attest - ghcr.io - ${{ matrix.base_image }}
-        if: ${{ env.HAVE_GHCR_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}}
-        uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0
-        with:
-          subject-name: ${{ vars.GHCR_REPO }}
-          subject-digest: ${{ env.DIGEST_SHA }}
-          push-to-registry: true
-
-      - name: Attest - quay.io - ${{ matrix.base_image }}
-        if: ${{ env.HAVE_QUAY_LOGIN == 'true' && steps.bake_vw.outputs.metadata != ''}}
-        uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0
-        with:
-          subject-name: ${{ vars.QUAY_REPO }}
-          subject-digest: ${{ env.DIGEST_SHA }}
-          push-to-registry: true
+      - name: Export digest
+        env:
+          DIGEST_SHA: ${{ env.DIGEST_SHA }}
+          RUNNER_TEMP: ${{ runner.temp }}
+        run: |
+          mkdir -p "${RUNNER_TEMP}"/digests
+          digest="${DIGEST_SHA}"
+          touch "${RUNNER_TEMP}/digests/${digest#sha256:}"
+
+      - name: Upload digest
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+        with:
+          name: digests-${{ env.NORMALIZED_ARCH }}-${{ matrix.base_image }}
+          path: ${{ runner.temp }}/digests/*
+          if-no-files-found: error
+          retention-days: 1

       # Extract the Alpine binaries from the containers
       - name: Extract binaries
-        shell: bash
         env:
           REF_TYPE: ${{ github.ref_type }}
           BASE_IMAGE: ${{ matrix.base_image }}
+          DIGEST_SHA: ${{ env.DIGEST_SHA }}
+          NORMALIZED_ARCH: ${{ env.NORMALIZED_ARCH }}
         run: |
           # Check which main tag we are going to build determined by ref_type
           if [[ "${REF_TYPE}" == "tag" ]]; then

@@ -246,60 +267,151 @@ jobs:
             EXTRACT_TAG="${EXTRACT_TAG}-alpine"
           fi

-          # After each extraction the image is removed.
-          # This is needed because using different platforms doesn't trigger a new pull/download
-
-          # Extract amd64 binary
-          docker create --name amd64 --platform=linux/amd64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
-          docker cp amd64:/vaultwarden vaultwarden-amd64-${BASE_IMAGE}
-          docker rm --force amd64
-          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
-
-          # Extract arm64 binary
-          docker create --name arm64 --platform=linux/arm64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
-          docker cp arm64:/vaultwarden vaultwarden-arm64-${BASE_IMAGE}
-          docker rm --force arm64
-          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
-
-          # Extract armv7 binary
-          docker create --name armv7 --platform=linux/arm/v7 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
-          docker cp armv7:/vaultwarden vaultwarden-armv7-${BASE_IMAGE}
-          docker rm --force armv7
-          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
-
-          # Extract armv6 binary
-          docker create --name armv6 --platform=linux/arm/v6 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
-          docker cp armv6:/vaultwarden vaultwarden-armv6-${BASE_IMAGE}
-          docker rm --force armv6
-          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
+          CONTAINER_ID="$(docker create "localhost:5000/vaultwarden/server:${EXTRACT_TAG}@${DIGEST_SHA}")"
+
+          # Copy the binary
+          docker cp "$CONTAINER_ID":/vaultwarden vaultwarden-"${NORMALIZED_ARCH}"
+
+          # Clean up
+          docker rm "$CONTAINER_ID"

       # Upload artifacts to Github Actions and Attest the binaries
-      - name: "Upload amd64 artifact ${{ matrix.base_image }}"
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
-        with:
-          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64-${{ matrix.base_image }}
-          path: vaultwarden-amd64-${{ matrix.base_image }}
-
-      - name: "Upload arm64 artifact ${{ matrix.base_image }}"
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
-        with:
-          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64-${{ matrix.base_image }}
-          path: vaultwarden-arm64-${{ matrix.base_image }}
-
-      - name: "Upload armv7 artifact ${{ matrix.base_image }}"
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
-        with:
-          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7-${{ matrix.base_image }}
-          path: vaultwarden-armv7-${{ matrix.base_image }}
-
-      - name: "Upload armv6 artifact ${{ matrix.base_image }}"
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
-        with:
-          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6-${{ matrix.base_image }}
-          path: vaultwarden-armv6-${{ matrix.base_image }}
-
-      - name: "Attest artifacts ${{ matrix.base_image }}"
-        uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0
-        with:
-          subject-path: vaultwarden-*
-      # End Upload artifacts to Github Actions
+      - name: Attest binaries
+        uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0
+        with:
+          subject-path: vaultwarden-${{ env.NORMALIZED_ARCH }}
+
+      - name: Upload binaries as artifacts
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+        with:
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-${{ env.NORMALIZED_ARCH }}-${{ matrix.base_image }}
+          path: vaultwarden-${{ env.NORMALIZED_ARCH }}
+
+  merge-manifests:
+    name: Merge manifests
+    runs-on: ubuntu-latest
+    needs: docker-build
+    env:
+      BASE_TAGS: ${{ needs.docker-build.outputs.base-tags }}
+    permissions:
+      packages: write # Needed to upload packages and artifacts
+      attestations: write # Needed to generate an artifact attestation for a build
+      id-token: write # Needed to mint the OIDC token necessary to request a Sigstore signing certificate
+    strategy:
+      matrix:
+        base_image: ["debian","alpine"]
+    steps:
+      - name: Download digests
+        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+        with:
+          path: ${{ runner.temp }}/digests
+          pattern: digests-*-${{ matrix.base_image }}
+          merge-multiple: true
+
+      # Login to Docker Hub
+      - name: Login to Docker Hub
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
+
+      - name: Add registry for DockerHub
+        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }}
+        env:
+          DOCKERHUB_REPO: ${{ vars.DOCKERHUB_REPO }}
+        run: |
+          echo "CONTAINER_REGISTRIES=${DOCKERHUB_REPO}" | tee -a "${GITHUB_ENV}"
+
+      # Login to GitHub Container Registry
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+        if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
+
+      - name: Add registry for ghcr.io
+        if: ${{ env.HAVE_GHCR_LOGIN == 'true' }}
+        env:
+          GHCR_REPO: ${{ vars.GHCR_REPO }}
+        run: |
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${GHCR_REPO}" | tee -a "${GITHUB_ENV}"
+
+      # Login to Quay.io
+      - name: Login to Quay.io
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        with:
+          registry: quay.io
+          username: ${{ secrets.QUAY_USERNAME }}
+          password: ${{ secrets.QUAY_TOKEN }}
+        if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
+
+      - name: Add registry for Quay.io
+        if: ${{ env.HAVE_QUAY_LOGIN == 'true' }}
+        env:
+          QUAY_REPO: ${{ vars.QUAY_REPO }}
+        run: |
+          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${QUAY_REPO}" | tee -a "${GITHUB_ENV}"
+
+      - name: Create manifest list, push it and extract digest SHA
+        working-directory: ${{ runner.temp }}/digests
+        env:
+          BASE_IMAGE: "${{ matrix.base_image }}"
+          BASE_TAGS: "${{ env.BASE_TAGS }}"
+          CONTAINER_REGISTRIES: "${{ env.CONTAINER_REGISTRIES }}"
+        run: |
+          set +e
+          IFS=',' read -ra IMAGES <<< "${CONTAINER_REGISTRIES}"
+          for img in "${IMAGES[@]}"; do
+            echo "Creating manifest for $img:${BASE_TAGS}-${BASE_IMAGE}"
+            OUTPUT=$(docker buildx imagetools create \
+              -t "$img:${BASE_TAGS}-${BASE_IMAGE}" \
+              $(printf "$img:${BASE_TAGS}-${BASE_IMAGE}@sha256:%s " *) 2>&1)
+            STATUS=$?
+            if [ $STATUS -ne 0 ]; then
+              echo "Manifest creation failed for $img"
+              echo "$OUTPUT"
+              exit $STATUS
+            fi
+            echo "Manifest created for $img"
+            echo "$OUTPUT"
+          done
+          set -e
+
+          # Extract digest SHA for subsequent steps
+          GET_DIGEST_SHA="$(echo "$OUTPUT" | grep -oE 'sha256:[a-f0-9]{64}' | tail -1)"
+          echo "DIGEST_SHA=${GET_DIGEST_SHA}" | tee -a "${GITHUB_ENV}"
+
+      # Attest container images
+      - name: Attest - docker.io - ${{ matrix.base_image }}
+        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' && env.DIGEST_SHA != ''}}
+        uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0
+        with:
+          subject-name: ${{ vars.DOCKERHUB_REPO }}
+          subject-digest: ${{ env.DIGEST_SHA }}
+          push-to-registry: true
+
+      - name: Attest - ghcr.io - ${{ matrix.base_image }}
+        if: ${{ env.HAVE_GHCR_LOGIN == 'true' && env.DIGEST_SHA != ''}}
+        uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0
+        with:
+          subject-name: ${{ vars.GHCR_REPO }}
+          subject-digest: ${{ env.DIGEST_SHA }}
+          push-to-registry: true
+
+      - name: Attest - quay.io - ${{ matrix.base_image }}
+        if: ${{ env.HAVE_QUAY_LOGIN == 'true' && env.DIGEST_SHA != ''}}
+        uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0
+        with:
+          subject-name: ${{ vars.QUAY_REPO }}
+          subject-digest: ${{ env.DIGEST_SHA }}
+          push-to-registry: true
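
In short, each docker-build matrix job now builds exactly one architecture, pushes it by digest only, and uploads that digest as an empty marker file; the separate merge-manifests job downloads those markers and stitches them into a single multi-arch tag per registry. A condensed sketch of that flow follows, using a hypothetical registry path and tag in place of the configured *_REPO variables and the BASE_TAGS output:

    # Hypothetical values; in the workflow these come from the *_REPO
    # repository variables, the BASE_TAGS job output and the matrix entries.
    REPO="ghcr.io/example/vaultwarden"
    TAG="testing-alpine"

    # Build job (one per architecture): bake pushes by digest only and the
    # digest is recorded as an empty file named after its sha256 hex value.
    mkdir -p digests
    touch "digests/<sha256-hex-of-this-arch-image>"   # placeholder file name

    # Merge job: every file name in the digests directory becomes one
    # "<repo>:<tag>@sha256:<hex>" source of a single multi-arch manifest list.
    cd digests
    docker buildx imagetools create \
      -t "${REPO}:${TAG}" \
      $(printf "${REPO}:${TAG}@sha256:%s " *)

Compared with the previous single-job multi-arch bake, each architecture now runs in its own job (on an ARM runner when the matrix arch contains "arm"), so build caches and failures are isolated per architecture.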

docker/DockerSettings.yaml (1 changed line)

@@ -17,7 +17,6 @@ build_stage_image:
     platform: "$BUILDPLATFORM"
   alpine:
     image: "build_${TARGETARCH}${TARGETVARIANT}"
-    platform: "linux/amd64" # The Alpine build images only have linux/amd64 images
     arch_image:
       amd64: "ghcr.io/blackdex/rust-musl:x86_64-musl-stable-{{rust_version}}"
       arm64: "ghcr.io/blackdex/rust-musl:aarch64-musl-stable-{{rust_version}}"

docker/Dockerfile.alpine (12 changed lines)

@@ -30,16 +30,16 @@
 FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:50662dccf4908ac2128cd44981c52fcb4e3e8dd56f21823c8d5e91267ff741fa AS vault

 ########################## ALPINE BUILD IMAGES ##########################
-## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
+## NOTE: The Alpine Base Images do not support other platforms then linux/amd64 and linux/arm64
 ## And for Alpine we define all build images here, they will only be loaded when actually used
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.91.1 AS build_amd64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.91.1 AS build_arm64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.91.1 AS build_armv7
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.91.1 AS build_armv6
+FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.91.1 AS build_amd64
+FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.91.1 AS build_arm64
+FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.91.1 AS build_armv7
+FROM --platform=$BUILDPLATFORM ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.91.1 AS build_armv6

 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
-FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} AS build
+FROM --platform=$BUILDPLATFORM build_${TARGETARCH}${TARGETVARIANT} AS build
 ARG TARGETARCH
 ARG TARGETVARIANT
 ARG TARGETPLATFORM

docker/Dockerfile.j2 (6 changed lines)

@@ -36,16 +36,16 @@ FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_diges
 FROM --platform=linux/amd64 docker.io/tonistiigi/xx@{{ xx_image_digest }} AS xx
 {% elif base == "alpine" %}
 ########################## ALPINE BUILD IMAGES ##########################
-## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
+## NOTE: The Alpine Base Images do not support other platforms then linux/amd64 and linux/arm64
 ## And for Alpine we define all build images here, they will only be loaded when actually used
 {% for arch in build_stage_image[base].arch_image %}
-FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} AS build_{{ arch }}
+FROM --platform=$BUILDPLATFORM {{ build_stage_image[base].arch_image[arch] }} AS build_{{ arch }}
 {% endfor %}
 {% endif %}

 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
-FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} AS build
+FROM --platform=$BUILDPLATFORM {{ build_stage_image[base].image }} AS build
 {% if base == "debian" %}
 COPY --from=xx / /
 {% endif %}
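
With the build stages pinned to $BUILDPLATFORM, the rust-musl build images run on whatever platform the builder itself uses, while TARGETARCH/TARGETVARIANT still select the musl toolchain image that cross-compiles for the target. For a rough idea of what a local single-platform build of these targets could look like (a hypothetical invocation: the bake target name comes from the workflow above, and the environment variables your local setup actually needs may differ):

    # Hypothetical local build of the Alpine bake target for one platform only.
    # "alpine-multi" is the target the release workflow invokes; the registry
    # value here is only used to name the resulting tags.
    SOURCE_COMMIT="$(git rev-parse HEAD)" \
    CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
    docker buildx bake \
      --file docker/docker-bake.hcl \
      --set "*.platform=linux/arm64" \
      alpine-multi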

src/api/admin.rs (21 changed lines)

@@ -23,7 +23,7 @@ use crate::{
         backup_sqlite, get_sql_server_version,
         models::{
             Attachment, Cipher, Collection, Device, Event, EventType, Group, Invitation, Membership, MembershipId,
-            MembershipType, OrgPolicy, OrgPolicyErr, Organization, OrganizationId, SsoUser, TwoFactor, User, UserId,
+            MembershipType, OrgPolicy, Organization, OrganizationId, SsoUser, TwoFactor, User, UserId,
         },
         DbConn, DbConnType, ACTIVE_DB_TYPE,
     },

@@ -556,23 +556,9 @@ async fn update_membership_type(data: Json<MembershipTypeData>, token: AdminToke
         }
     }

+    member_to_edit.atype = new_type;
+
     // This check is also done at api::organizations::{accept_invite, _confirm_invite, _activate_member, edit_member}, update_membership_type
-    // It returns different error messages per function.
-    if new_type < MembershipType::Admin {
-        match OrgPolicy::is_user_allowed(&member_to_edit.user_uuid, &member_to_edit.org_uuid, true, &conn).await {
-            Ok(_) => {}
-            Err(OrgPolicyErr::TwoFactorMissing) => {
-                if CONFIG.email_2fa_auto_fallback() {
-                    two_factor::email::find_and_activate_email_2fa(&member_to_edit.user_uuid, &conn).await?;
-                } else {
-                    err!("You cannot modify this user to this type because they have not setup 2FA");
-                }
-            }
-            Err(OrgPolicyErr::SingleOrgEnforced) => {
-                err!("You cannot modify this user to this type because it is a member of an organization which forbids it");
-            }
-        }
-    }
+    OrgPolicy::check_user_allowed(&member_to_edit, "modify", &conn).await?;

     log_event(
         EventType::OrganizationUserUpdated as i32,

@@ -585,7 +571,6 @@ async fn update_membership_type(data: Json<MembershipTypeData>, token: AdminToke
     )
     .await;

-    member_to_edit.atype = new_type;
     member_to_edit.save(&conn).await
 }

src/api/core/mod.rs (23 changed lines)

@@ -53,7 +53,7 @@ use crate::{
     api::{EmptyResult, JsonResult, Notify, UpdateType},
     auth::Headers,
     db::{
-        models::{Membership, MembershipStatus, MembershipType, OrgPolicy, OrgPolicyErr, Organization, User},
+        models::{Membership, MembershipStatus, OrgPolicy, Organization, User},
         DbConn,
     },
     error::Error,

@@ -269,27 +269,12 @@ async fn accept_org_invite(
         err!("User already accepted the invitation");
     }

-    // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type
-    // It returns different error messages per function.
-    if member.atype < MembershipType::Admin {
-        match OrgPolicy::is_user_allowed(&member.user_uuid, &member.org_uuid, false, conn).await {
-            Ok(_) => {}
-            Err(OrgPolicyErr::TwoFactorMissing) => {
-                if crate::CONFIG.email_2fa_auto_fallback() {
-                    two_factor::email::activate_email_2fa(user, conn).await?;
-                } else {
-                    err!("You cannot join this organization until you enable two-step login on your user account");
-                }
-            }
-            Err(OrgPolicyErr::SingleOrgEnforced) => {
-                err!("You cannot join this organization because you are a member of an organization which forbids it");
-            }
-        }
-    }
-
     member.status = MembershipStatus::Accepted as i32;
     member.reset_password_key = reset_password_key;

+    // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type
+    OrgPolicy::check_user_allowed(&member, "join", conn).await?;
+
     member.save(conn).await?;

     if crate::CONFIG.mail_enabled() {

src/api/core/organizations.rs (75 changed lines)

@@ -15,7 +15,7 @@ use crate::{
         models::{
             Cipher, CipherId, Collection, CollectionCipher, CollectionGroup, CollectionId, CollectionUser, EventType,
             Group, GroupId, GroupUser, Invitation, Membership, MembershipId, MembershipStatus, MembershipType,
-            OrgPolicy, OrgPolicyErr, OrgPolicyType, Organization, OrganizationApiKey, OrganizationId, User, UserId,
+            OrgPolicy, OrgPolicyType, Organization, OrganizationApiKey, OrganizationId, User, UserId,
         },
         DbConn,
     },

@@ -1463,27 +1463,12 @@ async fn _confirm_invite(
         err!("User in invalid state")
     }

-    // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type
-    // It returns different error messages per function.
-    if member_to_confirm.atype < MembershipType::Admin {
-        match OrgPolicy::is_user_allowed(&member_to_confirm.user_uuid, org_id, true, conn).await {
-            Ok(_) => {}
-            Err(OrgPolicyErr::TwoFactorMissing) => {
-                if CONFIG.email_2fa_auto_fallback() {
-                    two_factor::email::find_and_activate_email_2fa(&member_to_confirm.user_uuid, conn).await?;
-                } else {
-                    err!("You cannot confirm this user because they have not setup 2FA");
-                }
-            }
-            Err(OrgPolicyErr::SingleOrgEnforced) => {
-                err!("You cannot confirm this user because they are a member of an organization which forbids it");
-            }
-        }
-    }
-
     member_to_confirm.status = MembershipStatus::Confirmed as i32;
     member_to_confirm.akey = key.to_string();

+    // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type
+    OrgPolicy::check_user_allowed(&member_to_confirm, "confirm", conn).await?;
+
     log_event(
         EventType::OrganizationUserConfirmed as i32,
         &member_to_confirm.uuid,

@@ -1631,27 +1616,13 @@ async fn edit_member(
         }
     }

-    // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type
-    // It returns different error messages per function.
-    if new_type < MembershipType::Admin {
-        match OrgPolicy::is_user_allowed(&member_to_edit.user_uuid, &org_id, true, &conn).await {
-            Ok(_) => {}
-            Err(OrgPolicyErr::TwoFactorMissing) => {
-                if CONFIG.email_2fa_auto_fallback() {
-                    two_factor::email::find_and_activate_email_2fa(&member_to_edit.user_uuid, &conn).await?;
-                } else {
-                    err!("You cannot modify this user to this type because they have not setup 2FA");
-                }
-            }
-            Err(OrgPolicyErr::SingleOrgEnforced) => {
-                err!("You cannot modify this user to this type because they are a member of an organization which forbids it");
-            }
-        }
-    }
-
     member_to_edit.access_all = access_all;
     member_to_edit.atype = new_type as i32;

+    // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type
+    // We need to perform the check after changing the type since `admin` is exempt.
+    OrgPolicy::check_user_allowed(&member_to_edit, "modify", &conn).await?;
+
     // Delete all the odd collections
     for c in CollectionUser::find_by_organization_and_user_uuid(&org_id, &member_to_edit.user_uuid, &conn).await {
         c.delete(&conn).await?;

@@ -2154,14 +2125,14 @@ async fn put_policy(
     // When enabling the SingleOrg policy, remove this org's members that are members of other orgs
     if pol_type_enum == OrgPolicyType::SingleOrg && data.enabled {
-        for member in Membership::find_by_org(&org_id, &conn).await.into_iter() {
+        for mut member in Membership::find_by_org(&org_id, &conn).await.into_iter() {
             // Policy only applies to non-Owner/non-Admin members who have accepted joining the org
             // Exclude invited and revoked users when checking for this policy.
             // Those users will not be allowed to accept or be activated because of the policy checks done there.
-            // We check if the count is larger then 1, because it includes this organization also.
             if member.atype < MembershipType::Admin
                 && member.status != MembershipStatus::Invited as i32
-                && Membership::count_accepted_and_confirmed_by_user(&member.user_uuid, &conn).await > 1
+                && Membership::count_accepted_and_confirmed_by_user(&member.user_uuid, &member.org_uuid, &conn).await
+                    > 0
             {
                 if CONFIG.mail_enabled() {
                     let org = Organization::find_by_uuid(&member.org_uuid, &conn).await.unwrap();

@@ -2181,7 +2152,8 @@ async fn put_policy(
                 )
                 .await;

-                member.delete(&conn).await?;
+                member.revoke();
+                member.save(&conn).await?;
             }
         }
     }

@@ -2628,25 +2600,10 @@ async fn _restore_member(
         err!("Only owners can restore other owners")
    }

-    // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type
-    // It returns different error messages per function.
-    if member.atype < MembershipType::Admin {
-        match OrgPolicy::is_user_allowed(&member.user_uuid, org_id, false, conn).await {
-            Ok(_) => {}
-            Err(OrgPolicyErr::TwoFactorMissing) => {
-                if CONFIG.email_2fa_auto_fallback() {
-                    two_factor::email::find_and_activate_email_2fa(&member.user_uuid, conn).await?;
-                } else {
-                    err!("You cannot restore this user because they have not setup 2FA");
-                }
-            }
-            Err(OrgPolicyErr::SingleOrgEnforced) => {
-                err!("You cannot restore this user because they are a member of an organization which forbids it");
-            }
-        }
-    }
-
     member.restore();
+
+    // This check is also done at accept_invite, _confirm_invite, _activate_member, edit_member, admin::update_membership_type
+    // This check need to be done after restoring to work with the correct status
+    OrgPolicy::check_user_allowed(&member, "restore", conn).await?;
+
     member.save(conn).await?;

     log_event(

src/api/core/two_factor/email.rs (21 changed lines)

@@ -10,7 +10,7 @@ use crate::{
     auth::Headers,
     crypto,
     db::{
-        models::{DeviceId, EventType, TwoFactor, TwoFactorType, User, UserId},
+        models::{EventType, TwoFactor, TwoFactorType, User, UserId},
         DbConn,
     },
     error::{Error, MapResult},

@@ -24,16 +24,10 @@ pub fn routes() -> Vec<Route> {
 #[derive(Deserialize)]
 #[serde(rename_all = "camelCase")]
 struct SendEmailLoginData {
-    #[serde(alias = "DeviceIdentifier")]
-    device_identifier: DeviceId,
-
-    #[allow(unused)]
     #[serde(alias = "Email")]
-    email: Option<String>,
+    email: String,

-    #[allow(unused)]
     #[serde(alias = "MasterPasswordHash")]
-    master_password_hash: Option<String>,
+    master_password_hash: String,
 }

 /// User is trying to login and wants to use email 2FA.

@@ -45,14 +39,19 @@ async fn send_email_login(data: Json<SendEmailLoginData>, conn: DbConn) -> Empty
     use crate::db::models::User;

     // Get the user
-    let Some(user) = User::find_by_device_id(&data.device_identifier, &conn).await else {
-        err!("Cannot find user. Try again.")
+    let Some(user) = User::find_by_mail(&data.email, &conn).await else {
+        err!("Username or password is incorrect. Try again.")
     };

     if !CONFIG._enable_email_2fa() {
         err!("Email 2FA is disabled")
     }

+    // Check password
+    if !user.check_valid_password(&data.master_password_hash) {
+        err!("Username or password is incorrect. Try again.")
+    }
+
     send_token(&user.uuid, &conn).await?;

     Ok(())

src/api/icons.rs (13 changed lines)

@@ -82,19 +82,19 @@ static ICON_SIZE_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"(?x)(\d+
 // It is used to prevent sending a specific header which breaks icon downloads.
 // If this function needs to be renamed, also adjust the code in `util.rs`
 #[get("/<domain>/icon.png")]
-fn icon_external(domain: &str) -> Option<Redirect> {
+fn icon_external(domain: &str) -> Cached<Option<Redirect>> {
     if !is_valid_domain(domain) {
         warn!("Invalid domain: {domain}");
-        return None;
+        return Cached::ttl(None, CONFIG.icon_cache_negttl(), true);
     }

     if should_block_address(domain) {
         warn!("Blocked address: {domain}");
-        return None;
+        return Cached::ttl(None, CONFIG.icon_cache_negttl(), true);
     }

     let url = CONFIG._icon_service_url().replace("{}", domain);
-    match CONFIG.icon_redirect_code() {
+    let redir = match CONFIG.icon_redirect_code() {
         301 => Some(Redirect::moved(url)),    // legacy permanent redirect
         302 => Some(Redirect::found(url)),    // legacy temporary redirect
         307 => Some(Redirect::temporary(url)),

@@ -103,7 +103,8 @@ fn icon_external(domain: &str) -> Option<Redirect> {
             error!("Unexpected redirect code {}", CONFIG.icon_redirect_code());
             None
         }
-    }
+    };
+    Cached::ttl(redir, CONFIG.icon_cache_ttl(), true)
 }

 #[get("/<domain>/icon.png")]

@@ -141,7 +142,7 @@ async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
 /// This does some manual checks and makes use of Url to do some basic checking.
 /// domains can't be larger then 63 characters (not counting multiple subdomains) according to the RFC's, but we limit the total size to 255.
 fn is_valid_domain(domain: &str) -> bool {
-    const ALLOWED_CHARS: &str = "_-.";
+    const ALLOWED_CHARS: &str = "-.";

     // If parsing the domain fails using Url, it will not work with reqwest.
     if let Err(parse_error) = url::Url::parse(format!("https://{domain}").as_str()) {

src/db/models/mod.rs (2 changed lines)

@@ -27,7 +27,7 @@ pub use self::event::{Event, EventType};
 pub use self::favorite::Favorite;
 pub use self::folder::{Folder, FolderCipher, FolderId};
 pub use self::group::{CollectionGroup, Group, GroupId, GroupUser};
-pub use self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyId, OrgPolicyType};
+pub use self::org_policy::{OrgPolicy, OrgPolicyId, OrgPolicyType};
 pub use self::organization::{
     Membership, MembershipId, MembershipStatus, MembershipType, OrgApiKeyId, Organization, OrganizationApiKey,
     OrganizationId,

src/db/models/org_policy.rs (56 changed lines)

@@ -2,10 +2,12 @@ use derive_more::{AsRef, From};
 use serde::Deserialize;
 use serde_json::Value;

+use crate::api::core::two_factor;
 use crate::api::EmptyResult;
 use crate::db::schema::{org_policies, users_organizations};
 use crate::db::DbConn;
 use crate::error::MapResult;
+use crate::CONFIG;
 use diesel::prelude::*;

 use super::{Membership, MembershipId, MembershipStatus, MembershipType, OrganizationId, TwoFactor, UserId};

@@ -58,14 +60,6 @@ pub struct ResetPasswordDataModel {
     pub auto_enroll_enabled: bool,
 }

-pub type OrgPolicyResult = Result<(), OrgPolicyErr>;
-
-#[derive(Debug)]
-pub enum OrgPolicyErr {
-    TwoFactorMissing,
-    SingleOrgEnforced,
-}
-
 /// Local methods
 impl OrgPolicy {
     pub fn new(org_uuid: OrganizationId, atype: OrgPolicyType, enabled: bool, data: String) -> Self {

@@ -280,31 +274,35 @@ impl OrgPolicy {
         false
     }

-    pub async fn is_user_allowed(
-        user_uuid: &UserId,
-        org_uuid: &OrganizationId,
-        exclude_current_org: bool,
-        conn: &DbConn,
-    ) -> OrgPolicyResult {
-        // Enforce TwoFactor/TwoStep login
-        if TwoFactor::find_by_user(user_uuid, conn).await.is_empty() {
-            match Self::find_by_org_and_type(org_uuid, OrgPolicyType::TwoFactorAuthentication, conn).await {
-                Some(p) if p.enabled => {
-                    return Err(OrgPolicyErr::TwoFactorMissing);
-                }
-                _ => {}
-            };
-        }
-
-        // Enforce Single Organization Policy of other organizations user is a member of
-        // This check here needs to exclude this current org-id, else an accepted user can not be confirmed.
-        let exclude_org = if exclude_current_org {
-            Some(org_uuid)
-        } else {
-            None
-        };
-        if Self::is_applicable_to_user(user_uuid, OrgPolicyType::SingleOrg, exclude_org, conn).await {
-            return Err(OrgPolicyErr::SingleOrgEnforced);
-        }
+    pub async fn check_user_allowed(m: &Membership, action: &str, conn: &DbConn) -> EmptyResult {
+        if m.atype < MembershipType::Admin && m.status > (MembershipStatus::Invited as i32) {
+            // Enforce TwoFactor/TwoStep login
+            if let Some(p) = Self::find_by_org_and_type(&m.org_uuid, OrgPolicyType::TwoFactorAuthentication, conn).await
+            {
+                if p.enabled && TwoFactor::find_by_user(&m.user_uuid, conn).await.is_empty() {
+                    if CONFIG.email_2fa_auto_fallback() {
+                        two_factor::email::find_and_activate_email_2fa(&m.user_uuid, conn).await?;
+                    } else {
+                        err!(format!("Cannot {} because 2FA is required (membership {})", action, m.uuid));
+                    }
+                }
+            }
+
+            // Check if the user is part of another Organization with SingleOrg activated
+            if Self::is_applicable_to_user(&m.user_uuid, OrgPolicyType::SingleOrg, Some(&m.org_uuid), conn).await {
+                err!(format!(
+                    "Cannot {} because another organization policy forbids it (membership {})",
+                    action, m.uuid
+                ));
+            }
+
+            if let Some(p) = Self::find_by_org_and_type(&m.org_uuid, OrgPolicyType::SingleOrg, conn).await {
+                if p.enabled
+                    && Membership::count_accepted_and_confirmed_by_user(&m.user_uuid, &m.org_uuid, conn).await > 0
+                {
+                    err!(format!("Cannot {} because the organization policy forbids being part of other organization (membership {})", action, m.uuid));
+                }
+            }
+        }

         Ok(())
7
src/db/models/organization.rs

@ -883,10 +883,15 @@ impl Membership {
}} }}
} }
pub async fn count_accepted_and_confirmed_by_user(user_uuid: &UserId, conn: &DbConn) -> i64 { pub async fn count_accepted_and_confirmed_by_user(
user_uuid: &UserId,
excluded_org: &OrganizationId,
conn: &DbConn,
) -> i64 {
db_run! { conn: { db_run! { conn: {
users_organizations::table users_organizations::table
.filter(users_organizations::user_uuid.eq(user_uuid)) .filter(users_organizations::user_uuid.eq(user_uuid))
.filter(users_organizations::org_uuid.ne(excluded_org))
.filter(users_organizations::status.eq(MembershipStatus::Accepted as i32).or(users_organizations::status.eq(MembershipStatus::Confirmed as i32))) .filter(users_organizations::status.eq(MembershipStatus::Accepted as i32).or(users_organizations::status.eq(MembershipStatus::Confirmed as i32)))
.count() .count()
.first::<i64>(conn) .first::<i64>(conn)

src/db/models/user.rs (14 changed lines)

@@ -1,4 +1,4 @@
-use crate::db::schema::{devices, invitations, sso_users, users};
+use crate::db::schema::{invitations, sso_users, users};
 use chrono::{NaiveDateTime, TimeDelta, Utc};
 use derive_more::{AsRef, Deref, Display, From};
 use diesel::prelude::*;

@@ -10,7 +10,6 @@ use super::{
 use crate::{
     api::EmptyResult,
     crypto,
-    db::models::DeviceId,
     db::DbConn,
     error::MapResult,
     sso::OIDCIdentifier,

@@ -387,17 +386,6 @@ impl User {
         }}
     }

-    pub async fn find_by_device_id(device_uuid: &DeviceId, conn: &DbConn) -> Option<Self> {
-        db_run! { conn: {
-            users::table
-                .inner_join(devices::table.on(devices::user_uuid.eq(users::uuid)))
-                .filter(devices::uuid.eq(device_uuid))
-                .select(users::all_columns)
-                .first::<Self>(conn)
-                .ok()
-        }}
-    }
-
     pub async fn get_all(conn: &DbConn) -> Vec<(Self, Option<SsoUser>)> {
         db_run! { conn: {
             users::table

src/static/templates/email/send_single_org_removed_from_org.hbs (4 changed lines)

@@ -1,4 +1,4 @@
-You have been removed from {{{org_name}}}
+Your access to {{{org_name}}} has been revoked
 <!---------------->
-Your user account has been removed from the *{{org_name}}* organization because you are a part of another organization. The {{org_name}} organization has enabled a policy that prevents users from being a part of multiple organizations. Before you can re-join this organization you need to leave all other organizations or join with a different account.
+Your access to the *{{org_name}}* organization has been revoked because you are a part of another organization. The {{org_name}} organization has enabled a policy that prevents users from being a part of multiple organizations. Before your access can be restored you need to leave all other organizations or join with a different account.
 {{> email/email_footer_text }}

src/static/templates/email/send_single_org_removed_from_org.html.hbs (4 changed lines)

@@ -1,10 +1,10 @@
-You have been removed from {{{org_name}}}
+Your access to {{{org_name}}} has been revoked
 <!---------------->
 {{> email/email_header }}
 <table width="100%" cellpadding="0" cellspacing="0" style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
   <tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">
     <td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none; text-align: center;" valign="top" align="center">
-      Your user account has been removed from the <b style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">{{org_name}}</b> organization because you are a part of another organization. The {{org_name}} organization has enabled a policy that prevents users from being a part of multiple organizations. Before you can re-join this organization you need to leave all other organizations or join with a different account.
+      Your access to the <b style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">{{org_name}}</b> organization has been revoked because you are a part of another organization. The {{org_name}} organization has enabled a policy that prevents users from being a part of multiple organizations. Before your access can be restored you need to leave all other organizations or join with a different account.
     </td>
   </tr>
 </table>
