name: Release

on:
  push:
    paths:
      - ".github/workflows/release.yml"
      - "src/**"
      - "migrations/**"
      - "hooks/**"
      - "docker/**"
      - "Cargo.*"
      - "build.rs"
      - "diesel.toml"
      - "rust-toolchain"

    branches: # Only on paths above
      - main

    tags: # Always, regardless of paths above
      - '*'

jobs:
  # https://github.com/marketplace/actions/skip-duplicate-actions
  # Some checks to determine if we need to continue with building a new docker.
  # We will skip this check if we are creating a tag, because that has the same hash as a previous run already.
  skip_check:
    runs-on: ubuntu-latest
    if: ${{ github.repository == 'dani-garcia/vaultwarden' }}
    outputs:
      should_skip: ${{ steps.skip_check.outputs.should_skip }}
    steps:
      - name: Skip Duplicates Actions
        id: skip_check
        uses: fkirc/skip-duplicate-actions@f75dd6564bb646f95277dc8c3b80612e46a4a1ea # v3.4.1
        with:
          cancel_others: 'true'
        # Only run this when not creating a tag
        if: ${{ startsWith(github.ref, 'refs/heads/') }}

  docker-build:
    runs-on: ubuntu-latest
    needs: skip_check
    # Start a local docker registry to be used to generate multi-arch images.
    services:
      registry:
        image: registry:2
        ports:
          # Quoted to keep the port mapping a string (avoids YAML implicit typing).
          - "5000:5000"
    env:
      DOCKER_BUILDKIT: 1 # Disabled for now, but we should look at this because it will speedup building!
      # DOCKER_REPO/secrets.DOCKERHUB_REPO needs to be 'index.docker.io/<user>/<repo>'
      DOCKER_REPO: ${{ secrets.DOCKERHUB_REPO }}
      SOURCE_COMMIT: ${{ github.sha }}
      SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}"
    if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
    strategy:
      matrix:
        base_image: ["debian","alpine"]

    steps:
      # Checkout the repo
      - name: Checkout
        uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f # v2.3.4
        with:
          fetch-depth: 0

      # Login to Docker Hub
      - name: Login to Docker Hub
        uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9 # v1.10.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      # Determine Docker Tag
      # NOTE(review): `::set-output` is deprecated on newer runners in favor of
      # $GITHUB_OUTPUT — worth migrating when the runner image is updated.
      # The first echo of each pair (without the `::` prefix) only prints the
      # value to the job log; the second one actually sets the step output.
      - name: Init Variables
        id: vars
        shell: bash
        run: |
          # Check which main tag we are going to build determined by github.ref
          if [[ "${{ github.ref }}" == refs/tags/* ]]; then
            echo "set-output name=DOCKER_TAG::${GITHUB_REF#refs/*/}"
            echo "::set-output name=DOCKER_TAG::${GITHUB_REF#refs/*/}"
          elif [[ "${{ github.ref }}" == refs/heads/* ]]; then
            echo "set-output name=DOCKER_TAG::testing"
            echo "::set-output name=DOCKER_TAG::testing"
          fi
      # End Determine Docker Tag

      - name: Build Debian based images
        shell: bash
        env:
          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
        run: |
          ./hooks/build
        if: ${{ matrix.base_image == 'debian' }}

      - name: Push Debian based images
        shell: bash
        env:
          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}"
        run: |
          ./hooks/push
        if: ${{ matrix.base_image == 'debian' }}

      - name: Build Alpine based images
        shell: bash
        env:
          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
        run: |
          ./hooks/build
        if: ${{ matrix.base_image == 'alpine' }}

      - name: Push Alpine based images
        shell: bash
        env:
          DOCKER_TAG: "${{steps.vars.outputs.DOCKER_TAG}}-alpine"
        run: |
          ./hooks/push
        if: ${{ matrix.base_image == 'alpine' }}
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.25.1b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
#     [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
#     [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault

########################## BUILD IMAGE ##########################
FROM rust:1.58-buster as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

# Install DB packages
RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libmariadb-dev \
        libpq-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release

######################## RUNTIME IMAGE  ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM debian:buster-slim

ENV ROCKET_ENV="staging" \
    ROCKET_PORT=80 \
    ROCKET_WORKERS=10

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        libmariadb-dev-compat \
        libpq5 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.25.1b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
#     [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
#     [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:x86_64-musl-nightly-2022-01-23 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

ENV RUSTFLAGS='-C link-arg=-s'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add x86_64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=x86_64-unknown-linux-musl

######################## RUNTIME IMAGE  ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM alpine:3.15

ENV ROCKET_ENV="staging" \
    ROCKET_PORT=80 \
    ROCKET_WORKERS=10 \
    SSL_CERT_DIR=/etc/ssl/certs

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        curl \
        dumb-init \
        ca-certificates

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/x86_64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.25.1b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
#     [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
#     [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:aarch64-musl-nightly-2022-01-23 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
# NOTE(review): unlike the sibling Dockerfiles, this variant does not use
# BuildKit cache mounts on its RUN instructions — confirm whether that is
# intentional or the template should be regenerated.
RUN mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

ENV RUSTFLAGS='-C link-arg=-s'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE  ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-alpine:3.15

ENV ROCKET_ENV="staging" \
    ROCKET_PORT=80 \
    ROCKET_WORKERS=10 \
    SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        curl \
        dumb-init \
        ca-certificates

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.25.1b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
#     [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
#     [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault

########################## BUILD IMAGE ##########################
FROM rust:1.58-buster as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

#
# Install required build libs for arm64 architecture.
# hadolint ignore=DL3059
RUN dpkg --add-architecture arm64 \
    && apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libssl-dev:arm64 \
        libc6-dev:arm64 \
        libpq5:arm64 \
        libpq-dev:arm64 \
        libmariadb3:arm64 \
        libmariadb-dev:arm64 \
        libmariadb-dev-compat:arm64 \
        gcc-aarch64-linux-gnu \
    #
    # Make sure cargo has the right target config
    && echo '[target.aarch64-unknown-linux-gnu]' >> "${CARGO_HOME}/config" \
    && echo 'linker = "aarch64-linux-gnu-gcc"' >> "${CARGO_HOME}/config" \
    && echo 'rustflags = ["-L/usr/lib/aarch64-linux-gnu"]' >> "${CARGO_HOME}/config"

# Set arm specific environment values
ENV CC_aarch64_unknown_linux_gnu="/usr/bin/aarch64-linux-gnu-gcc" \
    CROSS_COMPILE="1" \
    OPENSSL_INCLUDE_DIR="/usr/include/aarch64-linux-gnu" \
    OPENSSL_LIB_DIR="/usr/lib/aarch64-linux-gnu"

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-gnu

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-gnu

######################## RUNTIME IMAGE  ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-debian:buster

ENV ROCKET_ENV="staging" \
    ROCKET_PORT=80 \
    ROCKET_WORKERS=10

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apt-get update && apt-get install -y \
        --no-install-recommends \
        openssl \
        ca-certificates \
        curl \
        dumb-init \
        libmariadb-dev-compat \
        libpq5 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-gnu/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
# syntax=docker/dockerfile:1

# This file was generated using a Jinja2 template.
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles.

# Using multistage build:
#   https://docs.docker.com/develop/develop-images/multistage-build/
#   https://whitfin.io/speeding-up-rust-docker-builds/
####################### VAULT BUILD IMAGE #######################
# The web-vault digest specifies a particular web-vault build on Docker Hub.
# Using the digest instead of the tag name provides better security,
# as the digest of an image is immutable, whereas a tag name can later
# be changed to point to a malicious image.
#
# To verify the current digest for a given tag name:
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
#   click the tag name to view the digest of the image it currently points to.
# - From the command line:
#     $ docker pull vaultwarden/web-vault:v2.25.1b
#     $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b
#     [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba]
#
# - Conversely, to get the tag name from the digest:
#     $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba
#     [vaultwarden/web-vault:v2.25.1b]
#
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault

########################## BUILD IMAGE ##########################
FROM blackdex/rust-musl:aarch64-musl-nightly-2022-01-23 as build

# Build time options to avoid dpkg warnings and help with reproducible builds.
ENV DEBIAN_FRONTEND=noninteractive \
    LANG=C.UTF-8 \
    TZ=UTC \
    TERM=xterm-256color \
    CARGO_HOME="/root/.cargo" \
    USER="root"

# Create CARGO_HOME folder and don't download rust docs
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \
    && rustup set profile minimal

ENV RUSTFLAGS='-C link-arg=-s'

# Creates a dummy project used to grab dependencies
RUN USER=root cargo new --bin /app
WORKDIR /app

# Copies over *only* your manifests and build files
COPY ./Cargo.* ./
COPY ./rust-toolchain ./rust-toolchain
COPY ./build.rs ./build.rs

RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add aarch64-unknown-linux-musl

# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql

# Builds your dependencies and removes the
# dummy project, except the target folder
# This folder contains the compiled dependencies
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl \
    && find . -not -path "./target*" -delete

# Copies the complete project
# To avoid copying unneeded files, use .dockerignore
COPY . .

# Make sure that we actually build the project
RUN touch src/main.rs

# Builds again, this time it'll just be
# your actual source files being built
# hadolint ignore=DL3059
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=aarch64-unknown-linux-musl

######################## RUNTIME IMAGE  ########################
# Create a new stage with a minimal image
# because we already have a binary built
FROM balenalib/aarch64-alpine:3.15

ENV ROCKET_ENV="staging" \
    ROCKET_PORT=80 \
    ROCKET_WORKERS=10 \
    SSL_CERT_DIR=/etc/ssl/certs

# hadolint ignore=DL3059
RUN [ "cross-build-start" ]

# Create data folder and Install needed libraries
RUN mkdir /data \
    && apk add --no-cache \
        openssl \
        tzdata \
        curl \
        dumb-init \
        ca-certificates

# hadolint ignore=DL3059
RUN [ "cross-build-end" ]

VOLUME /data
EXPOSE 80
EXPOSE 3012

# Copies the files from the context (Rocket.toml file and web-vault)
# and the binary from the "build" stage to the current stage
WORKDIR /
COPY Rocket.toml .
COPY --from=vault /web-vault ./web-vault
COPY --from=build /app/target/aarch64-unknown-linux-musl/release/vaultwarden .

COPY docker/healthcheck.sh /healthcheck.sh
COPY docker/start.sh /start.sh

HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]

# Configures the startup!
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
CMD ["/start.sh"]
@ -0,0 +1,125 @@ |
|||
# syntax=docker/dockerfile:1 |
|||
|
|||
# This file was generated using a Jinja2 template. |
|||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles. |
|||
|
|||
# Using multistage build: |
|||
# https://docs.docker.com/develop/develop-images/multistage-build/ |
|||
# https://whitfin.io/speeding-up-rust-docker-builds/ |
|||
####################### VAULT BUILD IMAGE ####################### |
|||
# The web-vault digest specifies a particular web-vault build on Docker Hub. |
|||
# Using the digest instead of the tag name provides better security, |
|||
# as the digest of an image is immutable, whereas a tag name can later |
|||
# be changed to point to a malicious image. |
|||
# |
|||
# To verify the current digest for a given tag name: |
|||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags, |
|||
# click the tag name to view the digest of the image it currently points to. |
|||
# - From the command line: |
|||
# $ docker pull vaultwarden/web-vault:v2.25.1b |
|||
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b |
|||
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba] |
|||
# |
|||
# - Conversely, to get the tag name from the digest: |
|||
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba |
|||
# [vaultwarden/web-vault:v2.25.1b] |
|||
# |
|||
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault |
|||
|
|||
########################## BUILD IMAGE ########################## |
|||
FROM blackdex/rust-musl:arm-musleabi-nightly-2022-01-23 as build |
|||
|
|||
|
|||
|
|||
# Build time options to avoid dpkg warnings and help with reproducible builds. |
|||
ENV DEBIAN_FRONTEND=noninteractive \ |
|||
LANG=C.UTF-8 \ |
|||
TZ=UTC \ |
|||
TERM=xterm-256color \ |
|||
CARGO_HOME="/root/.cargo" \ |
|||
USER="root" |
|||
|
|||
|
|||
# Create CARGO_HOME folder and don't download rust docs |
|||
RUN mkdir -pv "${CARGO_HOME}" \ |
|||
&& rustup set profile minimal |
|||
|
|||
ENV RUSTFLAGS='-C link-arg=-s' |
|||
|
|||
# Creates a dummy project used to grab dependencies |
|||
RUN USER=root cargo new --bin /app |
|||
WORKDIR /app |
|||
|
|||
# Copies over *only* your manifests and build files |
|||
COPY ./Cargo.* ./ |
|||
COPY ./rust-toolchain ./rust-toolchain |
|||
COPY ./build.rs ./build.rs |
|||
|
|||
RUN rustup target add arm-unknown-linux-musleabi |
|||
|
|||
# Configure the DB ARG as late as possible to not invalidate the cached layers above |
|||
ARG DB=sqlite,mysql,postgresql |
|||
|
|||
# Builds your dependencies and removes the |
|||
# dummy project, except the target folder |
|||
# This folder contains the compiled dependencies |
|||
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \ |
|||
&& find . -not -path "./target*" -delete |
|||
|
|||
# Copies the complete project |
|||
# To avoid copying unneeded files, use .dockerignore |
|||
COPY . . |
|||
|
|||
# Make sure that we actually build the project |
|||
RUN touch src/main.rs |
|||
|
|||
# Builds again, this time it'll just be |
|||
# your actual source files being built |
|||
# hadolint ignore=DL3059 |
|||
RUN cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi |
|||
|
|||
######################## RUNTIME IMAGE ######################## |
|||
# Create a new stage with a minimal image |
|||
# because we already have a binary built |
|||
FROM balenalib/rpi-alpine:3.15 |
|||
|
|||
ENV ROCKET_ENV="staging" \ |
|||
ROCKET_PORT=80 \ |
|||
ROCKET_WORKERS=10 \ |
|||
SSL_CERT_DIR=/etc/ssl/certs |
|||
|
|||
|
|||
# hadolint ignore=DL3059 |
|||
RUN [ "cross-build-start" ] |
|||
|
|||
# Create data folder and Install needed libraries |
|||
RUN mkdir /data \ |
|||
&& apk add --no-cache \ |
|||
openssl \ |
|||
tzdata \ |
|||
curl \ |
|||
dumb-init \ |
|||
ca-certificates |
|||
|
|||
# hadolint ignore=DL3059 |
|||
RUN [ "cross-build-end" ] |
|||
|
|||
VOLUME /data |
|||
EXPOSE 80 |
|||
EXPOSE 3012 |
|||
|
|||
# Copies the files from the context (Rocket.toml file and web-vault) |
|||
# and the binary from the "build" stage to the current stage |
|||
WORKDIR / |
|||
COPY Rocket.toml . |
|||
COPY --from=vault /web-vault ./web-vault |
|||
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden . |
|||
|
|||
COPY docker/healthcheck.sh /healthcheck.sh |
|||
COPY docker/start.sh /start.sh |
|||
|
|||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] |
|||
|
|||
# Configures the startup! |
|||
ENTRYPOINT ["/usr/bin/dumb-init", "--"] |
|||
CMD ["/start.sh"] |
@ -0,0 +1,153 @@ |
|||
# syntax=docker/dockerfile:1 |
|||
|
|||
# This file was generated using a Jinja2 template. |
|||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles. |
|||
|
|||
# Using multistage build: |
|||
# https://docs.docker.com/develop/develop-images/multistage-build/ |
|||
# https://whitfin.io/speeding-up-rust-docker-builds/ |
|||
####################### VAULT BUILD IMAGE ####################### |
|||
# The web-vault digest specifies a particular web-vault build on Docker Hub. |
|||
# Using the digest instead of the tag name provides better security, |
|||
# as the digest of an image is immutable, whereas a tag name can later |
|||
# be changed to point to a malicious image. |
|||
# |
|||
# To verify the current digest for a given tag name: |
|||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags, |
|||
# click the tag name to view the digest of the image it currently points to. |
|||
# - From the command line: |
|||
# $ docker pull vaultwarden/web-vault:v2.25.1b |
|||
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b |
|||
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba] |
|||
# |
|||
# - Conversely, to get the tag name from the digest: |
|||
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba |
|||
# [vaultwarden/web-vault:v2.25.1b] |
|||
# |
|||
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault |
|||
|
|||
########################## BUILD IMAGE ########################## |
|||
FROM rust:1.58-buster as build |
|||
|
|||
|
|||
|
|||
# Build time options to avoid dpkg warnings and help with reproducible builds. |
|||
ENV DEBIAN_FRONTEND=noninteractive \ |
|||
LANG=C.UTF-8 \ |
|||
TZ=UTC \ |
|||
TERM=xterm-256color \ |
|||
CARGO_HOME="/root/.cargo" \ |
|||
USER="root" |
|||
|
|||
|
|||
# Create CARGO_HOME folder and don't download rust docs |
|||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \ |
|||
&& rustup set profile minimal |
|||
|
|||
# |
|||
# Install required build libs for armel architecture. |
|||
# hadolint ignore=DL3059 |
|||
RUN dpkg --add-architecture armel \ |
|||
&& apt-get update \ |
|||
&& apt-get install -y \ |
|||
--no-install-recommends \ |
|||
libssl-dev:armel \ |
|||
libc6-dev:armel \ |
|||
libpq5:armel \ |
|||
libpq-dev:armel \ |
|||
libmariadb3:armel \ |
|||
libmariadb-dev:armel \ |
|||
libmariadb-dev-compat:armel \ |
|||
gcc-arm-linux-gnueabi \ |
|||
# |
|||
# Make sure cargo has the right target config |
|||
&& echo '[target.arm-unknown-linux-gnueabi]' >> "${CARGO_HOME}/config" \ |
|||
&& echo 'linker = "arm-linux-gnueabi-gcc"' >> "${CARGO_HOME}/config" \ |
|||
&& echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabi"]' >> "${CARGO_HOME}/config" |
|||
|
|||
# Set arm specific environment values |
|||
ENV CC_arm_unknown_linux_gnueabi="/usr/bin/arm-linux-gnueabi-gcc" \ |
|||
CROSS_COMPILE="1" \ |
|||
OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabi" \ |
|||
OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabi" |
|||
|
|||
|
|||
# Creates a dummy project used to grab dependencies |
|||
RUN USER=root cargo new --bin /app |
|||
WORKDIR /app |
|||
|
|||
# Copies over *only* your manifests and build files |
|||
COPY ./Cargo.* ./ |
|||
COPY ./rust-toolchain ./rust-toolchain |
|||
COPY ./build.rs ./build.rs |
|||
|
|||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-gnueabi |
|||
|
|||
# Configure the DB ARG as late as possible to not invalidate the cached layers above |
|||
ARG DB=sqlite,mysql,postgresql |
|||
|
|||
# Builds your dependencies and removes the |
|||
# dummy project, except the target folder |
|||
# This folder contains the compiled dependencies |
|||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi \ |
|||
&& find . -not -path "./target*" -delete |
|||
|
|||
# Copies the complete project |
|||
# To avoid copying unneeded files, use .dockerignore |
|||
COPY . . |
|||
|
|||
# Make sure that we actually build the project |
|||
RUN touch src/main.rs |
|||
|
|||
# Builds again, this time it'll just be |
|||
# your actual source files being built |
|||
# hadolint ignore=DL3059 |
|||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-gnueabi |
|||
|
|||
######################## RUNTIME IMAGE ######################## |
|||
# Create a new stage with a minimal image |
|||
# because we already have a binary built |
|||
FROM balenalib/rpi-debian:buster |
|||
|
|||
ENV ROCKET_ENV="staging" \ |
|||
ROCKET_PORT=80 \ |
|||
ROCKET_WORKERS=10 |
|||
|
|||
# hadolint ignore=DL3059 |
|||
RUN [ "cross-build-start" ] |
|||
|
|||
# Create data folder and Install needed libraries |
|||
RUN mkdir /data \ |
|||
&& apt-get update && apt-get install -y \ |
|||
--no-install-recommends \ |
|||
openssl \ |
|||
ca-certificates \ |
|||
curl \ |
|||
dumb-init \ |
|||
libmariadb-dev-compat \ |
|||
libpq5 \ |
|||
&& apt-get clean \ |
|||
&& rm -rf /var/lib/apt/lists/* |
|||
|
|||
# hadolint ignore=DL3059 |
|||
RUN [ "cross-build-end" ] |
|||
|
|||
VOLUME /data |
|||
EXPOSE 80 |
|||
EXPOSE 3012 |
|||
|
|||
# Copies the files from the context (Rocket.toml file and web-vault) |
|||
# and the binary from the "build" stage to the current stage |
|||
WORKDIR / |
|||
COPY Rocket.toml . |
|||
COPY --from=vault /web-vault ./web-vault |
|||
COPY --from=build /app/target/arm-unknown-linux-gnueabi/release/vaultwarden . |
|||
|
|||
COPY docker/healthcheck.sh /healthcheck.sh |
|||
COPY docker/start.sh /start.sh |
|||
|
|||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] |
|||
|
|||
# Configures the startup! |
|||
ENTRYPOINT ["/usr/bin/dumb-init", "--"] |
|||
CMD ["/start.sh"] |
@ -0,0 +1,125 @@ |
|||
# syntax=docker/dockerfile:1 |
|||
|
|||
# This file was generated using a Jinja2 template. |
|||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles. |
|||
|
|||
# Using multistage build: |
|||
# https://docs.docker.com/develop/develop-images/multistage-build/ |
|||
# https://whitfin.io/speeding-up-rust-docker-builds/ |
|||
####################### VAULT BUILD IMAGE ####################### |
|||
# The web-vault digest specifies a particular web-vault build on Docker Hub. |
|||
# Using the digest instead of the tag name provides better security, |
|||
# as the digest of an image is immutable, whereas a tag name can later |
|||
# be changed to point to a malicious image. |
|||
# |
|||
# To verify the current digest for a given tag name: |
|||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags, |
|||
# click the tag name to view the digest of the image it currently points to. |
|||
# - From the command line: |
|||
# $ docker pull vaultwarden/web-vault:v2.25.1b |
|||
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b |
|||
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba] |
|||
# |
|||
# - Conversely, to get the tag name from the digest: |
|||
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba |
|||
# [vaultwarden/web-vault:v2.25.1b] |
|||
# |
|||
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault |
|||
|
|||
########################## BUILD IMAGE ########################## |
|||
FROM blackdex/rust-musl:arm-musleabi-nightly-2022-01-23 as build |
|||
|
|||
|
|||
|
|||
# Build time options to avoid dpkg warnings and help with reproducible builds. |
|||
ENV DEBIAN_FRONTEND=noninteractive \ |
|||
LANG=C.UTF-8 \ |
|||
TZ=UTC \ |
|||
TERM=xterm-256color \ |
|||
CARGO_HOME="/root/.cargo" \ |
|||
USER="root" |
|||
|
|||
|
|||
# Create CARGO_HOME folder and don't download rust docs |
|||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \ |
|||
&& rustup set profile minimal |
|||
|
|||
ENV RUSTFLAGS='-C link-arg=-s' |
|||
|
|||
# Creates a dummy project used to grab dependencies |
|||
RUN USER=root cargo new --bin /app |
|||
WORKDIR /app |
|||
|
|||
# Copies over *only* your manifests and build files |
|||
COPY ./Cargo.* ./ |
|||
COPY ./rust-toolchain ./rust-toolchain |
|||
COPY ./build.rs ./build.rs |
|||
|
|||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add arm-unknown-linux-musleabi |
|||
|
|||
# Configure the DB ARG as late as possible to not invalidate the cached layers above |
|||
ARG DB=sqlite,mysql,postgresql |
|||
|
|||
# Builds your dependencies and removes the |
|||
# dummy project, except the target folder |
|||
# This folder contains the compiled dependencies |
|||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi \ |
|||
&& find . -not -path "./target*" -delete |
|||
|
|||
# Copies the complete project |
|||
# To avoid copying unneeded files, use .dockerignore |
|||
COPY . . |
|||
|
|||
# Make sure that we actually build the project |
|||
RUN touch src/main.rs |
|||
|
|||
# Builds again, this time it'll just be |
|||
# your actual source files being built |
|||
# hadolint ignore=DL3059 |
|||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=arm-unknown-linux-musleabi |
|||
|
|||
######################## RUNTIME IMAGE ######################## |
|||
# Create a new stage with a minimal image |
|||
# because we already have a binary built |
|||
FROM balenalib/rpi-alpine:3.15 |
|||
|
|||
ENV ROCKET_ENV="staging" \ |
|||
ROCKET_PORT=80 \ |
|||
ROCKET_WORKERS=10 \ |
|||
SSL_CERT_DIR=/etc/ssl/certs |
|||
|
|||
|
|||
# hadolint ignore=DL3059 |
|||
RUN [ "cross-build-start" ] |
|||
|
|||
# Create data folder and Install needed libraries |
|||
RUN mkdir /data \ |
|||
&& apk add --no-cache \ |
|||
openssl \ |
|||
tzdata \ |
|||
curl \ |
|||
dumb-init \ |
|||
ca-certificates |
|||
|
|||
# hadolint ignore=DL3059 |
|||
RUN [ "cross-build-end" ] |
|||
|
|||
VOLUME /data |
|||
EXPOSE 80 |
|||
EXPOSE 3012 |
|||
|
|||
# Copies the files from the context (Rocket.toml file and web-vault) |
|||
# and the binary from the "build" stage to the current stage |
|||
WORKDIR / |
|||
COPY Rocket.toml . |
|||
COPY --from=vault /web-vault ./web-vault |
|||
COPY --from=build /app/target/arm-unknown-linux-musleabi/release/vaultwarden . |
|||
|
|||
COPY docker/healthcheck.sh /healthcheck.sh |
|||
COPY docker/start.sh /start.sh |
|||
|
|||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] |
|||
|
|||
# Configures the startup! |
|||
ENTRYPOINT ["/usr/bin/dumb-init", "--"] |
|||
CMD ["/start.sh"] |
@ -0,0 +1,153 @@ |
|||
# syntax=docker/dockerfile:1 |
|||
|
|||
# This file was generated using a Jinja2 template. |
|||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles. |
|||
|
|||
# Using multistage build: |
|||
# https://docs.docker.com/develop/develop-images/multistage-build/ |
|||
# https://whitfin.io/speeding-up-rust-docker-builds/ |
|||
####################### VAULT BUILD IMAGE ####################### |
|||
# The web-vault digest specifies a particular web-vault build on Docker Hub. |
|||
# Using the digest instead of the tag name provides better security, |
|||
# as the digest of an image is immutable, whereas a tag name can later |
|||
# be changed to point to a malicious image. |
|||
# |
|||
# To verify the current digest for a given tag name: |
|||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags, |
|||
# click the tag name to view the digest of the image it currently points to. |
|||
# - From the command line: |
|||
# $ docker pull vaultwarden/web-vault:v2.25.1b |
|||
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b |
|||
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba] |
|||
# |
|||
# - Conversely, to get the tag name from the digest: |
|||
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba |
|||
# [vaultwarden/web-vault:v2.25.1b] |
|||
# |
|||
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault |
|||
|
|||
########################## BUILD IMAGE ########################## |
|||
FROM rust:1.58-buster as build |
|||
|
|||
|
|||
|
|||
# Build time options to avoid dpkg warnings and help with reproducible builds. |
|||
ENV DEBIAN_FRONTEND=noninteractive \ |
|||
LANG=C.UTF-8 \ |
|||
TZ=UTC \ |
|||
TERM=xterm-256color \ |
|||
CARGO_HOME="/root/.cargo" \ |
|||
USER="root" |
|||
|
|||
|
|||
# Create CARGO_HOME folder and don't download rust docs |
|||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \ |
|||
&& rustup set profile minimal |
|||
|
|||
# |
|||
# Install required build libs for armhf architecture. |
|||
# hadolint ignore=DL3059 |
|||
RUN dpkg --add-architecture armhf \ |
|||
&& apt-get update \ |
|||
&& apt-get install -y \ |
|||
--no-install-recommends \ |
|||
libssl-dev:armhf \ |
|||
libc6-dev:armhf \ |
|||
libpq5:armhf \ |
|||
libpq-dev:armhf \ |
|||
libmariadb3:armhf \ |
|||
libmariadb-dev:armhf \ |
|||
libmariadb-dev-compat:armhf \ |
|||
gcc-arm-linux-gnueabihf \ |
|||
# |
|||
# Make sure cargo has the right target config |
|||
&& echo '[target.armv7-unknown-linux-gnueabihf]' >> "${CARGO_HOME}/config" \ |
|||
&& echo 'linker = "arm-linux-gnueabihf-gcc"' >> "${CARGO_HOME}/config" \ |
|||
&& echo 'rustflags = ["-L/usr/lib/arm-linux-gnueabihf"]' >> "${CARGO_HOME}/config" |
|||
|
|||
# Set arm specific environment values |
|||
ENV CC_armv7_unknown_linux_gnueabihf="/usr/bin/arm-linux-gnueabihf-gcc" \ |
|||
CROSS_COMPILE="1" \ |
|||
OPENSSL_INCLUDE_DIR="/usr/include/arm-linux-gnueabihf" \ |
|||
OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf" |
|||
|
|||
|
|||
# Creates a dummy project used to grab dependencies |
|||
RUN USER=root cargo new --bin /app |
|||
WORKDIR /app |
|||
|
|||
# Copies over *only* your manifests and build files |
|||
COPY ./Cargo.* ./ |
|||
COPY ./rust-toolchain ./rust-toolchain |
|||
COPY ./build.rs ./build.rs |
|||
|
|||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-gnueabihf |
|||
|
|||
# Configure the DB ARG as late as possible to not invalidate the cached layers above |
|||
ARG DB=sqlite,mysql,postgresql |
|||
|
|||
# Builds your dependencies and removes the |
|||
# dummy project, except the target folder |
|||
# This folder contains the compiled dependencies |
|||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf \ |
|||
&& find . -not -path "./target*" -delete |
|||
|
|||
# Copies the complete project |
|||
# To avoid copying unneeded files, use .dockerignore |
|||
COPY . . |
|||
|
|||
# Make sure that we actually build the project |
|||
RUN touch src/main.rs |
|||
|
|||
# Builds again, this time it'll just be |
|||
# your actual source files being built |
|||
# hadolint ignore=DL3059 |
|||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-gnueabihf |
|||
|
|||
######################## RUNTIME IMAGE ######################## |
|||
# Create a new stage with a minimal image |
|||
# because we already have a binary built |
|||
FROM balenalib/armv7hf-debian:buster |
|||
|
|||
ENV ROCKET_ENV="staging" \ |
|||
ROCKET_PORT=80 \ |
|||
ROCKET_WORKERS=10 |
|||
|
|||
# hadolint ignore=DL3059 |
|||
RUN [ "cross-build-start" ] |
|||
|
|||
# Create data folder and Install needed libraries |
|||
RUN mkdir /data \ |
|||
&& apt-get update && apt-get install -y \ |
|||
--no-install-recommends \ |
|||
openssl \ |
|||
ca-certificates \ |
|||
curl \ |
|||
dumb-init \ |
|||
libmariadb-dev-compat \ |
|||
libpq5 \ |
|||
&& apt-get clean \ |
|||
&& rm -rf /var/lib/apt/lists/* |
|||
|
|||
# hadolint ignore=DL3059 |
|||
RUN [ "cross-build-end" ] |
|||
|
|||
VOLUME /data |
|||
EXPOSE 80 |
|||
EXPOSE 3012 |
|||
|
|||
# Copies the files from the context (Rocket.toml file and web-vault) |
|||
# and the binary from the "build" stage to the current stage |
|||
WORKDIR / |
|||
COPY Rocket.toml . |
|||
COPY --from=vault /web-vault ./web-vault |
|||
COPY --from=build /app/target/armv7-unknown-linux-gnueabihf/release/vaultwarden . |
|||
|
|||
COPY docker/healthcheck.sh /healthcheck.sh |
|||
COPY docker/start.sh /start.sh |
|||
|
|||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] |
|||
|
|||
# Configures the startup! |
|||
ENTRYPOINT ["/usr/bin/dumb-init", "--"] |
|||
CMD ["/start.sh"] |
@ -0,0 +1,128 @@ |
|||
# syntax=docker/dockerfile:1 |
|||
|
|||
# This file was generated using a Jinja2 template. |
|||
# Please make your changes in `Dockerfile.j2` and then `make` the individual Dockerfiles. |
|||
|
|||
# Using multistage build: |
|||
# https://docs.docker.com/develop/develop-images/multistage-build/ |
|||
# https://whitfin.io/speeding-up-rust-docker-builds/ |
|||
####################### VAULT BUILD IMAGE ####################### |
|||
# The web-vault digest specifies a particular web-vault build on Docker Hub. |
|||
# Using the digest instead of the tag name provides better security, |
|||
# as the digest of an image is immutable, whereas a tag name can later |
|||
# be changed to point to a malicious image. |
|||
# |
|||
# To verify the current digest for a given tag name: |
|||
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags, |
|||
# click the tag name to view the digest of the image it currently points to. |
|||
# - From the command line: |
|||
# $ docker pull vaultwarden/web-vault:v2.25.1b |
|||
# $ docker image inspect --format "{{.RepoDigests}}" vaultwarden/web-vault:v2.25.1b |
|||
# [vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba] |
|||
# |
|||
# - Conversely, to get the tag name from the digest: |
|||
# $ docker image inspect --format "{{.RepoTags}}" vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba |
|||
# [vaultwarden/web-vault:v2.25.1b] |
|||
# |
|||
FROM vaultwarden/web-vault@sha256:9b82318d553d72f091e8755f5aff80eed495f90bbe5b0703522953480f5c2fba as vault |
|||
|
|||
########################## BUILD IMAGE ########################## |
|||
FROM blackdex/rust-musl:armv7-musleabihf-nightly-2022-01-23 as build |
|||
|
|||
|
|||
|
|||
# Build time options to avoid dpkg warnings and help with reproducible builds. |
|||
ENV DEBIAN_FRONTEND=noninteractive \ |
|||
LANG=C.UTF-8 \ |
|||
TZ=UTC \ |
|||
TERM=xterm-256color \ |
|||
CARGO_HOME="/root/.cargo" \ |
|||
USER="root" |
|||
|
|||
|
|||
# Create CARGO_HOME folder and don't download rust docs |
|||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry mkdir -pv "${CARGO_HOME}" \ |
|||
&& rustup set profile minimal |
|||
|
|||
ENV RUSTFLAGS='-C link-arg=-s' |
|||
ENV CFLAGS_armv7_unknown_linux_musleabihf="-mfpu=vfpv3-d16" |
|||
|
|||
# Creates a dummy project used to grab dependencies |
|||
RUN USER=root cargo new --bin /app |
|||
WORKDIR /app |
|||
|
|||
# Copies over *only* your manifests and build files |
|||
COPY ./Cargo.* ./ |
|||
COPY ./rust-toolchain ./rust-toolchain |
|||
COPY ./build.rs ./build.rs |
|||
|
|||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry rustup target add armv7-unknown-linux-musleabihf |
|||
|
|||
# Configure the DB ARG as late as possible to not invalidate the cached layers above |
|||
ARG DB=sqlite,mysql,postgresql |
|||
|
|||
# Builds your dependencies and removes the |
|||
# dummy project, except the target folder |
|||
# This folder contains the compiled dependencies |
|||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf \ |
|||
&& find . -not -path "./target*" -delete |
|||
|
|||
# Copies the complete project |
|||
# To avoid copying unneeded files, use .dockerignore |
|||
COPY . . |
|||
|
|||
# Make sure that we actually build the project |
|||
RUN touch src/main.rs |
|||
|
|||
# Builds again, this time it'll just be |
|||
# your actual source files being built |
|||
# hadolint ignore=DL3059 |
|||
RUN --mount=type=cache,target=/root/.cargo/git --mount=type=cache,target=/root/.cargo/registry cargo build --features ${DB} --release --target=armv7-unknown-linux-musleabihf |
|||
# hadolint ignore=DL3059 |
|||
RUN musl-strip target/armv7-unknown-linux-musleabihf/release/vaultwarden |
|||
|
|||
######################## RUNTIME IMAGE ######################## |
|||
# Create a new stage with a minimal image |
|||
# because we already have a binary built |
|||
FROM balenalib/armv7hf-alpine:3.15 |
|||
|
|||
ENV ROCKET_ENV="staging" \ |
|||
ROCKET_PORT=80 \ |
|||
ROCKET_WORKERS=10 \ |
|||
SSL_CERT_DIR=/etc/ssl/certs |
|||
|
|||
|
|||
# hadolint ignore=DL3059 |
|||
RUN [ "cross-build-start" ] |
|||
|
|||
# Create data folder and Install needed libraries |
|||
RUN mkdir /data \ |
|||
&& apk add --no-cache \ |
|||
openssl \ |
|||
tzdata \ |
|||
curl \ |
|||
dumb-init \ |
|||
ca-certificates |
|||
|
|||
# hadolint ignore=DL3059 |
|||
RUN [ "cross-build-end" ] |
|||
|
|||
VOLUME /data |
|||
EXPOSE 80 |
|||
EXPOSE 3012 |
|||
|
|||
# Copies the files from the context (Rocket.toml file and web-vault) |
|||
# and the binary from the "build" stage to the current stage |
|||
WORKDIR / |
|||
COPY Rocket.toml . |
|||
COPY --from=vault /web-vault ./web-vault |
|||
COPY --from=build /app/target/armv7-unknown-linux-musleabihf/release/vaultwarden . |
|||
|
|||
COPY docker/healthcheck.sh /healthcheck.sh |
|||
COPY docker/start.sh /start.sh |
|||
|
|||
HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] |
|||
|
|||
# Configures the startup! |
|||
ENTRYPOINT ["/usr/bin/dumb-init", "--"] |
|||
CMD ["/start.sh"] |
@ -0,0 +1 @@ |
|||
DROP TABLE emergency_access; |
@ -0,0 +1,14 @@ |
|||
CREATE TABLE emergency_access ( |
|||
uuid CHAR(36) NOT NULL PRIMARY KEY, |
|||
grantor_uuid CHAR(36) REFERENCES users (uuid), |
|||
grantee_uuid CHAR(36) REFERENCES users (uuid), |
|||
email VARCHAR(255), |
|||
key_encrypted TEXT, |
|||
atype INTEGER NOT NULL, |
|||
status INTEGER NOT NULL, |
|||
wait_time_days INTEGER NOT NULL, |
|||
recovery_initiated_at DATETIME, |
|||
last_notification_at DATETIME, |
|||
updated_at DATETIME NOT NULL, |
|||
created_at DATETIME NOT NULL |
|||
); |
@ -0,0 +1 @@ |
|||
DROP TABLE twofactor_incomplete; |
@ -0,0 +1,9 @@ |
|||
CREATE TABLE twofactor_incomplete ( |
|||
user_uuid CHAR(36) NOT NULL REFERENCES users(uuid), |
|||
device_uuid CHAR(36) NOT NULL, |
|||
device_name TEXT NOT NULL, |
|||
login_time DATETIME NOT NULL, |
|||
ip_address TEXT NOT NULL, |
|||
|
|||
PRIMARY KEY (user_uuid, device_uuid) |
|||
); |
@ -0,0 +1,2 @@ |
|||
ALTER TABLE users |
|||
ADD COLUMN api_key VARCHAR(255); |
@ -0,0 +1 @@ |
|||
DROP TABLE emergency_access; |
@ -0,0 +1,14 @@ |
|||
CREATE TABLE emergency_access ( |
|||
uuid CHAR(36) NOT NULL PRIMARY KEY, |
|||
grantor_uuid CHAR(36) REFERENCES users (uuid), |
|||
grantee_uuid CHAR(36) REFERENCES users (uuid), |
|||
email VARCHAR(255), |
|||
key_encrypted TEXT, |
|||
atype INTEGER NOT NULL, |
|||
status INTEGER NOT NULL, |
|||
wait_time_days INTEGER NOT NULL, |
|||
recovery_initiated_at TIMESTAMP, |
|||
last_notification_at TIMESTAMP, |
|||
updated_at TIMESTAMP NOT NULL, |
|||
created_at TIMESTAMP NOT NULL |
|||
); |
@ -0,0 +1 @@ |
|||
DROP TABLE twofactor_incomplete; |
@ -0,0 +1,9 @@ |
|||
CREATE TABLE twofactor_incomplete ( |
|||
user_uuid VARCHAR(40) NOT NULL REFERENCES users(uuid), |
|||
device_uuid VARCHAR(40) NOT NULL, |
|||
device_name TEXT NOT NULL, |
|||
login_time TIMESTAMP NOT NULL, |
|||
ip_address TEXT NOT NULL, |
|||
|
|||
PRIMARY KEY (user_uuid, device_uuid) |
|||
); |
@ -0,0 +1,2 @@ |
|||
ALTER TABLE users |
|||
ADD COLUMN api_key TEXT; |
@ -0,0 +1 @@ |
|||
DROP TABLE emergency_access; |
@ -0,0 +1,14 @@ |
|||
CREATE TABLE emergency_access ( |
|||
uuid TEXT NOT NULL PRIMARY KEY, |
|||
grantor_uuid TEXT REFERENCES users (uuid), |
|||
grantee_uuid TEXT REFERENCES users (uuid), |
|||
email TEXT, |
|||
key_encrypted TEXT, |
|||
atype INTEGER NOT NULL, |
|||
status INTEGER NOT NULL, |
|||
wait_time_days INTEGER NOT NULL, |
|||
recovery_initiated_at DATETIME, |
|||
last_notification_at DATETIME, |
|||
updated_at DATETIME NOT NULL, |
|||
created_at DATETIME NOT NULL |
|||
); |
@ -0,0 +1 @@ |
|||
DROP TABLE twofactor_incomplete; |
@ -0,0 +1,9 @@ |
|||
CREATE TABLE twofactor_incomplete ( |
|||
user_uuid TEXT NOT NULL REFERENCES users(uuid), |
|||
device_uuid TEXT NOT NULL, |
|||
device_name TEXT NOT NULL, |
|||
login_time DATETIME NOT NULL, |
|||
ip_address TEXT NOT NULL, |
|||
|
|||
PRIMARY KEY (user_uuid, device_uuid) |
|||
); |
@ -0,0 +1,2 @@ |
|||
ALTER TABLE users |
|||
ADD COLUMN api_key TEXT; |
@ -1 +1 @@ |
|||
nightly-2021-08-22 |
|||
nightly-2022-01-23 |
|||
|
@ -1,24 +1,804 @@ |
|||
use chrono::{Duration, Utc}; |
|||
use rocket::Route; |
|||
use rocket_contrib::json::Json; |
|||
use serde_json::Value; |
|||
use std::borrow::Borrow; |
|||
|
|||
use crate::{api::JsonResult, auth::Headers, db::DbConn}; |
|||
use crate::{ |
|||
api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString}, |
|||
auth::{decode_emergency_access_invite, Headers}, |
|||
db::{models::*, DbConn, DbPool}, |
|||
mail, CONFIG, |
|||
}; |
|||
|
|||
pub fn routes() -> Vec<Route> { |
|||
routes![get_contacts,] |
|||
routes![ |
|||
get_contacts, |
|||
get_grantees, |
|||
get_emergency_access, |
|||
put_emergency_access, |
|||
delete_emergency_access, |
|||
post_delete_emergency_access, |
|||
send_invite, |
|||
resend_invite, |
|||
accept_invite, |
|||
confirm_emergency_access, |
|||
initiate_emergency_access, |
|||
approve_emergency_access, |
|||
reject_emergency_access, |
|||
takeover_emergency_access, |
|||
password_emergency_access, |
|||
view_emergency_access, |
|||
policies_emergency_access, |
|||
] |
|||
} |
|||
|
|||
/// This endpoint is expected to return at least something.
|
|||
/// If we return an error message that will trigger error toasts for the user.
|
|||
/// To prevent this we just return an empty json result with no Data.
|
|||
/// When this feature is going to be implemented it also needs to return this empty Data
|
|||
/// instead of throwing an error/4XX unless it really is an error.
|
|||
// region get
|
|||
|
|||
#[get("/emergency-access/trusted")] |
|||
fn get_contacts(_headers: Headers, _conn: DbConn) -> JsonResult { |
|||
debug!("Emergency access is not supported."); |
|||
fn get_contacts(headers: Headers, conn: DbConn) -> JsonResult { |
|||
check_emergency_access_allowed()?; |
|||
|
|||
let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &conn); |
|||
|
|||
let emergency_access_list_json: Vec<Value> = |
|||
emergency_access_list.iter().map(|e| e.to_json_grantee_details(&conn)).collect(); |
|||
|
|||
Ok(Json(json!({ |
|||
"Data": emergency_access_list_json, |
|||
"Object": "list", |
|||
"ContinuationToken": null |
|||
}))) |
|||
} |
|||
|
|||
#[get("/emergency-access/granted")] |
|||
fn get_grantees(headers: Headers, conn: DbConn) -> JsonResult { |
|||
check_emergency_access_allowed()?; |
|||
|
|||
let emergency_access_list = EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &conn); |
|||
|
|||
let emergency_access_list_json: Vec<Value> = |
|||
emergency_access_list.iter().map(|e| e.to_json_grantor_details(&conn)).collect(); |
|||
|
|||
Ok(Json(json!({ |
|||
"Data": emergency_access_list_json, |
|||
"Object": "list", |
|||
"ContinuationToken": null |
|||
}))) |
|||
} |
|||
|
|||
#[get("/emergency-access/<emer_id>")] |
|||
fn get_emergency_access(emer_id: String, conn: DbConn) -> JsonResult { |
|||
check_emergency_access_allowed()?; |
|||
|
|||
match EmergencyAccess::find_by_uuid(&emer_id, &conn) { |
|||
Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&conn))), |
|||
None => err!("Emergency access not valid."), |
|||
} |
|||
} |
|||
|
|||
// endregion
|
|||
|
|||
// region put/post
|
|||
|
|||
#[derive(Deserialize, Debug)] |
|||
#[allow(non_snake_case)] |
|||
struct EmergencyAccessUpdateData { |
|||
Type: NumberOrString, |
|||
WaitTimeDays: i32, |
|||
KeyEncrypted: Option<String>, |
|||
} |
|||
|
|||
#[put("/emergency-access/<emer_id>", data = "<data>")] |
|||
fn put_emergency_access(emer_id: String, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult { |
|||
post_emergency_access(emer_id, data, conn) |
|||
} |
|||
|
|||
#[post("/emergency-access/<emer_id>", data = "<data>")] |
|||
fn post_emergency_access(emer_id: String, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult { |
|||
check_emergency_access_allowed()?; |
|||
|
|||
let data: EmergencyAccessUpdateData = data.into_inner().data; |
|||
|
|||
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { |
|||
Some(emergency_access) => emergency_access, |
|||
None => err!("Emergency access not valid."), |
|||
}; |
|||
|
|||
let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) { |
|||
Some(new_type) => new_type as i32, |
|||
None => err!("Invalid emergency access type."), |
|||
}; |
|||
|
|||
emergency_access.atype = new_type; |
|||
emergency_access.wait_time_days = data.WaitTimeDays; |
|||
emergency_access.key_encrypted = data.KeyEncrypted; |
|||
|
|||
emergency_access.save(&conn)?; |
|||
Ok(Json(emergency_access.to_json())) |
|||
} |
|||
|
|||
// endregion
|
|||
|
|||
// region delete
|
|||
|
|||
#[delete("/emergency-access/<emer_id>")] |
|||
fn delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult { |
|||
check_emergency_access_allowed()?; |
|||
|
|||
let grantor_user = headers.user; |
|||
|
|||
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { |
|||
Some(emer) => { |
|||
if emer.grantor_uuid != grantor_user.uuid && emer.grantee_uuid != Some(grantor_user.uuid) { |
|||
err!("Emergency access not valid.") |
|||
} |
|||
emer |
|||
} |
|||
None => err!("Emergency access not valid."), |
|||
}; |
|||
emergency_access.delete(&conn)?; |
|||
Ok(()) |
|||
} |
|||
|
|||
#[post("/emergency-access/<emer_id>/delete")] |
|||
fn post_delete_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult { |
|||
delete_emergency_access(emer_id, headers, conn) |
|||
} |
|||
|
|||
// endregion
|
|||
|
|||
// region invite
|
|||
|
|||
#[derive(Deserialize, Debug)] |
|||
#[allow(non_snake_case)] |
|||
struct EmergencyAccessInviteData { |
|||
Email: String, |
|||
Type: NumberOrString, |
|||
WaitTimeDays: i32, |
|||
} |
|||
|
|||
#[post("/emergency-access/invite", data = "<data>")] |
|||
fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, conn: DbConn) -> EmptyResult { |
|||
check_emergency_access_allowed()?; |
|||
|
|||
let data: EmergencyAccessInviteData = data.into_inner().data; |
|||
let email = data.Email.to_lowercase(); |
|||
let wait_time_days = data.WaitTimeDays; |
|||
|
|||
let emergency_access_status = EmergencyAccessStatus::Invited as i32; |
|||
|
|||
let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) { |
|||
Some(new_type) => new_type as i32, |
|||
None => err!("Invalid emergency access type."), |
|||
}; |
|||
|
|||
let grantor_user = headers.user; |
|||
|
|||
// avoid setting yourself as emergency contact
|
|||
if email == grantor_user.email { |
|||
err!("You can not set yourself as an emergency contact.") |
|||
} |
|||
|
|||
let grantee_user = match User::find_by_mail(&email, &conn) { |
|||
None => { |
|||
if !CONFIG.invitations_allowed() { |
|||
err!(format!("Grantee user does not exist: {}", email)) |
|||
} |
|||
|
|||
if !CONFIG.is_email_domain_allowed(&email) { |
|||
err!("Email domain not eligible for invitations") |
|||
} |
|||
|
|||
if !CONFIG.mail_enabled() { |
|||
let invitation = Invitation::new(email.clone()); |
|||
invitation.save(&conn)?; |
|||
} |
|||
|
|||
let mut user = User::new(email.clone()); |
|||
user.save(&conn)?; |
|||
user |
|||
} |
|||
Some(user) => user, |
|||
}; |
|||
|
|||
if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email( |
|||
&grantor_user.uuid, |
|||
&grantee_user.uuid, |
|||
&grantee_user.email, |
|||
&conn, |
|||
) |
|||
.is_some() |
|||
{ |
|||
err!(format!("Grantee user already invited: {}", email)) |
|||
} |
|||
|
|||
let mut new_emergency_access = EmergencyAccess::new( |
|||
grantor_user.uuid.clone(), |
|||
Some(grantee_user.email.clone()), |
|||
emergency_access_status, |
|||
new_type, |
|||
wait_time_days, |
|||
); |
|||
new_emergency_access.save(&conn)?; |
|||
|
|||
if CONFIG.mail_enabled() { |
|||
mail::send_emergency_access_invite( |
|||
&grantee_user.email, |
|||
&grantee_user.uuid, |
|||
Some(new_emergency_access.uuid), |
|||
Some(grantor_user.name.clone()), |
|||
Some(grantor_user.email), |
|||
)?; |
|||
} else { |
|||
// Automatically mark user as accepted if no email invites
|
|||
match User::find_by_mail(&email, &conn) { |
|||
Some(user) => { |
|||
match accept_invite_process(user.uuid, new_emergency_access.uuid, Some(email), conn.borrow()) { |
|||
Ok(v) => (v), |
|||
Err(e) => err!(e.to_string()), |
|||
} |
|||
} |
|||
None => err!("Grantee user not found."), |
|||
} |
|||
} |
|||
|
|||
Ok(()) |
|||
} |
|||
|
|||
#[post("/emergency-access/<emer_id>/reinvite")] |
|||
fn resend_invite(emer_id: String, headers: Headers, conn: DbConn) -> EmptyResult { |
|||
check_emergency_access_allowed()?; |
|||
|
|||
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { |
|||
Some(emer) => emer, |
|||
None => err!("Emergency access not valid."), |
|||
}; |
|||
|
|||
if emergency_access.grantor_uuid != headers.user.uuid { |
|||
err!("Emergency access not valid."); |
|||
} |
|||
|
|||
if emergency_access.status != EmergencyAccessStatus::Invited as i32 { |
|||
err!("The grantee user is already accepted or confirmed to the organization"); |
|||
} |
|||
|
|||
let email = match emergency_access.email.clone() { |
|||
Some(email) => email, |
|||
None => err!("Email not valid."), |
|||
}; |
|||
|
|||
let grantee_user = match User::find_by_mail(&email, &conn) { |
|||
Some(user) => user, |
|||
None => err!("Grantee user not found."), |
|||
}; |
|||
|
|||
let grantor_user = headers.user; |
|||
|
|||
if CONFIG.mail_enabled() { |
|||
mail::send_emergency_access_invite( |
|||
&email, |
|||
&grantor_user.uuid, |
|||
Some(emergency_access.uuid), |
|||
Some(grantor_user.name.clone()), |
|||
Some(grantor_user.email), |
|||
)?; |
|||
} else { |
|||
if Invitation::find_by_mail(&email, &conn).is_none() { |
|||
let invitation = Invitation::new(email); |
|||
invitation.save(&conn)?; |
|||
} |
|||
|
|||
// Automatically mark user as accepted if no email invites
|
|||
match accept_invite_process(grantee_user.uuid, emergency_access.uuid, emergency_access.email, conn.borrow()) { |
|||
Ok(v) => (v), |
|||
Err(e) => err!(e.to_string()), |
|||
} |
|||
} |
|||
|
|||
Ok(()) |
|||
} |
|||
|
|||
#[derive(Deserialize)] |
|||
#[allow(non_snake_case)] |
|||
struct AcceptData { |
|||
Token: String, |
|||
} |
|||
|
|||
#[post("/emergency-access/<emer_id>/accept", data = "<data>")] |
|||
fn accept_invite(emer_id: String, data: JsonUpcase<AcceptData>, conn: DbConn) -> EmptyResult { |
|||
check_emergency_access_allowed()?; |
|||
|
|||
let data: AcceptData = data.into_inner().data; |
|||
let token = &data.Token; |
|||
let claims = decode_emergency_access_invite(token)?; |
|||
|
|||
let grantee_user = match User::find_by_mail(&claims.email, &conn) { |
|||
Some(user) => { |
|||
Invitation::take(&claims.email, &conn); |
|||
user |
|||
} |
|||
None => err!("Invited user not found"), |
|||
}; |
|||
|
|||
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { |
|||
Some(emer) => emer, |
|||
None => err!("Emergency access not valid."), |
|||
}; |
|||
|
|||
// get grantor user to send Accepted email
|
|||
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) { |
|||
Some(user) => user, |
|||
None => err!("Grantor user not found."), |
|||
}; |
|||
|
|||
if (claims.emer_id.is_some() && emer_id == claims.emer_id.unwrap()) |
|||
&& (claims.grantor_name.is_some() && grantor_user.name == claims.grantor_name.unwrap()) |
|||
&& (claims.grantor_email.is_some() && grantor_user.email == claims.grantor_email.unwrap()) |
|||
{ |
|||
match accept_invite_process(grantee_user.uuid.clone(), emer_id, Some(grantee_user.email.clone()), &conn) { |
|||
Ok(v) => (v), |
|||
Err(e) => err!(e.to_string()), |
|||
} |
|||
|
|||
if CONFIG.mail_enabled() { |
|||
mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email)?; |
|||
} |
|||
|
|||
Ok(()) |
|||
} else { |
|||
err!("Emergency access invitation error.") |
|||
} |
|||
} |
|||
|
|||
fn accept_invite_process(grantee_uuid: String, emer_id: String, email: Option<String>, conn: &DbConn) -> EmptyResult { |
|||
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, conn) { |
|||
Some(emer) => emer, |
|||
None => err!("Emergency access not valid."), |
|||
}; |
|||
|
|||
let emer_email = emergency_access.email; |
|||
if emer_email.is_none() || emer_email != email { |
|||
err!("User email does not match invite."); |
|||
} |
|||
|
|||
if emergency_access.status == EmergencyAccessStatus::Accepted as i32 { |
|||
err!("Emergency contact already accepted."); |
|||
} |
|||
|
|||
emergency_access.status = EmergencyAccessStatus::Accepted as i32; |
|||
emergency_access.grantee_uuid = Some(grantee_uuid); |
|||
emergency_access.email = None; |
|||
emergency_access.save(conn) |
|||
} |
|||
|
|||
#[derive(Deserialize)] |
|||
#[allow(non_snake_case)] |
|||
struct ConfirmData { |
|||
Key: String, |
|||
} |
|||
|
|||
#[post("/emergency-access/<emer_id>/confirm", data = "<data>")] |
|||
fn confirm_emergency_access( |
|||
emer_id: String, |
|||
data: JsonUpcase<ConfirmData>, |
|||
headers: Headers, |
|||
conn: DbConn, |
|||
) -> JsonResult { |
|||
check_emergency_access_allowed()?; |
|||
|
|||
let confirming_user = headers.user; |
|||
let data: ConfirmData = data.into_inner().data; |
|||
let key = data.Key; |
|||
|
|||
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { |
|||
Some(emer) => emer, |
|||
None => err!("Emergency access not valid."), |
|||
}; |
|||
|
|||
if emergency_access.status != EmergencyAccessStatus::Accepted as i32 |
|||
|| emergency_access.grantor_uuid != confirming_user.uuid |
|||
{ |
|||
err!("Emergency access not valid.") |
|||
} |
|||
|
|||
let grantor_user = match User::find_by_uuid(&confirming_user.uuid, &conn) { |
|||
Some(user) => user, |
|||
None => err!("Grantor user not found."), |
|||
}; |
|||
|
|||
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() { |
|||
let grantee_user = match User::find_by_uuid(grantee_uuid, &conn) { |
|||
Some(user) => user, |
|||
None => err!("Grantee user not found."), |
|||
}; |
|||
|
|||
emergency_access.status = EmergencyAccessStatus::Confirmed as i32; |
|||
emergency_access.key_encrypted = Some(key); |
|||
emergency_access.email = None; |
|||
|
|||
emergency_access.save(&conn)?; |
|||
|
|||
if CONFIG.mail_enabled() { |
|||
mail::send_emergency_access_invite_confirmed(&grantee_user.email, &grantor_user.name)?; |
|||
} |
|||
Ok(Json(emergency_access.to_json())) |
|||
} else { |
|||
err!("Grantee user not found.") |
|||
} |
|||
} |
|||
|
|||
// endregion
|
|||
|
|||
// region access emergency access
|
|||
|
|||
#[post("/emergency-access/<emer_id>/initiate")] |
|||
fn initiate_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { |
|||
check_emergency_access_allowed()?; |
|||
|
|||
let initiating_user = headers.user; |
|||
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { |
|||
Some(emer) => emer, |
|||
None => err!("Emergency access not valid."), |
|||
}; |
|||
|
|||
if emergency_access.status != EmergencyAccessStatus::Confirmed as i32 |
|||
|| emergency_access.grantee_uuid != Some(initiating_user.uuid.clone()) |
|||
{ |
|||
err!("Emergency access not valid.") |
|||
} |
|||
|
|||
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) { |
|||
Some(user) => user, |
|||
None => err!("Grantor user not found."), |
|||
}; |
|||
|
|||
let now = Utc::now().naive_utc(); |
|||
emergency_access.status = EmergencyAccessStatus::RecoveryInitiated as i32; |
|||
emergency_access.updated_at = now; |
|||
emergency_access.recovery_initiated_at = Some(now); |
|||
emergency_access.last_notification_at = Some(now); |
|||
emergency_access.save(&conn)?; |
|||
|
|||
if CONFIG.mail_enabled() { |
|||
mail::send_emergency_access_recovery_initiated( |
|||
&grantor_user.email, |
|||
&initiating_user.name, |
|||
emergency_access.get_type_as_str(), |
|||
&emergency_access.wait_time_days.clone().to_string(), |
|||
)?; |
|||
} |
|||
Ok(Json(emergency_access.to_json())) |
|||
} |
|||
|
|||
#[post("/emergency-access/<emer_id>/approve")] |
|||
fn approve_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { |
|||
check_emergency_access_allowed()?; |
|||
|
|||
let approving_user = headers.user; |
|||
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { |
|||
Some(emer) => emer, |
|||
None => err!("Emergency access not valid."), |
|||
}; |
|||
|
|||
if emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32 |
|||
|| emergency_access.grantor_uuid != approving_user.uuid |
|||
{ |
|||
err!("Emergency access not valid.") |
|||
} |
|||
|
|||
let grantor_user = match User::find_by_uuid(&approving_user.uuid, &conn) { |
|||
Some(user) => user, |
|||
None => err!("Grantor user not found."), |
|||
}; |
|||
|
|||
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() { |
|||
let grantee_user = match User::find_by_uuid(grantee_uuid, &conn) { |
|||
Some(user) => user, |
|||
None => err!("Grantee user not found."), |
|||
}; |
|||
|
|||
emergency_access.status = EmergencyAccessStatus::RecoveryApproved as i32; |
|||
emergency_access.save(&conn)?; |
|||
|
|||
if CONFIG.mail_enabled() { |
|||
mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name)?; |
|||
} |
|||
Ok(Json(emergency_access.to_json())) |
|||
} else { |
|||
err!("Grantee user not found.") |
|||
} |
|||
} |
|||
|
|||
#[post("/emergency-access/<emer_id>/reject")] |
|||
fn reject_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { |
|||
check_emergency_access_allowed()?; |
|||
|
|||
let rejecting_user = headers.user; |
|||
let mut emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { |
|||
Some(emer) => emer, |
|||
None => err!("Emergency access not valid."), |
|||
}; |
|||
|
|||
if (emergency_access.status != EmergencyAccessStatus::RecoveryInitiated as i32 |
|||
&& emergency_access.status != EmergencyAccessStatus::RecoveryApproved as i32) |
|||
|| emergency_access.grantor_uuid != rejecting_user.uuid |
|||
{ |
|||
err!("Emergency access not valid.") |
|||
} |
|||
|
|||
let grantor_user = match User::find_by_uuid(&rejecting_user.uuid, &conn) { |
|||
Some(user) => user, |
|||
None => err!("Grantor user not found."), |
|||
}; |
|||
|
|||
if let Some(grantee_uuid) = emergency_access.grantee_uuid.as_ref() { |
|||
let grantee_user = match User::find_by_uuid(grantee_uuid, &conn) { |
|||
Some(user) => user, |
|||
None => err!("Grantee user not found."), |
|||
}; |
|||
|
|||
emergency_access.status = EmergencyAccessStatus::Confirmed as i32; |
|||
emergency_access.save(&conn)?; |
|||
|
|||
if CONFIG.mail_enabled() { |
|||
mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name)?; |
|||
} |
|||
Ok(Json(emergency_access.to_json())) |
|||
} else { |
|||
err!("Grantee user not found.") |
|||
} |
|||
} |
|||
|
|||
// endregion
|
|||
|
|||
// region action
|
|||
|
|||
#[post("/emergency-access/<emer_id>/view")] |
|||
fn view_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { |
|||
check_emergency_access_allowed()?; |
|||
|
|||
let requesting_user = headers.user; |
|||
let host = headers.host; |
|||
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { |
|||
Some(emer) => emer, |
|||
None => err!("Emergency access not valid."), |
|||
}; |
|||
|
|||
if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::View) { |
|||
err!("Emergency access not valid.") |
|||
} |
|||
|
|||
let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &conn); |
|||
|
|||
let ciphers_json: Vec<Value> = |
|||
ciphers.iter().map(|c| c.to_json(&host, &emergency_access.grantor_uuid, &conn)).collect(); |
|||
|
|||
Ok(Json(json!({ |
|||
"Ciphers": ciphers_json, |
|||
"KeyEncrypted": &emergency_access.key_encrypted, |
|||
"Object": "emergencyAccessView", |
|||
}))) |
|||
} |
|||
|
|||
#[post("/emergency-access/<emer_id>/takeover")] |
|||
fn takeover_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { |
|||
check_emergency_access_allowed()?; |
|||
|
|||
let requesting_user = headers.user; |
|||
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { |
|||
Some(emer) => emer, |
|||
None => err!("Emergency access not valid."), |
|||
}; |
|||
|
|||
if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) { |
|||
err!("Emergency access not valid.") |
|||
} |
|||
|
|||
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) { |
|||
Some(user) => user, |
|||
None => err!("Grantor user not found."), |
|||
}; |
|||
|
|||
Ok(Json(json!({ |
|||
"Data": [], |
|||
"Kdf": grantor_user.client_kdf_type, |
|||
"KdfIterations": grantor_user.client_kdf_iter, |
|||
"KeyEncrypted": &emergency_access.key_encrypted, |
|||
"Object": "emergencyAccessTakeover", |
|||
}))) |
|||
} |
|||
|
|||
#[derive(Deserialize, Debug)] |
|||
#[allow(non_snake_case)] |
|||
struct EmergencyAccessPasswordData { |
|||
NewMasterPasswordHash: String, |
|||
Key: String, |
|||
} |
|||
|
|||
#[post("/emergency-access/<emer_id>/password", data = "<data>")] |
|||
fn password_emergency_access( |
|||
emer_id: String, |
|||
data: JsonUpcase<EmergencyAccessPasswordData>, |
|||
headers: Headers, |
|||
conn: DbConn, |
|||
) -> EmptyResult { |
|||
check_emergency_access_allowed()?; |
|||
|
|||
let data: EmergencyAccessPasswordData = data.into_inner().data; |
|||
let new_master_password_hash = &data.NewMasterPasswordHash; |
|||
let key = data.Key; |
|||
|
|||
let requesting_user = headers.user; |
|||
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { |
|||
Some(emer) => emer, |
|||
None => err!("Emergency access not valid."), |
|||
}; |
|||
|
|||
if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) { |
|||
err!("Emergency access not valid.") |
|||
} |
|||
|
|||
let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) { |
|||
Some(user) => user, |
|||
None => err!("Grantor user not found."), |
|||
}; |
|||
|
|||
// change grantor_user password
|
|||
grantor_user.set_password(new_master_password_hash, None); |
|||
grantor_user.akey = key; |
|||
grantor_user.save(&conn)?; |
|||
|
|||
// Disable TwoFactor providers since they will otherwise block logins
|
|||
TwoFactor::delete_all_by_user(&grantor_user.uuid, &conn)?; |
|||
|
|||
// Removing owner, check that there are at least another owner
|
|||
let user_org_grantor = UserOrganization::find_any_state_by_user(&grantor_user.uuid, &conn); |
|||
|
|||
// Remove grantor from all organisations unless Owner
|
|||
for user_org in user_org_grantor { |
|||
if user_org.atype != UserOrgType::Owner as i32 { |
|||
user_org.delete(&conn)?; |
|||
} |
|||
} |
|||
Ok(()) |
|||
} |
|||
|
|||
// endregion
|
|||
|
|||
#[get("/emergency-access/<emer_id>/policies")] |
|||
fn policies_emergency_access(emer_id: String, headers: Headers, conn: DbConn) -> JsonResult { |
|||
let requesting_user = headers.user; |
|||
let emergency_access = match EmergencyAccess::find_by_uuid(&emer_id, &conn) { |
|||
Some(emer) => emer, |
|||
None => err!("Emergency access not valid."), |
|||
}; |
|||
|
|||
if !is_valid_request(&emergency_access, requesting_user.uuid, EmergencyAccessType::Takeover) { |
|||
err!("Emergency access not valid.") |
|||
} |
|||
|
|||
let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &conn) { |
|||
Some(user) => user, |
|||
None => err!("Grantor user not found."), |
|||
}; |
|||
|
|||
let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &conn); |
|||
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect(); |
|||
|
|||
Ok(Json(json!({ |
|||
"Data": policies_json, |
|||
"Object": "list", |
|||
"ContinuationToken": null |
|||
}))) |
|||
} |
|||
|
|||
fn is_valid_request( |
|||
emergency_access: &EmergencyAccess, |
|||
requesting_user_uuid: String, |
|||
requested_access_type: EmergencyAccessType, |
|||
) -> bool { |
|||
emergency_access.grantee_uuid == Some(requesting_user_uuid) |
|||
&& emergency_access.status == EmergencyAccessStatus::RecoveryApproved as i32 |
|||
&& emergency_access.atype == requested_access_type as i32 |
|||
} |
|||
|
|||
fn check_emergency_access_allowed() -> EmptyResult { |
|||
if !CONFIG.emergency_access_allowed() { |
|||
err!("Emergency access is not allowed.") |
|||
} |
|||
Ok(()) |
|||
} |
|||
|
|||
pub fn emergency_request_timeout_job(pool: DbPool) { |
|||
debug!("Start emergency_request_timeout_job"); |
|||
if !CONFIG.emergency_access_allowed() { |
|||
return; |
|||
} |
|||
|
|||
if let Ok(conn) = pool.get() { |
|||
let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn); |
|||
|
|||
if emergency_access_list.is_empty() { |
|||
debug!("No emergency request timeout to approve"); |
|||
} |
|||
|
|||
for mut emer in emergency_access_list { |
|||
if emer.recovery_initiated_at.is_some() |
|||
&& Utc::now().naive_utc() |
|||
>= emer.recovery_initiated_at.unwrap() + Duration::days(emer.wait_time_days as i64) |
|||
{ |
|||
emer.status = EmergencyAccessStatus::RecoveryApproved as i32; |
|||
emer.save(&conn).expect("Cannot save emergency access on job"); |
|||
|
|||
if CONFIG.mail_enabled() { |
|||
// get grantor user to send Accepted email
|
|||
let grantor_user = User::find_by_uuid(&emer.grantor_uuid, &conn).expect("Grantor user not found."); |
|||
|
|||
// get grantee user to send Accepted email
|
|||
let grantee_user = |
|||
User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid."), &conn) |
|||
.expect("Grantee user not found."); |
|||
|
|||
mail::send_emergency_access_recovery_timed_out( |
|||
&grantor_user.email, |
|||
&grantee_user.name.clone(), |
|||
emer.get_type_as_str(), |
|||
) |
|||
.expect("Error on sending email"); |
|||
|
|||
mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name.clone()) |
|||
.expect("Error on sending email"); |
|||
} |
|||
} |
|||
} |
|||
} else { |
|||
error!("Failed to get DB connection while searching emergency request timed out") |
|||
} |
|||
} |
|||
|
|||
pub fn emergency_notification_reminder_job(pool: DbPool) { |
|||
debug!("Start emergency_notification_reminder_job"); |
|||
if !CONFIG.emergency_access_allowed() { |
|||
return; |
|||
} |
|||
|
|||
if let Ok(conn) = pool.get() { |
|||
let emergency_access_list = EmergencyAccess::find_all_recoveries(&conn); |
|||
|
|||
if emergency_access_list.is_empty() { |
|||
debug!("No emergency request reminder notification to send"); |
|||
} |
|||
|
|||
for mut emer in emergency_access_list { |
|||
if (emer.recovery_initiated_at.is_some() |
|||
&& Utc::now().naive_utc() |
|||
>= emer.recovery_initiated_at.unwrap() + Duration::days((emer.wait_time_days as i64) - 1)) |
|||
&& (emer.last_notification_at.is_none() |
|||
|| (emer.last_notification_at.is_some() |
|||
&& Utc::now().naive_utc() >= emer.last_notification_at.unwrap() + Duration::days(1))) |
|||
{ |
|||
emer.save(&conn).expect("Cannot save emergency access on job"); |
|||
|
|||
if CONFIG.mail_enabled() { |
|||
// get grantor user to send Accepted email
|
|||
let grantor_user = User::find_by_uuid(&emer.grantor_uuid, &conn).expect("Grantor user not found."); |
|||
|
|||
// get grantee user to send Accepted email
|
|||
let grantee_user = |
|||
User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid."), &conn) |
|||
.expect("Grantee user not found."); |
|||
|
|||
mail::send_emergency_access_recovery_reminder( |
|||
&grantor_user.email, |
|||
&grantee_user.name.clone(), |
|||
emer.get_type_as_str(), |
|||
&emer.wait_time_days.to_string(), // TODO(jjlin): This should be the number of days left.
|
|||
) |
|||
.expect("Error on sending email"); |
|||
} |
|||
} |
|||
} |
|||
} else { |
|||
error!("Failed to get DB connection while searching emergency notification reminder") |
|||
} |
|||
} |
|||
|
@ -0,0 +1,282 @@ |
|||
use chrono::{NaiveDateTime, Utc}; |
|||
use serde_json::Value; |
|||
|
|||
use super::User; |
|||
|
|||
db_object! { |
|||
#[derive(Debug, Identifiable, Queryable, Insertable, Associations, AsChangeset)] |
|||
#[table_name = "emergency_access"] |
|||
#[changeset_options(treat_none_as_null="true")] |
|||
#[belongs_to(User, foreign_key = "grantor_uuid")] |
|||
#[primary_key(uuid)] |
|||
pub struct EmergencyAccess { |
|||
pub uuid: String, |
|||
pub grantor_uuid: String, |
|||
pub grantee_uuid: Option<String>, |
|||
pub email: Option<String>, |
|||
pub key_encrypted: Option<String>, |
|||
pub atype: i32, //EmergencyAccessType
|
|||
pub status: i32, //EmergencyAccessStatus
|
|||
pub wait_time_days: i32, |
|||
pub recovery_initiated_at: Option<NaiveDateTime>, |
|||
pub last_notification_at: Option<NaiveDateTime>, |
|||
pub updated_at: NaiveDateTime, |
|||
pub created_at: NaiveDateTime, |
|||
} |
|||
} |
|||
|
|||
/// Local methods
|
|||
|
|||
impl EmergencyAccess { |
|||
pub fn new(grantor_uuid: String, email: Option<String>, status: i32, atype: i32, wait_time_days: i32) -> Self { |
|||
let now = Utc::now().naive_utc(); |
|||
|
|||
Self { |
|||
uuid: crate::util::get_uuid(), |
|||
grantor_uuid, |
|||
grantee_uuid: None, |
|||
email, |
|||
status, |
|||
atype, |
|||
wait_time_days, |
|||
recovery_initiated_at: None, |
|||
created_at: now, |
|||
updated_at: now, |
|||
key_encrypted: None, |
|||
last_notification_at: None, |
|||
} |
|||
} |
|||
|
|||
pub fn get_type_as_str(&self) -> &'static str { |
|||
if self.atype == EmergencyAccessType::View as i32 { |
|||
"View" |
|||
} else { |
|||
"Takeover" |
|||
} |
|||
} |
|||
|
|||
pub fn has_type(&self, access_type: EmergencyAccessType) -> bool { |
|||
self.atype == access_type as i32 |
|||
} |
|||
|
|||
pub fn has_status(&self, status: EmergencyAccessStatus) -> bool { |
|||
self.status == status as i32 |
|||
} |
|||
|
|||
pub fn to_json(&self) -> Value { |
|||
json!({ |
|||
"Id": self.uuid, |
|||
"Status": self.status, |
|||
"Type": self.atype, |
|||
"WaitTimeDays": self.wait_time_days, |
|||
"Object": "emergencyAccess", |
|||
}) |
|||
} |
|||
|
|||
pub fn to_json_grantor_details(&self, conn: &DbConn) -> Value { |
|||
let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn).expect("Grantor user not found."); |
|||
|
|||
json!({ |
|||
"Id": self.uuid, |
|||
"Status": self.status, |
|||
"Type": self.atype, |
|||
"WaitTimeDays": self.wait_time_days, |
|||
"GrantorId": grantor_user.uuid, |
|||
"Email": grantor_user.email, |
|||
"Name": grantor_user.name, |
|||
"Object": "emergencyAccessGrantorDetails", |
|||
}) |
|||
} |
|||
|
|||
#[allow(clippy::manual_map)] |
|||
pub fn to_json_grantee_details(&self, conn: &DbConn) -> Value { |
|||
let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() { |
|||
Some(User::find_by_uuid(grantee_uuid, conn).expect("Grantee user not found.")) |
|||
} else if let Some(email) = self.email.as_deref() { |
|||
Some(User::find_by_mail(email, conn).expect("Grantee user not found.")) |
|||
} else { |
|||
None |
|||
}; |
|||
|
|||
json!({ |
|||
"Id": self.uuid, |
|||
"Status": self.status, |
|||
"Type": self.atype, |
|||
"WaitTimeDays": self.wait_time_days, |
|||
"GranteeId": grantee_user.as_ref().map_or("", |u| &u.uuid), |
|||
"Email": grantee_user.as_ref().map_or("", |u| &u.email), |
|||
"Name": grantee_user.as_ref().map_or("", |u| &u.name), |
|||
"Object": "emergencyAccessGranteeDetails", |
|||
}) |
|||
} |
|||
} |
|||
|
|||
#[derive(Copy, Clone, PartialEq, Eq, num_derive::FromPrimitive)] |
|||
pub enum EmergencyAccessType { |
|||
View = 0, |
|||
Takeover = 1, |
|||
} |
|||
|
|||
impl EmergencyAccessType { |
|||
pub fn from_str(s: &str) -> Option<Self> { |
|||
match s { |
|||
"0" | "View" => Some(EmergencyAccessType::View), |
|||
"1" | "Takeover" => Some(EmergencyAccessType::Takeover), |
|||
_ => None, |
|||
} |
|||
} |
|||
} |
|||
|
|||
impl PartialEq<i32> for EmergencyAccessType { |
|||
fn eq(&self, other: &i32) -> bool { |
|||
*other == *self as i32 |
|||
} |
|||
} |
|||
|
|||
impl PartialEq<EmergencyAccessType> for i32 { |
|||
fn eq(&self, other: &EmergencyAccessType) -> bool { |
|||
*self == *other as i32 |
|||
} |
|||
} |
|||
|
|||
pub enum EmergencyAccessStatus { |
|||
Invited = 0, |
|||
Accepted = 1, |
|||
Confirmed = 2, |
|||
RecoveryInitiated = 3, |
|||
RecoveryApproved = 4, |
|||
} |
|||
|
|||
// region Database methods
|
|||
|
|||
use crate::db::DbConn; |
|||
|
|||
use crate::api::EmptyResult; |
|||
use crate::error::MapResult; |
|||
|
|||
impl EmergencyAccess {
    /// Inserts this record, or updates it when it already exists, and bumps
    /// the grantor's revision stamp so their clients re-sync.
    /// Refreshes `updated_at` as a side effect.
    pub fn save(&mut self, conn: &DbConn) -> EmptyResult {
        User::update_uuid_revision(&self.grantor_uuid, conn);
        self.updated_at = Utc::now().naive_utc();

        db_run! { conn:
            sqlite, mysql {
                match diesel::replace_into(emergency_access::table)
                    .values(EmergencyAccessDb::to_db(self))
                    .execute(conn)
                {
                    Ok(_) => Ok(()),
                    // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
                    Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
                        diesel::update(emergency_access::table)
                            .filter(emergency_access::uuid.eq(&self.uuid))
                            .set(EmergencyAccessDb::to_db(self))
                            .execute(conn)
                            .map_res("Error updating emergency access")
                    }
                    Err(e) => Err(e.into()),
                }.map_res("Error saving emergency access")
            }
            postgresql {
                // Postgres supports a native upsert, so no replace/update fallback is needed.
                let value = EmergencyAccessDb::to_db(self);
                diesel::insert_into(emergency_access::table)
                    .values(&value)
                    .on_conflict(emergency_access::uuid)
                    .do_update()
                    .set(&value)
                    .execute(conn)
                    .map_res("Error saving emergency access")
            }
        }
    }

    /// Deletes every emergency access record in which `user_uuid` takes part,
    /// either as grantor or as grantee.
    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
        for ea in Self::find_all_by_grantor_uuid(user_uuid, conn) {
            ea.delete(conn)?;
        }
        for ea in Self::find_all_by_grantee_uuid(user_uuid, conn) {
            ea.delete(conn)?;
        }
        Ok(())
    }

    /// Deletes this record, bumping the grantor's revision stamp first so
    /// their clients re-sync.
    pub fn delete(self, conn: &DbConn) -> EmptyResult {
        User::update_uuid_revision(&self.grantor_uuid, conn);

        db_run! { conn: {
            diesel::delete(emergency_access::table.filter(emergency_access::uuid.eq(self.uuid)))
                .execute(conn)
                .map_res("Error removing user from emergency access")
        }}
    }

    /// Looks up a record by its primary key.
    pub fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::uuid.eq(uuid))
                .first::<EmergencyAccessDb>(conn)
                .ok().from_db()
        }}
    }

    /// Finds the record a grantor holds for a specific contact, matching the
    /// grantee either by their user uuid or by the invited e-mail address
    /// (pending invitations are keyed by e-mail; see `find_invited_by_grantee_email`).
    pub fn find_by_grantor_uuid_and_grantee_uuid_or_email(
        grantor_uuid: &str,
        grantee_uuid: &str,
        email: &str,
        conn: &DbConn,
    ) -> Option<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::grantor_uuid.eq(grantor_uuid))
                .filter(emergency_access::grantee_uuid.eq(grantee_uuid).or(emergency_access::email.eq(email)))
                .first::<EmergencyAccessDb>(conn)
                .ok().from_db()
        }}
    }

    /// Returns all records whose status is `RecoveryInitiated`, i.e. recovery
    /// requests that are still pending.
    pub fn find_all_recoveries(conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::status.eq(EmergencyAccessStatus::RecoveryInitiated as i32))
                .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
        }}
    }

    /// Looks up a record by uuid, but only when it belongs to `grantor_uuid`
    /// (the ownership check is folded into the query).
    pub fn find_by_uuid_and_grantor_uuid(uuid: &str, grantor_uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::uuid.eq(uuid))
                .filter(emergency_access::grantor_uuid.eq(grantor_uuid))
                .first::<EmergencyAccessDb>(conn)
                .ok().from_db()
        }}
    }

    /// Returns all records in which `grantee_uuid` is the emergency contact.
    pub fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::grantee_uuid.eq(grantee_uuid))
                .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
        }}
    }

    /// Finds a still-pending invitation (status `Invited`) by the invited
    /// e-mail address.
    pub fn find_invited_by_grantee_email(grantee_email: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::email.eq(grantee_email))
                .filter(emergency_access::status.eq(EmergencyAccessStatus::Invited as i32))
                .first::<EmergencyAccessDb>(conn)
                .ok().from_db()
        }}
    }

    /// Returns all records created by `grantor_uuid`.
    pub fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &DbConn) -> Vec<Self> {
        db_run! { conn: {
            emergency_access::table
                .filter(emergency_access::grantor_uuid.eq(grantor_uuid))
                .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
        }}
    }
}
|||
|
|||
// endregion
|
@ -0,0 +1,108 @@ |
|||
use chrono::{NaiveDateTime, Utc}; |
|||
|
|||
use crate::{api::EmptyResult, auth::ClientIp, db::DbConn, error::MapResult, CONFIG}; |
|||
|
|||
use super::User; |
|||
|
|||
db_object! {
    // Tracks a login attempt that passed the initial credential check but has
    // not yet completed its second factor, so overdue attempts can be found
    // later (see `find_logins_before`).
    #[derive(Identifiable, Queryable, Insertable, Associations, AsChangeset)]
    #[table_name = "twofactor_incomplete"]
    #[belongs_to(User, foreign_key = "user_uuid")]
    #[primary_key(user_uuid, device_uuid)]
    pub struct TwoFactorIncomplete {
        pub user_uuid: String,
        // This device UUID is simply what's claimed by the device. It doesn't
        // necessarily correspond to any UUID in the devices table, since a device
        // must complete 2FA login before being added into the devices table.
        pub device_uuid: String,
        pub device_name: String,
        // When the (still incomplete) login attempt was made.
        pub login_time: NaiveDateTime,
        // IP address the login attempt came from.
        pub ip_address: String,
    }
}
|||
|
|||
impl TwoFactorIncomplete {
    /// Records the start of a 2FA login attempt for the given user/device.
    /// No-op when incomplete-2FA tracking is disabled (time limit <= 0) or
    /// mail is disabled (a notification could not be sent anyway).
    pub fn mark_incomplete(
        user_uuid: &str,
        device_uuid: &str,
        device_name: &str,
        ip: &ClientIp,
        conn: &DbConn,
    ) -> EmptyResult {
        if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() {
            return Ok(());
        }

        // Don't update the data for an existing user/device pair, since that
        // would allow an attacker to arbitrarily delay notifications by
        // sending repeated 2FA attempts to reset the timer.
        let existing = Self::find_by_user_and_device(user_uuid, device_uuid, conn);
        if existing.is_some() {
            return Ok(());
        }

        db_run! { conn: {
            diesel::insert_into(twofactor_incomplete::table)
                .values((
                    twofactor_incomplete::user_uuid.eq(user_uuid),
                    twofactor_incomplete::device_uuid.eq(device_uuid),
                    twofactor_incomplete::device_name.eq(device_name),
                    twofactor_incomplete::login_time.eq(Utc::now().naive_utc()),
                    twofactor_incomplete::ip_address.eq(ip.ip.to_string()),
                ))
                .execute(conn)
                .map_res("Error adding twofactor_incomplete record")
        }}
    }

    /// Clears the pending record once the second factor has been completed.
    /// Guarded by the same config checks as `mark_incomplete`.
    pub fn mark_complete(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> EmptyResult {
        if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() {
            return Ok(());
        }

        Self::delete_by_user_and_device(user_uuid, device_uuid, conn)
    }

    /// Looks up the pending record for a user/device pair, if any.
    pub fn find_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> Option<Self> {
        db_run! { conn: {
            twofactor_incomplete::table
                .filter(twofactor_incomplete::user_uuid.eq(user_uuid))
                .filter(twofactor_incomplete::device_uuid.eq(device_uuid))
                .first::<TwoFactorIncompleteDb>(conn)
                .ok()
                .from_db()
        }}
    }

    /// Returns all attempts whose `login_time` is strictly before `dt`,
    /// i.e. those that have remained incomplete for too long.
    pub fn find_logins_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec<Self> {
        db_run! {conn: {
            twofactor_incomplete::table
                .filter(twofactor_incomplete::login_time.lt(dt))
                .load::<TwoFactorIncompleteDb>(conn)
                .expect("Error loading twofactor_incomplete")
                .from_db()
        }}
    }

    /// Deletes this record.
    pub fn delete(self, conn: &DbConn) -> EmptyResult {
        Self::delete_by_user_and_device(&self.user_uuid, &self.device_uuid, conn)
    }

    /// Deletes the pending record for a user/device pair.
    pub fn delete_by_user_and_device(user_uuid: &str, device_uuid: &str, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(twofactor_incomplete::table
                .filter(twofactor_incomplete::user_uuid.eq(user_uuid))
                .filter(twofactor_incomplete::device_uuid.eq(device_uuid)))
                .execute(conn)
                .map_res("Error in twofactor_incomplete::delete_by_user_and_device()")
        }}
    }

    /// Deletes all pending records belonging to a user.
    pub fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
        db_run! { conn: {
            diesel::delete(twofactor_incomplete::table.filter(twofactor_incomplete::user_uuid.eq(user_uuid)))
                .execute(conn)
                .map_res("Error in twofactor_incomplete::delete_all_by_user()")
        }}
    }
}
@ -0,0 +1,38 @@ |
|||
use once_cell::sync::Lazy; |
|||
use std::{net::IpAddr, num::NonZeroU32, time::Duration}; |
|||
|
|||
use governor::{clock::DefaultClock, state::keyed::DashMapStateStore, Quota, RateLimiter}; |
|||
|
|||
use crate::{Error, CONFIG}; |
|||
|
|||
type Limiter<T = IpAddr> = RateLimiter<T, DashMapStateStore<T>, DefaultClock>; |
|||
|
|||
// Rate limiter for login requests, keyed by client IP.
// Quota: one request per `login_ratelimit_seconds`, with a burst allowance
// of `login_ratelimit_max_burst`. Panics on first use if either configured
// value is zero (both `expect`s below).
static LIMITER_LOGIN: Lazy<Limiter> = Lazy::new(|| {
    let seconds = Duration::from_secs(CONFIG.login_ratelimit_seconds());
    let burst = NonZeroU32::new(CONFIG.login_ratelimit_max_burst()).expect("Non-zero login ratelimit burst");
    RateLimiter::keyed(Quota::with_period(seconds).expect("Non-zero login ratelimit seconds").allow_burst(burst))
});
|||
|
|||
// Rate limiter for admin-page requests, keyed by client IP.
// Quota: one request per `admin_ratelimit_seconds`, with a burst allowance
// of `admin_ratelimit_max_burst`. Panics on first use if either configured
// value is zero (both `expect`s below).
static LIMITER_ADMIN: Lazy<Limiter> = Lazy::new(|| {
    let seconds = Duration::from_secs(CONFIG.admin_ratelimit_seconds());
    let burst = NonZeroU32::new(CONFIG.admin_ratelimit_max_burst()).expect("Non-zero admin ratelimit burst");
    RateLimiter::keyed(Quota::with_period(seconds).expect("Non-zero admin ratelimit seconds").allow_burst(burst))
});
|||
|
|||
pub fn check_limit_login(ip: &IpAddr) -> Result<(), Error> { |
|||
match LIMITER_LOGIN.check_key(ip) { |
|||
Ok(_) => Ok(()), |
|||
Err(_e) => { |
|||
err_code!("Too many login requests", 429); |
|||
} |
|||
} |
|||
} |
|||
|
|||
pub fn check_limit_admin(ip: &IpAddr) -> Result<(), Error> { |
|||
match LIMITER_ADMIN.check_key(ip) { |
|||
Ok(_) => Ok(()), |
|||
Err(_e) => { |
|||
err_code!("Too many admin requests", 429); |
|||
} |
|||
} |
|||
} |
File diff suppressed because it is too large
@ -0,0 +1,8 @@ |
|||
Emergency access contact {{{grantee_email}}} accepted |
|||
<!----------------> |
|||
This email is to notify you that {{grantee_email}} has accepted your invitation to become an emergency access contact. |
|||
|
|||
To confirm this user, log into the web vault ({{url}}), go to settings and confirm the user. |
|||
|
|||
If you do not wish to confirm this user, you can also remove them on the same page. |
|||
{{> email/email_footer_text }} |
@ -0,0 +1,21 @@ |
|||
Emergency access contact {{{grantee_email}}} accepted |
|||
<!----------------> |
|||
{{> email/email_header }} |
|||
<table width="100%" cellpadding="0" cellspacing="0" style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;"> |
|||
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;"> |
|||
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top"> |
|||
This email is to notify you that {{grantee_email}} has accepted your invitation to become an emergency access contact. |
|||
</td> |
|||
</tr> |
|||
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;"> |
|||
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top"> |
|||
To confirm this user, log into the <a href="{{url}}/">web vault</a>, go to settings and confirm the user. |
|||
</td> |
|||
</tr> |
|||
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;"> |
|||
<td class="content-block last" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0; -webkit-text-size-adjust: none;" valign="top"> |
|||
If you do not wish to confirm this user, you can also remove them on the same page. |
|||
</td> |
|||
</tr> |
|||
</table> |
|||
{{> email/email_footer }} |
@ -0,0 +1,6 @@ |
|||
Emergency access contact for {{{grantor_name}}} confirmed |
|||
<!----------------> |
|||
This email is to notify you that you have been confirmed as an emergency access contact for *{{grantor_name}}*. |
|||
|
|||
You can now initiate emergency access requests from the web vault ({{url}}). |
|||
{{> email/email_footer_text }} |
@ -0,0 +1,16 @@ |
|||
Emergency access contact for {{{grantor_name}}} confirmed |
|||
<!----------------> |
|||
{{> email/email_header }} |
|||
<table width="100%" cellpadding="0" cellspacing="0" style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;"> |
|||
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;"> |
|||
<td class="content-block" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0 0 10px; -webkit-text-size-adjust: none;" valign="top"> |
|||
This email is to notify you that you have been confirmed as an emergency access contact for <b style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;">{{grantor_name}}</b>. |
|||
</td> |
|||
</tr> |
|||
<tr style="margin: 0; font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: none;"> |
|||
<td class="content-block last" style="font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; box-sizing: border-box; font-size: 16px; color: #333; line-height: 25px; margin: 0; -webkit-font-smoothing: antialiased; padding: 0; -webkit-text-size-adjust: none;" valign="top"> |
|||
You can now initiate emergency access requests from the <a href="{{url}}/">web vault</a>. |
|||
</td> |
|||
</tr> |
|||
</table> |
|||
{{> email/email_footer }} |
Some files were not shown because too many files changed in this diff
Loading…
Reference in new issue