Compare commits

4 Commits

Author                 SHA1        Message                                                            Date
Mathijs van Veluw      4f68eafa3e  Add Attestations for containers and artifacts (#5378)             1 week ago
Integral               327d369188  refactor: replace static with const for global constants (#5260)  1 week ago
Mathijs van Veluw      ca7483df85  Fix an issue with login with device (#5379)                        1 week ago
Helmut K. C. Tessarek  16b6d2a71e  build: raise msrv (1.83.0) rust toolchain (1.84.0) (#5374)         1 week ago
11 changed files:

  1. .github/workflows/release.yml   (128 changes)
  2. Cargo.toml                      (2 changes)
  3. docker/DockerSettings.yaml      (2 changes)
  4. docker/Dockerfile.alpine        (8 changes)
  5. docker/Dockerfile.debian        (2 changes)
  6. rust-toolchain.toml             (2 changes)
  7. src/api/core/accounts.rs        (2 changes)
  8. src/api/notifications.rs        (18 changes)
  9. src/api/push.rs                 (10 changes)
 10. src/crypto.rs                   (2 changes)
 11. src/db/models/organization.rs   (4 changes)

.github/workflows/release.yml (128 changes)

@@ -27,11 +27,16 @@ jobs:
         if: ${{ github.ref_type == 'branch' }}
   docker-build:
+    permissions:
+      packages: write
+      contents: read
+      attestations: write
+      id-token: write
     runs-on: ubuntu-24.04
     timeout-minutes: 120
     needs: skip_check
     if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }}
-    # Start a local docker registry to extract the final Alpine static build binaries
+    # Start a local docker registry to extract the compiled binaries to upload as artifacts and attest them
     services:
       registry:
         image: registry:2
@@ -63,7 +68,7 @@ jobs:
           fetch-depth: 0
       - name: Initialize QEMU binfmt support
-        uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0
+        uses: docker/setup-qemu-action@53851d14592bedcffcf25ea515637cff71ef929a # v3.3.0
         with:
           platforms: "arm64,arm"
@@ -159,13 +164,13 @@ jobs:
       #
      - name: Add localhost registry
-        if: ${{ matrix.base_image == 'alpine' }}
         shell: bash
         run: |
           echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"
       - name: Bake ${{ matrix.base_image }} containers
-        uses: docker/bake-action@3fc70e1131fee40a422dd8dd0ff22014ae20a1f3 # v5.11.0
+        id: bake_vw
+        uses: docker/bake-action@5ca506d06f70338a4968df87fd8bfee5cbfb84c7 # v6.0.0
         env:
           BASE_TAGS: "${{ env.BASE_TAGS }}"
           SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
@@ -181,10 +186,59 @@ jobs:
             *.cache-from=${{ env.BAKE_CACHE_FROM }}
             *.cache-to=${{ env.BAKE_CACHE_TO }}
+      # Attest Debian
+      - name: Attest - docker.io - Debian
+        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' && matrix.base_image == 'debian' && steps.bake_vw.outputs.metadata != ''}}
+        uses: actions/attest-build-provenance@7668571508540a607bdfd90a87a560489fe372eb # v2.1.0
+        with:
+          subject-name: ${{ vars.DOCKERHUB_REPO }}
+          subject-digest: ${{ fromJSON(steps.bake_vw.outputs.metadata).debian-multi['containerimage.digest'] }}
+          push-to-registry: true
+      - name: Attest - ghcr.io - Debian
+        if: ${{ env.HAVE_GHCR_LOGIN == 'true' && matrix.base_image == 'debian' && steps.bake_vw.outputs.metadata != ''}}
+        uses: actions/attest-build-provenance@7668571508540a607bdfd90a87a560489fe372eb # v2.1.0
+        with:
+          subject-name: ${{ vars.GHCR_REPO }}
+          subject-digest: ${{ fromJSON(steps.bake_vw.outputs.metadata).debian-multi['containerimage.digest'] }}
+          push-to-registry: true
+      - name: Attest - quay.io - Debian
+        if: ${{ env.HAVE_QUAY_LOGIN == 'true' && matrix.base_image == 'debian' && steps.bake_vw.outputs.metadata != ''}}
+        uses: actions/attest-build-provenance@7668571508540a607bdfd90a87a560489fe372eb # v2.1.0
+        with:
+          subject-name: ${{ vars.QUAY_REPO }}
+          subject-digest: ${{ fromJSON(steps.bake_vw.outputs.metadata).debian-multi['containerimage.digest'] }}
+          push-to-registry: true
+      # Attest Alpine
+      - name: Attest - docker.io - Alpine
+        if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' && matrix.base_image == 'alpine' && steps.bake_vw.outputs.metadata != ''}}
+        uses: actions/attest-build-provenance@7668571508540a607bdfd90a87a560489fe372eb # v2.1.0
+        with:
+          subject-name: ${{ vars.DOCKERHUB_REPO }}
+          subject-digest: ${{ fromJSON(steps.bake_vw.outputs.metadata).alpine-multi['containerimage.digest'] }}
+          push-to-registry: true
+      - name: Attest - ghcr.io - Alpine
+        if: ${{ env.HAVE_GHCR_LOGIN == 'true' && matrix.base_image == 'alpine' && steps.bake_vw.outputs.metadata != ''}}
+        uses: actions/attest-build-provenance@7668571508540a607bdfd90a87a560489fe372eb # v2.1.0
+        with:
+          subject-name: ${{ vars.GHCR_REPO }}
+          subject-digest: ${{ fromJSON(steps.bake_vw.outputs.metadata).alpine-multi['containerimage.digest'] }}
+          push-to-registry: true
+      - name: Attest - quay.io - Alpine
+        if: ${{ env.HAVE_QUAY_LOGIN == 'true' && matrix.base_image == 'alpine' && steps.bake_vw.outputs.metadata != ''}}
+        uses: actions/attest-build-provenance@7668571508540a607bdfd90a87a560489fe372eb # v2.1.0
+        with:
+          subject-name: ${{ vars.QUAY_REPO }}
+          subject-digest: ${{ fromJSON(steps.bake_vw.outputs.metadata).alpine-multi['containerimage.digest'] }}
+          push-to-registry: true
       # Extract the Alpine binaries from the containers
       - name: Extract binaries
-        if: ${{ matrix.base_image == 'alpine' }}
         shell: bash
         run: |
           # Check which main tag we are going to build determined by github.ref_type
@@ -194,59 +248,65 @@ jobs:
             EXTRACT_TAG="testing"
           fi
+          # Check which base_image was used and append -alpine if needed
+          if [[ "${{ matrix.base_image }}" == "alpine" ]]; then
+            EXTRACT_TAG="${EXTRACT_TAG}-alpine"
+          fi
           # After each extraction the image is removed.
           # This is needed because using different platforms doesn't trigger a new pull/download
           # Extract amd64 binary
-          docker create --name amd64 --platform=linux/amd64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
-          docker cp amd64:/vaultwarden vaultwarden-amd64
+          docker create --name amd64 --platform=linux/amd64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
+          docker cp amd64:/vaultwarden vaultwarden-amd64-${{ matrix.base_image }}
           docker rm --force amd64
-          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
           # Extract arm64 binary
-          docker create --name arm64 --platform=linux/arm64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
-          docker cp arm64:/vaultwarden vaultwarden-arm64
+          docker create --name arm64 --platform=linux/arm64 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
+          docker cp arm64:/vaultwarden vaultwarden-arm64-${{ matrix.base_image }}
           docker rm --force arm64
-          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
           # Extract armv7 binary
-          docker create --name armv7 --platform=linux/arm/v7 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
-          docker cp armv7:/vaultwarden vaultwarden-armv7
+          docker create --name armv7 --platform=linux/arm/v7 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
+          docker cp armv7:/vaultwarden vaultwarden-armv7-${{ matrix.base_image }}
           docker rm --force armv7
-          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
           # Extract armv6 binary
-          docker create --name armv6 --platform=linux/arm/v6 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
-          docker cp armv6:/vaultwarden vaultwarden-armv6
+          docker create --name armv6 --platform=linux/arm/v6 "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
+          docker cp armv6:/vaultwarden vaultwarden-armv6-${{ matrix.base_image }}
           docker rm --force armv6
-          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}-alpine"
+          docker rmi --force "localhost:5000/vaultwarden/server:${EXTRACT_TAG}"
-      # Upload artifacts to Github Actions
-      - name: "Upload amd64 artifact"
+      # Upload artifacts to Github Actions and Attest the binaries
+      - name: "Upload amd64 artifact ${{ matrix.base_image }}"
         uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b #v4.5.0
-        if: ${{ matrix.base_image == 'alpine' }}
         with:
-          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
-          path: vaultwarden-amd64
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64-${{ matrix.base_image }}
+          path: vaultwarden-amd64-${{ matrix.base_image }}
-      - name: "Upload arm64 artifact"
+      - name: "Upload arm64 artifact ${{ matrix.base_image }}"
         uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b #v4.5.0
-        if: ${{ matrix.base_image == 'alpine' }}
         with:
-          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
-          path: vaultwarden-arm64
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64-${{ matrix.base_image }}
+          path: vaultwarden-arm64-${{ matrix.base_image }}
-      - name: "Upload armv7 artifact"
+      - name: "Upload armv7 artifact ${{ matrix.base_image }}"
         uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b #v4.5.0
-        if: ${{ matrix.base_image == 'alpine' }}
         with:
-          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
-          path: vaultwarden-armv7
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7-${{ matrix.base_image }}
+          path: vaultwarden-armv7-${{ matrix.base_image }}
-      - name: "Upload armv6 artifact"
+      - name: "Upload armv6 artifact ${{ matrix.base_image }}"
         uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b #v4.5.0
-        if: ${{ matrix.base_image == 'alpine' }}
         with:
-          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6
-          path: vaultwarden-armv6
+          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6-${{ matrix.base_image }}
+          path: vaultwarden-armv6-${{ matrix.base_image }}
+      - name: "Attest artifacts ${{ matrix.base_image }}"
+        uses: actions/attest-build-provenance@7668571508540a607bdfd90a87a560489fe372eb # v2.1.0
+        with:
+          subject-path: vaultwarden-*
       # End Upload artifacts to Github Actions
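
Note on the new attest steps above: the image digest they sign is read from the bake step's JSON metadata output via the expression fromJSON(steps.bake_vw.outputs.metadata).debian-multi['containerimage.digest'] (alpine-multi for the Alpine images). The following minimal Rust sketch performs the same lookup; it assumes only the metadata shape implied by that expression (bake target name as top-level key, digest under "containerimage.digest") and is an illustration, not part of the workflow:

// Illustrative only: mirrors the workflow expression
// fromJSON(steps.bake_vw.outputs.metadata).debian-multi['containerimage.digest'].
// The JSON shape beyond those two keys is an assumption.
use serde_json::Value;

fn container_digest(metadata: &str, target: &str) -> Option<String> {
    let v: Value = serde_json::from_str(metadata).ok()?;
    // Each bake target (e.g. "debian-multi", "alpine-multi") is a top-level key,
    // and the pushed image digest sits under "containerimage.digest".
    v.get(target)?.get("containerimage.digest")?.as_str().map(str::to_owned)
}

fn main() {
    let metadata = r#"{ "debian-multi": { "containerimage.digest": "sha256:abc123" } }"#;
    assert_eq!(container_digest(metadata, "debian-multi").as_deref(), Some("sha256:abc123"));
    println!("digest: {:?}", container_digest(metadata, "debian-multi"));
}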

Cargo.toml (2 changes)

@@ -5,7 +5,7 @@ name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2021"
-rust-version = "1.82.0"
+rust-version = "1.83.0"
 resolver = "2"
 repository = "https://github.com/dani-garcia/vaultwarden"

docker/DockerSettings.yaml (2 changes)

@@ -5,7 +5,7 @@ vault_image_digest: "sha256:72d636334b4ad6fe9ba1d12e0cda562cd31772cf28772f6b2fe4
 # We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
 # https://github.com/tonistiigi/xx | https://hub.docker.com/r/tonistiigi/xx/tags
 xx_image_digest: "sha256:9c207bead753dda9430bdd15425c6518fc7a03d866103c516a2c6889188f5894"
-rust_version: 1.83.0 # Rust version to be used
+rust_version: 1.84.0 # Rust version to be used
 debian_version: bookworm # Debian release name to be used
 alpine_version: "3.21" # Alpine version to be used
 # For which platforms/architectures will we try to build images

docker/Dockerfile.alpine (8 changes)

@@ -32,10 +32,10 @@ FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:72d636334b4ad
 ########################## ALPINE BUILD IMAGES ##########################
 ## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
 ## And for Alpine we define all build images here, they will only be loaded when actually used
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.83.0 AS build_amd64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.83.0 AS build_arm64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.83.0 AS build_armv7
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.83.0 AS build_armv6
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.84.0 AS build_amd64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.84.0 AS build_arm64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.84.0 AS build_armv7
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.84.0 AS build_armv6
 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006

docker/Dockerfile.debian (2 changes)

@@ -36,7 +36,7 @@ FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:9c207bead753dda9430bd
 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
-FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.83.0-slim-bookworm AS build
+FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.84.0-slim-bookworm AS build
 COPY --from=xx / /
 ARG TARGETARCH
 ARG TARGETVARIANT

rust-toolchain.toml (2 changes)

@@ -1,4 +1,4 @@
 [toolchain]
-channel = "1.83.0"
+channel = "1.84.0"
 components = [ "rustfmt", "clippy" ]
 profile = "minimal"

src/api/core/accounts.rs (2 changes)

@@ -1264,7 +1264,7 @@ async fn put_auth_request(
         auth_request.save(&mut conn).await?;
         ant.send_auth_response(&auth_request.user_uuid, &auth_request.uuid).await;
-        nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, data.device_identifier, &mut conn).await;
+        nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, &data.device_identifier, &mut conn).await;
     } else {
         // If denied, there's no reason to keep the request
         auth_request.delete(&mut conn).await?;

src/api/notifications.rs (18 changes)

@@ -10,7 +10,7 @@ use rocket_ws::{Message, WebSocket};
 use crate::{
     auth::{ClientIp, WsAccessTokenHeader},
     db::{
-        models::{Cipher, CollectionId, DeviceId, Folder, Send as DbSend, User, UserId},
+        models::{AuthRequestId, Cipher, CollectionId, DeviceId, Folder, Send as DbSend, User, UserId},
         DbConn,
     },
     Error, CONFIG,
@@ -522,8 +522,8 @@ impl WebSocketUsers {
     pub async fn send_auth_response(
         &self,
         user_id: &UserId,
-        auth_response_uuid: &str,
-        approving_device_uuid: DeviceId,
+        auth_request_id: &AuthRequestId,
+        approving_device_id: &DeviceId,
         conn: &mut DbConn,
     ) {
         // Skip any processing if both WebSockets and Push are not active
@@ -531,16 +531,16 @@ impl WebSocketUsers {
             return;
         }
         let data = create_update(
-            vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_id.to_string().into())],
+            vec![("Id".into(), auth_request_id.to_string().into()), ("UserId".into(), user_id.to_string().into())],
             UpdateType::AuthRequestResponse,
-            Some(approving_device_uuid.clone()),
+            Some(approving_device_id.clone()),
         );
         if CONFIG.enable_websocket() {
             self.send_update(user_id, &data).await;
         }
         if CONFIG.push_enabled() {
-            push_auth_response(user_id.clone(), auth_response_uuid.to_string(), approving_device_uuid, conn).await;
+            push_auth_response(user_id, auth_request_id, approving_device_id, conn).await;
         }
     }
 }
@@ -559,16 +559,16 @@ impl AnonymousWebSocketSubscriptions {
         }
     }
-    pub async fn send_auth_response(&self, user_id: &UserId, auth_response_uuid: &str) {
+    pub async fn send_auth_response(&self, user_id: &UserId, auth_request_id: &AuthRequestId) {
         if !CONFIG.enable_websocket() {
             return;
         }
         let data = create_anonymous_update(
-            vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(), user_id.to_string().into())],
+            vec![("Id".into(), auth_request_id.to_string().into()), ("UserId".into(), user_id.to_string().into())],
            UpdateType::AuthRequestResponse,
            user_id.clone(),
         );
-        self.send_update(user_id, &data).await;
+        self.send_update(auth_request_id, &data).await;
     }
 }
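
The signature change above swaps the untyped auth_response_uuid: &str for the AuthRequestId wrapper and borrows the approving DeviceId instead of taking it by value. A minimal sketch of the newtype-ID idea behind those types (hypothetical definitions for illustration; vaultwarden's actual ID types are defined elsewhere in the codebase and may differ):

// Illustrative newtype-ID sketch; not vaultwarden's actual definitions.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct AuthRequestId(pub String);

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct DeviceId(pub String);

impl std::fmt::Display for AuthRequestId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

// With distinct wrapper types the compiler rejects calls that swap the two IDs,
// which a signature taking plain &str parameters would happily accept.
fn send_auth_response(auth_request_id: &AuthRequestId, approving_device_id: &DeviceId) {
    println!("auth request {auth_request_id} approved by device {:?}", approving_device_id);
}

fn main() {
    let req = AuthRequestId("11111111-2222-3333-4444-555555555555".into());
    let dev = DeviceId("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee".into());
    send_auth_response(&req, &dev);
    // send_auth_response(&dev, &req); // does not compile: mismatched types
}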

src/api/push.rs (10 changes)

@@ -7,7 +7,7 @@ use tokio::sync::RwLock;
 use crate::{
     api::{ApiResult, EmptyResult, UpdateType},
-    db::models::{Cipher, Device, DeviceId, Folder, Send, User, UserId},
+    db::models::{AuthRequestId, Cipher, Device, DeviceId, Folder, Send, User, UserId},
     http_client::make_http_request,
     util::format_date,
     CONFIG,
@@ -301,12 +301,12 @@ pub async fn push_auth_request(user_id: UserId, auth_request_id: String, conn: &
 }
 pub async fn push_auth_response(
-    user_id: UserId,
-    auth_request_id: String,
-    approving_device_id: DeviceId,
+    user_id: &UserId,
+    auth_request_id: &AuthRequestId,
+    approving_device_id: &DeviceId,
     conn: &mut crate::db::DbConn,
 ) {
-    if Device::check_user_has_push_device(&user_id, conn).await {
+    if Device::check_user_has_push_device(user_id, conn).await {
         tokio::task::spawn(send_to_push_relay(json!({
             "userId": user_id,
             "organizationId": (),

src/crypto.rs (2 changes)

@@ -6,7 +6,7 @@ use std::num::NonZeroU32;
 use data_encoding::{Encoding, HEXLOWER};
 use ring::{digest, hmac, pbkdf2};
-static DIGEST_ALG: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256;
+const DIGEST_ALG: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256;
 const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;
 pub fn hash_password(secret: &[u8], salt: &[u8], iterations: u32) -> Vec<u8> {
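
DIGEST_ALG is a compile-time constant, so const is the better fit than static; the same reasoning applies to ACCESS_LEVEL and ACTIVATE_REVOKE_DIFF below. A short sketch of the practical difference between the two (illustrative, not project code):

// Illustrative sketch of const vs static for a global constant.
// A `const` is inlined at every use site and has no guaranteed address of its own;
// a `static` is a single object that lives for the whole program, so you can take
// a stable reference to it. For plain configuration values, `const` is idiomatic.
const ITERATIONS: u32 = 100_000;        // copied into each use site
static APP_NAME: &str = "vaultwarden";  // one object with a 'static address

fn main() {
    let n = ITERATIONS; // the value 100_000 is baked in here
    let name: &'static str = APP_NAME;
    println!("{name}: {n} iterations");
    // &ITERATIONS is allowed too, but it may point at a fresh temporary each time,
    // whereas &APP_NAME always refers to the same static object.
}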

src/db/models/organization.rs (4 changes)

@@ -87,7 +87,7 @@ impl MembershipType {
 impl Ord for MembershipType {
     fn cmp(&self, other: &MembershipType) -> Ordering {
         // For easy comparison, map each variant to an access level (where 0 is lowest).
-        static ACCESS_LEVEL: [i32; 4] = [
+        const ACCESS_LEVEL: [i32; 4] = [
             3, // Owner
             2, // Admin
             0, // User
@@ -216,7 +216,7 @@ impl Organization {
 // The number 128 should be fine, it is well within the range of an i32
 // The same goes for the database where we only use INTEGER (the same as an i32)
 // It should also provide enough room for 100+ types, which i doubt will ever happen.
-static ACTIVATE_REVOKE_DIFF: i32 = 128;
+const ACTIVATE_REVOKE_DIFF: i32 = 128;
 impl Membership {
     pub fn new(user_uuid: UserId, org_uuid: OrganizationId) -> Self {
