Browse Source

Merge dc91445e9a into 9c7df6412c

pull/6916/merge
Matt Aaron 2 days ago
committed by GitHub
parent
commit
d0d3297a85
No known key found for this signature in database GPG Key ID: B5690EEEBB952194
  1. 1
      .env.template
  2. 1
      migrations/mysql/2026-03-09-005927_add_archives/down.sql
  3. 10
      migrations/mysql/2026-03-09-005927_add_archives/up.sql
  4. 1
      migrations/postgresql/2026-03-09-005927-add_archives/down.sql
  5. 8
      migrations/postgresql/2026-03-09-005927-add_archives/up.sql
  6. 1
      migrations/sqlite/2026-03-09-005927-add_archives/down.sql
  7. 8
      migrations/sqlite/2026-03-09-005927-add_archives/up.sql
  8. 124
      src/api/core/ciphers.rs
  9. 2
      src/config.rs
  10. 97
      src/db/models/archive.rs
  11. 22
      src/db/models/cipher.rs
  12. 2
      src/db/models/mod.rs
  13. 8
      src/db/schema.rs

1
.env.template

@ -384,6 +384,7 @@
## - "mutual-tls": Enable the use of mutual TLS on Android (Client >= 2025.2.0) ## - "mutual-tls": Enable the use of mutual TLS on Android (Client >= 2025.2.0)
## - "cxp-import-mobile": Enable the import via CXP on iOS (Clients >=2025.9.2) ## - "cxp-import-mobile": Enable the import via CXP on iOS (Clients >=2025.9.2)
## - "cxp-export-mobile": Enable the export via CXP on iOS (Clients >=2025.9.2) ## - "cxp-export-mobile": Enable the export via CXP on iOS (Clients >=2025.9.2)
## - "pm-19148-innovation-archive": Enable support for archiving items (Client >= 2025.10.1)
# EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials # EXPERIMENTAL_CLIENT_FEATURE_FLAGS=fido2-vault-credentials
## Require new device emails. When a user logs in an email is required to be sent. ## Require new device emails. When a user logs in an email is required to be sent.

1
migrations/mysql/2026-03-09-005927_add_archives/down.sql

@ -0,0 +1 @@
-- Revert the archive feature: remove the per-user cipher archive table.
DROP TABLE archives;

10
migrations/mysql/2026-03-09-005927_add_archives/up.sql

@ -0,0 +1,10 @@
-- One row per (user, cipher) pair the user has archived, with the archive timestamp.
-- NOTE(review): dropping first makes a partially-applied migration re-runnable, but is
-- destructive if a populated `archives` table already exists — confirm intended.
DROP TABLE IF EXISTS archives;
CREATE TABLE archives (
user_uuid CHAR(36) NOT NULL,
cipher_uuid CHAR(36) NOT NULL,
-- Defaults to the insert time when the application does not supply a value.
archived_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- A cipher can be archived at most once per user.
PRIMARY KEY (user_uuid, cipher_uuid),
-- Rows disappear automatically when the user or the cipher is deleted.
FOREIGN KEY (user_uuid) REFERENCES users (uuid) ON DELETE CASCADE,
FOREIGN KEY (cipher_uuid) REFERENCES ciphers (uuid) ON DELETE CASCADE
);

1
migrations/postgresql/2026-03-09-005927-add_archives/down.sql

@ -0,0 +1 @@
-- Revert the archive feature: remove the per-user cipher archive table.
DROP TABLE archives;

8
migrations/postgresql/2026-03-09-005927-add_archives/up.sql

@ -0,0 +1,8 @@
-- One row per (user, cipher) pair the user has archived, with the archive timestamp.
-- NOTE(review): dropping first makes a partially-applied migration re-runnable, but is
-- destructive if a populated `archives` table already exists — confirm intended.
DROP TABLE IF EXISTS archives;
CREATE TABLE archives (
-- Rows disappear automatically when the user or the cipher is deleted.
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid) ON DELETE CASCADE,
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid) ON DELETE CASCADE,
-- Defaults to the insert time when the application does not supply a value.
archived_at TIMESTAMP NOT NULL DEFAULT now(),
-- A cipher can be archived at most once per user.
PRIMARY KEY (user_uuid, cipher_uuid)
);

1
migrations/sqlite/2026-03-09-005927-add_archives/down.sql

@ -0,0 +1 @@
-- Revert the archive feature: remove the per-user cipher archive table.
DROP TABLE archives;

8
migrations/sqlite/2026-03-09-005927-add_archives/up.sql

@ -0,0 +1,8 @@
-- One row per (user, cipher) pair the user has archived, with the archive timestamp.
-- NOTE(review): dropping first makes a partially-applied migration re-runnable, but is
-- destructive if a populated `archives` table already exists — confirm intended.
DROP TABLE IF EXISTS archives;
CREATE TABLE archives (
-- Rows disappear automatically when the user or the cipher is deleted.
user_uuid CHAR(36) NOT NULL REFERENCES users (uuid) ON DELETE CASCADE,
cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid) ON DELETE CASCADE,
-- Defaults to the insert time when the application does not supply a value.
archived_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- A cipher can be archived at most once per user.
PRIMARY KEY (user_uuid, cipher_uuid)
);

124
src/api/core/ciphers.rs

@ -19,9 +19,9 @@ use crate::{
crypto, crypto,
db::{ db::{
models::{ models::{
Attachment, AttachmentId, Cipher, CipherId, Collection, CollectionCipher, CollectionGroup, CollectionId, Archive, Attachment, AttachmentId, Cipher, CipherId, Collection, CollectionCipher, CollectionGroup,
CollectionUser, EventType, Favorite, Folder, FolderCipher, FolderId, Group, Membership, MembershipType, CollectionId, CollectionUser, EventType, Favorite, Folder, FolderCipher, FolderId, Group, Membership,
OrgPolicy, OrgPolicyType, OrganizationId, RepromptType, Send, UserId, MembershipType, OrgPolicy, OrgPolicyType, OrganizationId, RepromptType, Send, UserId,
}, },
DbConn, DbPool, DbConn, DbPool,
}, },
@ -95,6 +95,10 @@ pub fn routes() -> Vec<Route> {
post_collections_update, post_collections_update,
post_collections_admin, post_collections_admin,
put_collections_admin, put_collections_admin,
archive_cipher_put,
archive_cipher_selected,
unarchive_cipher_put,
unarchive_cipher_selected,
] ]
} }
@ -291,6 +295,7 @@ pub struct CipherData {
// when using older client versions, or if the operation doesn't involve // when using older client versions, or if the operation doesn't involve
// updating an existing cipher. // updating an existing cipher.
last_known_revision_date: Option<String>, last_known_revision_date: Option<String>,
archived_date: Option<String>,
} }
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
@ -530,6 +535,17 @@ pub async fn update_cipher_from_data(
cipher.save(conn).await?; cipher.save(conn).await?;
cipher.move_to_folder(data.folder_id, &headers.user.uuid, conn).await?; cipher.move_to_folder(data.folder_id, &headers.user.uuid, conn).await?;
cipher.set_favorite(data.favorite, &headers.user.uuid, conn).await?; cipher.set_favorite(data.favorite, &headers.user.uuid, conn).await?;
let archived_at = match data.archived_date {
Some(dt_str) => match NaiveDateTime::parse_from_str(&dt_str, "%+") {
Ok(dt) => Some(dt),
Err(err) => {
warn!("Error parsing ArchivedDate '{dt_str}': {err}");
None
}
},
None => None,
};
cipher.set_archived_at(archived_at, &headers.user.uuid, conn).await?;
if ut != UpdateType::None { if ut != UpdateType::None {
// Only log events for organizational ciphers // Only log events for organizational ciphers
@ -1703,6 +1719,36 @@ async fn delete_all(
} }
} }
/// `PUT /ciphers/<cipher_id>/archive` — archive a single cipher for the
/// current user; sends a per-cipher push update (multi_archive = false).
#[put("/ciphers/<cipher_id>/archive")]
async fn archive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
    set_archived_cipher_by_uuid(&cipher_id, &headers, true, false, &conn, &nt).await
}
/// `PUT /ciphers/archive` — bulk-archive the ciphers listed in the request
/// body for the current user; one aggregate sync notification is sent.
#[put("/ciphers/archive", data = "<data>")]
async fn archive_cipher_selected(
    data: Json<CipherIdsData>,
    headers: Headers,
    conn: DbConn,
    nt: Notify<'_>,
) -> JsonResult {
    set_archived_multiple_ciphers(data, &headers, true, &conn, &nt).await
}
/// `PUT /ciphers/<cipher_id>/unarchive` — clear the archived state of a single
/// cipher for the current user; sends a per-cipher push update.
#[put("/ciphers/<cipher_id>/unarchive")]
async fn unarchive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
    set_archived_cipher_by_uuid(&cipher_id, &headers, false, false, &conn, &nt).await
}
/// `PUT /ciphers/unarchive` — bulk-unarchive the ciphers listed in the request
/// body for the current user; one aggregate sync notification is sent.
#[put("/ciphers/unarchive", data = "<data>")]
async fn unarchive_cipher_selected(
    data: Json<CipherIdsData>,
    headers: Headers,
    conn: DbConn,
    nt: Notify<'_>,
) -> JsonResult {
    set_archived_multiple_ciphers(data, &headers, false, &conn, &nt).await
}
#[derive(PartialEq)] #[derive(PartialEq)]
pub enum CipherDeleteOptions { pub enum CipherDeleteOptions {
SoftSingle, SoftSingle,
@ -1921,6 +1967,71 @@ async fn _delete_cipher_attachment_by_id(
Ok(Json(json!({"cipher":cipher_json}))) Ok(Json(json!({"cipher":cipher_json})))
} }
/// Archives (`archived == true`) or unarchives a single cipher for the
/// requesting user, after verifying the cipher exists and is accessible.
///
/// `multi_archive == false` sends a per-cipher push notification; bulk callers
/// pass `true` and send one aggregate sync notification themselves.
/// Returns the cipher's user-scoped JSON representation.
async fn set_archived_cipher_by_uuid(
    cipher_id: &CipherId,
    headers: &Headers,
    archived: bool,
    multi_archive: bool,
    conn: &DbConn,
    nt: &Notify<'_>,
) -> JsonResult {
    let Some(cipher) = Cipher::find_by_uuid(cipher_id, conn).await else {
        err!("Cipher doesn't exist")
    };

    if !cipher.is_accessible_to_user(&headers.user.uuid, conn).await {
        err!("Cipher is not accessible for the current user")
    }

    // Archiving stamps "now" (UTC, naive); unarchiving clears the timestamp.
    let archived_at = if archived {
        Some(Utc::now().naive_utc())
    } else {
        None
    };
    cipher.set_archived_at(archived_at, &headers.user.uuid, conn).await?;

    if !multi_archive {
        nt.send_cipher_update(
            UpdateType::SyncCipherUpdate,
            &cipher,
            &cipher.update_users_revision(conn).await,
            &headers.device,
            None,
            conn,
        )
        .await;
    }

    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
}
/// Archives or unarchives every cipher listed in `data.ids` for the current
/// user, failing fast on the first cipher that is missing or inaccessible.
///
/// Each cipher is handled via `set_archived_cipher_by_uuid` with
/// `multi_archive = true`, which suppresses the per-cipher push; a single
/// `SyncCiphers` update is sent at the end instead. Returns a Bitwarden-style
/// list object containing the JSON of every processed cipher.
async fn set_archived_multiple_ciphers(
    data: Json<CipherIdsData>,
    headers: &Headers,
    archived: bool,
    conn: &DbConn,
    nt: &Notify<'_>,
) -> JsonResult {
    let data = data.into_inner();

    // Every id yields exactly one entry on success, so pre-size the result list.
    let mut ciphers: Vec<Value> = Vec::with_capacity(data.ids.len());
    for cipher_id in data.ids {
        // `?` propagates the first failure unchanged, same as the previous
        // explicit `match … err => return err` but idiomatic.
        let json = set_archived_cipher_by_uuid(&cipher_id, headers, archived, true, conn, nt).await?;
        ciphers.push(json.into_inner());
    }

    // Multi archive actions do not send out a push for each cipher, we need to send a general sync here
    nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, conn).await;

    Ok(Json(json!({
        "data": ciphers,
        "object": "list",
        "continuationToken": null
    })))
}
/// This will hold all the necessary data to improve a full sync of all the ciphers /// This will hold all the necessary data to improve a full sync of all the ciphers
/// It can be used during the `Cipher::to_json()` call. /// It can be used during the `Cipher::to_json()` call.
/// It will prevent the so called N+1 SQL issue by running just a few queries which will hold all the data needed. /// It will prevent the so called N+1 SQL issue by running just a few queries which will hold all the data needed.
@ -1930,6 +2041,7 @@ pub struct CipherSyncData {
pub cipher_folders: HashMap<CipherId, FolderId>, pub cipher_folders: HashMap<CipherId, FolderId>,
pub cipher_favorites: HashSet<CipherId>, pub cipher_favorites: HashSet<CipherId>,
pub cipher_collections: HashMap<CipherId, Vec<CollectionId>>, pub cipher_collections: HashMap<CipherId, Vec<CollectionId>>,
pub cipher_archives: HashMap<CipherId, NaiveDateTime>,
pub members: HashMap<OrganizationId, Membership>, pub members: HashMap<OrganizationId, Membership>,
pub user_collections: HashMap<CollectionId, CollectionUser>, pub user_collections: HashMap<CollectionId, CollectionUser>,
pub user_collections_groups: HashMap<CollectionId, CollectionGroup>, pub user_collections_groups: HashMap<CollectionId, CollectionGroup>,
@ -1946,6 +2058,7 @@ impl CipherSyncData {
pub async fn new(user_id: &UserId, sync_type: CipherSyncType, conn: &DbConn) -> Self { pub async fn new(user_id: &UserId, sync_type: CipherSyncType, conn: &DbConn) -> Self {
let cipher_folders: HashMap<CipherId, FolderId>; let cipher_folders: HashMap<CipherId, FolderId>;
let cipher_favorites: HashSet<CipherId>; let cipher_favorites: HashSet<CipherId>;
let cipher_archives: HashMap<CipherId, NaiveDateTime>;
match sync_type { match sync_type {
// User Sync supports Folders and Favorites // User Sync supports Folders and Favorites
CipherSyncType::User => { CipherSyncType::User => {
@ -1954,12 +2067,16 @@ impl CipherSyncData {
// Generate a HashSet of all the Cipher UUID's which are marked as favorite // Generate a HashSet of all the Cipher UUID's which are marked as favorite
cipher_favorites = Favorite::get_all_cipher_uuid_by_user(user_id, conn).await.into_iter().collect(); cipher_favorites = Favorite::get_all_cipher_uuid_by_user(user_id, conn).await.into_iter().collect();
// Generate a HashMap with the Cipher UUID as key and the archived date time as value
cipher_archives = Archive::find_by_user(user_id, conn).await.into_iter().collect();
} }
// Organization Sync does not support Folders and Favorites. // Organization Sync does not support Folders and Favorites.
// If these are set, it will cause issues in the web-vault. // If these are set, it will cause issues in the web-vault.
CipherSyncType::Organization => { CipherSyncType::Organization => {
cipher_folders = HashMap::with_capacity(0); cipher_folders = HashMap::with_capacity(0);
cipher_favorites = HashSet::with_capacity(0); cipher_favorites = HashSet::with_capacity(0);
cipher_archives = HashMap::with_capacity(0);
} }
} }
@ -2019,6 +2136,7 @@ impl CipherSyncData {
}; };
Self { Self {
cipher_archives,
cipher_attachments, cipher_attachments,
cipher_folders, cipher_folders,
cipher_favorites, cipher_favorites,

2
src/config.rs

@ -1052,6 +1052,8 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
"cxp-export-mobile", "cxp-export-mobile",
// Webauthn Related Origins // Webauthn Related Origins
"pm-30529-webauthn-related-origins", "pm-30529-webauthn-related-origins",
// Innovation Team
"pm-19148-innovation-archive",
]; ];
let configured_flags = parse_experimental_client_feature_flags(&cfg.experimental_client_feature_flags); let configured_flags = parse_experimental_client_feature_flags(&cfg.experimental_client_feature_flags);
let invalid_flags: Vec<_> = configured_flags.keys().filter(|flag| !KNOWN_FLAGS.contains(&flag.as_str())).collect(); let invalid_flags: Vec<_> = configured_flags.keys().filter(|flag| !KNOWN_FLAGS.contains(&flag.as_str())).collect();

97
src/db/models/archive.rs

@ -0,0 +1,97 @@
use chrono::NaiveDateTime;
use diesel::prelude::*;
use super::{CipherId, User, UserId};
use crate::api::EmptyResult;
use crate::db::schema::archives;
use crate::db::DbConn;
use crate::error::MapResult;
/// One row per (user, cipher) pair that the user has archived, mirroring the
/// `archives` table; the composite primary key enforces at most one row per pair.
#[derive(Identifiable, Queryable, Insertable)]
#[diesel(table_name = archives)]
#[diesel(primary_key(user_uuid, cipher_uuid))]
pub struct Archive {
    pub user_uuid: UserId,       // user who archived the cipher
    pub cipher_uuid: CipherId,   // cipher that was archived
    pub archived_at: NaiveDateTime, // when it was archived
}
impl Archive {
    /// Returns the date the specified cipher was archived by `user_uuid`,
    /// or `None` when no archive row exists for that (user, cipher) pair.
    pub async fn get_archived_at(cipher_uuid: &CipherId, user_uuid: &UserId, conn: &DbConn) -> Option<NaiveDateTime> {
        db_run! { conn: {
            archives::table
                .filter(archives::cipher_uuid.eq(cipher_uuid))
                .filter(archives::user_uuid.eq(user_uuid))
                .select(archives::archived_at)
                // `.ok()` collapses any query error into `None`, i.e. "not archived".
                .first::<NaiveDateTime>(conn).ok()
        }}
    }

    /// Sets the specified cipher to be archived (`Some(dt)`) or unarchived
    /// (`None`) for `user_uuid`, inserting, updating or deleting the archive
    /// row as needed. The user's revision is bumped on every actual change so
    /// clients re-sync; a no-op state returns `Ok(())` without touching the DB.
    ///
    /// NOTE(review): the read-then-write is not atomic — two concurrent
    /// requests for the same (user, cipher) could race on insert; confirm
    /// whether an upsert is needed here.
    pub async fn set_archived_at(
        archived_at: Option<NaiveDateTime>,
        cipher_uuid: &CipherId,
        user_uuid: &UserId,
        conn: &DbConn,
    ) -> EmptyResult {
        let existing = Self::get_archived_at(cipher_uuid, user_uuid, conn).await;
        match (existing, archived_at) {
            // Not archived - archive at the provided timestamp
            (None, Some(dt)) => {
                User::update_uuid_revision(user_uuid, conn).await;
                db_run! { conn: {
                    diesel::insert_into(archives::table)
                        .values((
                            archives::user_uuid.eq(user_uuid),
                            archives::cipher_uuid.eq(cipher_uuid),
                            archives::archived_at.eq(dt),
                        ))
                        .execute(conn)
                        .map_res("Error archiving")
                }}
            }
            // Already archived - update with the provided timestamp
            (Some(_), Some(dt)) => {
                User::update_uuid_revision(user_uuid, conn).await;
                db_run! { conn: {
                    diesel::update(
                        archives::table
                            .filter(archives::user_uuid.eq(user_uuid))
                            .filter(archives::cipher_uuid.eq(cipher_uuid))
                    )
                    .set(archives::archived_at.eq(dt))
                    .execute(conn)
                    .map_res("Error updating archive date")
                }}
            }
            // Archived - delete the row to unarchive
            (Some(_), None) => {
                User::update_uuid_revision(user_uuid, conn).await;
                db_run! { conn: {
                    diesel::delete(
                        archives::table
                            .filter(archives::user_uuid.eq(user_uuid))
                            .filter(archives::cipher_uuid.eq(cipher_uuid))
                    )
                    .execute(conn)
                    .map_res("Error unarchiving")
                }}
            }
            // Otherwise, the archived status is already what it should be
            _ => Ok(()),
        }
    }

    /// Return a vec with (cipher_uuid, archived_at)
    /// This is used during a full sync so we only need one query for all archive matches
    pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec<(CipherId, NaiveDateTime)> {
        db_run! { conn: {
            archives::table
                .filter(archives::user_uuid.eq(user_uuid))
                .select((archives::cipher_uuid, archives::archived_at))
                // Query errors yield an empty vec rather than failing the sync.
                .load::<(CipherId, NaiveDateTime)>(conn)
                .unwrap_or_default()
        }}
    }
}

22
src/db/models/cipher.rs

@ -10,8 +10,8 @@ use diesel::prelude::*;
use serde_json::Value; use serde_json::Value;
use super::{ use super::{
Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, FolderId, Group, Membership, MembershipStatus, Archive, Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, FolderId, Group, Membership,
MembershipType, OrganizationId, User, UserId, MembershipStatus, MembershipType, OrganizationId, User, UserId,
}; };
use crate::api::core::{CipherData, CipherSyncData, CipherSyncType}; use crate::api::core::{CipherData, CipherSyncData, CipherSyncType};
use macros::UuidFromParam; use macros::UuidFromParam;
@ -380,6 +380,11 @@ impl Cipher {
} else { } else {
self.is_favorite(user_uuid, conn).await self.is_favorite(user_uuid, conn).await
}); });
json_object["archivedDate"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
cipher_sync_data.cipher_archives.get(&self.uuid).map_or(Value::Null, |d| Value::String(format_date(d)))
} else {
self.get_archived_at(user_uuid, conn).await.map_or(Value::Null, |d| Value::String(format_date(&d)))
});
// These values are true by default, but can be false if the // These values are true by default, but can be false if the
// cipher belongs to a collection or group where the org owner has enabled // cipher belongs to a collection or group where the org owner has enabled
// the "Read Only" or "Hide Passwords" restrictions for the user. // the "Read Only" or "Hide Passwords" restrictions for the user.
@ -737,6 +742,19 @@ impl Cipher {
} }
} }
/// Returns when this cipher was archived by `user_uuid`, or `None` if it is
/// not archived for that user. Thin delegate to `Archive::get_archived_at`.
pub async fn get_archived_at(&self, user_uuid: &UserId, conn: &DbConn) -> Option<NaiveDateTime> {
    Archive::get_archived_at(&self.uuid, user_uuid, conn).await
}
/// Archives (`Some(timestamp)`) or unarchives (`None`) this cipher for
/// `user_uuid`. Thin delegate to `Archive::set_archived_at`.
pub async fn set_archived_at(
    &self,
    archived_at: Option<NaiveDateTime>,
    user_uuid: &UserId,
    conn: &DbConn,
) -> EmptyResult {
    Archive::set_archived_at(archived_at, &self.uuid, user_uuid, conn).await
}
pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &DbConn) -> Option<FolderId> { pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &DbConn) -> Option<FolderId> {
db_run! { conn: { db_run! { conn: {
folders_ciphers::table folders_ciphers::table

2
src/db/models/mod.rs

@ -1,3 +1,4 @@
mod archive;
mod attachment; mod attachment;
mod auth_request; mod auth_request;
mod cipher; mod cipher;
@ -17,6 +18,7 @@ mod two_factor_duo_context;
mod two_factor_incomplete; mod two_factor_incomplete;
mod user; mod user;
pub use self::archive::Archive;
pub use self::attachment::{Attachment, AttachmentId}; pub use self::attachment::{Attachment, AttachmentId};
pub use self::auth_request::{AuthRequest, AuthRequestId}; pub use self::auth_request::{AuthRequest, AuthRequestId};
pub use self::cipher::{Cipher, CipherId, RepromptType}; pub use self::cipher::{Cipher, CipherId, RepromptType};

8
src/db/schema.rs

@ -341,6 +341,14 @@ table! {
} }
} }
// Diesel schema for the `archives` table: per-user archived state of ciphers,
// keyed by the (user_uuid, cipher_uuid) composite primary key.
table! {
    archives (user_uuid, cipher_uuid) {
        user_uuid -> Text,
        cipher_uuid -> Text,
        archived_at -> Timestamp,
    }
}
joinable!(attachments -> ciphers (cipher_uuid)); joinable!(attachments -> ciphers (cipher_uuid));
joinable!(ciphers -> organizations (organization_uuid)); joinable!(ciphers -> organizations (organization_uuid));
joinable!(ciphers -> users (user_uuid)); joinable!(ciphers -> users (user_uuid));

Loading…
Cancel
Save