From c2779fa036249c8246d709d8893e469d96db614e Mon Sep 17 00:00:00 2001 From: Matt Aaron <13080357+matt-aaron@users.noreply.github.com> Date: Mon, 23 Mar 2026 23:16:18 -0400 Subject: [PATCH 1/4] Add archiving --- .../2026-03-09-005927_add_archives/down.sql | 1 + .../2026-03-09-005927_add_archives/up.sql | 10 ++ .../2026-03-09-005927-add_archives/down.sql | 1 + .../2026-03-09-005927-add_archives/up.sql | 8 ++ .../2026-03-09-005927-add_archives/down.sql | 1 + .../2026-03-09-005927-add_archives/up.sql | 8 ++ src/api/core/ciphers.rs | 124 +++++++++++++++++- src/api/core/mod.rs | 4 +- src/config.rs | 2 + src/db/models/archive.rs | 97 ++++++++++++++ src/db/models/cipher.rs | 22 +++- src/db/models/mod.rs | 2 + src/db/schema.rs | 8 ++ 13 files changed, 281 insertions(+), 7 deletions(-) create mode 100644 migrations/mysql/2026-03-09-005927_add_archives/down.sql create mode 100644 migrations/mysql/2026-03-09-005927_add_archives/up.sql create mode 100644 migrations/postgresql/2026-03-09-005927-add_archives/down.sql create mode 100644 migrations/postgresql/2026-03-09-005927-add_archives/up.sql create mode 100644 migrations/sqlite/2026-03-09-005927-add_archives/down.sql create mode 100644 migrations/sqlite/2026-03-09-005927-add_archives/up.sql create mode 100644 src/db/models/archive.rs diff --git a/migrations/mysql/2026-03-09-005927_add_archives/down.sql b/migrations/mysql/2026-03-09-005927_add_archives/down.sql new file mode 100644 index 00000000..a784dc80 --- /dev/null +++ b/migrations/mysql/2026-03-09-005927_add_archives/down.sql @@ -0,0 +1 @@ +DROP TABLE archives; diff --git a/migrations/mysql/2026-03-09-005927_add_archives/up.sql b/migrations/mysql/2026-03-09-005927_add_archives/up.sql new file mode 100644 index 00000000..6d7a7024 --- /dev/null +++ b/migrations/mysql/2026-03-09-005927_add_archives/up.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS archives; + +CREATE TABLE archives ( + user_uuid CHAR(36) NOT NULL, + cipher_uuid CHAR(36) NOT NULL, + archived_at TIMESTAMP NOT 
NULL DEFAULT CURRENT_TIMESTAMP, + PRIMARY KEY (user_uuid, cipher_uuid), + FOREIGN KEY (user_uuid) REFERENCES users (uuid) ON DELETE CASCADE, + FOREIGN KEY (cipher_uuid) REFERENCES ciphers (uuid) ON DELETE CASCADE +); diff --git a/migrations/postgresql/2026-03-09-005927-add_archives/down.sql b/migrations/postgresql/2026-03-09-005927-add_archives/down.sql new file mode 100644 index 00000000..a784dc80 --- /dev/null +++ b/migrations/postgresql/2026-03-09-005927-add_archives/down.sql @@ -0,0 +1 @@ +DROP TABLE archives; diff --git a/migrations/postgresql/2026-03-09-005927-add_archives/up.sql b/migrations/postgresql/2026-03-09-005927-add_archives/up.sql new file mode 100644 index 00000000..c56d01a0 --- /dev/null +++ b/migrations/postgresql/2026-03-09-005927-add_archives/up.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS archives; + +CREATE TABLE archives ( + user_uuid CHAR(36) NOT NULL REFERENCES users (uuid) ON DELETE CASCADE, + cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid) ON DELETE CASCADE, + archived_at TIMESTAMP NOT NULL DEFAULT now(), + PRIMARY KEY (user_uuid, cipher_uuid) +); diff --git a/migrations/sqlite/2026-03-09-005927-add_archives/down.sql b/migrations/sqlite/2026-03-09-005927-add_archives/down.sql new file mode 100644 index 00000000..a784dc80 --- /dev/null +++ b/migrations/sqlite/2026-03-09-005927-add_archives/down.sql @@ -0,0 +1 @@ +DROP TABLE archives; diff --git a/migrations/sqlite/2026-03-09-005927-add_archives/up.sql b/migrations/sqlite/2026-03-09-005927-add_archives/up.sql new file mode 100644 index 00000000..d624f57b --- /dev/null +++ b/migrations/sqlite/2026-03-09-005927-add_archives/up.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS archives; + +CREATE TABLE archives ( + user_uuid CHAR(36) NOT NULL REFERENCES users (uuid) ON DELETE CASCADE, + cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid) ON DELETE CASCADE, + archived_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + PRIMARY KEY (user_uuid, cipher_uuid) +); diff --git 
a/src/api/core/ciphers.rs b/src/api/core/ciphers.rs index 6d4e1f41..e893adbd 100644 --- a/src/api/core/ciphers.rs +++ b/src/api/core/ciphers.rs @@ -19,9 +19,9 @@ use crate::{ crypto, db::{ models::{ - Attachment, AttachmentId, Cipher, CipherId, Collection, CollectionCipher, CollectionGroup, CollectionId, - CollectionUser, EventType, Favorite, Folder, FolderCipher, FolderId, Group, Membership, MembershipType, - OrgPolicy, OrgPolicyType, OrganizationId, RepromptType, Send, UserId, + Archive, Attachment, AttachmentId, Cipher, CipherId, Collection, CollectionCipher, CollectionGroup, + CollectionId, CollectionUser, EventType, Favorite, Folder, FolderCipher, FolderId, Group, Membership, + MembershipType, OrgPolicy, OrgPolicyType, OrganizationId, RepromptType, Send, UserId, }, DbConn, DbPool, }, @@ -96,6 +96,10 @@ pub fn routes() -> Vec { post_collections_update, post_collections_admin, put_collections_admin, + archive_cipher_put, + archive_cipher_selected, + unarchive_cipher_put, + unarchive_cipher_selected, ] } @@ -293,6 +297,7 @@ pub struct CipherData { // when using older client versions, or if the operation doesn't involve // updating an existing cipher. 
last_known_revision_date: Option, + archived_date: Option, } #[derive(Debug, Deserialize)] @@ -533,6 +538,17 @@ pub async fn update_cipher_from_data( cipher.save(conn).await?; cipher.move_to_folder(data.folder_id, &headers.user.uuid, conn).await?; cipher.set_favorite(data.favorite, &headers.user.uuid, conn).await?; + let archived_at = match data.archived_date { + Some(dt_str) => match NaiveDateTime::parse_from_str(&dt_str, "%+") { + Ok(dt) => Some(dt), + Err(err) => { + warn!("Error parsing ArchivedDate '{dt_str}': {err}"); + None + } + }, + None => None, + }; + cipher.set_archived_at(archived_at, &headers.user.uuid, conn).await?; if ut != UpdateType::None { // Only log events for organizational ciphers @@ -1715,6 +1731,36 @@ async fn purge_personal_vault( Ok(()) } +#[put("/ciphers//archive")] +async fn archive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { + set_archived_cipher_by_uuid(&cipher_id, &headers, true, false, &conn, &nt).await +} + +#[put("/ciphers/archive", data = "")] +async fn archive_cipher_selected( + data: Json, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { + set_archived_multiple_ciphers(data, &headers, true, &conn, &nt).await +} + +#[put("/ciphers//unarchive")] +async fn unarchive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { + set_archived_cipher_by_uuid(&cipher_id, &headers, false, false, &conn, &nt).await +} + +#[put("/ciphers/unarchive", data = "")] +async fn unarchive_cipher_selected( + data: Json, + headers: Headers, + conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { + set_archived_multiple_ciphers(data, &headers, false, &conn, &nt).await +} + #[derive(PartialEq)] pub enum CipherDeleteOptions { SoftSingle, @@ -1933,6 +1979,71 @@ async fn _delete_cipher_attachment_by_id( Ok(Json(json!({"cipher":cipher_json}))) } +async fn set_archived_cipher_by_uuid( + cipher_id: &CipherId, + headers: &Headers, + archived: bool, + 
multi_archive: bool, + conn: &DbConn, + nt: &Notify<'_>, +) -> JsonResult { + let Some(cipher) = Cipher::find_by_uuid(cipher_id, conn).await else { + err!("Cipher doesn't exist") + }; + + if !cipher.is_accessible_to_user(&headers.user.uuid, conn).await { + err!("Cipher is not accessible for the current user") + } + + let archived_at = if archived { + Some(Utc::now().naive_utc()) + } else { + None + }; + cipher.set_archived_at(archived_at, &headers.user.uuid, conn).await?; + + if !multi_archive { + nt.send_cipher_update( + UpdateType::SyncCipherUpdate, + &cipher, + &cipher.update_users_revision(conn).await, + &headers.device, + None, + conn, + ) + .await; + } + + Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?)) +} + +async fn set_archived_multiple_ciphers( + data: Json, + headers: &Headers, + archived: bool, + conn: &DbConn, + nt: &Notify<'_>, +) -> JsonResult { + let data = data.into_inner(); + + let mut ciphers: Vec = Vec::new(); + for cipher_id in data.ids { + match set_archived_cipher_by_uuid(&cipher_id, headers, archived, true, conn, nt).await { + Ok(json) => ciphers.push(json.into_inner()), + err => return err, + } + } + + // Multi archive actions do not send out a push for each cipher, we need to send a general sync here + nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, conn).await; + + Ok(Json(json!({ + "data": ciphers, + "object": "list", + "continuationToken": null + }))) +} + /// This will hold all the necessary data to improve a full sync of all the ciphers /// It can be used during the `Cipher::to_json()` call. /// It will prevent the so called N+1 SQL issue by running just a few queries which will hold all the data needed. 
@@ -1942,6 +2053,7 @@ pub struct CipherSyncData { pub cipher_folders: HashMap, pub cipher_favorites: HashSet, pub cipher_collections: HashMap>, + pub cipher_archives: HashMap, pub members: HashMap, pub user_collections: HashMap, pub user_collections_groups: HashMap, @@ -1958,6 +2070,7 @@ impl CipherSyncData { pub async fn new(user_id: &UserId, sync_type: CipherSyncType, conn: &DbConn) -> Self { let cipher_folders: HashMap; let cipher_favorites: HashSet; + let cipher_archives: HashMap; match sync_type { // User Sync supports Folders and Favorites CipherSyncType::User => { @@ -1966,12 +2079,16 @@ impl CipherSyncData { // Generate a HashSet of all the Cipher UUID's which are marked as favorite cipher_favorites = Favorite::get_all_cipher_uuid_by_user(user_id, conn).await.into_iter().collect(); + + // Generate a HashMap with the Cipher UUID as key and the archived date time as value + cipher_archives = Archive::find_by_user(user_id, conn).await.into_iter().collect(); } // Organization Sync does not support Folders and Favorites. // If these are set, it will cause issues in the web-vault. 
CipherSyncType::Organization => { cipher_folders = HashMap::with_capacity(0); cipher_favorites = HashSet::with_capacity(0); + cipher_archives = HashMap::with_capacity(0); } } @@ -2034,6 +2151,7 @@ impl CipherSyncData { }; Self { + cipher_archives, cipher_attachments, cipher_folders, cipher_favorites, diff --git a/src/api/core/mod.rs b/src/api/core/mod.rs index 038b9a6d..aaac60c3 100644 --- a/src/api/core/mod.rs +++ b/src/api/core/mod.rs @@ -204,11 +204,11 @@ fn config() -> Json { // Client (v2026.2.1): https://github.com/bitwarden/clients/blob/f96380c3138291a028bdd2c7a5fee540d5c98ba5/libs/common/src/enums/feature-flag.enum.ts#L12 // Android (v2026.2.1): https://github.com/bitwarden/android/blob/6902c19c0093fa476bbf74ccaa70c9f14afbb82f/core/src/main/kotlin/com/bitwarden/core/data/manager/model/FlagKey.kt#L31 // iOS (v2026.2.1): https://github.com/bitwarden/ios/blob/cdd9ba1770ca2ffc098d02d12cc3208e3a830454/BitwardenShared/Core/Platform/Models/Enum/FeatureFlag.swift#L7 - let feature_states = parse_experimental_client_feature_flags( + let mut feature_states = parse_experimental_client_feature_flags( &CONFIG.experimental_client_feature_flags(), FeatureFlagFilter::ValidOnly, ); - // Add default feature_states here if needed, currently no features are needed by default. 
+ feature_states.insert("pm-19148-innovation-archive".to_string(), true); Json(json!({ // Note: The clients use this version to handle backwards compatibility concerns diff --git a/src/config.rs b/src/config.rs index 6ff09467..26fb69e6 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1485,6 +1485,8 @@ pub const SUPPORTED_FEATURE_FLAGS: &[&str] = &[ "cxp-export-mobile", // Platform Team "pm-30529-webauthn-related-origins", + // Innovation Team + "pm-19148-innovation-archive", ]; impl Config { diff --git a/src/db/models/archive.rs b/src/db/models/archive.rs new file mode 100644 index 00000000..eb05ec7e --- /dev/null +++ b/src/db/models/archive.rs @@ -0,0 +1,97 @@ +use chrono::NaiveDateTime; +use diesel::prelude::*; + +use super::{CipherId, User, UserId}; +use crate::api::EmptyResult; +use crate::db::schema::archives; +use crate::db::DbConn; +use crate::error::MapResult; + +#[derive(Identifiable, Queryable, Insertable)] +#[diesel(table_name = archives)] +#[diesel(primary_key(user_uuid, cipher_uuid))] +pub struct Archive { + pub user_uuid: UserId, + pub cipher_uuid: CipherId, + pub archived_at: NaiveDateTime, +} + +impl Archive { + // Returns the date the specified cipher was archived + pub async fn get_archived_at(cipher_uuid: &CipherId, user_uuid: &UserId, conn: &DbConn) -> Option { + db_run! { conn: { + archives::table + .filter(archives::cipher_uuid.eq(cipher_uuid)) + .filter(archives::user_uuid.eq(user_uuid)) + .select(archives::archived_at) + .first::(conn).ok() + }} + } + + // Sets the specified cipher to be archived or unarchived + pub async fn set_archived_at( + archived_at: Option, + cipher_uuid: &CipherId, + user_uuid: &UserId, + conn: &DbConn, + ) -> EmptyResult { + let existing = Self::get_archived_at(cipher_uuid, user_uuid, conn).await; + + match (existing, archived_at) { + // Not archived - archive at the provided timestamp + (None, Some(dt)) => { + User::update_uuid_revision(user_uuid, conn).await; + db_run! 
{ conn: { + diesel::insert_into(archives::table) + .values(( + archives::user_uuid.eq(user_uuid), + archives::cipher_uuid.eq(cipher_uuid), + archives::archived_at.eq(dt), + )) + .execute(conn) + .map_res("Error archiving") + }} + } + // Already archived - update with the provided timestamp + (Some(_), Some(dt)) => { + User::update_uuid_revision(user_uuid, conn).await; + db_run! { conn: { + diesel::update( + archives::table + .filter(archives::user_uuid.eq(user_uuid)) + .filter(archives::cipher_uuid.eq(cipher_uuid)) + ) + .set(archives::archived_at.eq(dt)) + .execute(conn) + .map_res("Error updating archive date") + }} + } + (Some(_), None) => { + User::update_uuid_revision(user_uuid, conn).await; + db_run! { conn: { + diesel::delete( + archives::table + .filter(archives::user_uuid.eq(user_uuid)) + .filter(archives::cipher_uuid.eq(cipher_uuid)) + ) + .execute(conn) + .map_res("Error unarchiving") + }} + } + // Otherwise, the archived status is already what it should be + _ => Ok(()), + } + } + + /// Return a vec with (cipher_uuid, archived_at) + /// This is used during a full sync so we only need one query for all archive matches + pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec<(CipherId, NaiveDateTime)> { + db_run! 
{ conn: { + archives::table + .filter(archives::user_uuid.eq(user_uuid)) + .select((archives::cipher_uuid, archives::archived_at)) + .load::<(CipherId, NaiveDateTime)>(conn) + .unwrap_or_default() + }} + } +} diff --git a/src/db/models/cipher.rs b/src/db/models/cipher.rs index edc5f8c9..ff47c268 100644 --- a/src/db/models/cipher.rs +++ b/src/db/models/cipher.rs @@ -10,8 +10,8 @@ use diesel::prelude::*; use serde_json::Value; use super::{ - Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, FolderId, Group, Membership, MembershipStatus, - MembershipType, OrganizationId, User, UserId, + Archive, Attachment, CollectionCipher, CollectionId, Favorite, FolderCipher, FolderId, Group, Membership, + MembershipStatus, MembershipType, OrganizationId, User, UserId, }; use crate::api::core::{CipherData, CipherSyncData, CipherSyncType}; use macros::UuidFromParam; @@ -380,6 +380,11 @@ impl Cipher { } else { self.is_favorite(user_uuid, conn).await }); + json_object["archivedDate"] = json!(if let Some(cipher_sync_data) = cipher_sync_data { + cipher_sync_data.cipher_archives.get(&self.uuid).map_or(Value::Null, |d| Value::String(format_date(d))) + } else { + self.get_archived_at(user_uuid, conn).await.map_or(Value::Null, |d| Value::String(format_date(&d))) + }); // These values are true by default, but can be false if the // cipher belongs to a collection or group where the org owner has enabled // the "Read Only" or "Hide Passwords" restrictions for the user. @@ -742,6 +747,19 @@ impl Cipher { } } + pub async fn get_archived_at(&self, user_uuid: &UserId, conn: &DbConn) -> Option { + Archive::get_archived_at(&self.uuid, user_uuid, conn).await + } + + pub async fn set_archived_at( + &self, + archived_at: Option, + user_uuid: &UserId, + conn: &DbConn, + ) -> EmptyResult { + Archive::set_archived_at(archived_at, &self.uuid, user_uuid, conn).await + } + pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &DbConn) -> Option { db_run! 
{ conn: { folders_ciphers::table diff --git a/src/db/models/mod.rs b/src/db/models/mod.rs index b4fcf658..2d31259c 100644 --- a/src/db/models/mod.rs +++ b/src/db/models/mod.rs @@ -1,3 +1,4 @@ +mod archive; mod attachment; mod auth_request; mod cipher; @@ -17,6 +18,7 @@ mod two_factor_duo_context; mod two_factor_incomplete; mod user; +pub use self::archive::Archive; pub use self::attachment::{Attachment, AttachmentId}; pub use self::auth_request::{AuthRequest, AuthRequestId}; pub use self::cipher::{Cipher, CipherId, RepromptType}; diff --git a/src/db/schema.rs b/src/db/schema.rs index 914b4fe9..4856b923 100644 --- a/src/db/schema.rs +++ b/src/db/schema.rs @@ -341,6 +341,14 @@ table! { } } +table! { + archives (user_uuid, cipher_uuid) { + user_uuid -> Text, + cipher_uuid -> Text, + archived_at -> Timestamp, + } +} + joinable!(attachments -> ciphers (cipher_uuid)); joinable!(ciphers -> organizations (organization_uuid)); joinable!(ciphers -> users (user_uuid)); From b3b47eb352eb9f5650d50dcbec23cf82e92f1987 Mon Sep 17 00:00:00 2001 From: Matt Aaron <13080357+matt-aaron@users.noreply.github.com> Date: Sun, 5 Apr 2026 13:10:07 -0400 Subject: [PATCH 2/4] Update Diesel macros and remove unnecessary SUPPORTED_FEATURE_FLAG --- src/config.rs | 2 -- src/db/schema.rs | 3 +++ 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/config.rs b/src/config.rs index 26fb69e6..6ff09467 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1485,8 +1485,6 @@ pub const SUPPORTED_FEATURE_FLAGS: &[&str] = &[ "cxp-export-mobile", // Platform Team "pm-30529-webauthn-related-origins", - // Innovation Team - "pm-19148-innovation-archive", ]; impl Config { diff --git a/src/db/schema.rs b/src/db/schema.rs index 4856b923..2963ad8d 100644 --- a/src/db/schema.rs +++ b/src/db/schema.rs @@ -349,6 +349,8 @@ table! 
{ } } +joinable!(archives -> users (user_uuid)); +joinable!(archives -> ciphers (cipher_uuid)); joinable!(attachments -> ciphers (cipher_uuid)); joinable!(ciphers -> organizations (organization_uuid)); joinable!(ciphers -> users (user_uuid)); @@ -380,6 +382,7 @@ joinable!(auth_requests -> users (user_uuid)); joinable!(sso_users -> users (user_uuid)); allow_tables_to_appear_in_same_query!( + archives, attachments, ciphers, ciphers_collections, From f879f126429ceb2c38f978de207d576dc380a03e Mon Sep 17 00:00:00 2001 From: Matt Aaron <13080357+matt-aaron@users.noreply.github.com> Date: Sat, 18 Apr 2026 14:08:22 -0400 Subject: [PATCH 3/4] Add IF EXISTS to down.sql migrations --- migrations/mysql/2026-03-09-005927_add_archives/down.sql | 2 +- migrations/postgresql/2026-03-09-005927-add_archives/down.sql | 2 +- migrations/sqlite/2026-03-09-005927-add_archives/down.sql | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/migrations/mysql/2026-03-09-005927_add_archives/down.sql b/migrations/mysql/2026-03-09-005927_add_archives/down.sql index a784dc80..a3ef20c3 100644 --- a/migrations/mysql/2026-03-09-005927_add_archives/down.sql +++ b/migrations/mysql/2026-03-09-005927_add_archives/down.sql @@ -1 +1 @@ -DROP TABLE archives; +DROP TABLE IF EXISTS archives; diff --git a/migrations/postgresql/2026-03-09-005927-add_archives/down.sql b/migrations/postgresql/2026-03-09-005927-add_archives/down.sql index a784dc80..a3ef20c3 100644 --- a/migrations/postgresql/2026-03-09-005927-add_archives/down.sql +++ b/migrations/postgresql/2026-03-09-005927-add_archives/down.sql @@ -1 +1 @@ -DROP TABLE archives; +DROP TABLE IF EXISTS archives; diff --git a/migrations/sqlite/2026-03-09-005927-add_archives/down.sql b/migrations/sqlite/2026-03-09-005927-add_archives/down.sql index a784dc80..a3ef20c3 100644 --- a/migrations/sqlite/2026-03-09-005927-add_archives/down.sql +++ b/migrations/sqlite/2026-03-09-005927-add_archives/down.sql @@ -1 +1 @@ -DROP TABLE archives; +DROP TABLE IF 
EXISTS archives; From c949b8491fbf19f79784ac7f4c180bd2d5db269d Mon Sep 17 00:00:00 2001 From: Matt Aaron <13080357+matt-aaron@users.noreply.github.com> Date: Sat, 18 Apr 2026 17:52:55 -0400 Subject: [PATCH 4/4] Rename migration folders, separate logic based on PR threads --- .../down.sql | 0 .../up.sql | 0 .../down.sql | 0 .../up.sql | 0 src/api/core/ciphers.rs | 105 +++++++++++++----- src/db/models/archive.rs | 76 ++++++------- src/db/models/cipher.rs | 13 +-- 7 files changed, 117 insertions(+), 77 deletions(-) rename migrations/postgresql/{2026-03-09-005927-add_archives => 2026-03-09-005927_add_archives}/down.sql (100%) rename migrations/postgresql/{2026-03-09-005927-add_archives => 2026-03-09-005927_add_archives}/up.sql (100%) rename migrations/sqlite/{2026-03-09-005927-add_archives => 2026-03-09-005927_add_archives}/down.sql (100%) rename migrations/sqlite/{2026-03-09-005927-add_archives => 2026-03-09-005927_add_archives}/up.sql (100%) diff --git a/migrations/postgresql/2026-03-09-005927-add_archives/down.sql b/migrations/postgresql/2026-03-09-005927_add_archives/down.sql similarity index 100% rename from migrations/postgresql/2026-03-09-005927-add_archives/down.sql rename to migrations/postgresql/2026-03-09-005927_add_archives/down.sql diff --git a/migrations/postgresql/2026-03-09-005927-add_archives/up.sql b/migrations/postgresql/2026-03-09-005927_add_archives/up.sql similarity index 100% rename from migrations/postgresql/2026-03-09-005927-add_archives/up.sql rename to migrations/postgresql/2026-03-09-005927_add_archives/up.sql diff --git a/migrations/sqlite/2026-03-09-005927-add_archives/down.sql b/migrations/sqlite/2026-03-09-005927_add_archives/down.sql similarity index 100% rename from migrations/sqlite/2026-03-09-005927-add_archives/down.sql rename to migrations/sqlite/2026-03-09-005927_add_archives/down.sql diff --git a/migrations/sqlite/2026-03-09-005927-add_archives/up.sql b/migrations/sqlite/2026-03-09-005927_add_archives/up.sql similarity index 100% 
rename from migrations/sqlite/2026-03-09-005927-add_archives/up.sql rename to migrations/sqlite/2026-03-09-005927_add_archives/up.sql diff --git a/src/api/core/ciphers.rs b/src/api/core/ciphers.rs index e893adbd..935ca7a1 100644 --- a/src/api/core/ciphers.rs +++ b/src/api/core/ciphers.rs @@ -538,17 +538,13 @@ pub async fn update_cipher_from_data( cipher.save(conn).await?; cipher.move_to_folder(data.folder_id, &headers.user.uuid, conn).await?; cipher.set_favorite(data.favorite, &headers.user.uuid, conn).await?; - let archived_at = match data.archived_date { - Some(dt_str) => match NaiveDateTime::parse_from_str(&dt_str, "%+") { - Ok(dt) => Some(dt), - Err(err) => { - warn!("Error parsing ArchivedDate '{dt_str}': {err}"); - None - } - }, - None => None, - }; - cipher.set_archived_at(archived_at, &headers.user.uuid, conn).await?; + + if let Some(dt_str) = data.archived_date { + match NaiveDateTime::parse_from_str(&dt_str, "%+") { + Ok(dt) => cipher.set_archived_at(dt, &headers.user.uuid, conn).await?, + Err(err) => warn!("Error parsing ArchivedDate '{dt_str}': {err}"), + } + } if ut != UpdateType::None { // Only log events for organizational ciphers @@ -1733,7 +1729,7 @@ async fn purge_personal_vault( #[put("/ciphers//archive")] async fn archive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { - set_archived_cipher_by_uuid(&cipher_id, &headers, true, false, &conn, &nt).await + archive_cipher(&cipher_id, &headers, false, &conn, &nt).await } #[put("/ciphers/archive", data = "")] @@ -1743,12 +1739,12 @@ async fn archive_cipher_selected( conn: DbConn, nt: Notify<'_>, ) -> JsonResult { - set_archived_multiple_ciphers(data, &headers, true, &conn, &nt).await + archive_multiple_ciphers(data, &headers, &conn, &nt).await } #[put("/ciphers//unarchive")] async fn unarchive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { - set_archived_cipher_by_uuid(&cipher_id, &headers, false, false, 
&conn, &nt).await + unarchive_cipher(&cipher_id, &headers, false, &conn, &nt).await } #[put("/ciphers/unarchive", data = "")] @@ -1758,7 +1754,7 @@ async fn unarchive_cipher_selected( conn: DbConn, nt: Notify<'_>, ) -> JsonResult { - set_archived_multiple_ciphers(data, &headers, false, &conn, &nt).await + unarchive_multiple_ciphers(data, &headers, &conn, &nt).await } #[derive(PartialEq)] @@ -1979,10 +1975,9 @@ async fn _delete_cipher_attachment_by_id( Ok(Json(json!({"cipher":cipher_json}))) } -async fn set_archived_cipher_by_uuid( +async fn archive_cipher( cipher_id: &CipherId, headers: &Headers, - archived: bool, multi_archive: bool, conn: &DbConn, nt: &Notify<'_>, @@ -1995,12 +1990,7 @@ async fn set_archived_cipher_by_uuid( err!("Cipher is not accessible for the current user") } - let archived_at = if archived { - Some(Utc::now().naive_utc()) - } else { - None - }; - cipher.set_archived_at(archived_at, &headers.user.uuid, conn).await?; + cipher.set_archived_at(Utc::now().naive_utc(), &headers.user.uuid, conn).await?; if !multi_archive { nt.send_cipher_update( @@ -2017,10 +2007,67 @@ async fn set_archived_cipher_by_uuid( Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?)) } -async fn set_archived_multiple_ciphers( +async fn unarchive_cipher( + cipher_id: &CipherId, + headers: &Headers, + multi_unarchive: bool, + conn: &DbConn, + nt: &Notify<'_>, +) -> JsonResult { + let Some(cipher) = Cipher::find_by_uuid(cipher_id, conn).await else { + err!("Cipher doesn't exist") + }; + + if !cipher.is_accessible_to_user(&headers.user.uuid, conn).await { + err!("Cipher is not accessible for the current user") + } + + cipher.unarchive(&headers.user.uuid, conn).await?; + + if !multi_unarchive { + nt.send_cipher_update( + UpdateType::SyncCipherUpdate, + &cipher, + &cipher.update_users_revision(conn).await, + &headers.device, + None, + conn, + ) + .await; + } + + Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, 
CipherSyncType::User, conn).await?)) +} + +async fn archive_multiple_ciphers( + data: Json, + headers: &Headers, + conn: &DbConn, + nt: &Notify<'_>, +) -> JsonResult { + let data = data.into_inner(); + + let mut ciphers: Vec = Vec::new(); + for cipher_id in data.ids { + match archive_cipher(&cipher_id, headers, true, conn, nt).await { + Ok(json) => ciphers.push(json.into_inner()), + err => return err, + } + } + + // Multi archive does not send out a push for each cipher, we need to send a general sync here + nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, conn).await; + + Ok(Json(json!({ + "data": ciphers, + "object": "list", + "continuationToken": null + }))) +} + +async fn unarchive_multiple_ciphers( data: Json, headers: &Headers, - archived: bool, conn: &DbConn, nt: &Notify<'_>, ) -> JsonResult { @@ -2028,13 +2075,13 @@ async fn set_archived_multiple_ciphers( let mut ciphers: Vec = Vec::new(); for cipher_id in data.ids { - match set_archived_cipher_by_uuid(&cipher_id, headers, archived, true, conn, nt).await { + match unarchive_cipher(&cipher_id, headers, true, conn, nt).await { Ok(json) => ciphers.push(json.into_inner()), err => return err, } } - // Multi archive actions do not send out a push for each cipher, we need to send a general sync here + // Multi unarchive does not send out a push for each cipher, we need to send a general sync here nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, conn).await; Ok(Json(json!({ @@ -2072,7 +2119,7 @@ impl CipherSyncData { let cipher_favorites: HashSet; let cipher_archives: HashMap; match sync_type { - // User Sync supports Folders and Favorites + // User Sync supports Folders, Favorites, and Archives CipherSyncType::User => { // Generate a HashMap with the Cipher UUID as key and the Folder UUID as value cipher_folders = FolderCipher::find_by_user(user_id, conn).await.into_iter().collect(); @@ -2083,7 +2130,7 @@ impl CipherSyncData { // Generate 
a HashMap with the Cipher UUID as key and the archived date time as value cipher_archives = Archive::find_by_user(user_id, conn).await.into_iter().collect(); } - // Organization Sync does not support Folders and Favorites. + // Organization Sync does not support Folders, Favorites, or Archives. // If these are set, it will cause issues in the web-vault. CipherSyncType::Organization => { cipher_folders = HashMap::with_capacity(0); diff --git a/src/db/models/archive.rs b/src/db/models/archive.rs index eb05ec7e..f576e7ed 100644 --- a/src/db/models/archive.rs +++ b/src/db/models/archive.rs @@ -28,61 +28,55 @@ impl Archive { }} } - // Sets the specified cipher to be archived or unarchived - pub async fn set_archived_at( - archived_at: Option, - cipher_uuid: &CipherId, + // Saves (inserts or updates) an archive record with the provided timestamp + pub async fn save( user_uuid: &UserId, + cipher_uuid: &CipherId, + archived_at: NaiveDateTime, conn: &DbConn, ) -> EmptyResult { - let existing = Self::get_archived_at(cipher_uuid, user_uuid, conn).await; - - match (existing, archived_at) { - // Not archived - archive at the provided timestamp - (None, Some(dt)) => { - User::update_uuid_revision(user_uuid, conn).await; - db_run! { conn: { - diesel::insert_into(archives::table) + User::update_uuid_revision(user_uuid, conn).await; + db_run! { conn: + sqlite, mysql { + diesel::replace_into(archives::table) .values(( archives::user_uuid.eq(user_uuid), archives::cipher_uuid.eq(cipher_uuid), - archives::archived_at.eq(dt), + archives::archived_at.eq(archived_at), )) .execute(conn) - .map_res("Error archiving") - }} + .map_res("Error saving archive") } - // Already archived - update with the provided timestamp - (Some(_), Some(dt)) => { - User::update_uuid_revision(user_uuid, conn).await; - db_run! 
{ conn: { - diesel::update( - archives::table - .filter(archives::user_uuid.eq(user_uuid)) - .filter(archives::cipher_uuid.eq(cipher_uuid)) - ) - .set(archives::archived_at.eq(dt)) - .execute(conn) - .map_res("Error updating archive date") - }} - } - (Some(_), None) => { - User::update_uuid_revision(user_uuid, conn).await; - db_run! { conn: { - diesel::delete( - archives::table - .filter(archives::user_uuid.eq(user_uuid)) - .filter(archives::cipher_uuid.eq(cipher_uuid)) - ) + postgresql { + diesel::insert_into(archives::table) + .values(( + archives::user_uuid.eq(user_uuid), + archives::cipher_uuid.eq(cipher_uuid), + archives::archived_at.eq(archived_at), + )) + .on_conflict((archives::user_uuid, archives::cipher_uuid)) + .do_update() + .set(archives::archived_at.eq(archived_at)) .execute(conn) - .map_res("Error unarchiving") - }} + .map_res("Error saving archive") } - // Otherwise, the archived status is already what it should be - _ => Ok(()), } } + // Deletes an archive record for a specific cipher + pub async fn delete_by_cipher(user_uuid: &UserId, cipher_uuid: &CipherId, conn: &DbConn) -> EmptyResult { + User::update_uuid_revision(user_uuid, conn).await; + db_run! 
{ conn: { + diesel::delete( + archives::table + .filter(archives::user_uuid.eq(user_uuid)) + .filter(archives::cipher_uuid.eq(cipher_uuid)) + ) + .execute(conn) + .map_res("Error deleting archive") + }} + } + /// Return a vec with (cipher_uuid, archived_at) /// This is used during a full sync so we only need one query for all archive matches pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec<(CipherId, NaiveDateTime)> { diff --git a/src/db/models/cipher.rs b/src/db/models/cipher.rs index ff47c268..87f3e415 100644 --- a/src/db/models/cipher.rs +++ b/src/db/models/cipher.rs @@ -751,13 +751,12 @@ impl Cipher { Archive::get_archived_at(&self.uuid, user_uuid, conn).await } - pub async fn set_archived_at( - &self, - archived_at: Option, - user_uuid: &UserId, - conn: &DbConn, - ) -> EmptyResult { - Archive::set_archived_at(archived_at, &self.uuid, user_uuid, conn).await + pub async fn set_archived_at(&self, archived_at: NaiveDateTime, user_uuid: &UserId, conn: &DbConn) -> EmptyResult { + Archive::save(user_uuid, &self.uuid, archived_at, conn).await + } + + pub async fn unarchive(&self, user_uuid: &UserId, conn: &DbConn) -> EmptyResult { + Archive::delete_by_cipher(user_uuid, &self.uuid, conn).await } pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &DbConn) -> Option {