Browse Source

Rename migration folders, separate logic based on PR threads

pull/6916/head
Matt Aaron 5 days ago
parent
commit
c949b8491f
  1. 0
      migrations/postgresql/2026-03-09-005927_add_archives/down.sql
  2. 0
      migrations/postgresql/2026-03-09-005927_add_archives/up.sql
  3. 0
      migrations/sqlite/2026-03-09-005927_add_archives/down.sql
  4. 0
      migrations/sqlite/2026-03-09-005927_add_archives/up.sql
  5. 105
      src/api/core/ciphers.rs
  6. 76
      src/db/models/archive.rs
  7. 13
      src/db/models/cipher.rs

0
migrations/postgresql/2026-03-09-005927-add_archives/down.sql → migrations/postgresql/2026-03-09-005927_add_archives/down.sql

0
migrations/postgresql/2026-03-09-005927-add_archives/up.sql → migrations/postgresql/2026-03-09-005927_add_archives/up.sql

0
migrations/sqlite/2026-03-09-005927-add_archives/down.sql → migrations/sqlite/2026-03-09-005927_add_archives/down.sql

0
migrations/sqlite/2026-03-09-005927-add_archives/up.sql → migrations/sqlite/2026-03-09-005927_add_archives/up.sql

105
src/api/core/ciphers.rs

@ -538,17 +538,13 @@ pub async fn update_cipher_from_data(
cipher.save(conn).await?; cipher.save(conn).await?;
cipher.move_to_folder(data.folder_id, &headers.user.uuid, conn).await?; cipher.move_to_folder(data.folder_id, &headers.user.uuid, conn).await?;
cipher.set_favorite(data.favorite, &headers.user.uuid, conn).await?; cipher.set_favorite(data.favorite, &headers.user.uuid, conn).await?;
let archived_at = match data.archived_date {
Some(dt_str) => match NaiveDateTime::parse_from_str(&dt_str, "%+") { if let Some(dt_str) = data.archived_date {
Ok(dt) => Some(dt), match NaiveDateTime::parse_from_str(&dt_str, "%+") {
Err(err) => { Ok(dt) => cipher.set_archived_at(dt, &headers.user.uuid, conn).await?,
warn!("Error parsing ArchivedDate '{dt_str}': {err}"); Err(err) => warn!("Error parsing ArchivedDate '{dt_str}': {err}"),
None }
} }
},
None => None,
};
cipher.set_archived_at(archived_at, &headers.user.uuid, conn).await?;
if ut != UpdateType::None { if ut != UpdateType::None {
// Only log events for organizational ciphers // Only log events for organizational ciphers
@ -1733,7 +1729,7 @@ async fn purge_personal_vault(
#[put("/ciphers/<cipher_id>/archive")] #[put("/ciphers/<cipher_id>/archive")]
async fn archive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { async fn archive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
set_archived_cipher_by_uuid(&cipher_id, &headers, true, false, &conn, &nt).await archive_cipher(&cipher_id, &headers, false, &conn, &nt).await
} }
#[put("/ciphers/archive", data = "<data>")] #[put("/ciphers/archive", data = "<data>")]
@ -1743,12 +1739,12 @@ async fn archive_cipher_selected(
conn: DbConn, conn: DbConn,
nt: Notify<'_>, nt: Notify<'_>,
) -> JsonResult { ) -> JsonResult {
set_archived_multiple_ciphers(data, &headers, true, &conn, &nt).await archive_multiple_ciphers(data, &headers, &conn, &nt).await
} }
#[put("/ciphers/<cipher_id>/unarchive")] #[put("/ciphers/<cipher_id>/unarchive")]
async fn unarchive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult { async fn unarchive_cipher_put(cipher_id: CipherId, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
set_archived_cipher_by_uuid(&cipher_id, &headers, false, false, &conn, &nt).await unarchive_cipher(&cipher_id, &headers, false, &conn, &nt).await
} }
#[put("/ciphers/unarchive", data = "<data>")] #[put("/ciphers/unarchive", data = "<data>")]
@ -1758,7 +1754,7 @@ async fn unarchive_cipher_selected(
conn: DbConn, conn: DbConn,
nt: Notify<'_>, nt: Notify<'_>,
) -> JsonResult { ) -> JsonResult {
set_archived_multiple_ciphers(data, &headers, false, &conn, &nt).await unarchive_multiple_ciphers(data, &headers, &conn, &nt).await
} }
#[derive(PartialEq)] #[derive(PartialEq)]
@ -1979,10 +1975,9 @@ async fn _delete_cipher_attachment_by_id(
Ok(Json(json!({"cipher":cipher_json}))) Ok(Json(json!({"cipher":cipher_json})))
} }
async fn set_archived_cipher_by_uuid( async fn archive_cipher(
cipher_id: &CipherId, cipher_id: &CipherId,
headers: &Headers, headers: &Headers,
archived: bool,
multi_archive: bool, multi_archive: bool,
conn: &DbConn, conn: &DbConn,
nt: &Notify<'_>, nt: &Notify<'_>,
@ -1995,12 +1990,7 @@ async fn set_archived_cipher_by_uuid(
err!("Cipher is not accessible for the current user") err!("Cipher is not accessible for the current user")
} }
let archived_at = if archived { cipher.set_archived_at(Utc::now().naive_utc(), &headers.user.uuid, conn).await?;
Some(Utc::now().naive_utc())
} else {
None
};
cipher.set_archived_at(archived_at, &headers.user.uuid, conn).await?;
if !multi_archive { if !multi_archive {
nt.send_cipher_update( nt.send_cipher_update(
@ -2017,10 +2007,67 @@ async fn set_archived_cipher_by_uuid(
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?)) Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
} }
async fn set_archived_multiple_ciphers( async fn unarchive_cipher(
cipher_id: &CipherId,
headers: &Headers,
multi_unarchive: bool,
conn: &DbConn,
nt: &Notify<'_>,
) -> JsonResult {
let Some(cipher) = Cipher::find_by_uuid(cipher_id, conn).await else {
err!("Cipher doesn't exist")
};
if !cipher.is_accessible_to_user(&headers.user.uuid, conn).await {
err!("Cipher is not accessible for the current user")
}
cipher.unarchive(&headers.user.uuid, conn).await?;
if !multi_unarchive {
nt.send_cipher_update(
UpdateType::SyncCipherUpdate,
&cipher,
&cipher.update_users_revision(conn).await,
&headers.device,
None,
conn,
)
.await;
}
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
}
/// Archives every cipher listed in the request body for the current user.
///
/// Each cipher is archived via `archive_cipher` in multi mode (no per-cipher
/// push); the first failure aborts and is returned to the caller. On success
/// a single general `SyncCiphers` notification is sent and a Bitwarden-style
/// list object with the updated ciphers is returned.
async fn archive_multiple_ciphers(
    data: Json<CipherIdsData>,
    headers: &Headers,
    conn: &DbConn,
    nt: &Notify<'_>,
) -> JsonResult {
    let data = data.into_inner();

    let mut ciphers: Vec<Value> = Vec::new();
    for cipher_id in data.ids {
        // `?` propagates the first error exactly like an early return would.
        let json = archive_cipher(&cipher_id, headers, true, conn, nt).await?;
        ciphers.push(json.into_inner());
    }

    // Multi archive does not send out a push for each cipher, we need to send a general sync here
    nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, conn).await;

    Ok(Json(json!({
        "data": ciphers,
        "object": "list",
        "continuationToken": null
    })))
}
async fn unarchive_multiple_ciphers(
data: Json<CipherIdsData>, data: Json<CipherIdsData>,
headers: &Headers, headers: &Headers,
archived: bool,
conn: &DbConn, conn: &DbConn,
nt: &Notify<'_>, nt: &Notify<'_>,
) -> JsonResult { ) -> JsonResult {
@ -2028,13 +2075,13 @@ async fn set_archived_multiple_ciphers(
let mut ciphers: Vec<Value> = Vec::new(); let mut ciphers: Vec<Value> = Vec::new();
for cipher_id in data.ids { for cipher_id in data.ids {
match set_archived_cipher_by_uuid(&cipher_id, headers, archived, true, conn, nt).await { match unarchive_cipher(&cipher_id, headers, true, conn, nt).await {
Ok(json) => ciphers.push(json.into_inner()), Ok(json) => ciphers.push(json.into_inner()),
err => return err, err => return err,
} }
} }
// Multi archive actions do not send out a push for each cipher, we need to send a general sync here // Multi unarchive does not send out a push for each cipher, we need to send a general sync here
nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, conn).await; nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, conn).await;
Ok(Json(json!({ Ok(Json(json!({
@ -2072,7 +2119,7 @@ impl CipherSyncData {
let cipher_favorites: HashSet<CipherId>; let cipher_favorites: HashSet<CipherId>;
let cipher_archives: HashMap<CipherId, NaiveDateTime>; let cipher_archives: HashMap<CipherId, NaiveDateTime>;
match sync_type { match sync_type {
// User Sync supports Folders and Favorites // User Sync supports Folders, Favorites, and Archives
CipherSyncType::User => { CipherSyncType::User => {
// Generate a HashMap with the Cipher UUID as key and the Folder UUID as value // Generate a HashMap with the Cipher UUID as key and the Folder UUID as value
cipher_folders = FolderCipher::find_by_user(user_id, conn).await.into_iter().collect(); cipher_folders = FolderCipher::find_by_user(user_id, conn).await.into_iter().collect();
@ -2083,7 +2130,7 @@ impl CipherSyncData {
// Generate a HashMap with the Cipher UUID as key and the archived date time as value // Generate a HashMap with the Cipher UUID as key and the archived date time as value
cipher_archives = Archive::find_by_user(user_id, conn).await.into_iter().collect(); cipher_archives = Archive::find_by_user(user_id, conn).await.into_iter().collect();
} }
// Organization Sync does not support Folders and Favorites. // Organization Sync does not support Folders, Favorites, or Archives.
// If these are set, it will cause issues in the web-vault. // If these are set, it will cause issues in the web-vault.
CipherSyncType::Organization => { CipherSyncType::Organization => {
cipher_folders = HashMap::with_capacity(0); cipher_folders = HashMap::with_capacity(0);

76
src/db/models/archive.rs

@ -28,61 +28,55 @@ impl Archive {
}} }}
} }
// Sets the specified cipher to be archived or unarchived // Saves (inserts or updates) an archive record with the provided timestamp
pub async fn set_archived_at( pub async fn save(
archived_at: Option<NaiveDateTime>,
cipher_uuid: &CipherId,
user_uuid: &UserId, user_uuid: &UserId,
cipher_uuid: &CipherId,
archived_at: NaiveDateTime,
conn: &DbConn, conn: &DbConn,
) -> EmptyResult { ) -> EmptyResult {
let existing = Self::get_archived_at(cipher_uuid, user_uuid, conn).await; User::update_uuid_revision(user_uuid, conn).await;
db_run! { conn:
match (existing, archived_at) { sqlite, mysql {
// Not archived - archive at the provided timestamp diesel::replace_into(archives::table)
(None, Some(dt)) => {
User::update_uuid_revision(user_uuid, conn).await;
db_run! { conn: {
diesel::insert_into(archives::table)
.values(( .values((
archives::user_uuid.eq(user_uuid), archives::user_uuid.eq(user_uuid),
archives::cipher_uuid.eq(cipher_uuid), archives::cipher_uuid.eq(cipher_uuid),
archives::archived_at.eq(dt), archives::archived_at.eq(archived_at),
)) ))
.execute(conn) .execute(conn)
.map_res("Error archiving") .map_res("Error saving archive")
}}
} }
// Already archived - update with the provided timestamp postgresql {
(Some(_), Some(dt)) => { diesel::insert_into(archives::table)
User::update_uuid_revision(user_uuid, conn).await; .values((
db_run! { conn: { archives::user_uuid.eq(user_uuid),
diesel::update( archives::cipher_uuid.eq(cipher_uuid),
archives::table archives::archived_at.eq(archived_at),
.filter(archives::user_uuid.eq(user_uuid)) ))
.filter(archives::cipher_uuid.eq(cipher_uuid)) .on_conflict((archives::user_uuid, archives::cipher_uuid))
) .do_update()
.set(archives::archived_at.eq(dt)) .set(archives::archived_at.eq(archived_at))
.execute(conn)
.map_res("Error updating archive date")
}}
}
(Some(_), None) => {
User::update_uuid_revision(user_uuid, conn).await;
db_run! { conn: {
diesel::delete(
archives::table
.filter(archives::user_uuid.eq(user_uuid))
.filter(archives::cipher_uuid.eq(cipher_uuid))
)
.execute(conn) .execute(conn)
.map_res("Error unarchiving") .map_res("Error saving archive")
}}
} }
// Otherwise, the archived status is already what it should be
_ => Ok(()),
} }
} }
/// Deletes the archive record for a specific cipher/user pair, effectively
/// un-archiving the cipher for that user. The user's revision is bumped so
/// clients pick up the change on their next sync.
pub async fn delete_by_cipher(user_uuid: &UserId, cipher_uuid: &CipherId, conn: &DbConn) -> EmptyResult {
    User::update_uuid_revision(user_uuid, conn).await;

    db_run! { conn: {
        diesel::delete(
            archives::table
                .filter(archives::user_uuid.eq(user_uuid))
                .filter(archives::cipher_uuid.eq(cipher_uuid))
        )
        .execute(conn)
        .map_res("Error deleting archive")
    }}
}
/// Return a vec with (cipher_uuid, archived_at) /// Return a vec with (cipher_uuid, archived_at)
/// This is used during a full sync so we only need one query for all archive matches /// This is used during a full sync so we only need one query for all archive matches
pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec<(CipherId, NaiveDateTime)> { pub async fn find_by_user(user_uuid: &UserId, conn: &DbConn) -> Vec<(CipherId, NaiveDateTime)> {

13
src/db/models/cipher.rs

@ -751,13 +751,12 @@ impl Cipher {
Archive::get_archived_at(&self.uuid, user_uuid, conn).await Archive::get_archived_at(&self.uuid, user_uuid, conn).await
} }
pub async fn set_archived_at( pub async fn set_archived_at(&self, archived_at: NaiveDateTime, user_uuid: &UserId, conn: &DbConn) -> EmptyResult {
&self, Archive::save(user_uuid, &self.uuid, archived_at, conn).await
archived_at: Option<NaiveDateTime>, }
user_uuid: &UserId,
conn: &DbConn, pub async fn unarchive(&self, user_uuid: &UserId, conn: &DbConn) -> EmptyResult {
) -> EmptyResult { Archive::delete_by_cipher(user_uuid, &self.uuid, conn).await
Archive::set_archived_at(archived_at, &self.uuid, user_uuid, conn).await
} }
pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &DbConn) -> Option<FolderId> { pub async fn get_folder_uuid(&self, user_uuid: &UserId, conn: &DbConn) -> Option<FolderId> {

Loading…
Cancel
Save