
Merge 53d7713d62 into 2a18665288

pull/5626/merge
Chase Douglas, 3 weeks ago (committed by GitHub)
commit c39696ff42
 1. 927  Cargo.lock
 2.  11  Cargo.toml
 3.   3  build.rs
 4.   8  src/api/admin.rs
 5.  54  src/api/core/ciphers.rs
 6.   2  src/api/core/emergency_access.rs
 7.  17  src/api/core/organizations.rs
 8.  63  src/api/core/sends.rs
 9.   2  src/api/core/two_factor/duo.rs
10.  52  src/api/icons.rs
11.  67  src/auth.rs
12. 162  src/config.rs
13.  50  src/db/models/attachment.rs
14.  18  src/db/models/cipher.rs
15.   5  src/db/models/send.rs
16.   3  src/error.rs
17.  25  src/main.rs
18.  22  src/util.rs

927  Cargo.lock

File diff suppressed because it is too large

11  Cargo.toml

@@ -31,6 +31,7 @@ enable_mimalloc = ["dep:mimalloc"]
 # You also need to set an env variable `QUERY_LOGGER=1` to fully activate this so you do not have to re-compile
 # if you want to turn off the logging for a specific run.
 query_logger = ["dep:diesel_logger"]
+s3 = ["opendal/services-s3", "dep:aws-config", "dep:aws-credential-types", "dep:anyhow", "dep:reqsign"]

 # Enable unstable features, requires nightly
 # Currently only used to enable rusts official ip support
@@ -72,6 +73,7 @@ dashmap = "6.1.0"
 # Async futures
 futures = "0.3.31"
 tokio = { version = "1.43.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
+tokio-util = { version = "0.7.14", features = ["compat"] }

 # A generic serialization/deserialization framework
 serde = { version = "1.0.218", features = ["derive"] }
@@ -174,6 +176,15 @@ rpassword = "7.3.1"
 # Loading a dynamic CSS Stylesheet
 grass_compiler = { version = "0.13.4", default-features = false }

+# Files are accessed through Apache OpenDAL
+opendal = { version = "0.51.2", features = ["services-fs"] }
+
+# For retrieving AWS credentials, including temporary SSO credentials
+anyhow = { version = "1.0.96", optional = true }
+aws-config = { version = "1.5.12", features = ["behavior-version-latest"], optional = true }
+aws-credential-types = { version = "1.2.1", optional = true }
+reqsign = { version = "0.16.1", optional = true }
+
 [patch.crates-io]
 # Patch yubico to remove duplicate crates of older versions
 yubico = { git = "https://github.com/BlackDex/yubico-rs", rev = "00df14811f58155c0f02e3ab10f1570ed3e115c6" }

3  build.rs

@@ -11,6 +11,8 @@ fn main() {
     println!("cargo:rustc-cfg=postgresql");
     #[cfg(feature = "query_logger")]
     println!("cargo:rustc-cfg=query_logger");
+    #[cfg(feature = "s3")]
+    println!("cargo:rustc-cfg=s3");

     #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
     compile_error!(
@@ -23,6 +25,7 @@ fn main() {
     println!("cargo::rustc-check-cfg=cfg(mysql)");
     println!("cargo::rustc-check-cfg=cfg(postgresql)");
     println!("cargo::rustc-check-cfg=cfg(query_logger)");
+    println!("cargo::rustc-check-cfg=cfg(s3)");

     // Rerun when these paths are changed.
     // Someone could have checked-out a tag or specific commit, but no other files changed.
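
Reviewer note: registering the custom cfg via `cargo::rustc-check-cfg=cfg(s3)` keeps `#[cfg(s3)]` from tripping rustc's unknown-cfg lint. A minimal, hypothetical illustration of gating on the cfg that build.rs emits (function names are not from this PR):

    // Compiled only when build.rs printed `cargo:rustc-cfg=s3`,
    // i.e. the crate was built with `--features s3`.
    #[cfg(s3)]
    fn storage_backend() -> &'static str {
        "s3"
    }

    // Fallback for builds without the feature.
    #[cfg(not(s3))]
    fn storage_backend() -> &'static str {
        "fs"
    }

    fn main() {
        println!("storage backend: {}", storage_backend());
    }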

8  src/api/admin.rs

@@ -745,17 +745,17 @@ fn get_diagnostics_http(code: u16, _token: AdminToken) -> EmptyResult {
 }

 #[post("/config", format = "application/json", data = "<data>")]
-fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
+async fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
     let data: ConfigBuilder = data.into_inner();
-    if let Err(e) = CONFIG.update_config(data, true) {
+    if let Err(e) = CONFIG.update_config(data, true).await {
         err!(format!("Unable to save config: {e:?}"))
     }
     Ok(())
 }

 #[post("/config/delete", format = "application/json")]
-fn delete_config(_token: AdminToken) -> EmptyResult {
-    if let Err(e) = CONFIG.delete_user_config() {
+async fn delete_config(_token: AdminToken) -> EmptyResult {
+    if let Err(e) = CONFIG.delete_user_config().await {
         err!(format!("Unable to delete config: {e:?}"))
     }
     Ok(())
54  src/api/core/ciphers.rs

@@ -11,10 +11,11 @@ use rocket::{
 use serde_json::Value;

 use crate::auth::ClientVersion;
-use crate::util::NumberOrString;
+use crate::util::{save_temp_file, NumberOrString};
 use crate::{
     api::{self, core::log_event, EmptyResult, JsonResult, Notify, PasswordOrOtpData, UpdateType},
     auth::Headers,
+    config::PathType,
     crypto,
     db::{models::*, DbConn, DbPool},
     CONFIG,
@@ -105,12 +106,7 @@ struct SyncData {
 }

 #[get("/sync?<data..>")]
-async fn sync(
-    data: SyncData,
-    headers: Headers,
-    client_version: Option<ClientVersion>,
-    mut conn: DbConn,
-) -> Json<Value> {
+async fn sync(data: SyncData, headers: Headers, client_version: Option<ClientVersion>, mut conn: DbConn) -> JsonResult {
     let user_json = headers.user.to_json(&mut conn).await;

     // Get all ciphers which are visible by the user
@@ -134,7 +130,7 @@ async fn sync(
     for c in ciphers {
         ciphers_json.push(
             c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &mut conn)
-                .await,
+                .await?,
         );
     }
@@ -159,7 +155,7 @@ async fn sync(
         api::core::_get_eq_domains(headers, true).into_inner()
     };

-    Json(json!({
+    Ok(Json(json!({
         "profile": user_json,
         "folders": folders_json,
         "collections": collections_json,
@@ -168,11 +164,11 @@ async fn sync(
         "domains": domains_json,
         "sends": sends_json,
         "object": "sync"
-    }))
+    })))
 }

 #[get("/ciphers")]
-async fn get_ciphers(headers: Headers, mut conn: DbConn) -> Json<Value> {
+async fn get_ciphers(headers: Headers, mut conn: DbConn) -> JsonResult {
     let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &mut conn).await;
     let cipher_sync_data = CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &mut conn).await;
@@ -180,15 +176,15 @@ async fn get_ciphers(headers: Headers, mut conn: DbConn) -> Json<Value> {
     for c in ciphers {
         ciphers_json.push(
             c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &mut conn)
-                .await,
+                .await?,
         );
     }

-    Json(json!({
+    Ok(Json(json!({
         "data": ciphers_json,
         "object": "list",
         "continuationToken": null
-    }))
+    })))
 }

 #[get("/ciphers/<cipher_id>")]
@@ -201,7 +197,7 @@ async fn get_cipher(cipher_id: CipherId, headers: Headers, mut conn: DbConn) ->
         err!("Cipher is not owned by user")
     }

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }

 #[get("/ciphers/<cipher_id>/admin")]
@@ -339,7 +335,7 @@ async fn post_ciphers(data: Json<CipherData>, headers: Headers, mut conn: DbConn
     let mut cipher = Cipher::new(data.r#type, data.name.clone());
     update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }

 /// Enforces the personal ownership policy on user-owned ciphers, if applicable.
@@ -676,7 +672,7 @@ async fn put_cipher(
     update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }

 #[post("/ciphers/<cipher_id>/partial", data = "<data>")]
@@ -714,7 +710,7 @@ async fn put_cipher_partial(
     // Update favorite
     cipher.set_favorite(Some(data.favorite), &headers.user.uuid, &mut conn).await?;

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }

 #[derive(Deserialize)]
@@ -825,7 +821,7 @@ async fn post_collections_update(
     )
     .await;

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }

 #[put("/ciphers/<cipher_id>/collections-admin", data = "<data>")]
@@ -1030,7 +1026,7 @@ async fn share_cipher_by_uuid(
     update_cipher_from_data(&mut cipher, data.cipher, headers, Some(shared_to_collections), conn, nt, ut).await?;

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
 }

 /// v2 API for downloading an attachment. This just redirects the client to
@@ -1055,7 +1051,7 @@ async fn get_attachment(
     }

     match Attachment::find_by_id(&attachment_id, &mut conn).await {
-        Some(attachment) if cipher_id == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host))),
+        Some(attachment) if cipher_id == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host).await?)),
         Some(_) => err!("Attachment doesn't belong to cipher"),
         None => err!("Attachment doesn't exist"),
     }
@@ -1116,7 +1112,7 @@ async fn post_attachment_v2(
         "attachmentId": attachment_id,
         "url": url,
         "fileUploadType": FileUploadType::Direct as i32,
-        response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await,
+        response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?,
     })))
 }
@@ -1142,7 +1138,7 @@ async fn save_attachment(
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> Result<(Cipher, DbConn), crate::error::Error> {
-    let mut data = data.into_inner();
+    let data = data.into_inner();

     let Some(size) = data.data.len().to_i64() else {
         err!("Attachment data size overflow");
@@ -1269,13 +1265,7 @@ async fn save_attachment(
         attachment.save(&mut conn).await.expect("Error saving attachment");
     }

-    let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_id.as_ref());
-    let file_path = folder_path.join(file_id.as_ref());
-    tokio::fs::create_dir_all(&folder_path).await?;
-
-    if let Err(_err) = data.data.persist_to(&file_path).await {
-        data.data.move_copy_to(file_path).await?
-    }
+    save_temp_file(PathType::Attachments, &format!("{cipher_id}/{file_id}"), data.data, true).await?;

     nt.send_cipher_update(
         UpdateType::SyncCipherUpdate,
@@ -1342,7 +1332,7 @@ async fn post_attachment(
     let (cipher, mut conn) = save_attachment(attachment, cipher_id, data, &headers, conn, nt).await?;

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }

 #[post("/ciphers/<cipher_id>/attachment-admin", format = "multipart/form-data", data = "<data>")]
@@ -1786,7 +1776,7 @@ async fn _restore_cipher_by_uuid(
         .await;
     }

-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
 }

 async fn _restore_multiple_ciphers(

2  src/api/core/emergency_access.rs

@@ -582,7 +582,7 @@ async fn view_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut
                 CipherSyncType::User,
                 &mut conn,
             )
-            .await,
+            .await?,
         );
     }

17  src/api/core/organizations.rs

@@ -901,21 +901,26 @@ async fn get_org_details(data: OrgIdData, headers: OrgMemberHeaders, mut conn: D
     }

     Ok(Json(json!({
-        "data": _get_org_details(&data.organization_id, &headers.host, &headers.user.uuid, &mut conn).await,
+        "data": _get_org_details(&data.organization_id, &headers.host, &headers.user.uuid, &mut conn).await?,
         "object": "list",
         "continuationToken": null,
     })))
 }

-async fn _get_org_details(org_id: &OrganizationId, host: &str, user_id: &UserId, conn: &mut DbConn) -> Value {
+async fn _get_org_details(
+    org_id: &OrganizationId,
+    host: &str,
+    user_id: &UserId,
+    conn: &mut DbConn,
+) -> Result<Value, crate::Error> {
     let ciphers = Cipher::find_by_org(org_id, conn).await;
     let cipher_sync_data = CipherSyncData::new(user_id, CipherSyncType::Organization, conn).await;

     let mut ciphers_json = Vec::with_capacity(ciphers.len());
     for c in ciphers {
-        ciphers_json.push(c.to_json(host, user_id, Some(&cipher_sync_data), CipherSyncType::Organization, conn).await);
+        ciphers_json.push(c.to_json(host, user_id, Some(&cipher_sync_data), CipherSyncType::Organization, conn).await?);
     }
-    json!(ciphers_json)
+    Ok(json!(ciphers_json))
 }

 #[derive(FromForm)]
@@ -3317,7 +3322,7 @@ async fn get_org_export(
             "continuationToken": null,
         },
         "ciphers": {
-            "data": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await),
+            "data": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await?),
             "object": "list",
             "continuationToken": null,
         }
@@ -3326,7 +3331,7 @@ async fn get_org_export(
         // v2023.1.0 and newer response
         Ok(Json(json!({
             "collections": convert_json_key_lcase_first(_get_org_collections(&org_id, &mut conn).await),
-            "ciphers": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await),
+            "ciphers": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await?),
         })))
     }
 }

63  src/api/core/sends.rs

@@ -1,4 +1,6 @@
+use std::error::Error as _;
 use std::path::Path;
+use std::time::Duration;

 use chrono::{DateTime, TimeDelta, Utc};
 use num_traits::ToPrimitive;
@@ -11,8 +13,9 @@ use serde_json::Value;
 use crate::{
     api::{ApiResult, EmptyResult, JsonResult, Notify, UpdateType},
     auth::{ClientIp, Headers, Host},
+    config::PathType,
     db::{models::*, DbConn, DbPool},
-    util::NumberOrString,
+    util::{save_temp_file, NumberOrString},
     CONFIG,
 };
@@ -210,7 +213,7 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
     let UploadData {
         model,
-        mut data,
+        data,
     } = data.into_inner();
     let model = model.into_inner();
@@ -250,13 +253,8 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
     }

     let file_id = crate::crypto::generate_send_file_id();
-    let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
-    let file_path = folder_path.join(&file_id);
-    tokio::fs::create_dir_all(&folder_path).await?;

-    if let Err(_err) = data.persist_to(&file_path).await {
-        data.move_copy_to(file_path).await?
-    }
+    save_temp_file(PathType::Sends, &format!("{}/{file_id}", send.uuid), data, true).await?;

     let mut data_value: Value = serde_json::from_str(&send.data)?;
     if let Some(o) = data_value.as_object_mut() {
@@ -363,7 +361,7 @@ async fn post_send_file_v2_data(
 ) -> EmptyResult {
     enforce_disable_send_policy(&headers, &mut conn).await?;

-    let mut data = data.into_inner();
+    let data = data.into_inner();

     let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
         err!("Send not found. Unable to save the file.", "Invalid send uuid or does not belong to user.")
@@ -406,20 +404,30 @@ async fn post_send_file_v2_data(
         err!("Send file size does not match.", format!("Expected a file size of {} got {size}", send_data.size));
     }

-    let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_id);
-    let file_path = folder_path.join(file_id);
-
-    // Check if the file already exists, if that is the case do not overwrite it
-    if tokio::fs::metadata(&file_path).await.is_ok() {
-        err!("Send file has already been uploaded.", format!("File {file_path:?} already exists"))
-    }
-
-    tokio::fs::create_dir_all(&folder_path).await?;
-
-    if let Err(_err) = data.data.persist_to(&file_path).await {
-        data.data.move_copy_to(file_path).await?
-    }
+    let file_path = format!("{send_id}/{file_id}");
+
+    save_temp_file(PathType::Sends, &file_path, data.data, false).await.map_err(|e| {
+        let was_file_exists_error = e
+            .source()
+            .and_then(|e| e.downcast_ref::<std::io::Error>())
+            .and_then(|e| e.get_ref())
+            .and_then(|e| e.downcast_ref::<opendal::Error>())
+            .map(|e| e.kind() == opendal::ErrorKind::ConditionNotMatch)
+            .unwrap_or(false);
+
+        if was_file_exists_error {
+            return crate::Error::new(
+                "Send file has already been uploaded.",
+                format!("File {file_path:?} already exists"),
+            );
+        }
+
+        crate::Error::new(
+            "Unexpected error while creating send file",
+            format!("Error while saving send file at path {file_path}: {e:?}"),
+        )
+    })?;

     nt.send_send_update(
         UpdateType::SyncSendCreate,
         &send,
@@ -551,15 +559,26 @@ async fn post_access_file(
     )
     .await;

-    let token_claims = crate::auth::generate_send_claims(&send_id, &file_id);
-    let token = crate::auth::encode_jwt(&token_claims);
-
     Ok(Json(json!({
         "object": "send-fileDownload",
         "id": file_id,
-        "url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
+        "url": download_url(&host, &send_id, &file_id).await?,
     })))
 }

+async fn download_url(host: &Host, send_id: &SendId, file_id: &SendFileId) -> Result<String, crate::Error> {
+    let operator = CONFIG.opendal_operator_for_path_type(PathType::Sends)?;
+
+    if operator.info().scheme() == opendal::Scheme::Fs {
+        let token_claims = crate::auth::generate_send_claims(send_id, file_id);
+        let token = crate::auth::encode_jwt(&token_claims);
+        Ok(format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token))
+    } else {
+        Ok(operator.presign_read(&format!("{send_id}/{file_id}"), Duration::from_secs(5 * 60)).await?.uri().to_string())
+    }
+}
+
 #[get("/sends/<send_id>/<file_id>?<t>")]
 async fn download_send(send_id: SendId, file_id: SendFileId, t: &str) -> Option<NamedFile> {
     if let Ok(claims) = crate::auth::decode_send(t) {
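
Reviewer note: the `was_file_exists_error` check above walks the error's `source()` chain because `save_temp_file` surfaces the OpenDAL failure wrapped in an `std::io::Error` inside `crate::Error`. A standalone sketch of that chain walk, assuming the same two-level wrapping (the helper name is hypothetical):

    use std::error::Error;

    /// True if the error chain bottoms out in an opendal `ConditionNotMatch`,
    /// i.e. an `if_not_exists` write found the object already present.
    fn is_already_exists(err: &(dyn Error + 'static)) -> bool {
        err.source()
            .and_then(|e| e.downcast_ref::<std::io::Error>())
            .and_then(|e| e.get_ref())
            .and_then(|e| e.downcast_ref::<opendal::Error>())
            .map(|e| e.kind() == opendal::ErrorKind::ConditionNotMatch)
            .unwrap_or(false)
    }

Relying on the conditional write, rather than a separate existence probe, also closes the check-then-create race the old `tokio::fs::metadata` code had.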

2  src/api/core/two_factor/duo.rs

@@ -258,7 +258,7 @@ pub(crate) async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiRes
     }
     .map_res("Can't fetch Duo Keys")?;

-    Ok((data.ik, data.sk, CONFIG.get_duo_akey(), data.host))
+    Ok((data.ik, data.sk, CONFIG.get_duo_akey().await, data.host))
 }

 pub async fn generate_duo_signature(email: &str, conn: &mut DbConn) -> ApiResult<(String, String)> {

52  src/api/icons.rs

@@ -14,14 +14,11 @@ use reqwest::{
     Client, Response,
 };
 use rocket::{http::ContentType, response::Redirect, Route};
-use tokio::{
-    fs::{create_dir_all, remove_file, symlink_metadata, File},
-    io::{AsyncReadExt, AsyncWriteExt},
-};

 use html5gum::{Emitter, HtmlString, Readable, StringReader, Tokenizer};

 use crate::{
+    config::PathType,
     error::Error,
     http_client::{get_reqwest_client_builder, should_block_address, CustomHttpClientError},
     util::Cached,
@@ -159,7 +156,7 @@ fn is_valid_domain(domain: &str) -> bool {
 }

 async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
-    let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);
+    let path = format!("{domain}.png");

     // Check for expiration of negatively cached copy
     if icon_is_negcached(&path).await {
@@ -181,7 +178,7 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
     // Get the icon, or None in case of error
     match download_icon(domain).await {
         Ok((icon, icon_type)) => {
-            save_icon(&path, &icon).await;
+            save_icon(&path, icon.to_vec()).await;
             Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
         }
         Err(e) => {
@@ -194,7 +191,7 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
             warn!("Unable to download icon: {:?}", e);
             let miss_indicator = path + ".miss";
-            save_icon(&miss_indicator, &[]).await;
+            save_icon(&miss_indicator, vec![]).await;
             None
         }
     }
@@ -207,11 +204,9 @@ async fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
     }

     // Try to read the cached icon, and return it if it exists
-    if let Ok(mut f) = File::open(path).await {
-        let mut buffer = Vec::new();
-
-        if f.read_to_end(&mut buffer).await.is_ok() {
-            return Some(buffer);
+    if let Ok(operator) = CONFIG.opendal_operator_for_path_type(PathType::IconCache) {
+        if let Ok(buf) = operator.read(path).await {
+            return Some(buf.to_vec());
         }
     }
@@ -219,9 +214,11 @@ async fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
 }

 async fn file_is_expired(path: &str, ttl: u64) -> Result<bool, Error> {
-    let meta = symlink_metadata(path).await?;
-    let modified = meta.modified()?;
-    let age = SystemTime::now().duration_since(modified)?;
+    let operator = CONFIG.opendal_operator_for_path_type(PathType::IconCache)?;
+    let meta = operator.stat(path).await?;
+    let modified =
+        meta.last_modified().ok_or_else(|| std::io::Error::other(format!("No last modified time for `{path}`")))?;
+    let age = SystemTime::now().duration_since(modified.into())?;

     Ok(ttl > 0 && ttl <= age.as_secs())
 }
@@ -233,9 +230,14 @@ async fn icon_is_negcached(path: &str) -> bool {
     match expired {
         // No longer negatively cached, drop the marker
         Ok(true) => {
-            if let Err(e) = remove_file(&miss_indicator).await {
-                error!("Could not remove negative cache indicator for icon {:?}: {:?}", path, e);
+            match CONFIG.opendal_operator_for_path_type(PathType::IconCache) {
+                Ok(operator) => {
+                    if let Err(e) = operator.delete(&miss_indicator).await {
+                        error!("Could not remove negative cache indicator for icon {:?}: {:?}", path, e);
+                    }
+                }
+                Err(e) => error!("Could not remove negative cache indicator for icon {:?}: {:?}", path, e),
             }
             false
         }
         // The marker hasn't expired yet.
@@ -568,17 +570,17 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
     Ok((buffer, icon_type))
 }

-async fn save_icon(path: &str, icon: &[u8]) {
-    match File::create(path).await {
-        Ok(mut f) => {
-            f.write_all(icon).await.expect("Error writing icon file");
-        }
-        Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
-            create_dir_all(&CONFIG.icon_cache_folder()).await.expect("Error creating icon cache folder");
-        }
-        Err(e) => {
-            warn!("Unable to save icon: {:?}", e);
-        }
-    }
-}
+async fn save_icon(path: &str, icon: Vec<u8>) {
+    let operator = match CONFIG.opendal_operator_for_path_type(PathType::IconCache) {
+        Ok(operator) => operator,
+        Err(e) => {
+            warn!("Failed to get OpenDAL operator while saving icon: {e}");
+            return;
+        }
+    };
+
+    if let Err(e) = operator.write(path, icon).await {
+        warn!("Unable to save icon: {e:?}");
+    }
+}

67  src/auth.rs

@@ -7,16 +7,14 @@ use once_cell::sync::{Lazy, OnceCell};
 use openssl::rsa::Rsa;
 use serde::de::DeserializeOwned;
 use serde::ser::Serialize;
-use std::{
-    env,
-    fs::File,
-    io::{Read, Write},
-    net::IpAddr,
-};
+use std::{env, net::IpAddr};

-use crate::db::models::{
-    AttachmentId, CipherId, CollectionId, DeviceId, EmergencyAccessId, MembershipId, OrgApiKeyId, OrganizationId,
-    SendFileId, SendId, UserId,
+use crate::{
+    config::PathType,
+    db::models::{
+        AttachmentId, CipherId, CollectionId, DeviceId, EmergencyAccessId, MembershipId, OrgApiKeyId, OrganizationId,
+        SendFileId, SendId, UserId,
+    },
 };
 use crate::{error::Error, CONFIG};
@@ -40,37 +38,44 @@ static JWT_REGISTER_VERIFY_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|regis
 static PRIVATE_RSA_KEY: OnceCell<EncodingKey> = OnceCell::new();
 static PUBLIC_RSA_KEY: OnceCell<DecodingKey> = OnceCell::new();

-pub fn initialize_keys() -> Result<(), Error> {
-    fn read_key(create_if_missing: bool) -> Result<(Rsa<openssl::pkey::Private>, Vec<u8>), Error> {
-        let mut priv_key_buffer = Vec::with_capacity(2048);
-
-        let mut priv_key_file = File::options()
-            .create(create_if_missing)
-            .truncate(false)
-            .read(true)
-            .write(create_if_missing)
-            .open(CONFIG.private_rsa_key())?;
-
-        #[allow(clippy::verbose_file_reads)]
-        let bytes_read = priv_key_file.read_to_end(&mut priv_key_buffer)?;
-
-        let rsa_key = if bytes_read > 0 {
-            Rsa::private_key_from_pem(&priv_key_buffer[..bytes_read])?
-        } else if create_if_missing {
-            // Only create the key if the file doesn't exist or is empty
-            let rsa_key = Rsa::generate(2048)?;
-            priv_key_buffer = rsa_key.private_key_to_pem()?;
-            priv_key_file.write_all(&priv_key_buffer)?;
-            info!("Private key '{}' created correctly", CONFIG.private_rsa_key());
-            rsa_key
-        } else {
-            err!("Private key does not exist or invalid format", CONFIG.private_rsa_key());
-        };
-
-        Ok((rsa_key, priv_key_buffer))
+pub async fn initialize_keys() -> Result<(), Error> {
+    async fn read_key(create_if_missing: bool) -> Result<(Rsa<openssl::pkey::Private>, Vec<u8>), std::io::Error> {
+        use std::io::{Error, ErrorKind};
+
+        let rsa_key_filename = std::path::PathBuf::from(CONFIG.private_rsa_key())
+            .file_name()
+            .ok_or_else(|| Error::other("Private RSA key path missing filename"))?
+            .to_str()
+            .ok_or_else(|| Error::other("Private RSA key path filename is not valid UTF-8"))?
+            .to_string();
+
+        let operator = CONFIG.opendal_operator_for_path_type(PathType::RsaKey).map_err(Error::other)?;
+
+        let priv_key_buffer = match operator.read(&rsa_key_filename).await {
+            Ok(buffer) => Some(buffer),
+            Err(e) if e.kind() == opendal::ErrorKind::NotFound && create_if_missing => None,
+            Err(e) if e.kind() == opendal::ErrorKind::NotFound => {
+                return Err(Error::new(ErrorKind::NotFound, "Private key not found"))
+            }
+            Err(e) => return Err(Error::new(ErrorKind::InvalidData, format!("Error reading private key: {e}"))),
+        };
+
+        if let Some(priv_key_buffer) = priv_key_buffer {
+            Ok((Rsa::private_key_from_pem(priv_key_buffer.to_vec().as_slice())?, priv_key_buffer.to_vec()))
+        } else {
+            let rsa_key = Rsa::generate(2048)?;
+            let priv_key_buffer = rsa_key.private_key_to_pem()?;
+            operator.write(&rsa_key_filename, priv_key_buffer).await?;
+            info!("Private key '{}' created correctly", CONFIG.private_rsa_key());
+            Err(Error::new(ErrorKind::NotFound, "Private key created, forcing attempt to read it again"))
+        }
     }

-    let (priv_key, priv_key_buffer) = read_key(true).or_else(|_| read_key(false))?;
+    let (priv_key, priv_key_buffer) = match read_key(true).await {
+        Ok(key) => key,
+        Err(e) if e.kind() == std::io::ErrorKind::NotFound => read_key(false).await?,
+        Err(e) => return Err(e.into()),
+    };
+
     let pub_key_buffer = priv_key.public_key_to_pem()?;

     let enc = EncodingKey::from_rsa_pem(&priv_key_buffer)?;
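
Reviewer note: `read_key(true)` deliberately returns `NotFound` right after creating the key, and the caller reacts by calling `read_key(false)` to re-read it. A reduced sketch of that read-or-create-then-reread shape in plain OpenDAL calls (path and payload are placeholders; the real code generates an RSA key):

    async fn load_or_create(op: &opendal::Operator, path: &str) -> opendal::Result<Vec<u8>> {
        match op.read(path).await {
            Ok(buf) => Ok(buf.to_vec()),
            Err(e) if e.kind() == opendal::ErrorKind::NotFound => {
                op.write(path, b"generated-secret".to_vec()).await?;
                // Re-read instead of trusting our own bytes, so a concurrent
                // writer that won the race still yields one consistent key.
                Ok(op.read(path).await?.to_vec())
            }
            Err(e) => Err(e),
        }
    }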

162  src/config.rs

@@ -1,9 +1,10 @@
 use std::{
+    collections::HashMap,
     env::consts::EXE_SUFFIX,
     process::exit,
     sync::{
         atomic::{AtomicBool, Ordering},
-        RwLock,
+        LazyLock, Mutex, RwLock,
     },
 };
@@ -22,10 +23,32 @@ static CONFIG_FILE: Lazy<String> = Lazy::new(|| {
     get_env("CONFIG_FILE").unwrap_or_else(|| format!("{data_folder}/config.json"))
 });

+static CONFIG_FILE_PARENT_DIR: LazyLock<String> = LazyLock::new(|| {
+    let path = std::path::PathBuf::from(&*CONFIG_FILE);
+    path.parent().unwrap_or(std::path::Path::new("data")).to_str().unwrap_or("data").to_string()
+});
+
+static CONFIG_FILENAME: LazyLock<String> = LazyLock::new(|| {
+    let path = std::path::PathBuf::from(&*CONFIG_FILE);
+    path.file_name().unwrap_or(std::ffi::OsStr::new("config.json")).to_str().unwrap_or("config.json").to_string()
+});
+
 pub static SKIP_CONFIG_VALIDATION: AtomicBool = AtomicBool::new(false);

 pub static CONFIG: Lazy<Config> = Lazy::new(|| {
-    Config::load().unwrap_or_else(|e| {
+    std::thread::spawn(|| {
+        let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap_or_else(|e| {
+            println!("Error loading config:\n {e:?}\n");
+            exit(12)
+        });
+        rt.block_on(Config::load()).unwrap_or_else(|e| {
+            println!("Error loading config:\n {e:?}\n");
+            exit(12)
+        })
+    })
+    .join()
+    .unwrap_or_else(|e| {
         println!("Error loading config:\n {e:?}\n");
         exit(12)
     })
@@ -110,9 +133,12 @@ macro_rules! make_config {
             builder
         }

-        fn from_file(path: &str) -> Result<Self, Error> {
-            let config_str = std::fs::read_to_string(path)?;
-            println!("[INFO] Using saved config from `{path}` for configuration.\n");
+        async fn from_file() -> Result<Self, Error> {
+            let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
+            let config_bytes = operator.read(&CONFIG_FILENAME).await?;
+            let config_str = String::from_utf8(config_bytes.to_vec())
+                .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()))?;
+            println!("[INFO] Using saved config from `{}` for configuration.\n", *CONFIG_FILE);
             serde_json::from_str(&config_str).map_err(Into::into)
         }
@@ -1132,11 +1158,96 @@ fn smtp_convert_deprecated_ssl_options(smtp_ssl: Option<bool>, smtp_explicit_tls
     "starttls".to_string()
 }

+fn opendal_operator_for_path(path: &str) -> Result<opendal::Operator, Error> {
+    // Cache of previously built operators by path
+    static OPERATORS_BY_PATH: LazyLock<Mutex<HashMap<String, opendal::Operator>>> =
+        LazyLock::new(|| Mutex::new(HashMap::new()));
+
+    let mut operators_by_path =
+        OPERATORS_BY_PATH.lock().map_err(|e| format!("Failed to lock OpenDAL operators cache: {e}"))?;
+
+    if let Some(operator) = operators_by_path.get(path) {
+        return Ok(operator.clone());
+    }
+
+    let operator = if path.starts_with("s3://") {
+        #[cfg(not(s3))]
+        return Err(opendal::Error::new(opendal::ErrorKind::ConfigInvalid, "S3 support is not enabled").into());
+
+        #[cfg(s3)]
+        opendal_s3_operator_for_path(path)?
+    } else {
+        let builder = opendal::services::Fs::default().root(path);
+        opendal::Operator::new(builder)?.finish()
+    };
+
+    operators_by_path.insert(path.to_string(), operator.clone());
+
+    Ok(operator)
+}
+
+#[cfg(s3)]
+fn opendal_s3_operator_for_path(path: &str) -> Result<opendal::Operator, Error> {
+    // This is a custom AWS credential loader that uses the official AWS Rust
+    // SDK config crate to load credentials. This ensures maximum compatibility
+    // with AWS credential configurations. For example, OpenDAL doesn't support
+    // AWS SSO temporary credentials yet.
+    struct OpenDALS3CredentialLoader {}
+
+    #[async_trait]
+    impl reqsign::AwsCredentialLoad for OpenDALS3CredentialLoader {
+        async fn load_credential(&self, _client: reqwest::Client) -> anyhow::Result<Option<reqsign::AwsCredential>> {
+            use aws_credential_types::provider::ProvideCredentials as _;
+            use tokio::sync::OnceCell;
+
+            static DEFAULT_CREDENTIAL_CHAIN: OnceCell<
+                aws_config::default_provider::credentials::DefaultCredentialsChain,
+            > = OnceCell::const_new();
+
+            let chain = DEFAULT_CREDENTIAL_CHAIN
+                .get_or_init(|| aws_config::default_provider::credentials::DefaultCredentialsChain::builder().build())
+                .await;
+
+            let creds = chain.provide_credentials().await?;
+
+            Ok(Some(reqsign::AwsCredential {
+                access_key_id: creds.access_key_id().to_string(),
+                secret_access_key: creds.secret_access_key().to_string(),
+                session_token: creds.session_token().map(|s| s.to_string()),
+                expires_in: creds.expiry().map(|expiration| expiration.into()),
+            }))
+        }
+    }
+
+    const OPEN_DAL_S3_CREDENTIAL_LOADER: OpenDALS3CredentialLoader = OpenDALS3CredentialLoader {};
+
+    let url = Url::parse(path).map_err(|e| format!("Invalid S3 URL {path:?}: {e}"))?;
+    let bucket = url.host_str().ok_or_else(|| format!("Missing bucket name in data folder S3 URL {path:?}"))?;
+    let builder = opendal::services::S3::default()
+        .customized_credential_load(Box::new(OPEN_DAL_S3_CREDENTIAL_LOADER))
+        .enable_virtual_host_style()
+        .bucket(bucket)
+        .root(url.path())
+        .default_storage_class("INTELLIGENT_TIERING");
+
+    Ok(opendal::Operator::new(builder)?.finish())
+}
+
+pub enum PathType {
+    Data,
+    IconCache,
+    Attachments,
+    Sends,
+    RsaKey,
+}
+
 impl Config {
-    pub fn load() -> Result<Self, Error> {
+    pub async fn load() -> Result<Self, Error> {
         // Loading from env and file
         let _env = ConfigBuilder::from_env();
-        let _usr = ConfigBuilder::from_file(&CONFIG_FILE).unwrap_or_default();
+        let _usr = ConfigBuilder::from_file().await.unwrap_or_default();

         // Create merged config, config file overwrites env
         let mut _overrides = Vec::new();
@@ -1160,7 +1271,7 @@ impl Config {
         })
     }

-    pub fn update_config(&self, other: ConfigBuilder, ignore_non_editable: bool) -> Result<(), Error> {
+    pub async fn update_config(&self, other: ConfigBuilder, ignore_non_editable: bool) -> Result<(), Error> {
         // Remove default values
         //let builder = other.remove(&self.inner.read().unwrap()._env);
@@ -1192,20 +1303,19 @@ impl Config {
         }

         //Save to file
-        use std::{fs::File, io::Write};
-        let mut file = File::create(&*CONFIG_FILE)?;
-        file.write_all(config_str.as_bytes())?;
+        let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
+        operator.write(&CONFIG_FILENAME, config_str).await?;

         Ok(())
     }

-    fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
+    async fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
         let builder = {
             let usr = &self.inner.read().unwrap()._usr;
             let mut _overrides = Vec::new();
             usr.merge(&other, false, &mut _overrides)
         };
-        self.update_config(builder, false)
+        self.update_config(builder, false).await
     }

     /// Tests whether an email's domain is allowed. A domain is allowed if it
@@ -1247,8 +1357,9 @@ impl Config {
         }
     }

-    pub fn delete_user_config(&self) -> Result<(), Error> {
-        std::fs::remove_file(&*CONFIG_FILE)?;
+    pub async fn delete_user_config(&self) -> Result<(), Error> {
+        let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
+        operator.delete(&CONFIG_FILENAME).await?;

         // Empty user config
         let usr = ConfigBuilder::default();
@@ -1278,7 +1389,7 @@ impl Config {
         inner._enable_smtp && (inner.smtp_host.is_some() || inner.use_sendmail)
     }

-    pub fn get_duo_akey(&self) -> String {
+    pub async fn get_duo_akey(&self) -> String {
         if let Some(akey) = self._duo_akey() {
             akey
         } else {
@@ -1289,7 +1400,7 @@ impl Config {
                 _duo_akey: Some(akey_s.clone()),
                 ..Default::default()
             };
-            self.update_config_partial(builder).ok();
+            self.update_config_partial(builder).await.ok();

             akey_s
         }
@@ -1302,6 +1413,23 @@ impl Config {
         token.is_some() && !token.unwrap().trim().is_empty()
     }

+    pub fn opendal_operator_for_path_type(&self, path_type: PathType) -> Result<opendal::Operator, Error> {
+        let path = match path_type {
+            PathType::Data => self.data_folder(),
+            PathType::IconCache => self.icon_cache_folder(),
+            PathType::Attachments => self.attachments_folder(),
+            PathType::Sends => self.sends_folder(),
+            PathType::RsaKey => std::path::Path::new(&self.rsa_key_filename())
+                .parent()
+                .ok_or_else(|| std::io::Error::other("Failed to get directory of RSA key file"))?
+                .to_str()
+                .ok_or_else(|| std::io::Error::other("Failed to convert RSA key file directory to UTF-8 string"))?
+                .to_string(),
+        };
+
+        opendal_operator_for_path(&path)
+    }
+
     pub fn render_template<T: serde::ser::Serialize>(&self, name: &str, data: &T) -> Result<String, Error> {
         if self.reload_templates() {
             warn!("RELOADING TEMPLATES");

50  src/db/models/attachment.rs

@@ -1,11 +1,11 @@
-use std::io::ErrorKind;
+use std::time::Duration;

 use bigdecimal::{BigDecimal, ToPrimitive};
 use derive_more::{AsRef, Deref, Display};
 use serde_json::Value;

 use super::{CipherId, OrganizationId, UserId};
-use crate::CONFIG;
+use crate::{config::PathType, CONFIG};
 use macros::IdFromParam;

 db_object! {
@@ -41,24 +41,30 @@ impl Attachment {
     }

     pub fn get_file_path(&self) -> String {
-        format!("{}/{}/{}", CONFIG.attachments_folder(), self.cipher_uuid, self.id)
+        format!("{}/{}", self.cipher_uuid, self.id)
     }

-    pub fn get_url(&self, host: &str) -> String {
-        let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone()));
-        format!("{}/attachments/{}/{}?token={}", host, self.cipher_uuid, self.id, token)
+    pub async fn get_url(&self, host: &str) -> Result<String, crate::Error> {
+        let operator = CONFIG.opendal_operator_for_path_type(PathType::Attachments)?;
+
+        if operator.info().scheme() == opendal::Scheme::Fs {
+            let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone()));
+            Ok(format!("{}/attachments/{}/{}?token={}", host, self.cipher_uuid, self.id, token))
+        } else {
+            Ok(operator.presign_read(&self.get_file_path(), Duration::from_secs(5 * 60)).await?.uri().to_string())
+        }
     }

-    pub fn to_json(&self, host: &str) -> Value {
-        json!({
+    pub async fn to_json(&self, host: &str) -> Result<Value, crate::Error> {
+        Ok(json!({
             "id": self.id,
-            "url": self.get_url(host),
+            "url": self.get_url(host).await?,
             "fileName": self.file_name,
             "size": self.file_size.to_string(),
             "sizeName": crate::util::get_display_size(self.file_size),
             "key": self.akey,
             "object": "attachment"
-        })
+        }))
     }
 }
@@ -104,26 +110,26 @@ impl Attachment {
     pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
         db_run! { conn: {
-            let _: () = crate::util::retry(
+            crate::util::retry(
                 || diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn),
                 10,
             )
-            .map_res("Error deleting attachment")?;
+            .map(|_| ())
+            .map_res("Error deleting attachment")
+        }}?;

-            let file_path = &self.get_file_path();
+        let operator = CONFIG.opendal_operator_for_path_type(PathType::Attachments)?;
+        let file_path = self.get_file_path();

-            match std::fs::remove_file(file_path) {
-                // Ignore "file not found" errors. This can happen when the
-                // upstream caller has already cleaned up the file as part of
-                // its own error handling.
-                Err(e) if e.kind() == ErrorKind::NotFound => {
-                    debug!("File '{}' already deleted.", file_path);
-                    Ok(())
-                }
-                Err(e) => Err(e.into()),
-                _ => Ok(()),
-            }
-        }}
+        if let Err(e) = operator.delete(&file_path).await {
+            if e.kind() == opendal::ErrorKind::NotFound {
+                debug!("File '{file_path}' already deleted.");
+            } else {
+                return Err(e.into());
+            }
+        }
+
+        Ok(())
     }

     pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult {
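
Reviewer note: `get_url` branches on the operator's scheme, so local storage keeps the JWT-protected Vaultwarden route while a remote backend hands out a presigned URL instead of proxying the bytes. A standalone sketch of the presign call (only works on backends that support presigning, e.g. S3; Fs does not):

    use std::time::Duration;

    /// Produce a time-limited GET URL for an object, mirroring the
    /// five-minute expiry used by get_url above.
    async fn presigned_get(op: &opendal::Operator, path: &str) -> opendal::Result<String> {
        let req = op.presign_read(path, Duration::from_secs(5 * 60)).await?;
        Ok(req.uri().to_string())
    }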

18  src/db/models/cipher.rs

@@ -141,18 +141,28 @@ impl Cipher {
         cipher_sync_data: Option<&CipherSyncData>,
         sync_type: CipherSyncType,
         conn: &mut DbConn,
-    ) -> Value {
+    ) -> Result<Value, crate::Error> {
         use crate::util::{format_date, validate_and_format_date};

         let mut attachments_json: Value = Value::Null;
         if let Some(cipher_sync_data) = cipher_sync_data {
             if let Some(attachments) = cipher_sync_data.cipher_attachments.get(&self.uuid) {
-                attachments_json = attachments.iter().map(|c| c.to_json(host)).collect();
+                if !attachments.is_empty() {
+                    let mut attachments_json_vec = vec![];
+                    for attachment in attachments {
+                        attachments_json_vec.push(attachment.to_json(host).await?);
+                    }
+                    attachments_json = Value::Array(attachments_json_vec);
+                }
             }
         } else {
             let attachments = Attachment::find_by_cipher(&self.uuid, conn).await;
             if !attachments.is_empty() {
-                attachments_json = attachments.iter().map(|c| c.to_json(host)).collect()
+                let mut attachments_json_vec = vec![];
+                for attachment in attachments {
+                    attachments_json_vec.push(attachment.to_json(host).await?);
+                }
+                attachments_json = Value::Array(attachments_json_vec);
             }
         }
@@ -384,7 +394,7 @@ impl Cipher {
         };

         json_object[key] = type_data_json;
-        json_object
+        Ok(json_object)
     }

     pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<UserId> {
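
Reviewer note: the `.iter().map(|c| c.to_json(host)).collect()` one-liners had to become loops because `to_json` is now async and fallible, and a closure passed to `Iterator::map` can neither `.await` nor propagate with `?`. The general shape, reduced to a self-contained example (names are illustrative):

    async fn render_all(items: &[i32]) -> Result<Vec<String>, String> {
        let mut out = Vec::with_capacity(items.len());
        for item in items {
            out.push(render_one(*item).await?); // `?` propagates the first failure
        }
        Ok(out)
    }

    async fn render_one(item: i32) -> Result<String, String> {
        if item >= 0 {
            Ok(item.to_string())
        } else {
            Err(format!("negative item: {item}"))
        }
    }

Awaiting sequentially also keeps the single borrowed `DbConn` usable across iterations, which a concurrent combinator like `try_join_all` would not.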

5  src/db/models/send.rs

@@ -1,7 +1,7 @@
 use chrono::{NaiveDateTime, Utc};
 use serde_json::Value;

-use crate::util::LowerCase;
+use crate::{config::PathType, util::LowerCase, CONFIG};

 use super::{OrganizationId, User, UserId};
 use id::SendId;
@@ -226,7 +226,8 @@ impl Send {
         self.update_users_revision(conn).await;

         if self.atype == SendType::File as i32 {
-            std::fs::remove_dir_all(std::path::Path::new(&crate::CONFIG.sends_folder()).join(&self.uuid)).ok();
+            let operator = CONFIG.opendal_operator_for_path_type(PathType::Sends)?;
+            operator.remove_all(&self.uuid).await.ok();
         }

         db_run! { conn: {

3  src/error.rs

@@ -46,6 +46,7 @@ use jsonwebtoken::errors::Error as JwtErr;
 use lettre::address::AddressError as AddrErr;
 use lettre::error::Error as LettreErr;
 use lettre::transport::smtp::Error as SmtpErr;
+use opendal::Error as OpenDALErr;
 use openssl::error::ErrorStack as SSLErr;
 use regex::Error as RegexErr;
 use reqwest::Error as ReqErr;
@@ -95,6 +96,8 @@ make_error! {
     DieselCon(DieselConErr): _has_source, _api_error,
     Webauthn(WebauthnErr):   _has_source, _api_error,
+
+    OpenDAL(OpenDALErr):     _has_source, _api_error,
 }

 impl std::fmt::Debug for Error {

25  src/main.rs

@@ -61,7 +61,7 @@ mod util;
 use crate::api::core::two_factor::duo_oidc::purge_duo_contexts;
 use crate::api::purge_auth_requests;
 use crate::api::{WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS};
-pub use config::CONFIG;
+pub use config::{PathType, CONFIG};
 pub use error::{Error, MapResult};
 use rocket::data::{Limits, ToByteUnit};
 use std::sync::{atomic::Ordering, Arc};
@@ -75,16 +75,13 @@ async fn main() -> Result<(), Error> {
     let level = init_logging()?;

     check_data_folder().await;
-    auth::initialize_keys().unwrap_or_else(|e| {
+    auth::initialize_keys().await.unwrap_or_else(|e| {
         error!("Error creating private key '{}'\n{e:?}\nExiting Vaultwarden!", CONFIG.private_rsa_key());
         exit(1);
     });
     check_web_vault();

-    create_dir(&CONFIG.icon_cache_folder(), "icon cache");
     create_dir(&CONFIG.tmp_folder(), "tmp folder");
-    create_dir(&CONFIG.sends_folder(), "sends folder");
-    create_dir(&CONFIG.attachments_folder(), "attachments folder");

     let pool = create_db_pool().await;
     schedule_jobs(pool.clone());
@@ -467,6 +464,24 @@ fn create_dir(path: &str, description: &str) {
 async fn check_data_folder() {
     let data_folder = &CONFIG.data_folder();

+    if data_folder.starts_with("s3://") {
+        if let Err(e) = CONFIG
+            .opendal_operator_for_path_type(PathType::Data)
+            .unwrap_or_else(|e| {
+                error!("Failed to create S3 operator for data folder '{data_folder}': {e:?}");
+                exit(1);
+            })
+            .check()
+            .await
+        {
+            error!("Could not access S3 data folder '{data_folder}': {e:?}");
+            exit(1);
+        }
+        return;
+    }
+
     let path = Path::new(data_folder);
     if !path.exists() {
         error!("Data folder '{}' doesn't exist.", data_folder);

22  src/util.rs

@@ -16,7 +16,7 @@ use tokio::{
     time::{sleep, Duration},
 };

-use crate::CONFIG;
+use crate::{config::PathType, CONFIG};

 pub struct AppHeaders();

@@ -816,6 +816,26 @@ pub fn is_global(ip: std::net::IpAddr) -> bool {
     ip.is_global()
 }

+/// Saves a Rocket temporary file to the OpenDAL Operator at the given path.
+pub async fn save_temp_file(
+    path_type: PathType,
+    path: &str,
+    temp_file: rocket::fs::TempFile<'_>,
+    overwrite: bool,
+) -> Result<(), crate::Error> {
+    use futures::AsyncWriteExt as _;
+    use tokio_util::compat::TokioAsyncReadCompatExt as _;
+
+    let operator = CONFIG.opendal_operator_for_path_type(path_type)?;
+
+    let mut read_stream = temp_file.open().await?.compat();
+    let mut writer = operator.writer_with(path).if_not_exists(!overwrite).await?.into_futures_async_write();
+    futures::io::copy(&mut read_stream, &mut writer).await?;
+    writer.close().await?;
+
+    Ok(())
+}
+
 /// These are some tests to check that the implementations match
 /// The IPv4 can be all checked in 30 seconds or so and they are correct as of nightly 2023-07-17
 /// The IPV6 can't be checked in a reasonable time, so we check over a hundred billion random ones, so far correct
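
Reviewer note: `save_temp_file` is the shared sink for attachments and sends; callers pass `overwrite`, which is inverted into `if_not_exists(!overwrite)` so the no-overwrite case becomes an atomic conditional write instead of a racy existence check. A reduced sketch of the exclusive-create path using the same writer calls (the error boxing here is a simplification, and the backend must support conditional writes):

    /// Write bytes at `path`, failing with ConditionNotMatch if the
    /// object already exists (on backends with conditional-write support).
    async fn write_once(
        op: &opendal::Operator,
        path: &str,
        bytes: &[u8],
    ) -> Result<(), Box<dyn std::error::Error>> {
        use futures::AsyncWriteExt as _;

        let mut w = op.writer_with(path).if_not_exists(true).await?.into_futures_async_write();
        w.write_all(bytes).await?;
        w.close().await?;
        Ok(())
    }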
