
Merge 53d7713d62 into 2a18665288

pull/5626/merge
Chase Douglas, committed by GitHub, 2 weeks ago
commit c39696ff42
  1. Cargo.lock (927 changes)
  2. Cargo.toml (11 changes)
  3. build.rs (3 changes)
  4. src/api/admin.rs (8 changes)
  5. src/api/core/ciphers.rs (54 changes)
  6. src/api/core/emergency_access.rs (2 changes)
  7. src/api/core/organizations.rs (17 changes)
  8. src/api/core/sends.rs (63 changes)
  9. src/api/core/two_factor/duo.rs (2 changes)
  10. src/api/icons.rs (54 changes)
  11. src/auth.rs (73 changes)
  12. src/config.rs (162 changes)
  13. src/db/models/attachment.rs (58 changes)
  14. src/db/models/cipher.rs (18 changes)
  15. src/db/models/send.rs (5 changes)
  16. src/error.rs (3 changes)
  17. src/main.rs (25 changes)
  18. src/util.rs (22 changes)

Cargo.lock (927 changes)

File diff suppressed because it is too large

Cargo.toml (11 changes)

@@ -31,6 +31,7 @@ enable_mimalloc = ["dep:mimalloc"]
# You also need to set an env variable `QUERY_LOGGER=1` to fully activate this so you do not have to re-compile
# if you want to turn off the logging for a specific run.
query_logger = ["dep:diesel_logger"]
s3 = ["opendal/services-s3", "dep:aws-config", "dep:aws-credential-types", "dep:anyhow", "dep:reqsign"]
# Enable unstable features, requires nightly
# Currently only used to enable Rust's official IP support
@@ -72,6 +73,7 @@ dashmap = "6.1.0"
# Async futures
futures = "0.3.31"
tokio = { version = "1.43.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
tokio-util = { version = "0.7.14", features = ["compat"]}
# A generic serialization/deserialization framework
serde = { version = "1.0.218", features = ["derive"] }
@@ -174,6 +176,15 @@ rpassword = "7.3.1"
# Loading a dynamic CSS Stylesheet
grass_compiler = { version = "0.13.4", default-features = false }
# Files are accessed through Apache OpenDAL
opendal = { version = "0.51.2", features = ["services-fs"] }
# For retrieving AWS credentials, including temporary SSO credentials
anyhow = { version = "1.0.96", optional = true }
aws-config = { version = "1.5.12", features = ["behavior-version-latest"], optional = true }
aws-credential-types = { version = "1.2.1", optional = true }
reqsign = { version = "0.16.1", optional = true }
[patch.crates-io]
# Patch yubico to remove duplicate crates of older versions
yubico = { git = "https://github.com/BlackDex/yubico-rs", rev = "00df14811f58155c0f02e3ab10f1570ed3e115c6" }

build.rs (3 changes)

@@ -11,6 +11,8 @@ fn main() {
println!("cargo:rustc-cfg=postgresql");
#[cfg(feature = "query_logger")]
println!("cargo:rustc-cfg=query_logger");
#[cfg(feature = "s3")]
println!("cargo:rustc-cfg=s3");
#[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
compile_error!(
@@ -23,6 +25,7 @@ fn main() {
println!("cargo::rustc-check-cfg=cfg(mysql)");
println!("cargo::rustc-check-cfg=cfg(postgresql)");
println!("cargo::rustc-check-cfg=cfg(query_logger)");
println!("cargo::rustc-check-cfg=cfg(s3)");
// Rerun when these paths are changed.
// Someone could have checked-out a tag or specific commit, but no other files changed.
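
build.rs mirrors the Cargo feature into a rustc cfg, so the rest of the codebase gates S3-only code with `#[cfg(s3)]` rather than repeating `feature = "s3"`. A minimal sketch (not from this diff) of consuming that flag; building with `cargo build --features s3` flips the branch:

// Hedged sketch: consuming the `s3` cfg emitted by build.rs via
// println!("cargo:rustc-cfg=s3").
#[cfg(s3)]
fn storage_backend() -> &'static str {
    "s3"
}

#[cfg(not(s3))]
fn storage_backend() -> &'static str {
    "fs"
}

fn main() {
    println!("Compiled-in storage backend: {}", storage_backend());
}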

src/api/admin.rs (8 changes)

@@ -745,17 +745,17 @@ fn get_diagnostics_http(code: u16, _token: AdminToken) -> EmptyResult {
}
#[post("/config", format = "application/json", data = "<data>")]
fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
async fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
let data: ConfigBuilder = data.into_inner();
if let Err(e) = CONFIG.update_config(data, true) {
if let Err(e) = CONFIG.update_config(data, true).await {
err!(format!("Unable to save config: {e:?}"))
}
Ok(())
}
#[post("/config/delete", format = "application/json")]
fn delete_config(_token: AdminToken) -> EmptyResult {
if let Err(e) = CONFIG.delete_user_config() {
async fn delete_config(_token: AdminToken) -> EmptyResult {
if let Err(e) = CONFIG.delete_user_config().await {
err!(format!("Unable to delete config: {e:?}"))
}
Ok(())

src/api/core/ciphers.rs (54 changes)

@@ -11,10 +11,11 @@ use rocket::{
use serde_json::Value;
use crate::auth::ClientVersion;
use crate::util::NumberOrString;
use crate::util::{save_temp_file, NumberOrString};
use crate::{
api::{self, core::log_event, EmptyResult, JsonResult, Notify, PasswordOrOtpData, UpdateType},
auth::Headers,
config::PathType,
crypto,
db::{models::*, DbConn, DbPool},
CONFIG,
@@ -105,12 +106,7 @@ struct SyncData {
}
#[get("/sync?<data..>")]
async fn sync(
data: SyncData,
headers: Headers,
client_version: Option<ClientVersion>,
mut conn: DbConn,
) -> Json<Value> {
async fn sync(data: SyncData, headers: Headers, client_version: Option<ClientVersion>, mut conn: DbConn) -> JsonResult {
let user_json = headers.user.to_json(&mut conn).await;
// Get all ciphers which are visible by the user
@@ -134,7 +130,7 @@ async fn sync(
for c in ciphers {
ciphers_json.push(
c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &mut conn)
.await,
.await?,
);
}
@@ -159,7 +155,7 @@ async fn sync(
api::core::_get_eq_domains(headers, true).into_inner()
};
Json(json!({
Ok(Json(json!({
"profile": user_json,
"folders": folders_json,
"collections": collections_json,
@@ -168,11 +164,11 @@ async fn sync(
"domains": domains_json,
"sends": sends_json,
"object": "sync"
}))
})))
}
#[get("/ciphers")]
async fn get_ciphers(headers: Headers, mut conn: DbConn) -> Json<Value> {
async fn get_ciphers(headers: Headers, mut conn: DbConn) -> JsonResult {
let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &mut conn).await;
let cipher_sync_data = CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &mut conn).await;
@@ -180,15 +176,15 @@ async fn get_ciphers(headers: Headers, mut conn: DbConn) -> Json<Value> {
for c in ciphers {
ciphers_json.push(
c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &mut conn)
.await,
.await?,
);
}
Json(json!({
Ok(Json(json!({
"data": ciphers_json,
"object": "list",
"continuationToken": null
}))
})))
}
#[get("/ciphers/<cipher_id>")]
@@ -201,7 +197,7 @@ async fn get_cipher(cipher_id: CipherId, headers: Headers, mut conn: DbConn) ->
err!("Cipher is not owned by user")
}
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
}
#[get("/ciphers/<cipher_id>/admin")]
@@ -339,7 +335,7 @@ async fn post_ciphers(data: Json<CipherData>, headers: Headers, mut conn: DbConn
let mut cipher = Cipher::new(data.r#type, data.name.clone());
update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
}
/// Enforces the personal ownership policy on user-owned ciphers, if applicable.
@@ -676,7 +672,7 @@ async fn put_cipher(
update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
}
#[post("/ciphers/<cipher_id>/partial", data = "<data>")]
@@ -714,7 +710,7 @@ async fn put_cipher_partial(
// Update favorite
cipher.set_favorite(Some(data.favorite), &headers.user.uuid, &mut conn).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
}
#[derive(Deserialize)]
@@ -825,7 +821,7 @@ async fn post_collections_update(
)
.await;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
}
#[put("/ciphers/<cipher_id>/collections-admin", data = "<data>")]
@@ -1030,7 +1026,7 @@ async fn share_cipher_by_uuid(
update_cipher_from_data(&mut cipher, data.cipher, headers, Some(shared_to_collections), conn, nt, ut).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
}
/// v2 API for downloading an attachment. This just redirects the client to
@@ -1055,7 +1051,7 @@ async fn get_attachment(
}
match Attachment::find_by_id(&attachment_id, &mut conn).await {
Some(attachment) if cipher_id == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host))),
Some(attachment) if cipher_id == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host).await?)),
Some(_) => err!("Attachment doesn't belong to cipher"),
None => err!("Attachment doesn't exist"),
}
@@ -1116,7 +1112,7 @@ async fn post_attachment_v2(
"attachmentId": attachment_id,
"url": url,
"fileUploadType": FileUploadType::Direct as i32,
response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await,
response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?,
})))
}
@@ -1142,7 +1138,7 @@ async fn save_attachment(
mut conn: DbConn,
nt: Notify<'_>,
) -> Result<(Cipher, DbConn), crate::error::Error> {
let mut data = data.into_inner();
let data = data.into_inner();
let Some(size) = data.data.len().to_i64() else {
err!("Attachment data size overflow");
@@ -1269,13 +1265,7 @@ async fn save_attachment(
attachment.save(&mut conn).await.expect("Error saving attachment");
}
let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_id.as_ref());
let file_path = folder_path.join(file_id.as_ref());
tokio::fs::create_dir_all(&folder_path).await?;
if let Err(_err) = data.data.persist_to(&file_path).await {
data.data.move_copy_to(file_path).await?
}
save_temp_file(PathType::Attachments, &format!("{cipher_id}/{file_id}"), data.data, true).await?;
nt.send_cipher_update(
UpdateType::SyncCipherUpdate,
@@ -1342,7 +1332,7 @@ async fn post_attachment(
let (cipher, mut conn) = save_attachment(attachment, cipher_id, data, &headers, conn, nt).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
}
#[post("/ciphers/<cipher_id>/attachment-admin", format = "multipart/form-data", data = "<data>")]
@@ -1786,7 +1776,7 @@ async fn _restore_cipher_by_uuid(
.await;
}
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
}
async fn _restore_multiple_ciphers(
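
The recurring change in this file: `Cipher::to_json` is now async and fallible, because rendering attachment URLs can involve a presigned-URL request against the storage backend. Since `iter().map(..).collect()` cannot await or propagate `?` per element, the call sites switch to explicit loops. A hedged sketch of the pattern with a hypothetical stand-in type:

// Hypothetical stand-in illustrating the sync -> async+fallible migration.
struct Item(u32);

impl Item {
    async fn to_json(&self) -> Result<serde_json::Value, std::io::Error> {
        Ok(serde_json::json!({ "id": self.0 }))
    }
}

async fn items_json(items: &[Item]) -> Result<serde_json::Value, std::io::Error> {
    // `.map(..).collect()` cannot `.await?` per element, hence the
    // explicit loops with `?` that appear throughout this diff.
    let mut out = Vec::with_capacity(items.len());
    for item in items {
        out.push(item.to_json().await?);
    }
    Ok(serde_json::Value::Array(out))
}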

src/api/core/emergency_access.rs (2 changes)

@@ -582,7 +582,7 @@ async fn view_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut
CipherSyncType::User,
&mut conn,
)
.await,
.await?,
);
}

src/api/core/organizations.rs (17 changes)

@@ -901,21 +901,26 @@ async fn get_org_details(data: OrgIdData, headers: OrgMemberHeaders, mut conn: D
}
Ok(Json(json!({
"data": _get_org_details(&data.organization_id, &headers.host, &headers.user.uuid, &mut conn).await,
"data": _get_org_details(&data.organization_id, &headers.host, &headers.user.uuid, &mut conn).await?,
"object": "list",
"continuationToken": null,
})))
}
async fn _get_org_details(org_id: &OrganizationId, host: &str, user_id: &UserId, conn: &mut DbConn) -> Value {
async fn _get_org_details(
org_id: &OrganizationId,
host: &str,
user_id: &UserId,
conn: &mut DbConn,
) -> Result<Value, crate::Error> {
let ciphers = Cipher::find_by_org(org_id, conn).await;
let cipher_sync_data = CipherSyncData::new(user_id, CipherSyncType::Organization, conn).await;
let mut ciphers_json = Vec::with_capacity(ciphers.len());
for c in ciphers {
ciphers_json.push(c.to_json(host, user_id, Some(&cipher_sync_data), CipherSyncType::Organization, conn).await);
ciphers_json.push(c.to_json(host, user_id, Some(&cipher_sync_data), CipherSyncType::Organization, conn).await?);
}
json!(ciphers_json)
Ok(json!(ciphers_json))
}
#[derive(FromForm)]
@@ -3317,7 +3322,7 @@ async fn get_org_export(
"continuationToken": null,
},
"ciphers": {
"data": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await),
"data": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await?),
"object": "list",
"continuationToken": null,
}
@@ -3326,7 +3331,7 @@ async fn get_org_export(
// v2023.1.0 and newer response
Ok(Json(json!({
"collections": convert_json_key_lcase_first(_get_org_collections(&org_id, &mut conn).await),
"ciphers": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await),
"ciphers": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await?),
})))
}
}

src/api/core/sends.rs (63 changes)

@@ -1,4 +1,6 @@
use std::error::Error as _;
use std::path::Path;
use std::time::Duration;
use chrono::{DateTime, TimeDelta, Utc};
use num_traits::ToPrimitive;
@@ -11,8 +13,9 @@ use serde_json::Value;
use crate::{
api::{ApiResult, EmptyResult, JsonResult, Notify, UpdateType},
auth::{ClientIp, Headers, Host},
config::PathType,
db::{models::*, DbConn, DbPool},
util::NumberOrString,
util::{save_temp_file, NumberOrString},
CONFIG,
};
@@ -210,7 +213,7 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
let UploadData {
model,
mut data,
data,
} = data.into_inner();
let model = model.into_inner();
@@ -250,13 +253,8 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
}
let file_id = crate::crypto::generate_send_file_id();
let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
let file_path = folder_path.join(&file_id);
tokio::fs::create_dir_all(&folder_path).await?;
if let Err(_err) = data.persist_to(&file_path).await {
data.move_copy_to(file_path).await?
}
save_temp_file(PathType::Sends, &format!("{}/{file_id}", send.uuid), data, true).await?;
let mut data_value: Value = serde_json::from_str(&send.data)?;
if let Some(o) = data_value.as_object_mut() {
@@ -363,7 +361,7 @@ async fn post_send_file_v2_data(
) -> EmptyResult {
enforce_disable_send_policy(&headers, &mut conn).await?;
let mut data = data.into_inner();
let data = data.into_inner();
let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
err!("Send not found. Unable to save the file.", "Invalid send uuid or does not belong to user.")
@@ -406,19 +404,29 @@ async fn post_send_file_v2_data(
err!("Send file size does not match.", format!("Expected a file size of {} got {size}", send_data.size));
}
let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_id);
let file_path = folder_path.join(file_id);
let file_path = format!("{send_id}/{file_id}");
// Check if the file already exists, if that is the case do not overwrite it
if tokio::fs::metadata(&file_path).await.is_ok() {
err!("Send file has already been uploaded.", format!("File {file_path:?} already exists"))
}
save_temp_file(PathType::Sends, &file_path, data.data, false).await.map_err(|e| {
let was_file_exists_error = e
.source()
.and_then(|e| e.downcast_ref::<std::io::Error>())
.and_then(|e| e.get_ref())
.and_then(|e| e.downcast_ref::<opendal::Error>())
.map(|e| e.kind() == opendal::ErrorKind::ConditionNotMatch)
.unwrap_or(false);
tokio::fs::create_dir_all(&folder_path).await?;
if was_file_exists_error {
return crate::Error::new(
"Send file has already been uploaded.",
format!("File {file_path:?} already exists"),
);
}
if let Err(_err) = data.data.persist_to(&file_path).await {
data.data.move_copy_to(file_path).await?
}
crate::Error::new(
"Unexpected error while creating send file",
format!("Error while saving send file at path {file_path}: {e:?}"),
)
})?;
nt.send_send_update(
UpdateType::SyncSendCreate,
@@ -551,15 +559,26 @@ async fn post_access_file(
)
.await;
let token_claims = crate::auth::generate_send_claims(&send_id, &file_id);
let token = crate::auth::encode_jwt(&token_claims);
Ok(Json(json!({
"object": "send-fileDownload",
"id": file_id,
"url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
"url": download_url(&host, &send_id, &file_id).await?,
})))
}
async fn download_url(host: &Host, send_id: &SendId, file_id: &SendFileId) -> Result<String, crate::Error> {
let operator = CONFIG.opendal_operator_for_path_type(PathType::Sends)?;
if operator.info().scheme() == opendal::Scheme::Fs {
let token_claims = crate::auth::generate_send_claims(send_id, file_id);
let token = crate::auth::encode_jwt(&token_claims);
Ok(format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token))
} else {
Ok(operator.presign_read(&format!("{send_id}/{file_id}"), Duration::from_secs(5 * 60)).await?.uri().to_string())
}
}
#[get("/sends/<send_id>/<file_id>?<t>")]
async fn download_send(send_id: SendId, file_id: SendFileId, t: &str) -> Option<NamedFile> {
if let Ok(claims) = crate::auth::decode_send(t) {
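
The new `download_url` helper keeps the JWT-protected local route when the Sends operator is backed by the filesystem (which cannot presign) and otherwise hands out a short-lived presigned URL. A minimal sketch of the presign half, assuming an operator already built for PathType::Sends:

use std::time::Duration;

// Hedged sketch: a five-minute presigned GET URL, as in download_url().
async fn presigned_get(operator: &opendal::Operator, path: &str) -> Result<String, opendal::Error> {
    let req = operator.presign_read(path, Duration::from_secs(5 * 60)).await?;
    Ok(req.uri().to_string())
}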

src/api/core/two_factor/duo.rs (2 changes)

@@ -258,7 +258,7 @@ pub(crate) async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiRes
}
.map_res("Can't fetch Duo Keys")?;
Ok((data.ik, data.sk, CONFIG.get_duo_akey(), data.host))
Ok((data.ik, data.sk, CONFIG.get_duo_akey().await, data.host))
}
pub async fn generate_duo_signature(email: &str, conn: &mut DbConn) -> ApiResult<(String, String)> {

src/api/icons.rs (54 changes)

@@ -14,14 +14,11 @@ use reqwest::{
Client, Response,
};
use rocket::{http::ContentType, response::Redirect, Route};
use tokio::{
fs::{create_dir_all, remove_file, symlink_metadata, File},
io::{AsyncReadExt, AsyncWriteExt},
};
use html5gum::{Emitter, HtmlString, Readable, StringReader, Tokenizer};
use crate::{
config::PathType,
error::Error,
http_client::{get_reqwest_client_builder, should_block_address, CustomHttpClientError},
util::Cached,
@@ -159,7 +156,7 @@ fn is_valid_domain(domain: &str) -> bool {
}
async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain);
let path = format!("{domain}.png");
// Check for expiration of negatively cached copy
if icon_is_negcached(&path).await {
@@ -181,7 +178,7 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
// Get the icon, or None in case of error
match download_icon(domain).await {
Ok((icon, icon_type)) => {
save_icon(&path, &icon).await;
save_icon(&path, icon.to_vec()).await;
Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
}
Err(e) => {
@@ -194,7 +191,7 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
warn!("Unable to download icon: {:?}", e);
let miss_indicator = path + ".miss";
save_icon(&miss_indicator, &[]).await;
save_icon(&miss_indicator, vec![]).await;
None
}
}
@@ -207,11 +204,9 @@ async fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
}
// Try to read the cached icon, and return it if it exists
if let Ok(mut f) = File::open(path).await {
let mut buffer = Vec::new();
if f.read_to_end(&mut buffer).await.is_ok() {
return Some(buffer);
if let Ok(operator) = CONFIG.opendal_operator_for_path_type(PathType::IconCache) {
if let Ok(buf) = operator.read(path).await {
return Some(buf.to_vec());
}
}
@@ -219,9 +214,11 @@ async fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
}
async fn file_is_expired(path: &str, ttl: u64) -> Result<bool, Error> {
let meta = symlink_metadata(path).await?;
let modified = meta.modified()?;
let age = SystemTime::now().duration_since(modified)?;
let operator = CONFIG.opendal_operator_for_path_type(PathType::IconCache)?;
let meta = operator.stat(path).await?;
let modified =
meta.last_modified().ok_or_else(|| std::io::Error::other(format!("No last modified time for `{path}`")))?;
let age = SystemTime::now().duration_since(modified.into())?;
Ok(ttl > 0 && ttl <= age.as_secs())
}
@@ -233,8 +230,13 @@ async fn icon_is_negcached(path: &str) -> bool {
match expired {
// No longer negatively cached, drop the marker
Ok(true) => {
if let Err(e) = remove_file(&miss_indicator).await {
error!("Could not remove negative cache indicator for icon {:?}: {:?}", path, e);
match CONFIG.opendal_operator_for_path_type(PathType::IconCache) {
Ok(operator) => {
if let Err(e) = operator.delete(&miss_indicator).await {
error!("Could not remove negative cache indicator for icon {:?}: {:?}", path, e);
}
}
Err(e) => error!("Could not remove negative cache indicator for icon {:?}: {:?}", path, e),
}
false
}
@@ -568,17 +570,17 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
Ok((buffer, icon_type))
}
async fn save_icon(path: &str, icon: &[u8]) {
match File::create(path).await {
Ok(mut f) => {
f.write_all(icon).await.expect("Error writing icon file");
}
Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
create_dir_all(&CONFIG.icon_cache_folder()).await.expect("Error creating icon cache folder");
}
async fn save_icon(path: &str, icon: Vec<u8>) {
let operator = match CONFIG.opendal_operator_for_path_type(PathType::IconCache) {
Ok(operator) => operator,
Err(e) => {
warn!("Unable to save icon: {:?}", e);
warn!("Failed to get OpenDAL operator while saving icon: {e}");
return;
}
};
if let Err(e) = operator.write(path, icon).await {
warn!("Unable to save icon: {e:?}");
}
}
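
Icon cache entries are now relative keys such as `example.com.png` (plus `.miss` markers for negative caching), resolved through the IconCache operator instead of absolute paths under `icon_cache_folder()`. A small sketch of the resulting read-or-miss flow, under that assumption:

// Hedged sketch: read a cached icon by key, treating NotFound as a cache miss.
async fn cached_icon(
    operator: &opendal::Operator,
    domain: &str,
) -> Result<Option<Vec<u8>>, opendal::Error> {
    match operator.read(&format!("{domain}.png")).await {
        Ok(buf) => Ok(Some(buf.to_vec())),
        Err(e) if e.kind() == opendal::ErrorKind::NotFound => Ok(None),
        Err(e) => Err(e),
    }
}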

src/auth.rs (73 changes)

@@ -7,16 +7,14 @@ use once_cell::sync::{Lazy, OnceCell};
use openssl::rsa::Rsa;
use serde::de::DeserializeOwned;
use serde::ser::Serialize;
use std::{
env,
fs::File,
io::{Read, Write},
net::IpAddr,
};
use crate::db::models::{
AttachmentId, CipherId, CollectionId, DeviceId, EmergencyAccessId, MembershipId, OrgApiKeyId, OrganizationId,
SendFileId, SendId, UserId,
use std::{env, net::IpAddr};
use crate::{
config::PathType,
db::models::{
AttachmentId, CipherId, CollectionId, DeviceId, EmergencyAccessId, MembershipId, OrgApiKeyId, OrganizationId,
SendFileId, SendId, UserId,
},
};
use crate::{error::Error, CONFIG};
@@ -40,37 +38,44 @@ static JWT_REGISTER_VERIFY_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|regis
static PRIVATE_RSA_KEY: OnceCell<EncodingKey> = OnceCell::new();
static PUBLIC_RSA_KEY: OnceCell<DecodingKey> = OnceCell::new();
pub fn initialize_keys() -> Result<(), Error> {
fn read_key(create_if_missing: bool) -> Result<(Rsa<openssl::pkey::Private>, Vec<u8>), Error> {
let mut priv_key_buffer = Vec::with_capacity(2048);
pub async fn initialize_keys() -> Result<(), Error> {
async fn read_key(create_if_missing: bool) -> Result<(Rsa<openssl::pkey::Private>, Vec<u8>), std::io::Error> {
use std::io::{Error, ErrorKind};
let mut priv_key_file = File::options()
.create(create_if_missing)
.truncate(false)
.read(true)
.write(create_if_missing)
.open(CONFIG.private_rsa_key())?;
let rsa_key_filename = std::path::PathBuf::from(CONFIG.private_rsa_key())
.file_name()
.ok_or_else(|| Error::other("Private RSA key path missing filename"))?
.to_str()
.ok_or_else(|| Error::other("Private RSA key path filename is not valid UTF-8"))?
.to_string();
#[allow(clippy::verbose_file_reads)]
let bytes_read = priv_key_file.read_to_end(&mut priv_key_buffer)?;
let operator = CONFIG.opendal_operator_for_path_type(PathType::RsaKey).map_err(Error::other)?;
let rsa_key = if bytes_read > 0 {
Rsa::private_key_from_pem(&priv_key_buffer[..bytes_read])?
} else if create_if_missing {
// Only create the key if the file doesn't exist or is empty
let rsa_key = Rsa::generate(2048)?;
priv_key_buffer = rsa_key.private_key_to_pem()?;
priv_key_file.write_all(&priv_key_buffer)?;
info!("Private key '{}' created correctly", CONFIG.private_rsa_key());
rsa_key
} else {
err!("Private key does not exist or invalid format", CONFIG.private_rsa_key());
let priv_key_buffer = match operator.read(&rsa_key_filename).await {
Ok(buffer) => Some(buffer),
Err(e) if e.kind() == opendal::ErrorKind::NotFound && create_if_missing => None,
Err(e) if e.kind() == opendal::ErrorKind::NotFound => {
return Err(Error::new(ErrorKind::NotFound, "Private key not found"))
}
Err(e) => return Err(Error::new(ErrorKind::InvalidData, format!("Error reading private key: {e}"))),
};
Ok((rsa_key, priv_key_buffer))
if let Some(priv_key_buffer) = priv_key_buffer {
Ok((Rsa::private_key_from_pem(priv_key_buffer.to_vec().as_slice())?, priv_key_buffer.to_vec()))
} else {
let rsa_key = Rsa::generate(2048)?;
let priv_key_buffer = rsa_key.private_key_to_pem()?;
operator.write(&rsa_key_filename, priv_key_buffer).await?;
info!("Private key '{}' created correctly", CONFIG.private_rsa_key());
Err(Error::new(ErrorKind::NotFound, "Private key created, forcing attempt to read it again"))
}
}
let (priv_key, priv_key_buffer) = read_key(true).or_else(|_| read_key(false))?;
let (priv_key, priv_key_buffer) = match read_key(true).await {
Ok(key) => key,
Err(e) if e.kind() == std::io::ErrorKind::NotFound => read_key(false).await?,
Err(e) => return Err(e.into()),
};
let pub_key_buffer = priv_key.public_key_to_pem()?;
let enc = EncodingKey::from_rsa_pem(&priv_key_buffer)?;
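
Key bootstrap is now read-first: a NotFound from the backend triggers generate-and-write, and the deliberate NotFound returned afterwards forces the caller into its second `read_key(false)` pass, so the key that is served always round-trips through storage. A condensed sketch of that shape (a hypothetical helper, not the PR's exact signature):

// Hedged sketch of a create-then-reread bootstrap over OpenDAL.
async fn read_or_create(
    operator: &opendal::Operator,
    path: &str,
    generate: impl Fn() -> Vec<u8>,
) -> Result<Vec<u8>, opendal::Error> {
    match operator.read(path).await {
        Ok(buf) => Ok(buf.to_vec()),
        Err(e) if e.kind() == opendal::ErrorKind::NotFound => {
            operator.write(path, generate()).await?;
            // Read back so callers always serve the stored bytes.
            Ok(operator.read(path).await?.to_vec())
        }
        Err(e) => Err(e),
    }
}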

src/config.rs (162 changes)

@@ -1,9 +1,10 @@
use std::{
collections::HashMap,
env::consts::EXE_SUFFIX,
process::exit,
sync::{
atomic::{AtomicBool, Ordering},
RwLock,
LazyLock, Mutex, RwLock,
},
};
@@ -22,10 +23,32 @@ static CONFIG_FILE: Lazy<String> = Lazy::new(|| {
get_env("CONFIG_FILE").unwrap_or_else(|| format!("{data_folder}/config.json"))
});
static CONFIG_FILE_PARENT_DIR: LazyLock<String> = LazyLock::new(|| {
let path = std::path::PathBuf::from(&*CONFIG_FILE);
path.parent().unwrap_or(std::path::Path::new("data")).to_str().unwrap_or("data").to_string()
});
static CONFIG_FILENAME: LazyLock<String> = LazyLock::new(|| {
let path = std::path::PathBuf::from(&*CONFIG_FILE);
path.file_name().unwrap_or(std::ffi::OsStr::new("config.json")).to_str().unwrap_or("config.json").to_string()
});
pub static SKIP_CONFIG_VALIDATION: AtomicBool = AtomicBool::new(false);
pub static CONFIG: Lazy<Config> = Lazy::new(|| {
Config::load().unwrap_or_else(|e| {
std::thread::spawn(|| {
let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap_or_else(|e| {
println!("Error loading config:\n {e:?}\n");
exit(12)
});
rt.block_on(Config::load()).unwrap_or_else(|e| {
println!("Error loading config:\n {e:?}\n");
exit(12)
})
})
.join()
.unwrap_or_else(|e| {
println!("Error loading config:\n {e:?}\n");
exit(12)
})
@@ -110,9 +133,12 @@ macro_rules! make_config {
builder
}
fn from_file(path: &str) -> Result<Self, Error> {
let config_str = std::fs::read_to_string(path)?;
println!("[INFO] Using saved config from `{path}` for configuration.\n");
async fn from_file() -> Result<Self, Error> {
let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
let config_bytes = operator.read(&CONFIG_FILENAME).await?;
let config_str = String::from_utf8(config_bytes.to_vec())
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()))?;
println!("[INFO] Using saved config from `{}` for configuration.\n", *CONFIG_FILE);
serde_json::from_str(&config_str).map_err(Into::into)
}
@@ -1132,11 +1158,96 @@ fn smtp_convert_deprecated_ssl_options(smtp_ssl: Option<bool>, smtp_explicit_tls
"starttls".to_string()
}
fn opendal_operator_for_path(path: &str) -> Result<opendal::Operator, Error> {
// Cache of previously built operators by path
static OPERATORS_BY_PATH: LazyLock<Mutex<HashMap<String, opendal::Operator>>> =
LazyLock::new(|| Mutex::new(HashMap::new()));
let mut operators_by_path =
OPERATORS_BY_PATH.lock().map_err(|e| format!("Failed to lock OpenDAL operators cache: {e}"))?;
if let Some(operator) = operators_by_path.get(path) {
return Ok(operator.clone());
}
let operator = if path.starts_with("s3://") {
#[cfg(not(s3))]
return Err(opendal::Error::new(opendal::ErrorKind::ConfigInvalid, "S3 support is not enabled").into());
#[cfg(s3)]
opendal_s3_operator_for_path(path)?
} else {
let builder = opendal::services::Fs::default().root(path);
opendal::Operator::new(builder)?.finish()
};
operators_by_path.insert(path.to_string(), operator.clone());
Ok(operator)
}
#[cfg(s3)]
fn opendal_s3_operator_for_path(path: &str) -> Result<opendal::Operator, Error> {
// This is a custom AWS credential loader that uses the official AWS Rust
// SDK config crate to load credentials. This ensures maximum compatibility
// with AWS credential configurations. For example, OpenDAL doesn't support
// AWS SSO temporary credentials yet.
struct OpenDALS3CredentialLoader {}
#[async_trait]
impl reqsign::AwsCredentialLoad for OpenDALS3CredentialLoader {
async fn load_credential(&self, _client: reqwest::Client) -> anyhow::Result<Option<reqsign::AwsCredential>> {
use aws_credential_types::provider::ProvideCredentials as _;
use tokio::sync::OnceCell;
static DEFAULT_CREDENTIAL_CHAIN: OnceCell<
aws_config::default_provider::credentials::DefaultCredentialsChain,
> = OnceCell::const_new();
let chain = DEFAULT_CREDENTIAL_CHAIN
.get_or_init(|| aws_config::default_provider::credentials::DefaultCredentialsChain::builder().build())
.await;
let creds = chain.provide_credentials().await?;
Ok(Some(reqsign::AwsCredential {
access_key_id: creds.access_key_id().to_string(),
secret_access_key: creds.secret_access_key().to_string(),
session_token: creds.session_token().map(|s| s.to_string()),
expires_in: creds.expiry().map(|expiration| expiration.into()),
}))
}
}
const OPEN_DAL_S3_CREDENTIAL_LOADER: OpenDALS3CredentialLoader = OpenDALS3CredentialLoader {};
let url = Url::parse(path).map_err(|e| format!("Invalid path S3 URL path {path:?}: {e}"))?;
let bucket = url.host_str().ok_or_else(|| format!("Missing Bucket name in data folder S3 URL {path:?}"))?;
let builder = opendal::services::S3::default()
.customized_credential_load(Box::new(OPEN_DAL_S3_CREDENTIAL_LOADER))
.enable_virtual_host_style()
.bucket(bucket)
.root(url.path())
.default_storage_class("INTELLIGENT_TIERING");
Ok(opendal::Operator::new(builder)?.finish())
}
pub enum PathType {
Data,
IconCache,
Attachments,
Sends,
RsaKey,
}
impl Config {
pub fn load() -> Result<Self, Error> {
pub async fn load() -> Result<Self, Error> {
// Loading from env and file
let _env = ConfigBuilder::from_env();
let _usr = ConfigBuilder::from_file(&CONFIG_FILE).unwrap_or_default();
let _usr = ConfigBuilder::from_file().await.unwrap_or_default();
// Create merged config, config file overwrites env
let mut _overrides = Vec::new();
@@ -1160,7 +1271,7 @@ impl Config {
})
}
pub fn update_config(&self, other: ConfigBuilder, ignore_non_editable: bool) -> Result<(), Error> {
pub async fn update_config(&self, other: ConfigBuilder, ignore_non_editable: bool) -> Result<(), Error> {
// Remove default values
//let builder = other.remove(&self.inner.read().unwrap()._env);
@@ -1192,20 +1303,19 @@ }
}
//Save to file
use std::{fs::File, io::Write};
let mut file = File::create(&*CONFIG_FILE)?;
file.write_all(config_str.as_bytes())?;
let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
operator.write(&CONFIG_FILENAME, config_str).await?;
Ok(())
}
fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
async fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
let builder = {
let usr = &self.inner.read().unwrap()._usr;
let mut _overrides = Vec::new();
usr.merge(&other, false, &mut _overrides)
};
self.update_config(builder, false)
self.update_config(builder, false).await
}
/// Tests whether an email's domain is allowed. A domain is allowed if it
@@ -1247,8 +1357,9 @@ impl Config {
}
}
pub fn delete_user_config(&self) -> Result<(), Error> {
std::fs::remove_file(&*CONFIG_FILE)?;
pub async fn delete_user_config(&self) -> Result<(), Error> {
let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
operator.delete(&CONFIG_FILENAME).await?;
// Empty user config
let usr = ConfigBuilder::default();
@@ -1278,7 +1389,7 @@ impl Config {
inner._enable_smtp && (inner.smtp_host.is_some() || inner.use_sendmail)
}
pub fn get_duo_akey(&self) -> String {
pub async fn get_duo_akey(&self) -> String {
if let Some(akey) = self._duo_akey() {
akey
} else {
@ -1289,7 +1400,7 @@ impl Config {
_duo_akey: Some(akey_s.clone()),
..Default::default()
};
self.update_config_partial(builder).ok();
self.update_config_partial(builder).await.ok();
akey_s
}
@ -1302,6 +1413,23 @@ impl Config {
token.is_some() && !token.unwrap().trim().is_empty()
}
pub fn opendal_operator_for_path_type(&self, path_type: PathType) -> Result<opendal::Operator, Error> {
let path = match path_type {
PathType::Data => self.data_folder(),
PathType::IconCache => self.icon_cache_folder(),
PathType::Attachments => self.attachments_folder(),
PathType::Sends => self.sends_folder(),
PathType::RsaKey => std::path::Path::new(&self.rsa_key_filename())
.parent()
.ok_or_else(|| std::io::Error::other("Failed to get directory of RSA key file"))?
.to_str()
.ok_or_else(|| std::io::Error::other("Failed to convert RSA key file directory to UTF-8 string"))?
.to_string(),
};
opendal_operator_for_path(&path)
}
pub fn render_template<T: serde::ser::Serialize>(&self, name: &str, data: &T) -> Result<String, Error> {
if self.reload_templates() {
warn!("RELOADING TEMPLATES");
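
`opendal_operator_for_path` memoizes one operator per root path; OpenDAL operators are reference-counted handles, so handing out clones from the cache is cheap. A trimmed sketch of the cache plus the filesystem case (the S3 branch and its custom credential loader omitted):

use std::collections::HashMap;
use std::sync::{LazyLock, Mutex};

// Hedged sketch of the per-path operator cache used above.
static OPERATORS: LazyLock<Mutex<HashMap<String, opendal::Operator>>> =
    LazyLock::new(|| Mutex::new(HashMap::new()));

fn fs_operator(root: &str) -> Result<opendal::Operator, opendal::Error> {
    let mut cache = OPERATORS.lock().expect("operator cache poisoned");
    if let Some(op) = cache.get(root) {
        return Ok(op.clone());
    }
    let op = opendal::Operator::new(opendal::services::Fs::default().root(root))?.finish();
    cache.insert(root.to_string(), op.clone());
    Ok(op)
}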

src/db/models/attachment.rs (58 changes)

@@ -1,11 +1,11 @@
use std::io::ErrorKind;
use std::time::Duration;
use bigdecimal::{BigDecimal, ToPrimitive};
use derive_more::{AsRef, Deref, Display};
use serde_json::Value;
use super::{CipherId, OrganizationId, UserId};
use crate::CONFIG;
use crate::{config::PathType, CONFIG};
use macros::IdFromParam;
db_object! {
@@ -41,24 +41,30 @@ impl Attachment {
}
pub fn get_file_path(&self) -> String {
format!("{}/{}/{}", CONFIG.attachments_folder(), self.cipher_uuid, self.id)
format!("{}/{}", self.cipher_uuid, self.id)
}
pub fn get_url(&self, host: &str) -> String {
let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone()));
format!("{}/attachments/{}/{}?token={}", host, self.cipher_uuid, self.id, token)
pub async fn get_url(&self, host: &str) -> Result<String, crate::Error> {
let operator = CONFIG.opendal_operator_for_path_type(PathType::Attachments)?;
if operator.info().scheme() == opendal::Scheme::Fs {
let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone()));
Ok(format!("{}/attachments/{}/{}?token={}", host, self.cipher_uuid, self.id, token))
} else {
Ok(operator.presign_read(&self.get_file_path(), Duration::from_secs(5 * 60)).await?.uri().to_string())
}
}
pub fn to_json(&self, host: &str) -> Value {
json!({
pub async fn to_json(&self, host: &str) -> Result<Value, crate::Error> {
Ok(json!({
"id": self.id,
"url": self.get_url(host),
"url": self.get_url(host).await?,
"fileName": self.file_name,
"size": self.file_size.to_string(),
"sizeName": crate::util::get_display_size(self.file_size),
"key": self.akey,
"object": "attachment"
})
}))
}
}
@@ -104,26 +110,26 @@ impl Attachment {
pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
db_run! { conn: {
let _: () = crate::util::retry(
crate::util::retry(
|| diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn),
10,
)
.map_res("Error deleting attachment")?;
let file_path = &self.get_file_path();
match std::fs::remove_file(file_path) {
// Ignore "file not found" errors. This can happen when the
// upstream caller has already cleaned up the file as part of
// its own error handling.
Err(e) if e.kind() == ErrorKind::NotFound => {
debug!("File '{}' already deleted.", file_path);
Ok(())
}
Err(e) => Err(e.into()),
_ => Ok(()),
.map(|_| ())
.map_res("Error deleting attachment")
}}?;
let operator = CONFIG.opendal_operator_for_path_type(PathType::Attachments)?;
let file_path = self.get_file_path();
if let Err(e) = operator.delete(&file_path).await {
if e.kind() == opendal::ErrorKind::NotFound {
debug!("File '{file_path}' already deleted.");
} else {
return Err(e.into());
}
}}
}
Ok(())
}
pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult {

src/db/models/cipher.rs (18 changes)

@@ -141,18 +141,28 @@ impl Cipher {
cipher_sync_data: Option<&CipherSyncData>,
sync_type: CipherSyncType,
conn: &mut DbConn,
) -> Value {
) -> Result<Value, crate::Error> {
use crate::util::{format_date, validate_and_format_date};
let mut attachments_json: Value = Value::Null;
if let Some(cipher_sync_data) = cipher_sync_data {
if let Some(attachments) = cipher_sync_data.cipher_attachments.get(&self.uuid) {
attachments_json = attachments.iter().map(|c| c.to_json(host)).collect();
if !attachments.is_empty() {
let mut attachments_json_vec = vec![];
for attachment in attachments {
attachments_json_vec.push(attachment.to_json(host).await?);
}
attachments_json = Value::Array(attachments_json_vec);
}
}
} else {
let attachments = Attachment::find_by_cipher(&self.uuid, conn).await;
if !attachments.is_empty() {
attachments_json = attachments.iter().map(|c| c.to_json(host)).collect()
let mut attachments_json_vec = vec![];
for attachment in attachments {
attachments_json_vec.push(attachment.to_json(host).await?);
}
attachments_json = Value::Array(attachments_json_vec);
}
}
@@ -384,7 +394,7 @@ };
};
json_object[key] = type_data_json;
json_object
Ok(json_object)
}
pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<UserId> {

src/db/models/send.rs (5 changes)

@@ -1,7 +1,7 @@
use chrono::{NaiveDateTime, Utc};
use serde_json::Value;
use crate::util::LowerCase;
use crate::{config::PathType, util::LowerCase, CONFIG};
use super::{OrganizationId, User, UserId};
use id::SendId;
@@ -226,7 +226,8 @@ impl Send {
self.update_users_revision(conn).await;
if self.atype == SendType::File as i32 {
std::fs::remove_dir_all(std::path::Path::new(&crate::CONFIG.sends_folder()).join(&self.uuid)).ok();
let operator = CONFIG.opendal_operator_for_path_type(PathType::Sends)?;
operator.remove_all(&self.uuid).await.ok();
}
db_run! { conn: {

src/error.rs (3 changes)

@@ -46,6 +46,7 @@ use jsonwebtoken::errors::Error as JwtErr;
use lettre::address::AddressError as AddrErr;
use lettre::error::Error as LettreErr;
use lettre::transport::smtp::Error as SmtpErr;
use opendal::Error as OpenDALErr;
use openssl::error::ErrorStack as SSLErr;
use regex::Error as RegexErr;
use reqwest::Error as ReqErr;
@@ -95,6 +96,8 @@ make_error! {
DieselCon(DieselConErr): _has_source, _api_error,
Webauthn(WebauthnErr): _has_source, _api_error,
OpenDAL(OpenDALErr): _has_source, _api_error,
}
impl std::fmt::Debug for Error {

src/main.rs (25 changes)

@@ -61,7 +61,7 @@ mod util;
use crate::api::core::two_factor::duo_oidc::purge_duo_contexts;
use crate::api::purge_auth_requests;
use crate::api::{WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS};
pub use config::CONFIG;
pub use config::{PathType, CONFIG};
pub use error::{Error, MapResult};
use rocket::data::{Limits, ToByteUnit};
use std::sync::{atomic::Ordering, Arc};
@@ -75,16 +75,13 @@ async fn main() -> Result<(), Error> {
let level = init_logging()?;
check_data_folder().await;
auth::initialize_keys().unwrap_or_else(|e| {
auth::initialize_keys().await.unwrap_or_else(|e| {
error!("Error creating private key '{}'\n{e:?}\nExiting Vaultwarden!", CONFIG.private_rsa_key());
exit(1);
});
check_web_vault();
create_dir(&CONFIG.icon_cache_folder(), "icon cache");
create_dir(&CONFIG.tmp_folder(), "tmp folder");
create_dir(&CONFIG.sends_folder(), "sends folder");
create_dir(&CONFIG.attachments_folder(), "attachments folder");
let pool = create_db_pool().await;
schedule_jobs(pool.clone());
@@ -467,6 +464,24 @@ fn create_dir(path: &str, description: &str) {
async fn check_data_folder() {
let data_folder = &CONFIG.data_folder();
if data_folder.starts_with("s3://") {
if let Err(e) = CONFIG
.opendal_operator_for_path_type(PathType::Data)
.unwrap_or_else(|e| {
error!("Failed to create S3 operator for data folder '{data_folder}': {e:?}");
exit(1);
})
.check()
.await
{
error!("Could not access S3 data folder '{data_folder}': {e:?}");
exit(1);
}
return;
}
let path = Path::new(data_folder);
if !path.exists() {
error!("Data folder '{}' doesn't exist.", data_folder);
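
For an `s3://` data folder, startup validation goes through OpenDAL's `check()`, which issues a lightweight request to confirm the backend is reachable and credentials resolve, replacing the local-directory existence checks. A hedged sketch:

// Hedged sketch: fail fast when the configured backend is unreachable.
async fn verify_backend(operator: &opendal::Operator) {
    if let Err(e) = operator.check().await {
        eprintln!("Storage backend unreachable: {e:?}");
        std::process::exit(1);
    }
}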

src/util.rs (22 changes)

@@ -16,7 +16,7 @@ use tokio::{
time::{sleep, Duration},
};
use crate::CONFIG;
use crate::{config::PathType, CONFIG};
pub struct AppHeaders();
@@ -816,6 +816,26 @@ pub fn is_global(ip: std::net::IpAddr) -> bool {
ip.is_global()
}
/// Saves a Rocket temporary file to the OpenDAL Operator at the given path.
pub async fn save_temp_file(
path_type: PathType,
path: &str,
temp_file: rocket::fs::TempFile<'_>,
overwrite: bool,
) -> Result<(), crate::Error> {
use futures::AsyncWriteExt as _;
use tokio_util::compat::TokioAsyncReadCompatExt as _;
let operator = CONFIG.opendal_operator_for_path_type(path_type)?;
let mut read_stream = temp_file.open().await?.compat();
let mut writer = operator.writer_with(path).if_not_exists(!overwrite).await?.into_futures_async_write();
futures::io::copy(&mut read_stream, &mut writer).await?;
writer.close().await?;
Ok(())
}
/// These are some tests to check that the implementations match
/// The IPv4 can be all checked in 30 seconds or so and they are correct as of nightly 2023-07-17
/// The IPV6 can't be checked in a reasonable time, so we check over a hundred billion random ones, so far correct
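
`save_temp_file` adapts Rocket's Tokio-based temp file to OpenDAL's futures-based writer through `tokio_util`'s compat layer, and `if_not_exists(!overwrite)` pushes the duplicate check down to the backend: writing an existing object fails with `ErrorKind::ConditionNotMatch`, which `post_send_file_v2_data` detects by downcasting the error chain. A sketch of that conditional write with an in-memory payload:

use futures::AsyncWriteExt as _;

// Hedged sketch: write-once semantics via OpenDAL's conditional writer.
// If the object already exists, the failure surfaces as a std::io::Error
// wrapping an opendal::Error with ErrorKind::ConditionNotMatch.
async fn write_once(operator: &opendal::Operator, path: &str, payload: &[u8]) -> std::io::Result<()> {
    let mut writer = operator
        .writer_with(path)
        .if_not_exists(true)
        .await
        .map_err(std::io::Error::other)?
        .into_futures_async_write();
    writer.write_all(payload).await?;
    writer.close().await
}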
