resolve almost all as_conversions lints

may need further opinion from others on these

Signed-off-by: strawberry <strawberry@puppygock.gay>

parent f8e1255994
commit 0ebb323490

18 changed files with 144 additions and 85 deletions
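The pattern running through every hunk below: each lossy `value as T` cast is either replaced with a checked conversion (`TryFrom`/`TryInto` plus an explicit fallback or `expect`), or kept behind a scoped `#[allow]` where the truncation is deliberate. A minimal std-only sketch of the checked direction (illustrative names, nothing from the commit itself):

    fn checked_narrow(big: u64) -> u32 {
        // Unlike `big as u32`, this fails loudly instead of silently truncating.
        u32::try_from(big).expect("value should fit in u32")
    }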
@@ -626,6 +626,7 @@ trivially_copy_pass_by_ref = "warn"
 wildcard_imports = "warn"
 checked_conversions = "warn"
 #integer_arithmetic = "warn"
+#as_conversions = "warn"
 
 # some sadness
 missing_errors_doc = "allow"
@@ -646,7 +647,6 @@ map_err_ignore = "allow"
 missing_docs_in_private_items = "allow"
 multiple_inherent_impl = "allow"
 error_impl_error = "allow"
-as_conversions = "allow"
 string_add = "allow"
 string_slice = "allow"
 ref_patterns = "allow"
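With the blanket `as_conversions = "allow"` dropped from the Clippy configuration, the intentional casts that remain are exempted site by site, as the later hunks show. A sketch of that opt-out pattern (function name is illustrative):

    #[allow(clippy::as_conversions, clippy::cast_possible_truncation)]
    fn deliberate_truncate(x: u64) -> u32 {
        x as u32 // intentional truncating cast, exempted from the workspace lint
    }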
@@ -1,11 +1,14 @@
-use ruma::api::client::{
-	backup::{
-		add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, create_backup_version,
-		delete_backup_keys, delete_backup_keys_for_room, delete_backup_keys_for_session, delete_backup_version,
-		get_backup_info, get_backup_keys, get_backup_keys_for_room, get_backup_keys_for_session,
-		get_latest_backup_info, update_backup_version,
+use ruma::{
+	api::client::{
+		backup::{
+			add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, create_backup_version,
+			delete_backup_keys, delete_backup_keys_for_room, delete_backup_keys_for_session, delete_backup_version,
+			get_backup_info, get_backup_keys, get_backup_keys_for_room, get_backup_keys_for_session,
+			get_latest_backup_info, update_backup_version,
+		},
+		error::ErrorKind,
 	},
-	error::ErrorKind,
+	UInt,
 };
 
 use crate::{services, Error, Result, Ruma};
@@ -56,7 +59,8 @@ pub(crate) async fn get_latest_backup_info_route(
 
 	Ok(get_latest_backup_info::v3::Response {
 		algorithm,
-		count: (services().key_backups.count_keys(sender_user, &version)? as u32).into(),
+		count: (UInt::try_from(services().key_backups.count_keys(sender_user, &version)?)
+			.expect("user backup keys count should not be that high")),
 		etag: services().key_backups.get_etag(sender_user, &version)?,
 		version,
 	})
@@ -76,10 +80,12 @@ pub(crate) async fn get_backup_info_route(
 
 	Ok(get_backup_info::v3::Response {
 		algorithm,
-		count: (services()
-			.key_backups
-			.count_keys(sender_user, &body.version)? as u32)
-			.into(),
+		count: (UInt::try_from(
+			services()
+				.key_backups
+				.count_keys(sender_user, &body.version)?,
+		)
+		.expect("user backup keys count should not be that high")),
 		etag: services()
 			.key_backups
 			.get_etag(sender_user, &body.version)?,
@@ -139,10 +145,12 @@ pub(crate) async fn add_backup_keys_route(
 	}
 
 	Ok(add_backup_keys::v3::Response {
-		count: (services()
-			.key_backups
-			.count_keys(sender_user, &body.version)? as u32)
-			.into(),
+		count: (UInt::try_from(
+			services()
+				.key_backups
+				.count_keys(sender_user, &body.version)?,
+		)
+		.expect("user backup keys count should not be that high")),
 		etag: services()
 			.key_backups
 			.get_etag(sender_user, &body.version)?,
@@ -181,10 +189,12 @@ pub(crate) async fn add_backup_keys_for_room_route(
 	}
 
 	Ok(add_backup_keys_for_room::v3::Response {
-		count: (services()
-			.key_backups
-			.count_keys(sender_user, &body.version)? as u32)
-			.into(),
+		count: (UInt::try_from(
+			services()
+				.key_backups
+				.count_keys(sender_user, &body.version)?,
+		)
+		.expect("user backup keys count should not be that high")),
 		etag: services()
 			.key_backups
 			.get_etag(sender_user, &body.version)?,
@@ -221,10 +231,12 @@ pub(crate) async fn add_backup_keys_for_session_route(
 		.add_key(sender_user, &body.version, &body.room_id, &body.session_id, &body.session_data)?;
 
 	Ok(add_backup_keys_for_session::v3::Response {
-		count: (services()
-			.key_backups
-			.count_keys(sender_user, &body.version)? as u32)
-			.into(),
+		count: (UInt::try_from(
+			services()
+				.key_backups
+				.count_keys(sender_user, &body.version)?,
+		)
+		.expect("user backup keys count should not be that high")),
 		etag: services()
 			.key_backups
 			.get_etag(sender_user, &body.version)?,
@@ -294,10 +306,12 @@ pub(crate) async fn delete_backup_keys_route(
 		.delete_all_keys(sender_user, &body.version)?;
 
 	Ok(delete_backup_keys::v3::Response {
-		count: (services()
-			.key_backups
-			.count_keys(sender_user, &body.version)? as u32)
-			.into(),
+		count: (UInt::try_from(
+			services()
+				.key_backups
+				.count_keys(sender_user, &body.version)?,
+		)
+		.expect("user backup keys count should not be that high")),
 		etag: services()
 			.key_backups
 			.get_etag(sender_user, &body.version)?,
@@ -317,10 +331,12 @@ pub(crate) async fn delete_backup_keys_for_room_route(
 		.delete_room_keys(sender_user, &body.version, &body.room_id)?;
 
 	Ok(delete_backup_keys_for_room::v3::Response {
-		count: (services()
-			.key_backups
-			.count_keys(sender_user, &body.version)? as u32)
-			.into(),
+		count: (UInt::try_from(
+			services()
+				.key_backups
+				.count_keys(sender_user, &body.version)?,
+		)
+		.expect("user backup keys count should not be that high")),
 		etag: services()
 			.key_backups
 			.get_etag(sender_user, &body.version)?,
@@ -340,10 +356,12 @@ pub(crate) async fn delete_backup_keys_for_session_route(
 		.delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)?;
 
 	Ok(delete_backup_keys_for_session::v3::Response {
-		count: (services()
-			.key_backups
-			.count_keys(sender_user, &body.version)? as u32)
-			.into(),
+		count: (UInt::try_from(
+			services()
+				.key_backups
+				.count_keys(sender_user, &body.version)?,
+		)
+		.expect("user backup keys count should not be that high")),
 		etag: services()
 			.key_backups
 			.get_etag(sender_user, &body.version)?,
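All seven backup routes share one conversion: the `usize` key count becomes ruma's bounded `UInt` via `TryFrom` instead of `as u32`. A sketch of that shape, assuming ruma's `UInt` re-export from js_int (which holds at most 2^53 - 1; function name is illustrative):

    use ruma::UInt;

    fn count_for_response(count: usize) -> UInt {
        // Panics on a count above 2^53 - 1 instead of silently truncating
        // the way the old `as u32` cast could.
        UInt::try_from(count).expect("user backup keys count should not be that high")
    }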
@@ -63,8 +63,8 @@ pub(crate) async fn get_context_route(body: Ruma<get_context::v3::Request>) -> R
 		lazy_loaded.insert(base_event.sender.as_str().to_owned());
 	}
 
-	// Use limit with maximum 100
-	let limit = u64::from(body.limit).min(100) as usize;
+	// Use limit or else 10, with maximum 100
+	let limit = usize::try_from(body.limit).unwrap_or(10).min(100);
 
 	let base_event = base_event.to_room_event();
 
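Worth noting what the new line actually does: the only way to hit the fallback of 10 is a value too large for `usize`, since `try_from` cannot otherwise fail here; values in range are still capped at 100. A std-only sketch of the same shape, with `u64` standing in for ruma's `UInt` (function name is illustrative):

    fn context_limit(requested: u64) -> usize {
        // try_from only fails if the value exceeds usize::MAX; then use 10.
        usize::try_from(requested).unwrap_or(10).min(100)
    }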
@@ -20,7 +20,7 @@ use ruma::{
 		},
 		StateEventType,
 	},
-	ServerName, UInt,
+	uint, ServerName, UInt,
 };
 use tracing::{error, info, warn};
 
@@ -198,8 +198,9 @@ pub(crate) async fn get_public_rooms_filtered_helper(
 		});
 	}
 
+	// Use limit or else 10, with maximum 100
 	let limit = limit.map_or(10, u64::from);
-	let mut num_since = 0_u64;
+	let mut num_since: u64 = 0;
 
 	if let Some(s) = &since {
 		let mut characters = s.chars();
@@ -363,12 +364,16 @@ pub(crate) async fn get_public_rooms_filtered_helper(
 
 	all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members));
 
-	let total_room_count_estimate = (all_rooms.len() as u32).into();
+	let total_room_count_estimate = UInt::try_from(all_rooms.len()).unwrap_or_else(|_| uint!(0));
 
 	let chunk: Vec<_> = all_rooms
 		.into_iter()
-		.skip(num_since as usize)
-		.take(limit as usize)
+		.skip(
+			num_since
+				.try_into()
+				.expect("num_since should not be this high"),
+		)
+		.take(limit.try_into().expect("limit should not be this high"))
 		.collect();
 
 	let prev_batch = if num_since == 0 {
@@ -377,7 +382,7 @@ pub(crate) async fn get_public_rooms_filtered_helper(
 		Some(format!("p{num_since}"))
 	};
 
-	let next_batch = if chunk.len() < limit as usize {
+	let next_batch = if chunk.len() < limit.try_into().unwrap() {
 		None
 	} else {
 		Some(format!(
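Two helpers from ruma's js_int re-export carry most of these hunks: `UInt::try_from` for runtime values and the `uint!` macro for compile-time literals used as fallbacks. A sketch assuming those re-exports (function name is illustrative):

    use ruma::{uint, UInt};

    fn room_count_estimate(len: usize) -> UInt {
        // uint!(0) builds the fallback UInt at compile time; no cast involved.
        UInt::try_from(len).unwrap_or_else(|_| uint!(0))
    }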
@@ -144,7 +144,7 @@ pub(crate) async fn get_message_events_route(
 		.lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)
 		.await?;
 
-	let limit = u64::from(body.limit).min(100) as usize;
+	let limit = usize::try_from(body.limit).unwrap_or(10).min(100);
 
 	let next_token;
 
@@ -159,8 +159,9 @@ pub(crate) async fn get_message_events_route(
 			.timeline
 			.pdus_after(sender_user, &body.room_id, from)?
 			.filter_map(Result::ok) // Filter out buggy events
-			.filter(|(_, pdu)| contains_url_filter(pdu, &body.filter))
-			.filter(|(_, pdu)| visibility_filter(pdu, sender_user, &body.room_id))
+			.filter(|(_, pdu)| {
+				contains_url_filter(pdu, &body.filter) && visibility_filter(pdu, sender_user, &body.room_id)
+			})
 			.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
 			.take(limit)
 			.collect();
@@ -205,8 +206,7 @@ pub(crate) async fn get_message_events_route(
 			.timeline
 			.pdus_until(sender_user, &body.room_id, from)?
 			.filter_map(Result::ok) // Filter out buggy events
-			.filter(|(_, pdu)| contains_url_filter(pdu, &body.filter))
-			.filter(|(_, pdu)| visibility_filter(pdu, sender_user, &body.room_id))
+			.filter(|(_, pdu)| {contains_url_filter(pdu, &body.filter) && visibility_filter(pdu, sender_user, &body.room_id)})
 			.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
 			.take(limit)
 			.collect();
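Strictly speaking this is not an `as_conversions` fix: the two chained `.filter` calls are merged into one closure joined with `&&`, which short-circuits identically. A std-only sketch of the equivalence (values are illustrative):

    fn main() {
        let nums = [1, 2, 3, 4, 5, 6];
        let chained: Vec<_> = nums.iter().filter(|n| **n % 2 == 0).filter(|n| **n > 2).collect();
        let merged: Vec<_> = nums.iter().filter(|n| **n % 2 == 0 && **n > 2).collect();
        assert_eq!(chained, merged);
    }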
@@ -10,7 +10,7 @@ use ruma::{
 	},
 	events::AnyStateEvent,
 	serde::Raw,
-	OwnedRoomId,
+	uint, OwnedRoomId,
 };
 use tracing::debug;
 
@@ -39,7 +39,12 @@ pub(crate) async fn search_events_route(body: Ruma<search_events::v3::Request>)
 	});
 
 	// Use limit or else 10, with maximum 100
-	let limit = filter.limit.map_or(10, u64::from).min(100) as usize;
+	let limit: usize = filter
+		.limit
+		.unwrap_or_else(|| uint!(10))
+		.try_into()
+		.unwrap_or(10)
+		.min(100);
 
 	let mut room_states: BTreeMap<OwnedRoomId, Vec<Raw<AnyStateEvent>>> = BTreeMap::new();
 
@@ -167,7 +172,7 @@ pub(crate) async fn search_events_route(body: Ruma<search_events::v3::Request>)
 
 	Ok(search_events::v3::Response::new(ResultCategories {
 		room_events: ResultRoomEvents {
-			count: Some((results.len() as u32).into()),
+			count: Some(results.len().try_into().unwrap_or_else(|_| uint!(0))),
 			groups: BTreeMap::new(), // TODO
 			next_batch,
 			results,
@@ -2,7 +2,7 @@ use std::str::FromStr;
 
 use ruma::{
 	api::client::{error::ErrorKind, space::get_hierarchy},
-	UInt,
+	uint, UInt,
 };
 
 use crate::{service::rooms::spaces::PagnationToken, services, Error, Result, Ruma};
@@ -14,10 +14,12 @@ use crate::{service::rooms::spaces::PagnationToken, services, Error, Result, Ruma};
 pub(crate) async fn get_hierarchy_route(body: Ruma<get_hierarchy::v1::Request>) -> Result<get_hierarchy::v1::Response> {
 	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
-	let limit = body
+	let limit: usize = body
 		.limit
-		.unwrap_or_else(|| UInt::from(10_u32))
-		.min(UInt::from(100_u32));
+		.unwrap_or_else(|| uint!(10))
+		.try_into()
+		.unwrap_or(10)
+		.min(100);
 
 	let max_depth = body
 		.max_depth
@@ -45,9 +47,9 @@ pub(crate) async fn get_hierarchy_route(body: Ruma<get_hierarchy::v1::Request>)
 		.get_client_hierarchy(
 			sender_user,
 			&body.room_id,
-			u64::from(limit) as usize,
-			key.map_or(0, |token| u64::from(token.skip) as usize),
-			u64::from(max_depth) as usize,
+			limit,
+			key.map_or(0, |token| token.skip.try_into().unwrap_or(0)),
+			max_depth.try_into().unwrap_or(3),
 			body.suggested_only,
 		)
 		.await
@@ -1,4 +1,7 @@
-use ruma::api::client::{error::ErrorKind, threads::get_threads};
+use ruma::{
+	api::client::{error::ErrorKind, threads::get_threads},
+	uint,
+};
 
 use crate::{services, Error, Result, Ruma};
 
@@ -9,7 +12,8 @@ pub(crate) async fn get_threads_route(body: Ruma<get_threads::v1::Request>) -> R
 	// Use limit or else 10, with maximum 100
 	let limit = body
 		.limit
-		.and_then(|l| l.try_into().ok())
+		.unwrap_or_else(|| uint!(10))
+		.try_into()
 		.unwrap_or(10)
 		.min(100);
 
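The same default-10/cap-100 chain now appears in the context, search, space, threads, and pdu_metadata code. A std-only reduction of the idiom, with `u64` standing in for ruma's `UInt` (function name is illustrative):

    fn effective_limit(requested: Option<u64>) -> usize {
        requested
            .unwrap_or(10) // missing -> default 10
            .try_into()
            .unwrap_or(10) // unrepresentable -> default 10
            .min(100) // hard cap
    }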
@@ -17,7 +17,7 @@ use crate::{services, Result, Ruma};
 /// and don't share a room with the sender
 pub(crate) async fn search_users_route(body: Ruma<search_users::v3::Request>) -> Result<search_users::v3::Response> {
 	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-	let limit = u64::from(body.limit) as usize;
+	let limit = usize::try_from(body.limit).unwrap_or(10); // default limit is 10
 
 	let mut users = services().users.iter().filter_map(|user_id| {
 		// Filter out buggy users (they should not exist, but you never know...)
@@ -340,6 +340,7 @@ impl KeyValueDatabase {
 
 			roomid_inviteviaservers: builder.open_tree("roomid_inviteviaservers")?,
 
+			#[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)]
 			auth_chain_cache: Mutex::new(LruCache::new(
 				(f64::from(config.auth_chain_cache_capacity) * config.conduit_cache_capacity_modifier) as usize,
 			)),
@@ -35,7 +35,11 @@ pub(crate) struct Engine {
 impl KeyValueDatabaseEngine for Arc<Engine> {
 	fn open(config: &Config) -> Result<Self> {
 		let cache_capacity_bytes = config.db_cache_capacity_mb * 1024.0 * 1024.0;
+
+		#[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)]
 		let row_cache_capacity_bytes = (cache_capacity_bytes * 0.50) as usize;
+
+		#[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)]
 		let col_cache_capacity_bytes = (cache_capacity_bytes * 0.50) as usize;
 
 		let mut col_cache = HashMap::new();
@@ -128,6 +132,7 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
 		Ok(())
 	}
 
+	#[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)]
 	fn memory_usage(&self) -> Result<String> {
 		let mut res = String::new();
 		let stats = rust_rocksdb::perf::get_memory_usage_stats(Some(&[&self.rocks]), Some(&[&self.row_cache]))?;
@@ -21,7 +21,7 @@ thread_local! {
 
 struct PreparedStatementIterator<'a> {
 	iterator: Box<dyn Iterator<Item = TupleOfBytes> + 'a>,
-	_statement_ref: NonAliasingBox<rusqlite::Statement<'a>>,
+	_statement_ref: AliasableBox<rusqlite::Statement<'a>>,
 }
 
 impl Iterator for PreparedStatementIterator<'_> {
@@ -30,16 +30,25 @@ impl Iterator for PreparedStatementIterator<'_> {
 	fn next(&mut self) -> Option<Self::Item> { self.iterator.next() }
 }
 
-struct NonAliasingBox<T>(*mut T);
-impl<T> Drop for NonAliasingBox<T> {
+struct AliasableBox<T>(*mut T);
+impl<T> Drop for AliasableBox<T> {
 	fn drop(&mut self) {
-		// TODO: figure out why this is necessary, but also this is sqlite so dont think
-		// i care that much. i tried checking commit history but couldn't find out why
-		// this was done.
-		#[allow(clippy::undocumented_unsafe_blocks)]
-		unsafe {
-			_ = Box::from_raw(self.0);
-		};
+		// SAFETY: This is cursed and relies on non-local reasoning.
+		//
+		// In order for this to be safe:
+		//
+		// * All aliased references to this value must have been dropped first, for
+		//   example by coming after its referrers in struct fields, because struct
+		//   fields are automatically dropped in order from top to bottom in the absence
+		//   of an explicit Drop impl. Otherwise, the referrers may read into
+		//   deallocated memory.
+		// * This type must not be copyable or cloneable. Otherwise, double-free can
+		//   occur.
+		//
+		// These conditions are met, but again, note that changing safe code in
+		// this module can result in unsoundness if any of these constraints are
+		// violated.
+		unsafe { drop(Box::from_raw(self.0)) }
 	}
 }
 
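The rename from `NonAliasingBox` to `AliasableBox` and the new SAFETY comment document, rather than change, the mechanism: a raw pointer obtained via `Box::into_raw` and reclaimed in `Drop`, with soundness hinging on field drop order. A stripped-down, self-contained sketch of that round trip (`RawBox` is an illustrative stand-in):

    struct RawBox<T>(*mut T);

    impl<T> Drop for RawBox<T> {
        fn drop(&mut self) {
            // SAFETY: the pointer came from Box::into_raw below and is
            // reclaimed exactly once; no other references exist at drop time.
            unsafe { drop(Box::from_raw(self.0)) }
        }
    }

    fn main() {
        let raw = RawBox(Box::into_raw(Box::new(42_u32)));
        drop(raw); // frees the heap allocation exactly once
    }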
@@ -93,8 +102,13 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
 		// 2. divide by permanent connections + permanent iter connections + write
 		//    connection
 		// 3. round down to nearest integer
-		let cache_size_per_thread: u32 =
-			((config.db_cache_capacity_mb * 1024.0) / ((num_cpus::get().max(1) * 2) + 1) as f64) as u32;
+		#[allow(
+			clippy::as_conversions,
+			clippy::cast_possible_truncation,
+			clippy::cast_precision_loss,
+			clippy::cast_sign_loss
+		)]
+		let cache_size_per_thread = ((config.db_cache_capacity_mb * 1024.0) / ((num_cpus::get() as f64 * 2.0) + 1.0)) as u32;
 
 		let writer = Mutex::new(Engine::prepare_conn(&path, cache_size_per_thread)?);
 
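Here the cast stays but becomes explicit: the divisor is now computed in f64 throughout (the old code did the divisor arithmetic in integers, then cast), and the single truncating cast sits under a spelled-out allow list. A sketch of the same computation (function name is illustrative):

    #[allow(clippy::as_conversions, clippy::cast_possible_truncation, clippy::cast_precision_loss, clippy::cast_sign_loss)]
    fn cache_size_per_thread(capacity_mb: f64, cpus: usize) -> u32 {
        // 1. convert MB to KB, 2. divide per connection, 3. truncate to u32
        ((capacity_mb * 1024.0) / ((cpus as f64 * 2.0) + 1.0)) as u32
    }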
@@ -161,7 +175,7 @@ impl SqliteTable {
 			.unwrap(),
 		));
 
-		let statement_ref = NonAliasingBox(statement);
+		let statement_ref = AliasableBox(statement);
 
 		//let name = self.name.clone();
 
@@ -250,7 +264,7 @@ impl KvTree for SqliteTable {
 			.unwrap(),
 		));
 
-		let statement_ref = NonAliasingBox(statement);
+		let statement_ref = AliasableBox(statement);
 
 		let iterator = Box::new(
 			statement
@@ -272,7 +286,7 @@ impl KvTree for SqliteTable {
 			.unwrap(),
 		));
 
-		let statement_ref = NonAliasingBox(statement);
+		let statement_ref = AliasableBox(statement);
 
 		let iterator = Box::new(
 			statement
@@ -47,7 +47,7 @@ impl Watchers {
 		let mut watchers = self.watchers.write().unwrap();
 		for prefix in triggered {
 			if let Some(tx) = watchers.remove(prefix) {
-				_ = tx.0.send(());
+				tx.0.send(()).expect("channel should still be open");
 			}
 		}
 	};
@@ -30,6 +30,7 @@ pub(crate) struct Hooked {
 }
 
 impl Resolver {
+	#[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)]
 	pub(crate) fn new(config: &Config) -> Self {
 		let (sys_conf, mut opts) = hickory_resolver::system_conf::read_system_conf()
 			.map_err(|e| {
@@ -40,6 +40,7 @@ pub(crate) struct Services<'a> {
 }
 
 impl Services<'_> {
+	#[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)]
 	pub(crate) fn build<
 		D: appservice::Data
 			+ pusher::Data
@@ -188,13 +188,14 @@ impl Service {
 
 		let ctx = PushConditionRoomCtx {
 			room_id: room_id.to_owned(),
-			member_count: UInt::from(
+			member_count: UInt::try_from(
 				services()
 					.rooms
 					.state_cache
 					.room_joined_count(room_id)?
-					.unwrap_or(1) as u32,
-			),
+					.unwrap_or(1),
+			)
+			.unwrap_or_else(|_| uint!(0)),
 			user_id: user.to_owned(),
 			user_display_name: services()
 				.users
@@ -5,7 +5,7 @@ pub(crate) use data::Data;
 use ruma::{
 	api::{client::relations::get_relating_events, Direction},
 	events::{relation::RelationType, TimelineEventType},
-	EventId, RoomId, UInt, UserId,
+	uint, EventId, RoomId, UInt, UserId,
 };
 use serde::Deserialize;
 
@@ -57,8 +57,9 @@ impl Service {
 
 		// Use limit or else 10, with maximum 100
 		let limit = limit
-			.and_then(|u| u32::try_from(u).ok())
-			.map_or(10_usize, |u| u as usize)
+			.unwrap_or_else(|| uint!(10))
+			.try_into()
+			.unwrap_or(10)
 			.min(100);
 
 		let next_token;
@@ -21,6 +21,7 @@ use crate::{services, Error, Result};
 
 pub(crate) fn clamp<T: Ord>(val: T, min: T, max: T) -> T { cmp::min(cmp::max(val, min), max) }
 
+#[allow(clippy::as_conversions)]
 pub(crate) fn millis_since_unix_epoch() -> u64 {
 	SystemTime::now()
 		.duration_since(UNIX_EPOCH)
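For the epoch-millis helper the `as` cast (narrowing the `u128` from `as_millis` down to `u64`) is merely allowed rather than removed. A checked variant would look like this; a sketch only, not what the commit does:

    use std::time::{SystemTime, UNIX_EPOCH};

    fn millis_since_unix_epoch_checked() -> u64 {
        let millis = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("system clock should be after the Unix epoch")
            .as_millis();
        u64::try_from(millis).expect("epoch milliseconds should fit in u64")
    }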