resolve half of the integer_arithmetic lints, couple misc changes

Signed-off-by: strawberry <strawberry@puppygock.gay>
strawberry authored on 2024-05-03 21:42:47 -04:00; committed by June
parent ac4590952b
commit b5c0c30a5e
34 changed files with 188 additions and 109 deletions

View file

@ -625,6 +625,7 @@ manual_let_else = "warn"
trivially_copy_pass_by_ref = "warn"
wildcard_imports = "warn"
checked_conversions = "warn"
#integer_arithmetic = "warn"
# some sadness
missing_errors_doc = "allow"
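For context, the (currently commented-out) integer_arithmetic lint flags any bare `+`, `-` or `*` on integers; the changes below swap those for either checked_* (fail loudly via expect) or saturating_* (clamp at the type's bounds). A minimal standalone sketch of the difference, using a plain u8 rather than any conduwuit type:

fn main() {
    let count: u8 = 250;

    // A bare `count + 10` is what the lint warns about: it panics in debug
    // builds and silently wraps in release builds.

    // checked_add returns None on overflow, so the caller chooses the failure mode.
    assert_eq!(count.checked_add(5), Some(255));
    assert_eq!(count.checked_add(10), None);

    // saturating_add clamps at the type's maximum instead of overflowing.
    assert_eq!(count.saturating_add(10), u8::MAX);
}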

View file

@ -1,3 +1,5 @@
use std::fmt::Write as _;
use register::RegistrationKind;
use ruma::{
api::client::{
@ -238,7 +240,7 @@ pub(crate) async fn register_route(body: Ruma<register::v3::Request>) -> Result<
// If `new_user_displayname_suffix` is set, registration will push whatever
// content is set to the user's display name with a space before it
if !services().globals.new_user_displayname_suffix().is_empty() {
displayname.push_str(&(" ".to_owned() + services().globals.new_user_displayname_suffix()));
_ = write!(displayname, " {}", services().globals.config.new_user_displayname_suffix);
}
services()
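A standalone sketch of the write!-into-String pattern used here; the suffix value is hypothetical, not conduwuit's default:

use std::fmt::Write as _;

fn main() {
    let mut displayname = String::from("strawberry");
    let suffix = "(new user)"; // hypothetical new_user_displayname_suffix

    // write! appends straight into the existing String, avoiding the temporary
    // allocation that `" ".to_owned() + suffix` created before push_str.
    // Writing to a String cannot fail, so the fmt::Result is discarded with `_ =`.
    _ = write!(displayname, " {suffix}");

    assert_eq!(displayname, "strawberry (new user)");
}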

View file

@ -380,7 +380,12 @@ pub(crate) async fn get_public_rooms_filtered_helper(
let next_batch = if chunk.len() < limit as usize {
None
} else {
Some(format!("n{}", num_since + limit))
Some(format!(
"n{}",
num_since
.checked_add(limit)
.expect("num_since and limit should not be that large")
))
};
Ok(get_public_rooms_filtered::v3::Response {

View file

@ -1,4 +1,5 @@
use std::{
cmp,
collections::{hash_map, BTreeMap, HashMap, HashSet},
time::{Duration, Instant},
};
@ -338,7 +339,15 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
},
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
hash_map::Entry::Occupied(mut e) => {
*e.get_mut() = (
Instant::now(),
e.get()
.1
.checked_add(1)
.expect("bad_query_ratelimiter attempt/try count should not ever get this high"),
);
},
}
};
@ -353,10 +362,8 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
.get(server)
{
// Exponential backoff
let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
const MAX_DURATION: Duration = Duration::from_secs(60 * 60 * 24);
let min_elapsed_duration = cmp::min(MAX_DURATION, Duration::from_secs(5 * 60) * (*tries) * (*tries));
if time.elapsed() < min_elapsed_duration {
debug!("Backing off query from {:?}", server);

View file

@ -1,4 +1,5 @@
use std::{
cmp,
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
sync::Arc,
time::{Duration, Instant},
@ -1301,8 +1302,8 @@ async fn make_join_request(
) -> Result<(federation::membership::prepare_join_event::v1::Response, OwnedServerName)> {
let mut make_join_response_and_server = Err(Error::BadServerResponse("No server available to assist in joining."));
let mut make_join_counter = 0;
let mut incompatible_room_version_count = 0;
let mut make_join_counter: u16 = 0;
let mut incompatible_room_version_count: u8 = 0;
for remote_server in servers {
if server_is_ours(remote_server) {
@ -1322,7 +1323,7 @@ async fn make_join_request(
.await;
trace!("make_join response: {:?}", make_join_response);
make_join_counter += 1;
make_join_counter = make_join_counter.saturating_add(1);
if let Err(ref e) = make_join_response {
trace!("make_join ErrorKind string: {:?}", e.error_code().to_string());
@ -1336,7 +1337,7 @@ async fn make_join_request(
.to_string()
.contains("M_UNSUPPORTED_ROOM_VERSION")
{
incompatible_room_version_count += 1;
incompatible_room_version_count = incompatible_room_version_count.saturating_add(1);
}
if incompatible_room_version_count > 15 {
@ -1393,7 +1394,15 @@ async fn validate_and_add_event_id(
Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
},
Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
Entry::Occupied(mut e) => {
*e.get_mut() = (
Instant::now(),
e.get()
.1
.checked_add(1)
.expect("bad_event_ratelimiter attempt/try count should not ever get this high"),
);
},
}
};
@ -1405,10 +1414,8 @@ async fn validate_and_add_event_id(
.get(&event_id)
{
// Exponential backoff
let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
const MAX_DURATION: Duration = Duration::from_secs(60 * 60 * 24);
let min_elapsed_duration = cmp::min(MAX_DURATION, Duration::from_secs(5 * 60) * (*tries) * (*tries));
if time.elapsed() < min_elapsed_duration {
debug!("Backing off from {}", event_id);

View file

@ -801,7 +801,13 @@ pub(crate) async fn upgrade_room_route(body: Ruma<upgrade_room::v3::Request>) ->
.map_err(|_| Error::bad_database("Invalid room event in database."))?;
// Setting events_default and invite to the greater of 50 and users_default + 1
let new_level = max(int!(50), power_levels_event_content.users_default + int!(1));
let new_level = max(
int!(50),
power_levels_event_content
.users_default
.checked_add(int!(1))
.expect("user power level should not be this high"),
);
power_levels_event_content.events_default = new_level;
power_levels_event_content.invite = new_level;

View file

@ -106,14 +106,14 @@ pub(crate) async fn search_events_route(body: Ruma<search_events::v3::Request>)
}
}
let skip = match body.next_batch.as_ref().map(|s| s.parse()) {
let skip: usize = match body.next_batch.as_ref().map(|s| s.parse()) {
Some(Ok(s)) => s,
Some(Err(_)) => return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid next_batch token.")),
None => 0, // Default to the start
};
let mut results = Vec::new();
for _ in 0..skip + limit {
for _ in 0_usize..skip.saturating_add(limit) {
if let Some(s) = searches
.iter_mut()
.map(|s| (s.peek().cloned(), s))
@ -162,7 +162,7 @@ pub(crate) async fn search_events_route(body: Ruma<search_events::v3::Request>)
let next_batch = if results.len() < limit {
None
} else {
Some((skip + limit).to_string())
Some((skip.checked_add(limit).unwrap()).to_string())
};
Ok(search_events::v3::Response::new(ResultCategories {
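The next_batch arithmetic above, restated as a standalone sketch with hypothetical pagination values:

fn main() {
    // The client already received `skip` results and asked for `limit` more.
    let skip: usize = 20;
    let limit: usize = 10;
    let results_len: usize = 10; // how many results this batch actually produced

    // A full batch means there may be more results, so hand back a token
    // pointing past everything sent so far; a short batch means we are done.
    let next_batch = if results_len < limit {
        None
    } else {
        Some(skip.checked_add(limit).unwrap().to_string())
    };

    assert_eq!(next_batch.as_deref(), Some("30"));
}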

View file

@ -539,7 +539,7 @@ async fn handle_left_room(
left_state_ids.insert(leave_shortstatekey, left_event_id);
let mut i = 0;
let mut i: u8 = 0;
for (key, id) in left_state_ids {
if full_state || since_state_ids.get(&key) != Some(&id) {
let (event_type, state_key) = services().rooms.short.get_statekey_from_short(key)?;
@ -557,7 +557,7 @@ async fn handle_left_room(
left_state_events.push(pdu.to_sync_state_event());
i += 1;
i = i.saturating_add(1);
if i % 100 == 0 {
tokio::task::yield_now().await;
}
@ -705,7 +705,11 @@ async fn load_joined_room(
// Recalculate heroes (first 5 members)
let mut heroes = Vec::new();
if joined_member_count + invited_member_count <= 5 {
if joined_member_count
.checked_add(invited_member_count)
.expect("joined/invite member count should not be this high")
<= 5
{
// Go through all PDUs and for each member event, check if the user is still
// joined or invited until we have 5 or we reach the end
@ -784,7 +788,7 @@ async fn load_joined_room(
let mut state_events = Vec::new();
let mut lazy_loaded = HashSet::new();
let mut i = 0;
let mut i: u8 = 0;
for (shortstatekey, id) in current_state_ids {
let (event_type, state_key) = services()
.rooms
@ -798,7 +802,7 @@ async fn load_joined_room(
};
state_events.push(pdu);
i += 1;
i = i.saturating_add(1);
if i % 100 == 0 {
tokio::task::yield_now().await;
}
@ -819,7 +823,7 @@ async fn load_joined_room(
}
state_events.push(pdu);
i += 1;
i = i.saturating_add(1);
if i % 100 == 0 {
tokio::task::yield_now().await;
}
@ -1416,10 +1420,14 @@ pub(crate) async fn sync_events_v4_route(
.ranges
.into_iter()
.map(|mut r| {
r.0 =
r.0.clamp(uint!(0), UInt::from(all_joined_rooms.len() as u32 - 1));
r.1 =
r.1.clamp(r.0, UInt::from(all_joined_rooms.len() as u32 - 1));
r.0 = r.0.clamp(
uint!(0),
UInt::try_from(all_joined_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX),
);
r.1 = r.1.clamp(
r.0,
UInt::try_from(all_joined_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX),
);
let room_ids = all_joined_rooms[(u64::from(r.0) as usize)..=(u64::from(r.1) as usize)].to_vec();
new_known_rooms.extend(room_ids.iter().cloned());
for room_id in &room_ids {
@ -1592,14 +1600,13 @@ pub(crate) async fn sync_events_v4_route(
.collect::<Vec<_>>();
let name = match heroes.len().cmp(&(1_usize)) {
Ordering::Greater => {
let last = heroes[0].0.clone();
Some(
heroes[1..]
let firsts = heroes[1..]
.iter()
.map(|h| h.0.clone())
.collect::<Vec<_>>()
.join(", ") + " and " + &last,
)
.join(", ");
let last = heroes[0].0.clone();
Some(format!("{firsts} and {last}"))
},
Ordering::Equal => Some(heroes[0].0.clone()),
Ordering::Less => None,
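The hero-name joining above, restated as a standalone sketch with hypothetical display names:

fn main() {
    // Hypothetical hero display names, in the order conduwuit collected them.
    let heroes = ["alice", "bob", "charlie"];

    let name = match heroes.len() {
        0 => None,
        1 => Some(heroes[0].to_owned()),
        // Everything after the first hero is joined with ", ", then the first
        // hero is appended with " and ", mirroring the Ordering::Greater arm.
        _ => {
            let firsts = heroes[1..].join(", ");
            let last = heroes[0];
            Some(format!("{firsts} and {last}"))
        },
    };

    assert_eq!(name.as_deref(), Some("bob, charlie and alice"));
}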

View file

@ -22,14 +22,30 @@ pub(crate) async fn create_typing_event_route(
if let Typing::Yes(duration) = body.state {
let duration = utils::clamp(
duration.as_millis() as u64,
services().globals.config.typing_client_timeout_min_s * 1000,
services().globals.config.typing_client_timeout_max_s * 1000,
duration.as_millis().try_into().unwrap_or(u64::MAX),
services()
.globals
.config
.typing_client_timeout_min_s
.checked_mul(1000)
.unwrap(),
services()
.globals
.config
.typing_client_timeout_max_s
.checked_mul(1000)
.unwrap(),
);
services()
.rooms
.typing
.typing_add(sender_user, &body.room_id, utils::millis_since_unix_epoch() + duration)
.typing_add(
sender_user,
&body.room_id,
utils::millis_since_unix_epoch()
.checked_add(duration)
.expect("user typing timeout should not get this high"),
)
.await?;
} else {
services()
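A standalone sketch of the timeout clamping, using std's Ord::clamp in place of the project's utils::clamp helper and hypothetical config values:

fn main() {
    // Hypothetical typing_client_timeout_min_s / typing_client_timeout_max_s values.
    let min_s: u64 = 15;
    let max_s: u64 = 45;

    // The client asked to show typing for two minutes; clamp it into range (ms).
    let requested_ms: u64 = 120_000;
    let duration = requested_ms.clamp(
        min_s.checked_mul(1000).expect("configured timeout fits in u64"),
        max_s.checked_mul(1000).expect("configured timeout fits in u64"),
    );

    assert_eq!(duration, 45_000);
}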

View file

@ -21,7 +21,9 @@ pub(crate) async fn turn_server_route(
let (username, password) = if !turn_secret.is_empty() {
let expiry = SecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(services().globals.turn_ttl()),
SystemTime::now()
.checked_add(Duration::from_secs(services().globals.turn_ttl()))
.expect("TURN TTL should not get this high"),
)
.expect("time is valid");

View file

@ -101,7 +101,9 @@ pub(crate) async fn get_server_keys_route() -> Result<impl IntoResponse> {
old_verify_keys: BTreeMap::new(),
signatures: BTreeMap::new(),
valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(86400 * 7),
SystemTime::now()
.checked_add(Duration::from_secs(86400 * 7))
.expect("valid_until_ts should not get this high"),
)
.expect("time is valid"),
})
@ -398,8 +400,13 @@ pub(crate) async fn send_transaction_message_route(
.is_joined(&typing.user_id, &typing.room_id)?
{
if typing.typing {
let timeout = utils::millis_since_unix_epoch()
+ services().globals.config.typing_federation_timeout_s * 1000;
let timeout = utils::millis_since_unix_epoch().saturating_add(
services()
.globals
.config
.typing_federation_timeout_s
.saturating_mul(1000),
);
services()
.rooms
.typing
@ -662,7 +669,7 @@ pub(crate) async fn get_missing_events_route(
}
if body.earliest_events.contains(&queued_events[i]) {
i += 1;
i = i.saturating_add(1);
continue;
}
@ -671,7 +678,7 @@ pub(crate) async fn get_missing_events_route(
&body.room_id,
&queued_events[i],
)? {
i += 1;
i = i.saturating_add(1);
continue;
}
@ -688,7 +695,7 @@ pub(crate) async fn get_missing_events_route(
);
events.push(PduEvent::convert_to_outgoing_federation_event(pdu));
}
i += 1;
i = i.saturating_add(1);
}
Ok(get_missing_events::v1::Response {

View file

@ -105,7 +105,7 @@ impl service::account_data::Data for KeyValueDatabase {
// Skip the data that's exactly at since, because we sent that last time
let mut first_possible = prefix.clone();
first_possible.extend_from_slice(&(since + 1).to_be_bytes());
first_possible.extend_from_slice(&(since.saturating_add(1)).to_be_bytes());
for r in self
.roomuserdataid_accountdata
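A standalone sketch of why the big-endian `since + 1` key works as a scan start; the key layout here is hypothetical but follows the prefix-then-count shape used above:

fn main() {
    // Hypothetical key layout: an 0xFF-terminated prefix followed by a
    // big-endian count, so byte-wise ordering matches numeric ordering.
    let prefix: Vec<u8> = b"!room:example.org\xff@user:example.org\xff".to_vec();
    let since: u64 = 41;

    let mut first_possible = prefix.clone();
    // +1 so the range scan starts strictly after `since` (saturating at u64::MAX).
    first_possible.extend_from_slice(&since.saturating_add(1).to_be_bytes());

    assert!(first_possible.ends_with(&42_u64.to_be_bytes()));
    assert!(first_possible > prefix);
}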

View file

@ -158,18 +158,15 @@ impl service::globals::Data for KeyValueDatabase {
let max_appservice_in_room_cache = self.appservice_in_room_cache.read().unwrap().capacity();
let max_lasttimelinecount_cache = self.lasttimelinecount_cache.lock().unwrap().capacity();
let mut response = format!(
format!(
"\
auth_chain_cache: {auth_chain_cache} / {max_auth_chain_cache}
our_real_users_cache: {our_real_users_cache} / {max_our_real_users_cache}
appservice_in_room_cache: {appservice_in_room_cache} / {max_appservice_in_room_cache}
lasttimelinecount_cache: {lasttimelinecount_cache} / {max_lasttimelinecount_cache}\n\n"
);
if let Ok(db_stats) = self.db.memory_usage() {
response += &db_stats;
}
response
lasttimelinecount_cache: {lasttimelinecount_cache} / {max_lasttimelinecount_cache}\n\n
{}",
self.db.memory_usage().unwrap_or_default()
)
}
fn clear_caches(&self, amount: u32) {

View file

@ -23,10 +23,10 @@ impl service::rooms::pdu_metadata::Data for KeyValueDatabase {
let mut current = prefix.clone();
let count_raw = match until {
PduCount::Normal(x) => x - 1,
PduCount::Normal(x) => x.saturating_sub(1),
PduCount::Backfilled(x) => {
current.extend_from_slice(&0_u64.to_be_bytes());
u64::MAX - x - 1
u64::MAX.saturating_sub(x).saturating_sub(1)
},
};
current.extend_from_slice(&count_raw.to_be_bytes());

View file

@ -48,7 +48,7 @@ impl service::rooms::read_receipt::Data for KeyValueDatabase {
let prefix2 = prefix.clone();
let mut first_possible_edu = prefix.clone();
first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since
first_possible_edu.extend_from_slice(&(since.saturating_add(1)).to_be_bytes()); // +1 so we don't send the event at since
Box::new(
self.readreceiptid_readreceipt

View file

@ -17,7 +17,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
.expect("there is always one layer")
.1;
let mut result = HashMap::new();
let mut i = 0;
let mut i: u8 = 0;
for compressed in full_state.iter() {
let parsed = services()
.rooms
@ -25,7 +25,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
.parse_compressed_state_event(compressed)?;
result.insert(parsed.0, parsed.1);
i += 1;
i = i.saturating_add(1);
if i % 100 == 0 {
tokio::task::yield_now().await;
}
@ -44,7 +44,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
.1;
let mut result = HashMap::new();
let mut i = 0;
let mut i: u8 = 0;
for compressed in full_state.iter() {
let (_, eventid) = services()
.rooms
@ -63,7 +63,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
);
}
i += 1;
i = i.saturating_add(1);
if i % 100 == 0 {
tokio::task::yield_now().await;
}
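A sketch of the periodic-yield pattern these counters drive, assuming the loop runs inside a tokio task:

// Assumes a tokio runtime, matching the services around it.
async fn walk_state(ids: Vec<u64>) {
    let mut i: u8 = 0;
    for _id in ids {
        // ... per-entry work ...
        i = i.saturating_add(1);
        // Yield every 100 entries so one huge state set cannot hog the worker
        // thread. A saturating u8 stops at 255, so after roughly 255 entries
        // the modulo check never fires again.
        if i % 100 == 0 {
            tokio::task::yield_now().await;
        }
    }
}

#[tokio::main]
async fn main() {
    walk_state((0..1_000).collect()).await;
}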

View file

@ -154,11 +154,11 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
if user_is_local(&joined) && !services().users.is_deactivated(&joined).unwrap_or(true) {
real_users.insert(joined);
}
joinedcount += 1;
joinedcount = joinedcount.saturating_add(1);
}
for _invited in self.room_members_invited(room_id).filter_map(Result::ok) {
invitedcount += 1;
invitedcount = invitedcount.saturating_add(1);
}
self.roomid_joinedcount

View file

@ -19,7 +19,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
.to_vec();
let mut current = prefix.clone();
current.extend_from_slice(&(until - 1).to_be_bytes());
current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes());
Ok(Box::new(
self.threadid_userids

View file

@ -256,7 +256,7 @@ fn pdu_count(pdu_id: &[u8]) -> Result<PduCount> {
utils::u64_from_bytes(&pdu_id[pdu_id.len() - 2 * size_of::<u64>()..pdu_id.len() - size_of::<u64>()]);
if matches!(second_last_u64, Ok(0)) {
Ok(PduCount::Backfilled(u64::MAX - last_u64))
Ok(PduCount::Backfilled(u64::MAX.saturating_sub(last_u64)))
} else {
Ok(PduCount::Normal(last_u64))
}
@ -275,22 +275,22 @@ fn count_to_id(room_id: &RoomId, count: PduCount, offset: u64, subtract: bool) -
let count_raw = match count {
PduCount::Normal(x) => {
if subtract {
x - offset
x.saturating_sub(offset)
} else {
x + offset
x.saturating_add(offset)
}
},
PduCount::Backfilled(x) => {
pdu_id.extend_from_slice(&0_u64.to_be_bytes());
let num = u64::MAX - x;
let num = u64::MAX.saturating_sub(x);
if subtract {
if num > 0 {
num - offset
num.saturating_sub(offset)
} else {
num
}
} else {
num + offset
num.saturating_add(offset)
}
},
};
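A standalone sketch of the PduCount raw encoding being touched here; the reading that backfilled counts are stored "mirrored" so they order in reverse is an inference from the `u64::MAX - x` arithmetic, not something the surrounding code states:

// Normal counts are stored as-is; backfilled counts are stored as
// u64::MAX - x, so in the raw big-endian key space larger backfill
// depths map to smaller values.
fn to_raw(backfilled: bool, x: u64) -> u64 {
    if backfilled {
        u64::MAX.saturating_sub(x)
    } else {
        x
    }
}

fn main() {
    // Deeper backfill (larger x) sorts before shallower backfill...
    assert!(to_raw(true, 10) < to_raw(true, 5));
    // ...while newer normal events sort after older ones.
    assert!(to_raw(false, 5) < to_raw(false, 10));
    // Decoding mirrors the encoding: u64::MAX - raw recovers x.
    assert_eq!(u64::MAX.saturating_sub(to_raw(true, 7)), 7);
}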

View file

@ -110,7 +110,8 @@ impl service::rooms::user::Data for KeyValueDatabase {
.enumerate()
.find(|(_, &b)| b == 0xFF)
.ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))?
.0 + 1; // +1 because the room id starts AFTER the separator
.0
.saturating_add(1); // +1 because the room id starts AFTER the separator
let room_id = key[roomid_index..].to_vec();

View file

@ -5,7 +5,7 @@ use ruma::{
encryption::{CrossSigningKey, DeviceKeys, OneTimeKey},
events::{AnyToDeviceEvent, StateEventType},
serde::Raw,
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedDeviceKeyId,
uint, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedDeviceKeyId,
OwnedMxcUri, OwnedUserId, UInt, UserId,
};
use tracing::warn;
@ -414,7 +414,7 @@ impl service::users::Data for KeyValueDatabase {
.algorithm(),
)
}) {
*counts.entry(algorithm?).or_default() += UInt::from(1_u32);
*counts.entry(algorithm?).or_default() += uint!(1);
}
Ok(counts)
@ -561,7 +561,7 @@ impl service::users::Data for KeyValueDatabase {
prefix.push(0xFF);
let mut start = prefix.clone();
start.extend_from_slice(&(from + 1).to_be_bytes());
start.extend_from_slice(&(from.saturating_add(1)).to_be_bytes());
let to = to.unwrap_or(u64::MAX);

View file

@ -173,13 +173,13 @@ pub(crate) async fn migrations(db: &KeyValueDatabase, config: &Config) -> Result
let mut current_sstatehash: Option<u64> = None;
let mut current_room = None;
let mut current_state = HashSet::new();
let mut counter = 0;
let mut counter: u32 = 0;
let mut handle_state = |current_sstatehash: u64,
current_room: &RoomId,
current_state: HashSet<_>,
last_roomstates: &mut HashMap<_, _>| {
counter += 1;
counter = counter.saturating_add(1);
let last_roomsstatehash = last_roomstates.get(current_room);
let states_parents = last_roomsstatehash.map_or_else(

View file

@ -401,18 +401,18 @@ impl KeyValueDatabase {
let sqlite_exists = path.join("conduit.db").exists();
let rocksdb_exists = path.join("IDENTITY").exists();
let mut count = 0;
let mut count: u8 = 0;
if sqlite_exists {
count += 1;
count = count.saturating_add(1);
}
if rocksdb_exists {
count += 1;
count = count.saturating_add(1);
}
if count > 1 {
warn!("Multiple databases at database_path detected");
error!("Multiple databases at database_path detected");
return Ok(());
}

View file

@ -145,7 +145,14 @@ pub(crate) fn cf_options(cfg: &Config, name: &str, mut opts: Options, cache: &mu
cache_size(cfg, cfg.statekeyshort_cache_capacity, 1024),
),
"pduid_pdu" => set_table_with_new_cache(&mut opts, cfg, cache, name, cfg.pdu_cache_capacity as usize * 1536),
#[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)]
"pduid_pdu" => set_table_with_new_cache(
&mut opts,
cfg,
cache,
name,
(cfg.pdu_cache_capacity as usize).saturating_mul(1536),
),
"eventid_outlierpdu" => set_table_with_shared_cache(&mut opts, cfg, cache, name, "pduid_pdu"),
@ -309,7 +316,10 @@ fn set_table_with_shared_cache(
fn cache_size(config: &Config, base_size: u32, entity_size: usize) -> usize {
let ents = f64::from(base_size) * config.conduit_cache_capacity_modifier;
ents as usize * entity_size
#[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)]
(ents as usize)
.checked_mul(entity_size)
.expect("cache capacity size is too large")
}
fn table_options(_config: &Config) -> BlockBasedOptions {
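The cache_size arithmetic above as a standalone sketch; the numbers are hypothetical stand-ins for base_size, conduit_cache_capacity_modifier and entity_size:

fn cache_size(base_size: u32, modifier: f64, entity_size: usize) -> usize {
    let ents = f64::from(base_size) * modifier;

    // The float-to-usize `as` cast truncates and saturates rather than
    // panicking, which is why the clippy cast lints need an allow; the
    // final multiplication stays checked.
    #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
    let ents_floor = ents as usize;

    ents_floor
        .checked_mul(entity_size)
        .expect("cache capacity size is too large")
}

fn main() {
    assert_eq!(cache_size(10_000, 1.5, 1024), 15_360_000);
}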

View file

@ -4,7 +4,7 @@ use crate::{service::admin::escape_html, services, Result};
pub(crate) async fn register(body: Vec<&str>) -> Result<RoomMessageEventContent> {
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let appservice_config = body[1..body.len() - 1].join("\n");
let appservice_config = body[1..body.len().checked_sub(1).unwrap()].join("\n");
let parsed_config = serde_yaml::from_str::<Registration>(&appservice_config);
match parsed_config {
Ok(yaml) => match services().appservice.register_appservice(yaml).await {
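The code-fence stripping done by these admin commands, as a standalone sketch with a hypothetical command body:

fn main() {
    // Hypothetical admin command body: the first and last lines are the
    // ``` fences, everything in between is the registration YAML.
    let body: Vec<&str> = vec!["```", "id: bridge", "url: http://localhost:1234", "```"];

    if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
        // len() > 2 guarantees the subtraction cannot underflow.
        let appservice_config = body[1..body.len().checked_sub(1).unwrap()].join("\n");
        assert_eq!(appservice_config, "id: bridge\nurl: http://localhost:1234");
    }
}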

View file

@ -121,7 +121,7 @@ pub(crate) async fn get_remote_pdu_list(
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let list = body
.clone()
.drain(1..body.len() - 1)
.drain(1..body.len().checked_sub(1).unwrap())
.filter_map(|pdu| EventId::parse(pdu).ok())
.collect::<Vec<_>>();
@ -381,7 +381,7 @@ pub(crate) async fn change_log_level(
pub(crate) async fn sign_json(body: Vec<&str>) -> Result<RoomMessageEventContent> {
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let string = body[1..body.len() - 1].join("\n");
let string = body[1..body.len().checked_sub(1).unwrap()].join("\n");
match serde_json::from_str(&string) {
Ok(mut value) => {
ruma::signatures::sign_json(
@ -404,7 +404,7 @@ pub(crate) async fn sign_json(body: Vec<&str>) -> Result<RoomMessageEventContent
pub(crate) async fn verify_json(body: Vec<&str>) -> Result<RoomMessageEventContent> {
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let string = body[1..body.len() - 1].join("\n");
let string = body[1..body.len().checked_sub(1).unwrap()].join("\n");
match serde_json::from_str(&string) {
Ok(value) => {
let pub_key_map = RwLock::new(BTreeMap::new());

View file

@ -139,14 +139,19 @@ pub(crate) async fn delete(
pub(crate) async fn delete_list(body: Vec<&str>) -> Result<RoomMessageEventContent> {
if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" {
let mxc_list = body.clone().drain(1..body.len() - 1).collect::<Vec<_>>();
let mxc_list = body
.clone()
.drain(1..body.len().checked_sub(1).unwrap())
.collect::<Vec<_>>();
let mut mxc_deletion_count = 0;
let mut mxc_deletion_count: u32 = 0;
for mxc in mxc_list {
debug!("Deleting MXC {mxc} in bulk");
services().media.delete(mxc.to_owned()).await?;
mxc_deletion_count += 1;
mxc_deletion_count = mxc_deletion_count
.checked_add(1)
.expect("mxc_deletion_count should not get this high");
}
return Ok(RoomMessageEventContent::text_plain(format!(

View file

@ -39,7 +39,7 @@ pub(crate) async fn process(command: RoomDirectoryCommand, _body: Vec<&str>) ->
let rooms = rooms
.into_iter()
.skip(page.saturating_sub(1) * PAGE_SIZE)
.skip(page.checked_sub(1).unwrap().checked_mul(PAGE_SIZE).unwrap())
.take(PAGE_SIZE)
.collect::<Vec<_>>();
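The page arithmetic above as a standalone sketch; PAGE_SIZE and the room list are hypothetical:

fn main() {
    const PAGE_SIZE: usize = 100;

    // 1-indexed page number parsed from the admin command.
    let page: usize = 3;

    let rooms: Vec<usize> = (0..250).collect();
    let page_rooms: Vec<usize> = rooms
        .into_iter()
        // checked_sub(1) assumes page >= 1; checked_mul guards the offset itself.
        .skip(page.checked_sub(1).unwrap().checked_mul(PAGE_SIZE).unwrap())
        .take(PAGE_SIZE)
        .collect();

    // Page 3 of 250 rooms holds the final 50 entries.
    assert_eq!(page_rooms.len(), 50);
    assert_eq!(page_rooms.first(), Some(&200));
}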

View file

@ -33,9 +33,9 @@ pub(crate) async fn memory_usage(_body: Vec<&str>) -> Result<RoomMessageEventCon
Ok(RoomMessageEventContent::text_plain(format!(
"Services:\n{response0}\n\nDatabase:\n{response1}\n{}",
if !response2.is_empty() {
"Allocator:\n{response2}"
format!("Allocator:\n {response2}")
} else {
""
String::new()
}
)))
}

View file

@ -61,8 +61,13 @@ pub(crate) async fn create(
// If `new_user_displayname_suffix` is set, registration will push whatever
// content is set to the user's display name with a space before it
if !services().globals.new_user_displayname_suffix().is_empty() {
displayname.push_str(&(" ".to_owned() + services().globals.new_user_displayname_suffix()));
if !services()
.globals
.config
.new_user_displayname_suffix
.is_empty()
{
_ = write!(displayname, " {}", services().globals.config.new_user_displayname_suffix);
}
services()

View file

@ -89,6 +89,8 @@ impl Client {
fn base(config: &Config) -> Result<reqwest::ClientBuilder> {
let version = conduwuit_version();
let user_agent = format!("Conduwuit/{version}");
let mut builder = reqwest::Client::builder()
.hickory_dns(true)
.connect_timeout(Duration::from_secs(config.request_conn_timeout))
@ -96,7 +98,7 @@ impl Client {
.timeout(Duration::from_secs(config.request_total_timeout))
.pool_idle_timeout(Duration::from_secs(config.request_idle_timeout))
.pool_max_idle_per_host(config.request_idle_per_host.into())
.user_agent("Conduwuit".to_owned() + "/" + &version)
.user_agent(user_agent)
.redirect(redirect::Policy::limited(6))
.connection_verbose(true);

View file

@ -188,7 +188,7 @@ impl Service {
Ok(duration) => {
debug!("Parsed duration: {:?}", duration);
debug!("System time now: {:?}", SystemTime::now());
SystemTime::now() - duration
SystemTime::now().checked_sub(duration).unwrap()
},
Err(e) => {
error!("Failed to parse user-specified time duration: {}", e);

View file

@ -236,6 +236,7 @@ impl Service {
const MAX_DURATION: Duration = Duration::from_secs(60 * 60 * 24);
let min_duration = cmp::min(MAX_DURATION, Duration::from_secs(5 * 60) * (*tries) * (*tries));
let duration = time.elapsed();
if duration < min_duration {
debug!(
duration = ?duration,
@ -1003,7 +1004,7 @@ impl Service {
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
},
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1.saturating_add(1)),
}
};
@ -1034,10 +1035,9 @@ impl Service {
.get(&*next_id)
{
// Exponential backoff
let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
const MAX_DURATION: Duration = Duration::from_secs(60 * 60 * 24);
let min_elapsed_duration =
cmp::min(MAX_DURATION, Duration::from_secs(5 * 60) * (*tries) * (*tries));
if time.elapsed() < min_elapsed_duration {
info!("Backing off from {}", next_id);
@ -1143,10 +1143,9 @@ impl Service {
.get(&**next_id)
{
// Exponential backoff
let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
const MAX_DURATION: Duration = Duration::from_secs(60 * 60 * 24);
let min_elapsed_duration =
cmp::min(MAX_DURATION, Duration::from_secs(5 * 60) * (*tries) * (*tries));
if time.elapsed() < min_elapsed_duration {
debug!("Backing off from {}", next_id);

View file

@ -818,7 +818,7 @@ impl FedDest {
fn into_uri_string(self) -> String {
match self {
Self::Literal(addr) => addr.to_string(),
Self::Named(host, port) => host + &port,
Self::Named(host, port) => format!("{host}{port}"),
}
}