resolve a couple of pedantic clippy lints, remove unnecessary qualifications

Signed-off-by: strawberry <strawberry@puppygock.gay>

parent 6d7ef80aba
commit 9d0b647911

35 changed files with 127 additions and 148 deletions
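
Most of the diff is mechanical: fully qualified paths such as `std::result::Result::ok` become `Result::ok`, since `Result` is already in scope through the Rust prelude and both spellings name the same associated function. A minimal standalone sketch of the pattern (illustrative values only, not code from this repository):

    // `Result::ok` maps Ok(v) -> Some(v) and Err(_) -> None, so
    // `filter_map(Result::ok)` keeps the Ok values and drops the errors.
    fn main() {
        let results: Vec<Result<u32, String>> = vec![Ok(1), Err("boom".into()), Ok(3)];

        // Fully qualified form, as the old code wrote it:
        let a: Vec<u32> = results.clone().into_iter().filter_map(std::result::Result::ok).collect();

        // Prelude form, as this commit rewrites it; behavior is identical:
        let b: Vec<u32> = results.into_iter().filter_map(Result::ok).collect();

        assert_eq!(a, b);
    }

The remaining hunks address pedantic clippy lints: missing trailing semicolons on `()`-valued statements, a `match` collapsed into `let ... else`, and admin-room helpers that no longer take `&self`.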
@@ -16,8 +16,7 @@ use tracing::{error, info, warn};
 use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
 use crate::{
-    api::client_server::{self, join_room_by_id_helper},
-    services, utils, Error, Result, Ruma,
+    api::client_server::{self, join_room_by_id_helper}, service, services, utils, Error, Result, Ruma,
 };
 
 const RANDOM_USER_ID_LENGTH: usize = 10;
 
@@ -279,7 +278,7 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
     // If this is the first real user, grant them admin privileges except for guest
     // users Note: the server user, @conduit:servername, is generated first
     if !is_guest {
-        if let Some(admin_room) = services().admin.get_admin_room()? {
+        if let Some(admin_room) = service::admin::Service::get_admin_room()? {
             if services().rooms.state_cache.room_joined_count(&admin_room)? == Some(1) {
                 services().admin.make_user_admin(&user_id, displayname).await?;
 
@@ -375,12 +374,7 @@ pub async fn change_password_route(body: Ruma<change_password::v3::Request>) ->
 
     if body.logout_devices {
         // Logout all devices except the current one
-        for id in services()
-            .users
-            .all_device_ids(sender_user)
-            .filter_map(std::result::Result::ok)
-            .filter(|id| id != sender_device)
-        {
+        for id in services().users.all_device_ids(sender_user).filter_map(Result::ok).filter(|id| id != sender_device) {
            services().users.remove_device(sender_user, &id)?;
         }
     }
@@ -90,7 +90,7 @@ pub(crate) async fn get_alias_helper(room_alias: OwnedRoomAliasId) -> Result<get
     let mut servers = response.servers;
 
     // find active servers in room state cache to suggest
-    for extra_servers in services().rooms.state_cache.room_servers(&room_id).filter_map(std::result::Result::ok) {
+    for extra_servers in services().rooms.state_cache.room_servers(&room_id).filter_map(Result::ok) {
         servers.push(extra_servers);
     }
 
@@ -152,7 +152,7 @@ pub(crate) async fn get_alias_helper(room_alias: OwnedRoomAliasId) -> Result<get
     let mut servers: Vec<OwnedServerName> = Vec::new();
 
     // find active servers in room state cache to suggest
-    for extra_servers in services().rooms.state_cache.room_servers(&room_id).filter_map(std::result::Result::ok) {
+    for extra_servers in services().rooms.state_cache.room_servers(&room_id).filter_map(Result::ok) {
         servers.push(extra_servers);
     }
@@ -69,7 +69,7 @@ pub async fn get_context_route(body: Ruma<get_context::v3::Request>) -> Result<g
         .timeline
         .pdus_until(sender_user, &room_id, base_token)?
         .take(limit / 2)
-        .filter_map(std::result::Result::ok) // Remove buggy events
+        .filter_map(Result::ok) // Remove buggy events
         .filter(|(_, pdu)| {
             services()
                 .rooms
@@ -101,7 +101,7 @@ pub async fn get_context_route(body: Ruma<get_context::v3::Request>) -> Result<g
         .timeline
         .pdus_after(sender_user, &room_id, base_token)?
         .take(limit / 2)
-        .filter_map(std::result::Result::ok) // Remove buggy events
+        .filter_map(Result::ok) // Remove buggy events
         .filter(|(_, pdu)| {
             services()
                 .rooms
@@ -16,7 +16,7 @@ pub async fn get_devices_route(body: Ruma<get_devices::v3::Request>) -> Result<g
     let devices: Vec<device::Device> = services()
         .users
         .all_devices_metadata(sender_user)
-        .filter_map(std::result::Result::ok) // Filter out buggy devices
+        .filter_map(Result::ok) // Filter out buggy devices
         .collect();
 
     Ok(get_devices::v3::Response {
@@ -191,10 +191,10 @@ pub async fn get_key_changes_route(body: Ruma<get_key_changes::v3::Request>) ->
             body.from.parse().map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?,
             Some(body.to.parse().map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?),
         )
-        .filter_map(std::result::Result::ok),
+        .filter_map(Result::ok),
     );
 
-    for room_id in services().rooms.state_cache.rooms_joined(sender_user).filter_map(std::result::Result::ok) {
+    for room_id in services().rooms.state_cache.rooms_joined(sender_user).filter_map(Result::ok) {
         device_list_updates.extend(
             services()
                 .users
@@ -203,7 +203,7 @@ pub async fn get_key_changes_route(body: Ruma<get_key_changes::v3::Request>) ->
                 body.from.parse().map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?,
                 Some(body.to.parse().map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?),
             )
-            .filter_map(std::result::Result::ok),
+            .filter_map(Result::ok),
         );
     }
     Ok(get_key_changes::v3::Response {
@@ -399,12 +399,7 @@ pub async fn joined_rooms_route(body: Ruma<joined_rooms::v3::Request>) -> Result
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
     Ok(joined_rooms::v3::Response {
-        joined_rooms: services()
-            .rooms
-            .state_cache
-            .rooms_joined(sender_user)
-            .filter_map(std::result::Result::ok)
-            .collect(),
+        joined_rooms: services().rooms.state_cache.rooms_joined(sender_user).filter_map(Result::ok).collect(),
     })
 }
 
@@ -456,7 +451,7 @@ pub async fn joined_members_route(body: Ruma<joined_members::v3::Request>) -> Re
     }
 
     let mut joined = BTreeMap::new();
-    for user_id in services().rooms.state_cache.room_members(&body.room_id).filter_map(std::result::Result::ok) {
+    for user_id in services().rooms.state_cache.room_members(&body.room_id).filter_map(Result::ok) {
         let display_name = services().users.displayname(&user_id)?;
         let avatar_url = services().users.avatar_url(&user_id)?;
 
@@ -847,7 +842,7 @@ pub(crate) async fn join_room_by_id_helper(
             .rooms
             .state_cache
             .room_members(restriction_room_id)
-            .filter_map(std::result::Result::ok)
+            .filter_map(Result::ok)
             .find(|uid| uid.server_name() == services().globals.server_name())
     });
     Some(authorized_user)
@@ -1208,7 +1203,7 @@ pub(crate) async fn invite_helper(
         .rooms
         .state_cache
         .room_servers(room_id)
-        .filter_map(std::result::Result::ok)
+        .filter_map(Result::ok)
         .filter(|server| &**server != services().globals.server_name());
 
     services().sending.send_pdu(servers, &pdu_id)?;
@@ -177,7 +177,7 @@ pub async fn get_message_events_route(
         .timeline
         .pdus_after(sender_user, &body.room_id, from)?
         .take(limit)
-        .filter_map(std::result::Result::ok) // Filter out buggy events
+        .filter_map(Result::ok) // Filter out buggy events
         .filter(|(_, pdu)| {
             services()
                 .rooms
@@ -219,7 +219,7 @@ pub async fn get_message_events_route(
         .timeline
         .pdus_until(sender_user, &body.room_id, from)?
         .take(limit)
-        .filter_map(std::result::Result::ok) // Filter out buggy events
+        .filter_map(Result::ok) // Filter out buggy events
         .filter(|(_, pdu)| {
             services()
                 .rooms
@@ -32,7 +32,7 @@ pub async fn set_displayname_route(
         .rooms
         .state_cache
         .rooms_joined(sender_user)
-        .filter_map(std::result::Result::ok)
+        .filter_map(Result::ok)
         .map(|room_id| {
             Ok::<_, Error>((
                 PduBuilder {
@@ -60,7 +60,7 @@ pub async fn set_displayname_route(
                 room_id,
             ))
         })
-        .filter_map(std::result::Result::ok)
+        .filter_map(Result::ok)
         .collect();
 
     for (pdu_builder, room_id) in all_rooms_joined {
@@ -143,7 +143,7 @@ pub async fn set_avatar_url_route(body: Ruma<set_avatar_url::v3::Request>) -> Re
         .rooms
         .state_cache
         .rooms_joined(sender_user)
-        .filter_map(std::result::Result::ok)
+        .filter_map(Result::ok)
         .map(|room_id| {
             Ok::<_, Error>((
                 PduBuilder {
@@ -171,7 +171,7 @@ pub async fn set_avatar_url_route(body: Ruma<set_avatar_url::v3::Request>) -> Re
                 room_id,
             ))
         })
-        .filter_map(std::result::Result::ok)
+        .filter_map(Result::ok)
         .collect();
 
     for (pdu_builder, room_id) in all_joined_rooms {
@@ -44,7 +44,7 @@ pub async fn report_event_route(body: Ruma<report_content::v3::Request>) -> Resu
         .rooms
         .state_cache
         .room_members(&pdu.room_id)
-        .filter_map(std::result::Result::ok)
+        .filter_map(Result::ok)
         .any(|user_id| user_id == *sender_user)
     {
         return Err(Error::BadRequest(
@@ -576,12 +576,7 @@ pub async fn get_room_aliases_route(body: Ruma<aliases::v3::Request>) -> Result<
     }
 
     Ok(aliases::v3::Response {
-        aliases: services()
-            .rooms
-            .alias
-            .local_aliases_for_room(&body.room_id)
-            .filter_map(std::result::Result::ok)
-            .collect(),
+        aliases: services().rooms.alias.local_aliases_for_room(&body.room_id).filter_map(Result::ok).collect(),
     })
 }
 
@@ -801,7 +796,7 @@ pub async fn upgrade_room_route(body: Ruma<upgrade_room::v3::Request>) -> Result
     }
 
     // Moves any local aliases to the new room
-    for alias in services().rooms.alias.local_aliases_for_room(&body.room_id).filter_map(std::result::Result::ok) {
+    for alias in services().rooms.alias.local_aliases_for_room(&body.room_id).filter_map(Result::ok) {
         services().rooms.alias.set_alias(&alias, &replacement_room)?;
     }
@@ -22,9 +22,10 @@ pub async fn search_events_route(body: Ruma<search_events::v3::Request>) -> Resu
     let search_criteria = body.search_categories.room_events.as_ref().unwrap();
     let filter = &search_criteria.filter;
 
-    let room_ids = filter.rooms.clone().unwrap_or_else(|| {
-        services().rooms.state_cache.rooms_joined(sender_user).filter_map(std::result::Result::ok).collect()
-    });
+    let room_ids = filter
+        .rooms
+        .clone()
+        .unwrap_or_else(|| services().rooms.state_cache.rooms_joined(sender_user).filter_map(Result::ok).collect());
 
     // Use limit or else 10, with maximum 100
     let limit = filter.limit.map_or(10, u64::from).min(100) as usize;
@@ -92,7 +93,7 @@ pub async fn search_events_route(body: Ruma<search_events::v3::Request>) -> Resu
                 result: Some(result),
             })
         })
-        .filter_map(std::result::Result::ok)
+        .filter_map(Result::ok)
         .skip(skip)
         .take(limit)
         .collect();
@@ -7,7 +7,7 @@ use ruma::{
 
 use crate::{service::rooms::spaces::PagnationToken, services, Error, Result, Ruma};
 
-/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy``
+/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy`
 ///
 /// Paginates over the space tree in a depth-first manner to locate child rooms
 /// of a given space.
@@ -193,8 +193,7 @@ async fn sync_helper(
     let mut device_list_left = HashSet::new();
 
     // Look for device list updates of this account
-    device_list_updates
-        .extend(services().users.keys_changed(sender_user.as_ref(), since, None).filter_map(std::result::Result::ok));
+    device_list_updates.extend(services().users.keys_changed(sender_user.as_ref(), since, None).filter_map(Result::ok));
 
     let all_joined_rooms = services().rooms.state_cache.rooms_joined(&sender_user).collect::<Vec<_>>();
 
@@ -372,7 +371,7 @@ async fn sync_helper(
             .rooms
             .user
             .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
-            .filter_map(std::result::Result::ok)
+            .filter_map(Result::ok)
             .filter_map(|other_room_id| {
                 Some(
                     services()
@@ -542,7 +541,7 @@ async fn load_joined_room(
             .rooms
             .timeline
             .all_pdus(sender_user, room_id)?
-            .filter_map(std::result::Result::ok) // Ignore all broken pdus
+            .filter_map(Result::ok) // Ignore all broken pdus
             .filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember)
             .map(|(_, pdu)| {
                 let content: RoomMemberEventContent = serde_json::from_str(pdu.content.get())
@@ -566,7 +565,7 @@ async fn load_joined_room(
                 }
             })
             // Filter out buggy users
-            .filter_map(std::result::Result::ok)
+            .filter_map(Result::ok)
             // Filter for possible heroes
             .flatten()
         {
@@ -817,8 +816,7 @@ async fn load_joined_room(
     };
 
     // Look for device list updates in this room
-    device_list_updates
-        .extend(services().users.keys_changed(room_id.as_ref(), since, None).filter_map(std::result::Result::ok));
+    device_list_updates.extend(services().users.keys_changed(room_id.as_ref(), since, None).filter_map(Result::ok));
 
     let notification_count = if send_notification_counts {
         Some(
@@ -863,7 +861,7 @@ async fn load_joined_room(
         .edus
         .read_receipt
         .readreceipts_since(room_id, since)
-        .filter_map(std::result::Result::ok) // Filter out buggy events
+        .filter_map(Result::ok) // Filter out buggy events
         .map(|(_, _, v)| v)
         .collect();
 
@@ -956,7 +954,7 @@ fn share_encrypted_room(sender_user: &UserId, user_id: &UserId, ignore_room: &Ro
         .rooms
         .user
         .get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])?
-        .filter_map(std::result::Result::ok)
+        .filter_map(Result::ok)
         .filter(|room_id| room_id != ignore_room)
        .filter_map(|other_room_id| {
             Some(
@@ -999,7 +997,7 @@ pub async fn sync_events_v4_route(
     services().users.update_sync_request_with_cache(sender_user.clone(), sender_device.clone(), &mut body);
 
     let all_joined_rooms =
-        services().rooms.state_cache.rooms_joined(&sender_user).filter_map(std::result::Result::ok).collect::<Vec<_>>();
+        services().rooms.state_cache.rooms_joined(&sender_user).filter_map(Result::ok).collect::<Vec<_>>();
 
     if body.extensions.to_device.enabled.unwrap_or(false) {
         services().users.remove_to_device_events(&sender_user, &sender_device, globalsince)?;
@@ -1011,9 +1009,8 @@ pub async fn sync_events_v4_route(
 
     if body.extensions.e2ee.enabled.unwrap_or(false) {
         // Look for device list updates of this account
-        device_list_changes.extend(
-            services().users.keys_changed(sender_user.as_ref(), globalsince, None).filter_map(std::result::Result::ok),
-        );
+        device_list_changes
+            .extend(services().users.keys_changed(sender_user.as_ref(), globalsince, None).filter_map(Result::ok));
 
         for room_id in &all_joined_rooms {
             let current_shortstatehash = if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
@@ -1129,16 +1126,15 @@ pub async fn sync_events_v4_route(
                 }
             }
             // Look for device list updates in this room
-            device_list_changes.extend(
-                services().users.keys_changed(room_id.as_ref(), globalsince, None).filter_map(std::result::Result::ok),
-            );
+            device_list_changes
+                .extend(services().users.keys_changed(room_id.as_ref(), globalsince, None).filter_map(Result::ok));
         }
         for user_id in left_encrypted_users {
             let dont_share_encrypted_room = services()
                 .rooms
                 .user
                 .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
-                .filter_map(std::result::Result::ok)
+                .filter_map(Result::ok)
                 .filter_map(|other_room_id| {
                     Some(
                         services()
@@ -1288,7 +1284,7 @@ pub async fn sync_events_v4_route(
         let required_state = required_state_request
             .iter()
             .map(|state| services().rooms.state_accessor.room_state_get(room_id, &state.0, &state.1))
-            .filter_map(std::result::Result::ok)
+            .filter_map(Result::ok)
             .flatten()
             .map(|state| state.to_sync_state_event())
             .collect();
@@ -1298,7 +1294,7 @@ pub async fn sync_events_v4_route(
             .rooms
             .state_cache
             .room_members(room_id)
-            .filter_map(std::result::Result::ok)
+            .filter_map(Result::ok)
             .filter(|member| member != &sender_user)
             .map(|member| {
                 Ok::<_, Error>(
@@ -1310,7 +1306,7 @@ pub async fn sync_events_v4_route(
                 }),
             )
         })
-        .filter_map(std::result::Result::ok)
+        .filter_map(Result::ok)
         .flatten()
         .take(5)
         .collect::<Vec<_>>();
@@ -20,7 +20,7 @@ pub async fn get_threads_route(body: Ruma<get_threads::v1::Request>) -> Result<g
         .threads
         .threads_until(sender_user, &body.room_id, from, &body.include)?
         .take(limit)
-        .filter_map(std::result::Result::ok)
+        .filter_map(Result::ok)
         .filter(|(_, pdu)| {
             services()
                 .rooms
@@ -45,7 +45,7 @@ pub async fn search_users_route(body: Ruma<search_users::v3::Request>) -> Result
         let mut user_visible = false;
 
         let user_is_in_public_rooms =
-            services().rooms.state_cache.rooms_joined(&user_id).filter_map(std::result::Result::ok).any(|room| {
+            services().rooms.state_cache.rooms_joined(&user_id).filter_map(Result::ok).any(|room| {
                 services().rooms.state_accessor.room_state_get(&room, &StateEventType::RoomJoinRules, "").map_or(
                     false,
                     |event| {
@@ -321,7 +321,7 @@ where
                     e.url()
                 ),
                 false => {
-                    info!("Could not send request to {} at {}: {}", destination, actual_destination_str, e)
+                    info!("Could not send request to {} at {}: {}", destination, actual_destination_str, e);
                 },
             },
         },
@@ -1016,7 +1016,7 @@ pub async fn get_backfill_route(body: Ruma<get_backfill::v1::Request>) -> Result
         .take(limit.try_into().unwrap());
 
     let events = all_events
-        .filter_map(std::result::Result::ok)
+        .filter_map(Result::ok)
         .filter(|(_, e)| {
             matches!(
                 services().rooms.state_accessor.server_can_see_event(sender_servername, &e.room_id, &e.event_id,),
@@ -1412,7 +1412,7 @@ async fn create_join_event(
         .rooms
         .state_cache
         .room_servers(room_id)
-        .filter_map(std::result::Result::ok)
+        .filter_map(Result::ok)
         .filter(|server| &**server != services().globals.server_name());
 
     services().sending.send_pdu(servers, &pdu_id)?;
@@ -1614,7 +1614,7 @@ pub async fn get_devices_route(body: Ruma<get_devices::v1::Request>) -> Result<g
         devices: services()
             .users
             .all_devices_metadata(&body.user_id)
-            .filter_map(std::result::Result::ok)
+            .filter_map(Result::ok)
             .filter_map(|metadata| {
                 let device_id_string = metadata.device_id.as_str().to_owned();
                 let device_display_name = match services().globals.allow_device_name_federation() {
@@ -264,7 +264,7 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
         if self.config.database_backups_to_keep >= 0 {
             let keep = u32::try_from(self.config.database_backups_to_keep)?;
             if let Err(e) = engine.purge_old_backups(keep.try_into()?) {
-                error!("Failed to purge old backup: {:?}", e.to_string())
+                error!("Failed to purge old backup: {:?}", e.to_string());
             }
         }
 
@@ -398,7 +398,7 @@ impl KvTree for RocksDbEngineTree<'_> {
             self.db
                 .rocks
                 .iterator_cf_opt(&self.cf(), readoptions, rust_rocksdb::IteratorMode::Start)
-                .map(std::result::Result::unwrap)
+                .map(Result::unwrap)
                 .map(|(k, v)| (Vec::from(k), Vec::from(v))),
         )
     }
@@ -422,7 +422,7 @@ impl KvTree for RocksDbEngineTree<'_> {
                 },
             ),
         )
-        .map(std::result::Result::unwrap)
+        .map(Result::unwrap)
         .map(|(k, v)| (Vec::from(k), Vec::from(v))),
         )
     }
@@ -485,7 +485,7 @@ impl KvTree for RocksDbEngineTree<'_> {
                 readoptions,
                 rust_rocksdb::IteratorMode::From(&prefix, rust_rocksdb::Direction::Forward),
             )
-            .map(std::result::Result::unwrap)
+            .map(Result::unwrap)
            .map(|(k, v)| (Vec::from(k), Vec::from(v)))
            .take_while(move |(k, _)| k.starts_with(&prefix)),
         )
@@ -161,10 +161,7 @@ impl SqliteTable {
         //let name = self.name.clone();
 
         let iterator = Box::new(
-            statement
-                .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
-                .unwrap()
-                .map(std::result::Result::unwrap),
+            statement.query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))).unwrap().map(Result::unwrap),
         );
 
         Box::new(PreparedStatementIterator {
@@ -251,7 +248,7 @@ impl KvTree for SqliteTable {
             statement
                 .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
                 .unwrap()
-                .map(std::result::Result::unwrap),
+                .map(Result::unwrap),
         );
         Box::new(PreparedStatementIterator {
             iterator,
@@ -273,7 +270,7 @@ impl KvTree for SqliteTable {
             statement
                 .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
                 .unwrap()
-                .map(std::result::Result::unwrap),
+                .map(Result::unwrap),
         );
 
         Box::new(PreparedStatementIterator {
@@ -40,7 +40,7 @@ impl service::appservice::Data for KeyValueDatabase {
 
     fn all(&self) -> Result<Vec<(String, Registration)>> {
         self.iter_ids()?
-            .filter_map(std::result::Result::ok)
+            .filter_map(Result::ok)
             .map(move |id| {
                 Ok((
                     id.clone(),
@@ -222,7 +222,7 @@ impl service::key_backups::Data for KeyValueDatabase {
 
                 Ok::<_, Error>((session_id, key_data))
             })
-            .filter_map(std::result::Result::ok)
+            .filter_map(Result::ok)
             .collect())
     }
@@ -95,7 +95,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
         let mut joined_servers = HashSet::new();
         let mut real_users = HashSet::new();
 
-        for joined in self.room_members(room_id).filter_map(std::result::Result::ok) {
+        for joined in self.room_members(room_id).filter_map(Result::ok) {
             joined_servers.insert(joined.server_name().to_owned());
             if joined.server_name() == services().globals.server_name()
                 && !services().users.is_deactivated(&joined).unwrap_or(true)
@@ -105,7 +105,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
             joinedcount += 1;
         }
 
-        for _invited in self.room_members_invited(room_id).filter_map(std::result::Result::ok) {
+        for _invited in self.room_members_invited(room_id).filter_map(Result::ok) {
             invitedcount += 1;
         }
 
@@ -115,7 +115,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
 
         self.our_real_users_cache.write().unwrap().insert(room_id.to_owned(), Arc::new(real_users));
 
-        for old_joined_server in self.room_servers(room_id).filter_map(std::result::Result::ok) {
+        for old_joined_server in self.room_servers(room_id).filter_map(Result::ok) {
             if !joined_servers.remove(&old_joined_server) {
                 // Server not in room anymore
                 let mut roomserver_id = room_id.as_bytes().to_vec();
@@ -54,7 +54,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
                 )
                 .map_err(|_| Error::bad_database("Invalid UserId in threadid_userids."))
             })
-            .filter_map(std::result::Result::ok)
+            .filter_map(Result::ok)
             .collect(),
         ))
     } else {
@@ -106,7 +106,7 @@ impl service::rooms::user::Data for KeyValueDatabase {
 
                 Ok::<_, Error>(room_id)
             })
-            .filter_map(std::result::Result::ok)
+            .filter_map(Result::ok)
         });
 
         // We use the default compare function because keys are sorted correctly (not
@@ -562,7 +562,7 @@ impl service::users::Data for KeyValueDatabase {
 
     fn mark_device_key_update(&self, user_id: &UserId) -> Result<()> {
         let count = services().globals.next_count()?.to_be_bytes();
-        for room_id in services().rooms.state_cache.rooms_joined(user_id).filter_map(std::result::Result::ok) {
+        for room_id in services().rooms.state_cache.rooms_joined(user_id).filter_map(Result::ok) {
             // Don't send key updates to unencrypted rooms
             if services().rooms.state_accessor.room_state_get(&room_id, &StateEventType::RoomEncryption, "")?.is_none()
             {
@@ -719,7 +719,7 @@ impl service::users::Data for KeyValueDatabase {
                     .map_err(|_| Error::bad_database("ToDeviceId has invalid count bytes."))?,
             ))
         })
-        .filter_map(std::result::Result::ok)
+        .filter_map(Result::ok)
         .take_while(|&(_, count)| count <= until)
         {
             self.todeviceid_events.remove(&key)?;
@@ -431,12 +431,9 @@ impl KeyValueDatabase {
             for (roomserverid, _) in db.roomserverids.iter() {
                 let mut parts = roomserverid.split(|&b| b == 0xFF);
                 let room_id = parts.next().expect("split always returns one element");
-                let servername = match parts.next() {
-                    Some(s) => s,
-                    None => {
-                        error!("Migration: Invalid roomserverid in db.");
-                        continue;
-                    },
+                let Some(servername) = parts.next() else {
+                    error!("Migration: Invalid roomserverid in db.");
+                    continue;
                 };
                 let mut serverroomid = servername.to_vec();
                 serverroomid.push(0xFF);
@@ -771,7 +768,7 @@ impl KeyValueDatabase {
         }
 
         // Force E2EE device list updates so we can send them over federation
-        for user_id in services().users.iter().filter_map(std::result::Result::ok) {
+        for user_id in services().users.iter().filter_map(Result::ok) {
             services().users.mark_device_key_update(&user_id)?;
         }
 
@@ -929,7 +926,7 @@ impl KeyValueDatabase {
         for user_id in services()
             .users
             .iter()
-            .filter_map(std::result::Result::ok)
+            .filter_map(Result::ok)
             .filter(|user| !services().users.is_deactivated(user).unwrap_or(true))
             .filter(|user| user.server_name() == services().globals.server_name())
         {
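
The migration hunk above collapses a two-arm `match` whose `None` arm only logs and `continue`s into a `let ... else` binding, the shape clippy's pedantic `manual_let_else` lint suggests. A standalone sketch of the equivalence (hypothetical function, not code from this repository):

    fn first_two(bytes: &[u8]) -> Option<(&[u8], &[u8])> {
        let mut parts = bytes.split(|&b| b == 0xFF);
        let first = parts.next().expect("split always returns one element");

        // Before: a match whose None arm only diverges.
        // let second = match parts.next() {
        //     Some(s) => s,
        //     None => return None,
        // };

        // After: let-else binds the Some value or takes the diverging path.
        let Some(second) = parts.next() else {
            return None;
        };

        Some((first, second))
    }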
@@ -404,7 +404,7 @@ async fn run_server() -> io::Result<()> {
     #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental
     #[cfg(not(feature = "zstd_compression"))]
     {
-        app = routes().layer(middlewares).into_make_service()
+        app = routes().layer(middlewares).into_make_service();
     };
 
     let handle = ServerHandle::new();
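
This hunk, like the media and spaces ones further down, only appends a `;` to the final statement of a block. That is what clippy's pedantic `semicolon_if_nothing_returned` lint asks for: when a block's value is `()` anyway, make the last expression an explicit statement. A tiny illustration (hypothetical functions):

    fn log_value(v: u32) {
        println!("value = {v}") // lint fires: trailing ()-typed expression
    }

    fn log_value_fixed(v: u32) {
        println!("value = {v}"); // explicit statement; nothing is returned
    }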
@@ -482,7 +482,7 @@ impl Service {
         let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name()))
             .expect("@conduit:server_name is valid");
 
-        if let Ok(Some(conduit_room)) = services().admin.get_admin_room() {
+        if let Ok(Some(conduit_room)) = Self::get_admin_room() {
             loop {
                 tokio::select! {
                     Some(event) = receiver.recv() => {
@@ -1131,7 +1131,7 @@ impl Service {
             .try_into()
             .expect("#admins:server_name is a valid alias name");
 
-        if let Some(admin_room_id) = services().admin.get_admin_room()? {
+        if let Some(admin_room_id) = Self::get_admin_room()? {
             if room.to_string().eq(&admin_room_id) || room.to_string().eq(&admin_room_alias) {
                 return Ok(RoomMessageEventContent::text_plain(
                     "Not allowed to ban the admin room.",
@@ -1304,7 +1304,7 @@ impl Service {
                 match <&RoomId>::try_from(room_id) {
                     Ok(owned_room_id) => {
                         // silently ignore deleting admin room
-                        if let Some(admin_room_id) = services().admin.get_admin_room()? {
+                        if let Some(admin_room_id) = Self::get_admin_room()? {
                             if owned_room_id.eq(&admin_room_id) {
                                 info!("User specified admin room in bulk ban list, ignoring");
                                 continue;
@@ -1550,7 +1550,7 @@ impl Service {
                     .rooms
                     .metadata
                     .iter_ids()
-                    .filter_map(std::result::Result::ok)
+                    .filter_map(Result::ok)
                     .map(|id: OwnedRoomId| Self::get_room_info(&id))
                     .collect::<Vec<_>>();
                 rooms.sort_by_key(|r| r.1);
@@ -1751,7 +1751,7 @@ impl Service {
                     .rooms
                     .directory
                     .public_rooms()
-                    .filter_map(std::result::Result::ok)
+                    .filter_map(Result::ok)
                     .map(|id: OwnedRoomId| Self::get_room_info(&id))
                     .collect::<Vec<_>>();
                 rooms.sort_by_key(|r| r.1);
@@ -2152,7 +2152,7 @@ impl Service {
             },
             DebugCommand::ForceDeviceListUpdates => {
                 // Force E2EE device list updates for all users
-                for user_id in services().users.iter().filter_map(std::result::Result::ok) {
+                for user_id in services().users.iter().filter_map(Result::ok) {
                     services().users.mark_device_key_update(&user_id)?;
                 }
                 RoomMessageEventContent::text_plain("Marked all devices for all users as having new keys to update")
@@ -2486,7 +2486,7 @@ impl Service {
     ///
     /// Errors are propagated from the database, and will have None if there is
     /// no admin room
-    pub(crate) fn get_admin_room(&self) -> Result<Option<OwnedRoomId>> {
+    pub(crate) fn get_admin_room() -> Result<Option<OwnedRoomId>> {
         let admin_room_alias: Box<RoomAliasId> = format!("#admins:{}", services().globals.server_name())
             .try_into()
             .expect("#admins:server_name is a valid alias name");
@@ -2498,7 +2498,7 @@ impl Service {
     ///
     /// In conduit, this is equivalent to granting admin privileges.
     pub(crate) async fn make_user_admin(&self, user_id: &UserId, displayname: String) -> Result<()> {
-        if let Some(room_id) = services().admin.get_admin_room()? {
+        if let Some(room_id) = Self::get_admin_room()? {
             let mutex_state =
                 Arc::clone(services().globals.roomid_mutex_state.write().await.entry(room_id.clone()).or_default());
             let state_lock = mutex_state.lock().await;
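
`get_admin_room` above loses its `&self` receiver because it never reads instance state, which is what clippy's pedantic `unused_self` lint flags; callers inside the impl switch to `Self::get_admin_room()` and callers elsewhere to `service::admin::Service::get_admin_room()`. A reduced sketch with hypothetical types:

    struct Service;

    impl Service {
        // Before: fn get_admin_room(&self) -> Option<u32> { ... }
        // After: no receiver, so no instance is needed to call it.
        fn get_admin_room() -> Option<u32> {
            Some(1)
        }

        fn ban_room(&self) {
            // Inside the impl, `Self::` replaces the old `services().admin.` call:
            let _room = Self::get_admin_room();
        }
    }

    fn main() {
        // Outside the impl, the associated function is called on the type:
        let _ = Service::get_admin_room();
        Service.ban_room();
    }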
@@ -60,17 +60,18 @@ impl Service {
         #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental
         #[cfg(feature = "sha256_media")]
         {
-            path = services().globals.get_media_file_new(&key)
+            path = services().globals.get_media_file_new(&key);
         };
 
         #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental
         #[cfg(not(feature = "sha256_media"))]
         {
-            path = services().globals.get_media_file(&key)
+            path = services().globals.get_media_file(&key);
         };
 
         let mut f = File::create(path).await?;
         f.write_all(file).await?;
 
         Ok(())
     }
 
@@ -83,13 +84,13 @@ impl Service {
         #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental
         #[cfg(feature = "sha256_media")]
         {
-            file_path = services().globals.get_media_file_new(&key)
+            file_path = services().globals.get_media_file_new(&key);
         };
 
         #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental
         #[cfg(not(feature = "sha256_media"))]
         {
-            file_path = services().globals.get_media_file(&key)
+            file_path = services().globals.get_media_file(&key);
         };
 
         debug!("Got local file path: {:?}", file_path);
@@ -133,7 +134,7 @@ impl Service {
         #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental
         #[cfg(not(feature = "sha256_media"))]
         {
-            path = services().globals.get_media_file(&key)
+            path = services().globals.get_media_file(&key);
         };
 
         let mut f = File::create(path).await?;
@@ -150,13 +151,13 @@ impl Service {
         #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental
         #[cfg(feature = "sha256_media")]
         {
-            path = services().globals.get_media_file_new(&key)
+            path = services().globals.get_media_file_new(&key);
         };
 
         #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental
         #[cfg(not(feature = "sha256_media"))]
         {
-            path = services().globals.get_media_file(&key)
+            path = services().globals.get_media_file(&key);
         };
 
         let mut file = Vec::new();
@@ -236,7 +237,7 @@ impl Service {
         #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental
         #[cfg(not(feature = "sha256_media"))]
         {
-            path = services().globals.get_media_file(&key)
+            path = services().globals.get_media_file(&key);
         };
 
         debug!("MXC path: {:?}", path);
@@ -316,13 +317,13 @@ impl Service {
         #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental
         #[cfg(feature = "sha256_media")]
         {
-            path = services().globals.get_media_file_new(&key)
+            path = services().globals.get_media_file_new(&key);
         };
 
         #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental
         #[cfg(not(feature = "sha256_media"))]
         {
-            path = services().globals.get_media_file(&key)
+            path = services().globals.get_media_file(&key);
         };
 
         let mut file = Vec::new();
@@ -340,13 +341,13 @@ impl Service {
         #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental
         #[cfg(feature = "sha256_media")]
         {
-            path = services().globals.get_media_file_new(&key)
+            path = services().globals.get_media_file_new(&key);
         };
 
         #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental
         #[cfg(not(feature = "sha256_media"))]
         {
-            path = services().globals.get_media_file(&key)
+            path = services().globals.get_media_file(&key);
         };
 
         let mut file = Vec::new();
@@ -427,13 +428,13 @@ impl Service {
         #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental
         #[cfg(feature = "sha256_media")]
         {
-            path = services().globals.get_media_file_new(&thumbnail_key)
+            path = services().globals.get_media_file_new(&thumbnail_key);
         };
 
         #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental
         #[cfg(not(feature = "sha256_media"))]
         {
-            path = services().globals.get_media_file(&thumbnail_key)
+            path = services().globals.get_media_file(&thumbnail_key);
         };
 
         let mut f = File::create(path).await?;
@@ -45,17 +45,20 @@ impl Service {
     async fn typings_maintain(&self, room_id: &RoomId) -> Result<()> {
         let current_timestamp = utils::millis_since_unix_epoch();
         let mut removable = Vec::new();
+
         {
             let typing = self.typing.read().await;
             let Some(room) = typing.get(room_id) else {
                 return Ok(());
             };
+
             for (user, timeout) in room {
                 if *timeout < current_timestamp {
                     removable.push(user.clone());
                 }
             }
-            drop(typing)
+
+            drop(typing);
         };
 
         if !removable.is_empty() {
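
Besides the added semicolon on `drop(typing);`, the blank lines make the lock scoping easier to read: the read guard is dropped explicitly before the function goes on to mutate state. A minimal standalone sketch of the drop-the-guard pattern (std `RwLock` here for illustration; the service uses an async lock):

    use std::sync::RwLock;

    fn main() {
        let lock = RwLock::new(vec![1u64, 2, 3]);

        let guard = lock.read().unwrap();
        let len = guard.len();
        drop(guard); // explicitly release the read lock...

        lock.write().unwrap().push(len as u64); // ...before taking the write lock
    }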
@@ -1491,14 +1491,13 @@ impl Service {
         let permit =
             services().globals.servername_ratelimiter.read().await.get(origin).map(|s| Arc::clone(s).acquire_owned());
 
-        let permit = match permit {
-            Some(p) => p,
-            None => {
-                let mut write = services().globals.servername_ratelimiter.write().await;
-                let s = Arc::clone(write.entry(origin.to_owned()).or_insert_with(|| Arc::new(Semaphore::new(1))));
-
-                s.acquire_owned()
-            },
+        let permit = if let Some(p) = permit {
+            p
+        } else {
+            let mut write = services().globals.servername_ratelimiter.write().await;
+            let s = Arc::clone(write.entry(origin.to_owned()).or_insert_with(|| Arc::new(Semaphore::new(1))));
+
+            s.acquire_owned()
         }
         .await;
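
The permit acquisition above turns a two-arm `match` over an `Option` into an `if let ... else` expression that yields the same value, the kind of rewrite clippy's pedantic lints (e.g. `single_match_else`) push toward. A reduced, hypothetical stand-in for the semaphore logic:

    fn acquire(existing: Option<u32>) -> u32 {
        // Before:
        // match existing {
        //     Some(p) => p,
        //     None => { /* create a fresh permit */ 1 },
        // }

        // After: same value, expressed as if-let / else.
        if let Some(p) = existing {
            p
        } else {
            1 // create a fresh permit
        }
    }

    fn main() {
        assert_eq!(acquire(Some(7)), 7);
        assert_eq!(acquire(None), 1);
    }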
@@ -68,7 +68,7 @@ impl Service {
                 })
             })
             .take(limit)
-            .filter_map(std::result::Result::ok) // Filter out buggy events
+            .filter_map(Result::ok) // Filter out buggy events
             .filter(|(_, pdu)| {
                 services()
                     .rooms
@@ -113,7 +113,7 @@ impl Service {
                 })
             })
             .take(limit)
-            .filter_map(std::result::Result::ok) // Filter out buggy events
+            .filter_map(Result::ok) // Filter out buggy events
             .filter(|(_, pdu)| {
                 services()
                     .rooms
@@ -109,7 +109,7 @@ impl Arena {
             if let Some(next) = self.next_sibling(current) {
                 current = next;
             } else if let Some(parent) = self.parent(current) {
-                current = parent
+                current = parent;
             } else {
                 break;
             }
@@ -148,7 +148,7 @@ impl Arena {
         )];
 
         while let Some(parent) = self.parent(parents.last().expect("Has at least one value, as above").0) {
-            parents.push((parent, self.get(parent).expect("It is some, as above").room_id.clone()))
+            parents.push((parent, self.get(parent).expect("It is some, as above").room_id.clone()));
         }
 
         // If at max_depth, don't add new rooms
@@ -607,9 +607,9 @@ impl Service {
                     arena.push(current_room, children);
 
                     if left_to_skip > 0 {
-                        left_to_skip -= 1
+                        left_to_skip -= 1;
                     } else {
-                        results.push(summary_to_chunk(*summary.clone()))
+                        results.push(summary_to_chunk(*summary.clone()));
                     }
                 }
             } else {
@@ -111,7 +111,7 @@ impl Service {
             .rooms
             .state_cache
             .room_members(room_id)
-            .filter_map(std::result::Result::ok)
+            .filter_map(Result::ok)
             .filter(|member| member.server_name() == origin);
 
         let visibility = match history_visibility {
@@ -37,6 +37,7 @@ use super::state_compressor::CompressedStateEvent;
 use crate::{
     api::server_server,
     service::{
+        self,
         appservice::NamespaceRegex,
         pdu::{EventHash, PduBuilder},
     },
@@ -476,7 +477,7 @@ impl Service {
         // the administrator can execute commands as conduit
         let from_conduit = pdu.sender == server_user && services().globals.emergency_password().is_none();
 
-        if let Some(admin_room) = services().admin.get_admin_room()? {
+        if let Some(admin_room) = service::admin::Service::get_admin_room()? {
             if to_conduit && !from_conduit && admin_room == pdu.room_id {
                 services().admin.process_message(body, pdu.event_id.clone());
             }
@@ -540,7 +541,7 @@ impl Service {
                 .rooms
                 .alias
                 .local_aliases_for_room(&pdu.room_id)
-                .filter_map(std::result::Result::ok)
+                .filter_map(Result::ok)
                 .any(|room_alias| aliases.is_match(room_alias.as_str()))
         };
 
@@ -721,7 +722,7 @@ impl Service {
     ) -> Result<Arc<EventId>> {
         let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?;
 
-        if let Some(admin_room) = services().admin.get_admin_room()? {
+        if let Some(admin_room) = service::admin::Service::get_admin_room()? {
             if admin_room == room_id {
                 match pdu.event_type() {
                     TimelineEventType::RoomEncryption => {
@@ -756,7 +757,7 @@ impl Service {
                             .rooms
                             .state_cache
                             .room_members(room_id)
-                            .filter_map(std::result::Result::ok)
+                            .filter_map(Result::ok)
                             .filter(|m| m.server_name() == server_name)
                             .filter(|m| m != target)
                             .count();
@@ -782,7 +783,7 @@ impl Service {
                             .rooms
                             .state_cache
                             .room_members(room_id)
-                            .filter_map(std::result::Result::ok)
+                            .filter_map(Result::ok)
                             .filter(|m| m.server_name() == server_name)
                             .filter(|m| m != target)
                             .count();
@@ -821,7 +822,7 @@ impl Service {
         services().rooms.state.set_room_state(room_id, statehashid, state_lock)?;
 
         let mut servers: HashSet<OwnedServerName> =
-            services().rooms.state_cache.room_servers(room_id).filter_map(std::result::Result::ok).collect();
+            services().rooms.state_cache.room_servers(room_id).filter_map(Result::ok).collect();
 
         // In case we are kicking or banning a user, we need to inform their server of
         // the change
@@ -126,7 +126,7 @@ impl Service {
         // Retry requests we could not finish yet
         let mut initial_transactions = HashMap::<OutgoingKind, Vec<SendingEventType>>::new();
 
-        for (key, outgoing_kind, event) in self.db.active_requests().filter_map(std::result::Result::ok) {
+        for (key, outgoing_kind, event) in self.db.active_requests().filter_map(Result::ok) {
             let entry = initial_transactions.entry(outgoing_kind.clone()).or_default();
 
             if entry.len() > 30 {
@@ -152,7 +152,7 @@ impl Service {
                 self.db.delete_all_active_requests_for(&outgoing_kind)?;
 
                 // Find events that have been added since starting the last request
-                let new_events = self.db.queued_requests(&outgoing_kind).filter_map(std::result::Result::ok).take(30).collect::<Vec<_>>();
+                let new_events = self.db.queued_requests(&outgoing_kind).filter_map(Result::ok).take(30).collect::<Vec<_>>();
 
                 if !new_events.is_empty() {
                     // Insert pdus we found
@@ -236,7 +236,7 @@ impl Service {
 
         if retry {
             // We retry the previous transaction
-            for (_, e) in self.db.active_requests_for(outgoing_kind).filter_map(std::result::Result::ok) {
+            for (_, e) in self.db.active_requests_for(outgoing_kind).filter_map(Result::ok) {
                 events.push(e);
             }
         } else {
@@ -274,7 +274,7 @@ impl Service {
                     services()
                         .users
                         .keys_changed(room_id.as_ref(), since, None)
-                        .filter_map(std::result::Result::ok)
+                        .filter_map(Result::ok)
                         .filter(|user_id| user_id.server_name() == services().globals.server_name()),
                 );
 
@@ -409,7 +409,7 @@ impl Service {
     }
 
     #[tracing::instrument(skip(self, server, serialized))]
-    pub fn send_reliable_edu(&self, server: &ServerName, serialized: Vec<u8>, id: u64) -> Result<()> {
+    pub fn send_reliable_edu(&self, server: &ServerName, serialized: Vec<u8>, _id: u64) -> Result<()> {
         let outgoing_kind = OutgoingKind::Normal(server.to_owned());
         let event = SendingEventType::Edu(serialized);
         let _cork = services().globals.db.cork()?;
@@ -433,7 +433,7 @@ impl Service {
     #[tracing::instrument(skip(self, room_id))]
     pub fn flush_room(&self, room_id: &RoomId) -> Result<()> {
         let servers: HashSet<OwnedServerName> =
-            services().rooms.state_cache.room_servers(room_id).filter_map(std::result::Result::ok).collect();
+            services().rooms.state_cache.room_servers(room_id).filter_map(Result::ok).collect();
 
         self.flush_servers(servers.into_iter())
     }
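
In `send_reliable_edu` above, the `id` parameter stays in the signature but is never read in the body, so it becomes `_id`: a leading underscore tells rustc (and clippy) the parameter is intentionally unused, without changing the function's arity for callers. Sketch with a hypothetical signature:

    fn send_reliable_edu(server: &str, serialized: Vec<u8>, _id: u64) {
        println!("queueing {} bytes for {server}", serialized.len());
    }

    fn main() {
        send_reliable_edu("example.org", vec![1, 2, 3], 42);
    }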