Merge together pending feature branches

Temporary branch for those who wish to use pending/wip features
Nyaaori 2022-12-16 08:57:35 +01:00
No known key found for this signature in database
GPG key ID: E7819C3ED4D1F82E
33 changed files with 1874 additions and 394 deletions

Cargo.lock generated
View file

@@ -372,7 +372,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b"
 [[package]]
 name = "conduit"
-version = "0.4.0-next"
+version = "0.4.0-experinyantal"
 dependencies = [
  "async-trait",
  "axum",

View file

@@ -6,7 +6,7 @@ authors = ["timokoesters <timo@koesters.xyz>"]
 homepage = "https://conduit.rs"
 repository = "https://gitlab.com/famedly/conduit"
 readme = "README.md"
-version = "0.4.0-next"
+version = "0.4.0-experinyantal"
 rust-version = "1.64"
 edition = "2021"

View file

@@ -38,8 +38,12 @@ max_request_size = 20_000_000 # in bytes
 # Enables registration. If set to false, no users can register on this server.
 allow_registration = true
 
+# Enables federation. If set to false, this server will not federate with others (rooms from other servers will not be available).
 allow_federation = true
+
+# Enables presence. If set to false, the presence of users (whether they are online, idle or offline) will not be shown or processed.
+allow_presence = true
 
 # Enable the display name lightning bolt on registration.
 enable_lightning_bolt = true

View file

@@ -595,7 +595,7 @@ async fn join_room_by_id_helper(
             }
         };
 
-        if &signed_event_id != event_id {
+        if signed_event_id != event_id {
             return Err(Error::BadRequest(
                 ErrorKind::InvalidParam,
                 "Server sent event with wrong event id",
@@ -916,7 +916,7 @@ async fn join_room_by_id_helper(
             }
         };
 
-        if &signed_event_id != event_id {
+        if signed_event_id != event_id {
             return Err(Error::BadRequest(
                 ErrorKind::InvalidParam,
                 "Server sent event with wrong event id",

View file

@@ -20,6 +20,7 @@ mod report;
 mod room;
 mod search;
 mod session;
+mod space;
 mod state;
 mod sync;
 mod tag;
@@ -52,6 +53,7 @@ pub use report::*;
 pub use room::*;
 pub use search::*;
 pub use session::*;
+pub use space::*;
 pub use state::*;
 pub use sync::*;
 pub use tag::*;

View file

@ -1,5 +1,9 @@
use crate::{services, utils, Result, Ruma}; use crate::{services, Result, Ruma};
use ruma::api::client::presence::{get_presence, set_presence}; use ruma::{
api::client::presence::{get_presence, set_presence},
presence::PresenceState,
uint,
};
use std::time::Duration; use std::time::Duration;
/// # `PUT /_matrix/client/r0/presence/{userId}/status` /// # `PUT /_matrix/client/r0/presence/{userId}/status`
@ -21,16 +25,13 @@ pub async fn set_presence_route(
avatar_url: services().users.avatar_url(sender_user)?, avatar_url: services().users.avatar_url(sender_user)?,
currently_active: None, currently_active: None,
displayname: services().users.displayname(sender_user)?, displayname: services().users.displayname(sender_user)?,
last_active_ago: Some( last_active_ago: Some(uint!(0)),
utils::millis_since_unix_epoch()
.try_into()
.expect("time is valid"),
),
presence: body.presence.clone(), presence: body.presence.clone(),
status_msg: body.status_msg.clone(), status_msg: body.status_msg.clone(),
}, },
sender: sender_user.clone(), sender: sender_user.clone(),
}, },
true,
)?; )?;
} }
@ -60,7 +61,7 @@ pub async fn get_presence_route(
.rooms .rooms
.edus .edus
.presence .presence
.get_last_presence_event(sender_user, &room_id)? .get_presence_event(sender_user, &room_id)?
{ {
presence_event = Some(presence); presence_event = Some(presence);
break; break;
@ -69,7 +70,6 @@ pub async fn get_presence_route(
if let Some(presence) = presence_event { if let Some(presence) = presence_event {
Ok(get_presence::v3::Response { Ok(get_presence::v3::Response {
// TODO: Should ruma just use the presenceeventcontent type here?
status_msg: presence.content.status_msg, status_msg: presence.content.status_msg,
currently_active: presence.content.currently_active, currently_active: presence.content.currently_active,
last_active_ago: presence last_active_ago: presence
@ -79,6 +79,6 @@ pub async fn get_presence_route(
presence: presence.content.presence, presence: presence.content.presence,
}) })
} else { } else {
todo!(); Ok(get_presence::v3::Response::new(PresenceState::Offline))
} }
} }

View file

@@ -109,6 +109,7 @@ pub async fn set_displayname_route(
             },
             sender: sender_user.clone(),
         },
+        true,
     )?;
 }
@@ -244,6 +245,7 @@ pub async fn set_avatar_url_route(
             },
             sender: sender_user.clone(),
         },
+        true,
     )?;
 }

View file

@@ -34,29 +34,34 @@ pub async fn set_read_marker_route(
         )?;
     }
 
+    if body.private_read_receipt.is_some() || body.read_receipt.is_some() {
+        services()
+            .rooms
+            .user
+            .reset_notification_counts(sender_user, &body.room_id)?;
+    }
+
     if let Some(event) = &body.private_read_receipt {
-        services().rooms.edus.read_receipt.private_read_set(
-            &body.room_id,
-            sender_user,
-            services()
+        let _pdu = services()
             .rooms
             .timeline
-            .get_pdu_count(event)?
+            .get_pdu(event)?
             .ok_or(Error::BadRequest(
                 ErrorKind::InvalidParam,
                 "Event does not exist.",
-            ))?,
+            ))?;
+
+        services().rooms.edus.read_receipt.private_read_set(
+            &body.room_id,
+            sender_user,
+            services().rooms.short.get_or_create_shorteventid(event)?,
         )?;
     }
 
     if let Some(event) = &body.read_receipt {
+        let _pdu = services()
+            .rooms
+            .timeline
+            .get_pdu(event)?
+            .ok_or(Error::BadRequest(
+                ErrorKind::InvalidParam,
+                "Event does not exist.",
+            ))?;
+
+        if services().globals.allow_public_read_receipts() {
         let mut user_receipts = BTreeMap::new();
         user_receipts.insert(
             sender_user.clone(),
@@ -80,6 +85,12 @@ pub async fn set_read_marker_route(
                 room_id: body.room_id.clone(),
             },
         )?;
+        };
+
+        services().rooms.edus.read_receipt.private_read_set(
+            &body.room_id,
+            sender_user,
+            services().rooms.short.get_or_create_shorteventid(event)?,
+        )?;
     }
 
     Ok(set_read_marker::v3::Response {})
@@ -93,16 +104,6 @@ pub async fn create_receipt_route(
 ) -> Result<create_receipt::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
-    if matches!(
-        &body.receipt_type,
-        create_receipt::v3::ReceiptType::Read | create_receipt::v3::ReceiptType::ReadPrivate
-    ) {
-        services()
-            .rooms
-            .user
-            .reset_notification_counts(sender_user, &body.room_id)?;
-    }
-
     match body.receipt_type {
         create_receipt::v3::ReceiptType::FullyRead => {
             let fully_read_event = ruma::events::fully_read::FullyReadEvent {
@@ -118,6 +119,17 @@ pub async fn create_receipt_route(
             )?;
         }
         create_receipt::v3::ReceiptType::Read => {
+            let _pdu =
+                services()
+                    .rooms
+                    .timeline
+                    .get_pdu(&body.event_id)?
+                    .ok_or(Error::BadRequest(
+                        ErrorKind::InvalidParam,
+                        "Event does not exist.",
+                    ))?;
+
+            if services().globals.allow_public_read_receipts() {
             let mut user_receipts = BTreeMap::new();
             user_receipts.insert(
                 sender_user.clone(),
@@ -140,19 +152,34 @@ pub async fn create_receipt_route(
                     room_id: body.room_id.clone(),
                 },
             )?;
-        }
-        create_receipt::v3::ReceiptType::ReadPrivate => {
+            };
+
             services().rooms.edus.read_receipt.private_read_set(
                 &body.room_id,
                 sender_user,
+                services()
+                    .rooms
+                    .short
+                    .get_or_create_shorteventid(&body.event_id)?,
+            )?;
+        }
+        create_receipt::v3::ReceiptType::ReadPrivate => {
+            let _pdu =
                 services()
                     .rooms
                     .timeline
-                    .get_pdu_count(&body.event_id)?
+                    .get_pdu(&body.event_id)?
                     .ok_or(Error::BadRequest(
                         ErrorKind::InvalidParam,
                         "Event does not exist.",
-                    ))?,
+                    ))?;
+
+            services().rooms.edus.read_receipt.private_read_set(
+                &body.room_id,
+                sender_user,
+                services()
+                    .rooms
+                    .short
+                    .get_or_create_shorteventid(&body.event_id)?,
             )?;
         }
         _ => return Err(Error::bad_database("Unsupported receipt type")),

View file

@ -0,0 +1,275 @@
use std::{collections::HashSet, sync::Arc};
use crate::{services, Error, PduEvent, Result, Ruma};
use ruma::{
api::client::{
error::ErrorKind,
space::{get_hierarchy, SpaceHierarchyRoomsChunk, SpaceRoomJoinRule},
},
events::{
room::{
avatar::RoomAvatarEventContent,
canonical_alias::RoomCanonicalAliasEventContent,
create::RoomCreateEventContent,
guest_access::{GuestAccess, RoomGuestAccessEventContent},
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
join_rules::{JoinRule, RoomJoinRulesEventContent},
name::RoomNameEventContent,
topic::RoomTopicEventContent,
},
space::child::{HierarchySpaceChildEvent, SpaceChildEventContent},
StateEventType,
},
serde::Raw,
MilliSecondsSinceUnixEpoch, OwnedRoomId, RoomId,
};
use serde_json;
use tracing::warn;
/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy`
///
/// Paginates over the space tree in a depth-first manner to locate child rooms of a given space.
///
/// - TODO: Use federation for unknown rooms.
///
pub async fn get_hierarchy_route(
body: Ruma<get_hierarchy::v1::IncomingRequest>,
) -> Result<get_hierarchy::v1::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
// Check if room is world readable
let is_world_readable = services()
.rooms
.state_accessor
.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")?
.map_or(Ok(false), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomHistoryVisibilityEventContent| {
c.history_visibility == HistoryVisibility::WorldReadable
})
.map_err(|_| {
Error::bad_database("Invalid room history visibility event in database.")
})
})
.unwrap_or(false);
// Reject if user not in room and not world readable
if !services()
.rooms
.state_cache
.is_joined(sender_user, &body.room_id)?
&& !is_world_readable
{
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"You don't have permission to view this room.",
));
}
// from format is '{suggested_only}|{max_depth}|{skip}'
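    // e.g. a `from` token of "true|4|10" (hypothetical values) resumes a
    // suggested-only walk with max_depth 4, skipping the first 10 rooms.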
let (suggested_only, max_depth, start) = body
.from
.as_ref()
.map_or(
Some((
body.suggested_only,
body.max_depth
.map_or(services().globals.hierarchy_max_depth(), |v| v.into())
.min(services().globals.hierarchy_max_depth()),
0,
)),
|from| {
let mut p = from.split('|');
Some((
p.next()?.trim().parse().ok()?,
p.next()?
.trim()
.parse::<u64>()
.ok()?
.min(services().globals.hierarchy_max_depth()),
p.next()?.trim().parse().ok()?,
))
},
)
.ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid from"))?;
let limit = body.limit.map_or(20u64, |v| v.into()) as usize;
let mut skip = start;
    // Set used to avoid visiting the same room twice in the loop.
let mut room_set = HashSet::new();
let mut rooms_chunk: Vec<SpaceHierarchyRoomsChunk> = vec![];
let mut stack = vec![(0, body.room_id.clone())];
while let (Some((depth, room_id)), true) = (stack.pop(), rooms_chunk.len() < limit) {
        let (children, pdus): (Vec<_>, Vec<_>) = services()
.rooms
.state_accessor
.room_state_full(&room_id)
.await?
.into_iter()
.filter_map(|((e_type, key), pdu)| {
(e_type == StateEventType::SpaceChild && !room_set.contains(&room_id))
.then_some((key, pdu))
})
.unzip();
if skip == 0 {
if rooms_chunk.len() < limit {
room_set.insert(room_id.clone());
if let Ok(chunk) = get_room_chunk(room_id, suggested_only, pdus).await {
rooms_chunk.push(chunk)
};
}
} else {
skip -= 1;
}
if depth < max_depth {
            children.into_iter().rev().for_each(|key| {
stack.push((depth + 1, RoomId::parse(key).unwrap()));
});
}
}
Ok(get_hierarchy::v1::Response {
next_batch: (!stack.is_empty()).then_some(format!(
"{}|{}|{}",
suggested_only,
max_depth,
start + limit
)),
rooms: rooms_chunk,
})
}
async fn get_room_chunk(
room_id: OwnedRoomId,
suggested_only: bool,
pdus: Vec<Arc<PduEvent>>,
) -> Result<SpaceHierarchyRoomsChunk> {
Ok(SpaceHierarchyRoomsChunk {
canonical_alias: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")
.ok()
.and_then(|s| {
serde_json::from_str(s?.content.get())
.map(|c: RoomCanonicalAliasEventContent| c.alias)
.ok()?
}),
name: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomName, "")
.ok()
.flatten()
.and_then(|s| {
serde_json::from_str(s.content.get())
.map(|c: RoomNameEventContent| c.name)
.ok()?
}),
num_joined_members: services()
.rooms
.state_cache
.room_joined_count(&room_id)?
.unwrap_or_else(|| {
warn!("Room {} has no member count", &room_id);
0
})
.try_into()
.expect("user count should not be that big"),
topic: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomTopic, "")
.ok()
.and_then(|s| {
serde_json::from_str(s?.content.get())
.ok()
.map(|c: RoomTopicEventContent| c.topic)
}),
world_readable: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
.map_or(Ok(false), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomHistoryVisibilityEventContent| {
c.history_visibility == HistoryVisibility::WorldReadable
})
.map_err(|_| {
Error::bad_database("Invalid room history visibility event in database.")
})
})?,
guest_can_join: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
.map_or(Ok(false), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomGuestAccessEventContent| c.guest_access == GuestAccess::CanJoin)
.map_err(|_| {
Error::bad_database("Invalid room guest access event in database.")
})
})?,
avatar_url: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomAvatar, "")
.ok()
.and_then(|s| {
serde_json::from_str(s?.content.get())
.map(|c: RoomAvatarEventContent| c.url)
.ok()?
}),
join_rule: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
.map(|s| {
serde_json::from_str(s.content.get())
.map(|c: RoomJoinRulesEventContent| match c.join_rule {
JoinRule::Invite => SpaceRoomJoinRule::Invite,
JoinRule::Knock => SpaceRoomJoinRule::Knock,
JoinRule::KnockRestricted(_) => SpaceRoomJoinRule::KnockRestricted,
JoinRule::Private => SpaceRoomJoinRule::Private,
JoinRule::Public => SpaceRoomJoinRule::Public,
JoinRule::Restricted(_) => SpaceRoomJoinRule::Restricted,
JoinRule::_Custom(_) => SpaceRoomJoinRule::from(c.join_rule.as_str()),
})
.map_err(|_| Error::bad_database("Invalid room join rules event in database."))
})
.ok_or_else(|| Error::bad_database("Invalid room join rules event in database."))??,
room_type: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomCreate, "")
.map(|s| {
serde_json::from_str(s?.content.get())
.map(|c: RoomCreateEventContent| c.room_type)
.ok()?
})
.ok()
.flatten(),
children_state: pdus
.into_iter()
.flat_map(|pdu| {
Some(HierarchySpaceChildEvent {
// Ignore unsuggested rooms if suggested_only is set
content: serde_json::from_str(pdu.content.get()).ok().filter(
|pdu: &SpaceChildEventContent| {
!suggested_only || pdu.suggested.unwrap_or(false)
},
)?,
sender: pdu.sender.clone(),
state_key: pdu.state_key.clone()?,
origin_server_ts: MilliSecondsSinceUnixEpoch(pdu.origin_server_ts),
})
})
.filter_map(|hsce| Raw::<HierarchySpaceChildEvent>::new(&hsce).ok())
.collect::<Vec<_>>(),
room_id,
})
}

View file

@@ -6,6 +6,7 @@ use ruma::{
         uiaa::UiaaResponse,
     },
     events::{
+        receipt::{ReceiptThread, ReceiptType},
         room::member::{MembershipState, RoomMemberEventContent},
         RoomEventType, StateEventType,
     },
@@ -166,7 +167,11 @@ async fn sync_helper(
     };
 
     // TODO: match body.set_presence {
-    services().rooms.edus.presence.ping_presence(&sender_user)?;
+    services()
+        .rooms
+        .edus
+        .presence
+        .ping_presence(&sender_user, false, true, true)?;
 
     // Setup watchers, so if there's no response, we can wait for them
     let watcher = services().globals.watch(&sender_user, &sender_device);
@@ -731,6 +736,50 @@ async fn sync_helper(
            .map(|(_, _, v)| v)
            .collect();
if services()
.rooms
.edus
.read_receipt
.last_privateread_update(&sender_user, &room_id)
.unwrap_or(0)
> since
{
if let Ok(event_id) = services().rooms.short.get_eventid_from_short(
services()
.rooms
.edus
.read_receipt
.private_read_get(&room_id, &sender_user)
.expect("User did not have a valid private read receipt?")
.expect("User had a last read private receipt update but no receipt?"),
) {
let mut user_receipts = BTreeMap::new();
user_receipts.insert(
sender_user.clone(),
ruma::events::receipt::Receipt {
ts: None,
thread: ReceiptThread::Unthreaded,
},
);
let mut receipts = BTreeMap::new();
receipts.insert(ReceiptType::ReadPrivate, user_receipts);
let mut receipt_content = BTreeMap::new();
receipt_content.insert((*event_id).to_owned(), receipts);
edus.push(
serde_json::from_str(
&serde_json::to_string(&ruma::events::SyncEphemeralRoomEvent {
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
})
.expect("Did not get valid JSON?"),
)
.expect("JSON was somehow invalid despite just being created"),
);
}
};
        if services().rooms.edus.typing.last_typing_update(&room_id)? > since {
            edus.push(
                serde_json::from_str(

View file

@@ -24,7 +24,11 @@ pub async fn get_supported_versions_route(
             "v1.1".to_owned(),
             "v1.2".to_owned(),
         ],
-        unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
+        unstable_features: BTreeMap::from_iter([
+            ("org.matrix.e2e_cross_signing".to_owned(), true),
+            ("org.matrix.msc3827.stable".to_owned(), true),
+            ("org.matrix.msc2285.stable".to_owned(), true),
+        ]),
     };
 
     Ok(resp)

View file

@@ -12,6 +12,7 @@ use ruma::{
         client::error::{Error as RumaError, ErrorKind},
         federation::{
             authorization::get_event_authorization,
+            backfill::get_backfill,
             device::get_devices::{self, v1::UserDevice},
             directory::{get_public_rooms, get_public_rooms_filtered},
             discovery::{get_server_keys, get_server_version, ServerSigningKeys, VerifyKey},
@@ -33,6 +34,7 @@ use ruma::{
     },
     directory::{IncomingFilter, IncomingRoomNetwork},
     events::{
+        presence::{PresenceEvent, PresenceEventContent},
         receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType},
         room::{
             join_rules::{JoinRule, RoomJoinRulesEventContent},
@@ -43,11 +45,11 @@ use ruma::{
     serde::{Base64, JsonObject, Raw},
     to_device::DeviceIdOrAllDevices,
     CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId,
-    OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName,
+    OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName, UInt,
 };
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
 use std::{
-    collections::BTreeMap,
+    collections::{BTreeMap, HashSet, VecDeque},
     fmt::Debug,
     mem,
     net::{IpAddr, SocketAddr},
@@ -753,8 +755,36 @@ pub async fn send_transaction_message_route(
         .filter_map(|edu| serde_json::from_str::<Edu>(edu.json().get()).ok())
     {
         match edu {
-            Edu::Presence(_) => {}
+            Edu::Presence(presence) => {
for presence_update in presence.push {
let user_id = presence_update.user_id;
for room_id in services()
.rooms
.state_cache
.rooms_joined(&user_id)
.filter_map(|room_id| room_id.ok())
{
services().rooms.edus.presence.update_presence(
&user_id,
&room_id,
PresenceEvent {
content: PresenceEventContent {
avatar_url: services().users.avatar_url(&user_id)?,
currently_active: Some(presence_update.currently_active),
displayname: services().users.displayname(&user_id)?,
last_active_ago: Some(presence_update.last_active_ago),
presence: presence_update.presence.clone(),
status_msg: presence_update.status_msg.clone(),
},
sender: user_id.clone(),
},
true,
)?;
}
}
}
             Edu::Receipt(receipt) => {
+                if services().globals.allow_receiving_read_receipts() {
                 for (room_id, room_updates) in receipt.receipts {
                     for (user_id, user_updates) in room_updates.read {
                         if let Some((event_id, _)) = user_updates
@@ -796,6 +826,7 @@ pub async fn send_transaction_message_route(
                         }
                     }
                 }
+                }
             }
             Edu::Typing(typing) => {
                 if services()
                     .rooms
@@ -959,6 +990,58 @@ pub async fn get_event_route(
     })
 }
/// # `GET /_matrix/federation/v1/backfill/<room_id>`
///
/// Retrieves events from before the sender joined the room, if the room's
/// history visibility allows.
pub async fn get_backfill_route(
body: Ruma<get_backfill::v1::IncomingRequest>,
) -> Result<get_backfill::v1::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
let sender_servername = body
.sender_servername
.as_ref()
.expect("server is authenticated");
info!("Got backfill request from: {}", sender_servername);
if !services()
.rooms
.state_cache
.server_in_room(sender_servername, &body.room_id)?
{
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Server is not in room.",
));
}
services()
.rooms
.event_handler
.acl_check(sender_servername, &body.room_id)?;
let origin = services().globals.server_name().to_owned();
let earliest_events = &[];
let events = get_missing_events(
sender_servername,
&body.room_id,
earliest_events,
&body.v,
body.limit,
)?;
Ok(get_backfill::v1::Response {
origin,
origin_server_ts: MilliSecondsSinceUnixEpoch::now(),
pdus: events,
})
}
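// Note: `earliest_events` is intentionally empty above, so the backfill walk from
// `body.v` stops only at the requested limit rather than at a known-events boundary.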
 /// # `POST /_matrix/federation/v1/get_missing_events/{roomId}`
 ///
 /// Retrieves events that the sender is missing.
@@ -990,52 +1073,197 @@ pub async fn get_missing_events_route(
         .event_handler
         .acl_check(sender_servername, &body.room_id)?;
 
-    let mut queued_events = body.latest_events.clone();
-    let mut events = Vec::new();
-
-    let mut i = 0;
-    while i < queued_events.len() && events.len() < u64::from(body.limit) as usize {
-        if let Some(pdu) = services().rooms.timeline.get_pdu_json(&queued_events[i])? {
-            let room_id_str = pdu
-                .get("room_id")
-                .and_then(|val| val.as_str())
-                .ok_or_else(|| Error::bad_database("Invalid event in database"))?;
-
-            let event_room_id = <&RoomId>::try_from(room_id_str)
-                .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
-
-            if event_room_id != body.room_id {
-                warn!(
-                    "Evil event detected: Event {} found while searching in room {}",
-                    queued_events[i], body.room_id
-                );
-                return Err(Error::BadRequest(
-                    ErrorKind::InvalidParam,
-                    "Evil event detected",
-                ));
-            }
-
-            if body.earliest_events.contains(&queued_events[i]) {
-                i += 1;
-                continue;
-            }
-
-            queued_events.extend_from_slice(
-                &serde_json::from_value::<Vec<OwnedEventId>>(
-                    serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| {
-                        Error::bad_database("Event in db has no prev_events field.")
-                    })?)
-                    .expect("canonical json is valid json value"),
-                )
-                .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?,
-            );
-
-            events.push(PduEvent::convert_to_outgoing_federation_event(pdu));
-        }
-
-        i += 1;
-    }
+    let events = get_missing_events(
+        sender_servername,
+        &body.room_id,
+        &body.earliest_events,
+        &body.latest_events,
+        body.limit,
+    )?;
 
     Ok(get_missing_events::v1::Response { events })
 }
/// Fetch events starting from `latest_events`, going backwards
/// through each event's `prev_events` until reaching the `earliest_events`.
///
/// Used by the federation /backfill and /get_missing_events routes.
fn get_missing_events(
sender_servername: &ServerName,
room_id: &RoomId,
earliest_events: &[OwnedEventId],
latest_events: &[OwnedEventId],
limit: UInt,
) -> Result<Vec<Box<RawJsonValue>>> {
let (room_members, room_errors): (Vec<_>, Vec<_>) = services()
.rooms
.state_cache
.room_members(room_id)
.partition(Result::is_ok);
// Just log errors and continue with correct users
if !room_errors.is_empty() {
warn!(?room_id, "Some errors occurred when fetching room members");
}
let current_server_members: Vec<OwnedUserId> = room_members
.into_iter()
.map(Result::unwrap)
.filter(|member| member.server_name() == sender_servername)
.collect();
let event_filter = |event_id: &EventId| {
services()
.rooms
.state_accessor
.server_can_see_event(
sender_servername,
current_server_members.as_slice(),
event_id,
)
.unwrap_or_default()
};
let pdu_filter = |pdu: &CanonicalJsonObject| {
let event_room_id = pdu
.get("room_id")
.and_then(|val| val.as_str())
.and_then(|room_id_str| <&RoomId>::try_from(room_id_str).ok());
match event_room_id {
Some(event_room_id) => {
let valid_event = event_room_id == room_id;
if !valid_event {
                    error!(?room_id, ?event_room_id, "Evil event detected");
}
valid_event
}
None => {
error!(?pdu, "Can't extract valid `room_id` from pdu");
false
}
}
};
#[inline]
fn get_pdu(event: &EventId) -> Option<CanonicalJsonObject> {
services()
.rooms
.timeline
.get_pdu_json(event)
.unwrap_or_default()
}
let events = linearize_previous_events(
latest_events.iter().cloned(),
earliest_events.iter().cloned(),
limit,
get_pdu,
event_filter,
pdu_filter,
);
Ok(events)
}
/// Unwinds previous events by doing a breadth-first walk from given roots
///
/// # Arguments
///
/// * `roots`: Starting point to unwind event history
/// * `excluded`: Skipped events
/// * `limit`: How many events to extract
/// * `pdu_extractor`: Closure to extract PDU for given event_id, for example, from DB.
/// * `event_filter`: Closure to filter events by their visibility. It may or may not hit the DB.
/// * `pdu_filter`: Closure to get basic validation against malformed PDUs.
///
/// # Returns
///
/// The previous events for given roots, without any `excluded` events, up to the provided `limit`.
///
/// # Note
///
/// The Matrix specification («Server-Server API», section 8) makes no mention of previous events for excluded events.
/// Therefore, the algorithm below excludes **only** the events themselves, but still processes their history.
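/// For example, with roots = [A], A.prev_events = [B], B excluded, and
/// B.prev_events = [C], the walk returns [A, C]: B itself is skipped, but the
/// traversal still continues through B's history.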
fn linearize_previous_events<E, L, F, V, P>(
roots: E,
excluded: E,
limit: L,
pdu_extractor: P,
event_filter: F,
pdu_filter: V,
) -> Vec<Box<RawJsonValue>>
where
E: IntoIterator<Item = OwnedEventId>,
F: Fn(&EventId) -> bool,
L: Into<u64>,
V: Fn(&CanonicalJsonObject) -> bool,
P: Fn(&EventId) -> Option<CanonicalJsonObject>,
{
let limit = limit.into() as usize;
assert!(limit > 0, "Limit should be > 0");
#[inline]
fn get_previous_events(pdu: &CanonicalJsonObject) -> Option<Vec<OwnedEventId>> {
match pdu.get("prev_events") {
None => {
error!(?pdu, "A stored event has no 'prev_events' field");
None
}
Some(prev_events) => {
let val = prev_events.clone().into();
let events = serde_json::from_value::<Vec<OwnedEventId>>(val);
if let Err(error) = events {
error!(?prev_events, ?error, "Broken 'prev_events' field");
return None;
}
Some(events.unwrap_or_default())
}
}
}
let mut visited: HashSet<OwnedEventId> = Default::default();
let mut history: Vec<Box<RawJsonValue>> = Default::default();
let mut queue: VecDeque<OwnedEventId> = Default::default();
let excluded: HashSet<_> = excluded.into_iter().collect();
// Add all roots into processing queue
for root in roots {
queue.push_back(root);
}
while let Some(current_event) = queue.pop_front() {
// Return all collected events if reached limit
if history.len() >= limit {
return history;
}
// Skip an entire branch containing incorrect events
if !event_filter(&current_event) {
continue;
}
// Process PDU from a current event if it exists and valid
if let Some(pdu) = pdu_extractor(&current_event).filter(&pdu_filter) {
if !&excluded.contains(&current_event) {
history.push(PduEvent::convert_to_outgoing_federation_event(pdu.clone()));
}
            // Fetch previous events, if they exist
if let Some(previous_events) = get_previous_events(&pdu) {
for previous_event in previous_events {
if !visited.contains(&previous_event) {
visited.insert(previous_event.clone());
queue.push_back(previous_event);
}
}
}
}
}
// All done, return collected events
history
}
/// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}` /// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}`
/// ///
/// Retrieves the auth chain for a given event. /// Retrieves the auth chain for a given event.
@@ -1788,7 +2016,11 @@ pub async fn claim_keys_route(
 #[cfg(test)]
 mod tests {
-    use super::{add_port_to_hostname, get_ip_with_port, FedDest};
+    use super::{add_port_to_hostname, get_ip_with_port, linearize_previous_events, FedDest};
+    use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId};
+    use serde::{Deserialize, Serialize};
+    use serde_json::{value::RawValue, Value};
+    use std::collections::HashMap;
 
     #[test]
     fn ips_get_default_ports() {
@@ -1829,4 +2061,227 @@ mod tests {
             FedDest::Named(String::from("example.com"), String::from(":1337"))
         )
     }
type PduStorage = HashMap<OwnedEventId, CanonicalJsonObject>;
#[derive(Debug, Serialize, Deserialize)]
struct MockPDU {
content: i32,
prev_events: Vec<OwnedEventId>,
}
fn mock_event_id(id: &i32) -> OwnedEventId {
const DOMAIN: &str = "canterlot.eq";
<OwnedEventId>::try_from(format!("${id}:{DOMAIN}")).unwrap()
}
fn create_graph(data: Vec<(i32, Vec<i32>)>) -> PduStorage {
data.iter()
.map(|(head, tail)| {
let key = mock_event_id(head);
let pdu = MockPDU {
content: *head,
prev_events: tail.iter().map(mock_event_id).collect(),
};
let value = serde_json::to_value(pdu).unwrap();
let value: CanonicalJsonValue = value.try_into().unwrap();
(key, value.as_object().unwrap().to_owned())
})
.collect()
}
fn mock_full_graph() -> PduStorage {
/*
(1)
__________|___________
/ / \ \
(2) (3) (10) (11)
/ \ / \ | |
(4) (5) (6) (7) (12) (13)
| | |
(8) (9) (14)
\ /
(15)
|
(16)
*/
create_graph(vec![
(1, vec![2, 3, 10, 11]),
(2, vec![4, 5]),
(3, vec![6, 7]),
(4, vec![]),
(5, vec![8]),
(6, vec![9]),
(7, vec![]),
(8, vec![15]),
(9, vec![15]),
(10, vec![12]),
(11, vec![13]),
(12, vec![]),
(13, vec![14]),
(14, vec![]),
(15, vec![16]),
(16, vec![16]),
])
}
fn extract_events_payload(events: Vec<Box<RawValue>>) -> Vec<i32> {
events
.iter()
.map(|e| serde_json::from_str(e.get()).unwrap())
.map(|p: MockPDU| p.content)
.collect()
}
#[test]
fn backfill_empty() {
let events = linearize_previous_events(
vec![],
vec![],
16u64,
|_| unreachable!(),
|_| true,
|_| true,
);
assert!(events.is_empty());
}
#[test]
fn backfill_limit() {
/*
(5) (4) (3) (2) (1) ×
*/
let events = create_graph(vec![
(1, vec![]),
(2, vec![1]),
(3, vec![2]),
(4, vec![3]),
(5, vec![4]),
]);
let roots = vec![mock_event_id(&5)];
let result = linearize_previous_events(
roots,
vec![],
3u64,
|e| events.get(e).cloned(),
|_| true,
|_| true,
);
assert_eq!(extract_events_payload(result), vec![5, 4, 3])
}
#[test]
fn backfill_bfs() {
let events = mock_full_graph();
let roots = vec![mock_event_id(&1)];
let result = linearize_previous_events(
roots,
vec![],
100u64,
|e| events.get(e).cloned(),
|_| true,
|_| true,
);
assert_eq!(
extract_events_payload(result),
vec![1, 2, 3, 10, 11, 4, 5, 6, 7, 12, 13, 8, 9, 14, 15, 16]
)
}
#[test]
fn backfill_subgraph() {
let events = mock_full_graph();
let roots = vec![mock_event_id(&3)];
let result = linearize_previous_events(
roots,
vec![],
100u64,
|e| events.get(e).cloned(),
|_| true,
|_| true,
);
assert_eq!(extract_events_payload(result), vec![3, 6, 7, 9, 15, 16])
}
#[test]
fn backfill_two_roots() {
let events = mock_full_graph();
let roots = vec![mock_event_id(&3), mock_event_id(&11)];
let result = linearize_previous_events(
roots,
vec![],
100u64,
|e| events.get(e).cloned(),
|_| true,
|_| true,
);
assert_eq!(
extract_events_payload(result),
vec![3, 11, 6, 7, 13, 9, 14, 15, 16]
)
}
#[test]
fn backfill_exclude_events() {
let events = mock_full_graph();
let roots = vec![mock_event_id(&1)];
let excluded_events = vec![
mock_event_id(&14),
mock_event_id(&15),
mock_event_id(&16),
mock_event_id(&3),
];
let result = linearize_previous_events(
roots,
excluded_events,
100u64,
|e| events.get(e).cloned(),
|_| true,
|_| true,
);
assert_eq!(
extract_events_payload(result),
vec![1, 2, 10, 11, 4, 5, 6, 7, 12, 13, 8, 9]
)
}
#[test]
fn backfill_exclude_branch_with_evil_event() {
let events = mock_full_graph();
let roots = vec![mock_event_id(&1)];
let result = linearize_previous_events(
roots,
vec![],
100u64,
|e| events.get(e).cloned(),
|_| true,
|e| {
let value: Value = CanonicalJsonValue::Object(e.clone()).into();
let pdu: MockPDU = serde_json::from_value(value).unwrap();
pdu.content != 3
},
);
assert_eq!(
extract_events_payload(result),
vec![1, 2, 10, 11, 4, 5, 12, 13, 8, 14, 15, 16]
)
}
#[test]
fn backfill_exclude_branch_with_inaccessible_event() {
let events = mock_full_graph();
let roots = vec![mock_event_id(&1)];
let result = linearize_previous_events(
roots,
vec![],
100u64,
|e| events.get(e).cloned(),
|e| e != mock_event_id(&3),
|_| true,
);
assert_eq!(
extract_events_payload(result),
vec![1, 2, 10, 11, 4, 5, 12, 13, 8, 14, 15, 16]
)
}
}

View file

@@ -40,6 +40,8 @@ pub struct Config {
     pub max_request_size: u32,
     #[serde(default = "default_max_concurrent_requests")]
     pub max_concurrent_requests: u16,
+    #[serde(default = "default_max_fetch_prev_events")]
+    pub max_fetch_prev_events: u16,
     #[serde(default = "false_fn")]
     pub allow_registration: bool,
     #[serde(default = "true_fn")]
@@ -47,11 +49,17 @@ pub struct Config {
     #[serde(default = "false_fn")]
     pub allow_federation: bool,
     #[serde(default = "true_fn")]
+    pub allow_public_read_receipts: bool,
+    #[serde(default = "true_fn")]
+    pub allow_receiving_read_receipts: bool,
+    #[serde(default = "true_fn")]
     pub allow_room_creation: bool,
     #[serde(default = "true_fn")]
     pub allow_unstable_room_versions: bool,
     #[serde(default = "default_default_room_version")]
     pub default_room_version: RoomVersionId,
+    #[serde(default = "default_hierarchy_max_depth")]
+    pub hierarchy_max_depth: u64,
     #[serde(default = "false_fn")]
     pub allow_jaeger: bool,
     #[serde(default = "false_fn")]
@@ -76,6 +84,19 @@ pub struct Config {
     pub emergency_password: Option<String>,
 
+    #[serde(default = "true_fn")]
+    pub allow_presence: bool,
+
+    #[serde(default = "default_presence_idle_timeout")]
+    pub presence_idle_timeout: u64,
+
+    #[serde(default = "default_presence_offline_timeout")]
+    pub presence_offline_timeout: u64,
+
+    #[serde(default = "default_presence_cleanup_period")]
+    pub presence_cleanup_period: u64,
+
+    #[serde(default = "default_presence_cleanup_limit")]
+    pub presence_cleanup_limit: u64,
+
     #[serde(flatten)]
     pub catchall: BTreeMap<String, IgnoredAny>,
 }
@@ -249,6 +270,10 @@ fn default_max_concurrent_requests() -> u16 {
     100
 }
 
+fn default_max_fetch_prev_events() -> u16 {
+    100_u16
+}
+
 fn default_log() -> String {
     "warn,state_res=warn,_=off,sled=off".to_owned()
 }
@@ -257,7 +282,27 @@ fn default_turn_ttl() -> u64 {
     60 * 60 * 24
 }
 
+fn default_presence_idle_timeout() -> u64 {
+    60
+}
+
+fn default_presence_offline_timeout() -> u64 {
+    30 * 60
+}
+
+fn default_presence_cleanup_period() -> u64 {
+    24 * 60 * 60
+}
+
+fn default_presence_cleanup_limit() -> u64 {
+    24 * 60 * 60
+}
+
 // I know, it's a great name
 pub fn default_default_room_version() -> RoomVersionId {
     RoomVersionId::V9
 }
+
+fn default_hierarchy_max_depth() -> u64 {
+    6
+}
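// A sketch of how the new options above might look in the TOML config, assuming
// the serde field names map directly to config keys (values are the defaults
// defined above):
//
//     allow_presence = true
//     presence_idle_timeout = 60        # seconds before an active user is shown as idle
//     presence_offline_timeout = 1800   # seconds before an idle user is shown as offline
//     presence_cleanup_period = 86400   # seconds between presence cleanup sweeps
//     presence_cleanup_limit = 86400    # presence entries older than this are removed
//     hierarchy_max_depth = 6
//     max_fetch_prev_events = 100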

View file

@@ -1,10 +1,53 @@
-use std::collections::HashMap;
+use futures_util::{stream::FuturesUnordered, StreamExt};
+use std::{
+    collections::{hash_map::Entry, HashMap},
+    mem,
+    time::Duration,
+};
+use tracing::{error, info};
 
 use ruma::{
     events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, RoomId, UInt, UserId,
 };
+use tokio::{sync::mpsc, time::sleep};
 
-use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
+use crate::{
+    database::KeyValueDatabase,
+    service::{self, rooms::edus::presence::PresenceIter},
+    services, utils,
+    utils::{millis_since_unix_epoch, u64_from_bytes},
+    Error, Result,
+};
pub struct PresenceUpdate {
count: u64,
prev_timestamp: u64,
curr_timestamp: u64,
}
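// Assumed on-disk layout (see to_be_bytes/from_be_bytes below): 24 bytes made of
// three big-endian u64s, [count | prev_timestamp | curr_timestamp].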
impl PresenceUpdate {
fn to_be_bytes(&self) -> Vec<u8> {
[
self.count.to_be_bytes(),
self.prev_timestamp.to_be_bytes(),
self.curr_timestamp.to_be_bytes(),
]
.concat()
}
fn from_be_bytes(bytes: &[u8]) -> Result<Self> {
let (count_bytes, timestamps_bytes) = bytes.split_at(mem::size_of::<u64>());
let (prev_timestamp_bytes, curr_timestamp_bytes) =
timestamps_bytes.split_at(mem::size_of::<u64>());
Ok(Self {
count: u64_from_bytes(count_bytes).expect("count bytes from DB are valid"),
prev_timestamp: u64_from_bytes(prev_timestamp_bytes)
.expect("timestamp bytes from DB are valid"),
curr_timestamp: u64_from_bytes(curr_timestamp_bytes)
.expect("timestamp bytes from DB are valid"),
})
}
}
 impl service::rooms::edus::presence::Data for KeyValueDatabase {
     fn update_presence(
@@ -13,45 +56,82 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase {
         room_id: &RoomId,
         presence: PresenceEvent,
     ) -> Result<()> {
-        // TODO: Remove old entry? Or maybe just wipe completely from time to time?
-        let count = services().globals.next_count()?.to_be_bytes();
-
-        let mut presence_id = room_id.as_bytes().to_vec();
-        presence_id.push(0xff);
-        presence_id.extend_from_slice(&count);
-        presence_id.push(0xff);
-        presence_id.extend_from_slice(presence.sender.as_bytes());
-
-        self.presenceid_presence.insert(
-            &presence_id,
-            &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"),
-        )?;
-
-        self.userid_lastpresenceupdate.insert(
-            user_id.as_bytes(),
-            &utils::millis_since_unix_epoch().to_be_bytes(),
-        )?;
+        let roomuser_id = [room_id.as_bytes(), &[0xff], user_id.as_bytes()].concat();
+        self.roomuserid_presenceevent.insert(
+            &roomuser_id,
+            &serde_json::to_vec(&presence).expect("presence event from DB is valid"),
+        )?;
+
+        let timestamp = match presence.content.last_active_ago {
+            Some(active_ago) => millis_since_unix_epoch().saturating_sub(active_ago.into()),
+            None => millis_since_unix_epoch(),
+        };
+
+        self.userid_presenceupdate.insert(
+            user_id.as_bytes(),
+            &PresenceUpdate {
+                count: services().globals.next_count()?,
+                prev_timestamp: timestamp,
+                curr_timestamp: timestamp,
+            }
+            .to_be_bytes(),
+        )?;
 
         Ok(())
     }
-    fn ping_presence(&self, user_id: &UserId) -> Result<()> {
-        self.userid_lastpresenceupdate.insert(
-            user_id.as_bytes(),
-            &utils::millis_since_unix_epoch().to_be_bytes(),
-        )?;
+    fn ping_presence(
+        &self,
+        user_id: &UserId,
+        update_count: bool,
+        update_timestamp: bool,
+    ) -> Result<()> {
let now = millis_since_unix_epoch();
let presence = self
.userid_presenceupdate
.get(user_id.as_bytes())?
.map(|presence_bytes| PresenceUpdate::from_be_bytes(&presence_bytes))
.transpose()?;
let new_presence = match presence {
Some(presence) => PresenceUpdate {
count: if update_count {
services().globals.next_count()?
} else {
presence.count
},
prev_timestamp: if update_timestamp {
presence.curr_timestamp
} else {
presence.prev_timestamp
},
curr_timestamp: if update_timestamp {
now
} else {
presence.curr_timestamp
},
},
None => PresenceUpdate {
count: services().globals.current_count()?,
prev_timestamp: now,
curr_timestamp: now,
},
};
self.userid_presenceupdate
.insert(user_id.as_bytes(), &new_presence.to_be_bytes())?;
         Ok(())
     }
 
-    fn last_presence_update(&self, user_id: &UserId) -> Result<Option<u64>> {
-        self.userid_lastpresenceupdate
+    fn last_presence_update(&self, user_id: &UserId) -> Result<Option<(u64, u64)>> {
+        self.userid_presenceupdate
             .get(user_id.as_bytes())?
             .map(|bytes| {
-                utils::u64_from_bytes(&bytes).map_err(|_| {
-                    Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.")
-                })
+                PresenceUpdate::from_be_bytes(&bytes)
+                    .map(|update| (update.prev_timestamp, update.curr_timestamp))
             })
             .transpose()
     }
@@ -60,93 +140,268 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase {
         &self,
         room_id: &RoomId,
         user_id: &UserId,
-        count: u64,
+        presence_timestamp: u64,
     ) -> Result<Option<PresenceEvent>> {
-        let mut presence_id = room_id.as_bytes().to_vec();
-        presence_id.push(0xff);
-        presence_id.extend_from_slice(&count.to_be_bytes());
-        presence_id.push(0xff);
-        presence_id.extend_from_slice(user_id.as_bytes());
-
-        self.presenceid_presence
-            .get(&presence_id)?
-            .map(|value| parse_presence_event(&value))
+        let roomuser_id = [room_id.as_bytes(), &[0xff], user_id.as_bytes()].concat();
+        self.roomuserid_presenceevent
+            .get(&roomuser_id)?
+            .map(|value| parse_presence_event(&value, presence_timestamp))
             .transpose()
     }
-    fn presence_since(
-        &self,
-        room_id: &RoomId,
-        since: u64,
-    ) -> Result<HashMap<OwnedUserId, PresenceEvent>> {
-        let mut prefix = room_id.as_bytes().to_vec();
-        prefix.push(0xff);
-
-        let mut first_possible_edu = prefix.clone();
-        first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since
-        let mut hashmap = HashMap::new();
-
-        for (key, value) in self
-            .presenceid_presence
-            .iter_from(&first_possible_edu, false)
-            .take_while(|(key, _)| key.starts_with(&prefix))
-        {
-            let user_id = UserId::parse(
-                utils::string_from_bytes(
-                    key.rsplit(|&b| b == 0xff)
-                        .next()
-                        .expect("rsplit always returns an element"),
-                )
-                .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?,
-            )
-            .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?;
-
-            let presence = parse_presence_event(&value)?;
-            hashmap.insert(user_id, presence);
-        }
-
-        Ok(hashmap)
+    fn presence_since<'a>(&'a self, room_id: &RoomId, since: u64) -> Result<PresenceIter<'a>> {
+        let user_timestamp: HashMap<OwnedUserId, u64> = self
+            .userid_presenceupdate
+            .iter()
+            .map(|(user_id_bytes, update_bytes)| {
+                (
+                    UserId::parse(
+                        utils::string_from_bytes(&user_id_bytes)
+                            .expect("UserID bytes are a valid string"),
+                    )
+                    .expect("UserID bytes from database are a valid UserID"),
+                    PresenceUpdate::from_be_bytes(&update_bytes)
+                        .expect("PresenceUpdate bytes from database are a valid PresenceUpdate"),
+                )
+            })
+            .filter_map(|(user_id, presence_update)| {
+                if presence_update.count <= since
+                    || !services()
+                        .rooms
+                        .state_cache
+                        .is_joined(&user_id, room_id)
+                        .ok()?
+                {
+                    return None;
+                }
+
+                Some((user_id, presence_update.curr_timestamp))
+            })
+            .collect();
Ok(Box::new(
self.roomuserid_presenceevent
.scan_prefix(room_id.as_bytes().to_vec())
.filter_map(move |(roomuserid_bytes, presence_bytes)| {
let user_id_bytes = roomuserid_bytes.split(|byte| *byte == 0xff).last()?;
let user_id: OwnedUserId = UserId::parse(
utils::string_from_bytes(user_id_bytes)
.expect("UserID bytes are a valid string"),
)
.expect("UserID bytes from database are a valid UserID");
let timestamp = user_timestamp.get(&user_id)?;
let presence_event = parse_presence_event(&presence_bytes, *timestamp)
.expect("PresenceEvent bytes from database are a valid PresenceEvent");
Some((user_id, presence_event))
}),
))
} }
-    /*
-    fn presence_maintain(&self, db: Arc<TokioRwLock<Database>>) {
-        // TODO @M0dEx: move this to a timed tasks module
+    fn presence_maintain(
+        &self,
+        mut timer_receiver: mpsc::UnboundedReceiver<OwnedUserId>,
+    ) -> Result<()> {
+        let mut timers = FuturesUnordered::new();
+        let mut timers_timestamp: HashMap<OwnedUserId, u64> = HashMap::new();
+
         tokio::spawn(async move {
-            loop {
-                select! {
-                    Some(user_id) = self.presence_timers.next() {
-                        // TODO @M0dEx: would it be better to acquire the lock outside the loop?
-                        let guard = db.read().await;
-
-                        // TODO @M0dEx: add self.presence_timers
-                        // TODO @M0dEx: maintain presence
-                    }
-                }
-            }
-        });
-    }
-    */
+            // Wait for services to be created
+            sleep(Duration::from_secs(15)).await;
+
+            if !services().globals.allow_presence() {
+                return;
+            }
+
+            let idle_timeout = Duration::from_secs(services().globals.presence_idle_timeout());
+            let offline_timeout =
+                Duration::from_secs(services().globals.presence_offline_timeout());
+
+            // TODO: Get rid of this hack (hinting correct types to rustc)
+            timers.push(create_presence_timer(
+                idle_timeout,
+                UserId::parse_with_server_name("conduit", services().globals.server_name())
+                    .expect("Conduit user always exists"),
+            ));
+
+            loop {
+                tokio::select! {
+                    Some(user_id) = timers.next() => {
info!("Processing timer for user '{}' ({})", user_id.clone(), timers.len());
let (prev_timestamp, curr_timestamp) = match services().rooms.edus.presence.last_presence_update(&user_id) {
Ok(timestamp_tuple) => match timestamp_tuple {
Some(timestamp_tuple) => timestamp_tuple,
None => continue,
},
Err(e) => {
error!("{e}");
continue;
}
};
let prev_presence_state = determine_presence_state(prev_timestamp);
let curr_presence_state = determine_presence_state(curr_timestamp);
// Continue if there is no change in state
if prev_presence_state == curr_presence_state {
continue;
}
match services().rooms.edus.presence.ping_presence(&user_id, true, false, false) {
Ok(_) => (),
Err(e) => error!("{e}")
}
// TODO: Notify federation sender
}
Some(user_id) = timer_receiver.recv() => {
let now = millis_since_unix_epoch();
// Do not create timers if we added timers recently
let should_send = match timers_timestamp.entry(user_id.to_owned()) {
Entry::Occupied(mut entry) => {
if now - entry.get() > 15 * 1000 {
entry.insert(now);
true
} else {
false
}
},
Entry::Vacant(entry) => {
entry.insert(now);
true
}
};
if !should_send {
continue;
}
// Idle timeout
timers.push(create_presence_timer(idle_timeout, user_id.clone()));
// Offline timeout
timers.push(create_presence_timer(offline_timeout, user_id.clone()));
info!("Added timers for user '{}' ({})", user_id, timers.len());
                    }
                }
            }
        });

        Ok(())
    }
    fn presence_cleanup(&self) -> Result<()> {
let userid_presenceupdate = self.userid_presenceupdate.clone();
let roomuserid_presenceevent = self.roomuserid_presenceevent.clone();
tokio::spawn(async move {
// Wait for services to be created
sleep(Duration::from_secs(15)).await;
if !services().globals.allow_presence() {
return;
}
let period = Duration::from_secs(services().globals.presence_cleanup_period());
let age_limit = Duration::from_secs(services().globals.presence_cleanup_limit());
loop {
let mut removed_events: u64 = 0;
let age_limit_curr =
millis_since_unix_epoch().saturating_sub(age_limit.as_millis() as u64);
for user_id in userid_presenceupdate
.iter()
.map(|(user_id_bytes, update_bytes)| {
(
UserId::parse(
utils::string_from_bytes(&user_id_bytes)
.expect("UserID bytes are a valid string"),
)
.expect("UserID bytes from database are a valid UserID"),
PresenceUpdate::from_be_bytes(&update_bytes).expect(
"PresenceUpdate bytes from database are a valid PresenceUpdate",
),
)
})
.filter_map(|(user_id, presence_update)| {
if presence_update.curr_timestamp < age_limit_curr {
return None;
}
Some(user_id)
})
{
match userid_presenceupdate.remove(user_id.as_bytes()) {
Ok(_) => (),
Err(e) => {
error!("An errord occured while removing a stale presence update: {e}")
}
}
for room_id in services()
.rooms
.state_cache
.rooms_joined(&user_id)
.filter_map(|room_id| room_id.ok())
{
match roomuserid_presenceevent
.remove(&[room_id.as_bytes(), &[0xff], user_id.as_bytes()].concat())
{
Ok(_) => removed_events += 1,
Err(e) => error!(
"An errord occured while removing a stale presence event: {e}"
),
}
}
}
info!("Cleaned up {removed_events} stale presence events!");
sleep(period).await;
}
});
Ok(())
}
}
async fn create_presence_timer(duration: Duration, user_id: OwnedUserId) -> OwnedUserId {
sleep(duration).await;
user_id
}
-fn parse_presence_event(bytes: &[u8]) -> Result<PresenceEvent> {
+fn parse_presence_event(bytes: &[u8], presence_timestamp: u64) -> Result<PresenceEvent> {
     let mut presence: PresenceEvent = serde_json::from_slice(bytes)
         .map_err(|_| Error::bad_database("Invalid presence event in db."))?;
 
-    let current_timestamp: UInt = utils::millis_since_unix_epoch()
-        .try_into()
-        .expect("time is valid");
-
-    if presence.content.presence == PresenceState::Online {
-        // Don't set last_active_ago when the user is online
-        presence.content.last_active_ago = None;
-    } else {
-        // Convert from timestamp to duration
-        presence.content.last_active_ago = presence
-            .content
-            .last_active_ago
-            .map(|timestamp| current_timestamp - timestamp);
-    }
+    translate_active_ago(&mut presence, presence_timestamp);
 
     Ok(presence)
 }
fn determine_presence_state(last_active_ago: u64) -> PresenceState {
let globals = &services().globals;
if last_active_ago < globals.presence_idle_timeout() * 1000 {
PresenceState::Online
} else if last_active_ago < globals.presence_offline_timeout() * 1000 {
PresenceState::Unavailable
} else {
PresenceState::Offline
}
}
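// e.g. with the default timeouts (60 s idle, 30 min offline), a user last active
// 90_000 ms ago is Unavailable, and one last active 2_000_000 ms ago is Offline.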
/// Translates the timestamp representing last_active_ago to a diff from now.
fn translate_active_ago(presence_event: &mut PresenceEvent, last_active_ts: u64) {
let last_active_ago = millis_since_unix_epoch().saturating_sub(last_active_ts);
presence_event.content.presence = determine_presence_state(last_active_ago);
presence_event.content.last_active_ago = match presence_event.content.presence {
PresenceState::Online => None,
_ => Some(UInt::new_saturating(last_active_ago)),
}
}

View file

@@ -105,16 +105,25 @@
         )
     }
 
-    fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> {
+    fn private_read_set(
+        &self,
+        room_id: &RoomId,
+        user_id: &UserId,
+        shorteventid: u64,
+    ) -> Result<()> {
         let mut key = room_id.as_bytes().to_vec();
         key.push(0xff);
         key.extend_from_slice(user_id.as_bytes());
 
+        if self.private_read_get(room_id, user_id)?.unwrap_or(0) < shorteventid {
             self.roomuserid_privateread
-                .insert(&key, &count.to_be_bytes())?;
+                .insert(&key, &shorteventid.to_be_bytes())?;
 
             self.roomuserid_lastprivatereadupdate
                 .insert(&key, &services().globals.next_count()?.to_be_bytes())
+        } else {
+            Ok(())
+        }
     }
 
     fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result<Option<u64>> {

View file

@@ -5,7 +5,11 @@ use std::{
 use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result};
 use async_trait::async_trait;
-use ruma::{events::StateEventType, EventId, RoomId};
+use ruma::{
+    events::{room::member::MembershipState, StateEventType},
+    EventId, RoomId, UserId,
+};
+use serde_json::Value;
 
 #[async_trait]
 impl service::rooms::state_accessor::Data for KeyValueDatabase {
@@ -123,6 +127,21 @@
         })
     }
fn state_get_content(
&self,
shortstatehash: u64,
event_type: &StateEventType,
state_key: &str,
) -> Result<Option<Value>> {
let content = self
.state_get(shortstatehash, event_type, state_key)?
.map(|event| serde_json::from_str(event.content.get()))
.transpose()
.map_err(|_| Error::bad_database("Invalid event in database"))?;
Ok(content)
}
     /// Returns the state hash for this pdu.
     fn pdu_shortstatehash(&self, event_id: &EventId) -> Result<Option<u64>> {
         self.eventid_shorteventid
@@ -141,6 +160,23 @@
         })
     }
/// Get membership for given user in state
fn user_membership(&self, shortstatehash: u64, user_id: &UserId) -> Result<MembershipState> {
self.state_get_content(
shortstatehash,
&StateEventType::RoomMember,
user_id.as_str(),
)?
.map(|content| match content.get("membership") {
Some(Value::String(membership)) => Ok(MembershipState::from(membership.as_str())),
None => Ok(MembershipState::Leave),
_ => Err(Error::bad_database(
"Malformed membership, expected Value::String",
)),
})
.unwrap_or(Ok(MembershipState::Leave))
}
     /// Returns the full room state.
     async fn room_state_full(
         &self,

View file

@@ -3,7 +3,13 @@ use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId};
 use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
 
 impl service::rooms::user::Data for KeyValueDatabase {
-    fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
+    fn update_notification_counts(
+        &self,
+        user_id: &UserId,
+        room_id: &RoomId,
+        notification_count: u64,
+        highlight_count: u64,
+    ) -> Result<()> {
         let mut userroom_id = user_id.as_bytes().to_vec();
         userroom_id.push(0xff);
         userroom_id.extend_from_slice(room_id.as_bytes());
@@ -12,9 +18,9 @@ impl service::rooms::user::Data for KeyValueDatabase {
         roomuser_id.extend_from_slice(user_id.as_bytes());
 
         self.userroomid_notificationcount
-            .insert(&userroom_id, &0_u64.to_be_bytes())?;
+            .insert(&userroom_id, &notification_count.to_be_bytes())?;
         self.userroomid_highlightcount
-            .insert(&userroom_id, &0_u64.to_be_bytes())?;
+            .insert(&userroom_id, &highlight_count.to_be_bytes())?;
 
         self.roomuserid_lastnotificationread.insert(
             &roomuser_id,

View file

@@ -65,8 +65,8 @@ pub struct KeyValueDatabase {
     pub(super) roomuserid_lastprivatereadupdate: Arc<dyn KvTree>, // LastPrivateReadUpdate = Count
     pub(super) typingid_userid: Arc<dyn KvTree>, // TypingId = RoomId + TimeoutTime + Count
     pub(super) roomid_lasttypingupdate: Arc<dyn KvTree>, // LastRoomTypingUpdate = Count
-    pub(super) presenceid_presence: Arc<dyn KvTree>, // PresenceId = RoomId + Count + UserId
-    pub(super) userid_lastpresenceupdate: Arc<dyn KvTree>, // LastPresenceUpdate = Count
+    pub(super) userid_presenceupdate: Arc<dyn KvTree>, // PresenceUpdate = Count + Timestamp
+    pub(super) roomuserid_presenceevent: Arc<dyn KvTree>, // PresenceEvent
 
     //pub rooms: rooms::Rooms,
     pub(super) pduid_pdu: Arc<dyn KvTree>, // PduId = ShortRoomId + Count
@@ -288,8 +288,8 @@ impl KeyValueDatabase {
                 .open_tree("roomuserid_lastprivatereadupdate")?,
             typingid_userid: builder.open_tree("typingid_userid")?,
             roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?,
-            presenceid_presence: builder.open_tree("presenceid_presence")?,
-            userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?,
+            userid_presenceupdate: builder.open_tree("userid_presenceupdate")?,
+            roomuserid_presenceevent: builder.open_tree("roomuserid_presenceevent")?,
             pduid_pdu: builder.open_tree("pduid_pdu")?,
             eventid_pduid: builder.open_tree("eventid_pduid")?,
             roomid_pduleaves: builder.open_tree("roomid_pduleaves")?,
@@ -825,9 +825,6 @@ impl KeyValueDatabase {
             );
         }
 
-        // This data is probably outdated
-        db.presenceid_presence.clear()?;
-
         services().admin.start_handler();
 
         // Set emergency access for the conduit user

View file

@@ -318,6 +318,7 @@ fn routes() -> Router {
        .ruma_route(client_server::send_state_event_for_key_route)
        .ruma_route(client_server::get_state_events_route)
        .ruma_route(client_server::get_state_events_for_key_route)
+        .ruma_route(client_server::get_hierarchy_route)
        // Ruma doesn't have support for multiple paths for a single endpoint yet, and these routes
        // share one Ruma request / response type pair with {get,send}_state_event_for_key_route
        .route(
@@ -381,6 +382,7 @@ fn routes() -> Router {
        .ruma_route(server_server::send_transaction_message_route)
        .ruma_route(server_server::get_event_route)
        .ruma_route(server_server::get_missing_events_route)
+        .ruma_route(server_server::get_backfill_route)
        .ruma_route(server_server::get_event_authorization_route)
        .ruma_route(server_server::get_room_state_route)
        .ruma_route(server_server::get_room_state_ids_route)

View file

@@ -222,6 +222,10 @@ impl Service {
        self.config.max_request_size
    }

+    pub fn max_fetch_prev_events(&self) -> u16 {
+        self.config.max_fetch_prev_events
+    }
+
    pub fn allow_registration(&self) -> bool {
        self.config.allow_registration
    }
@@ -234,6 +238,14 @@ impl Service {
        self.config.allow_federation
    }

+    pub fn allow_public_read_receipts(&self) -> bool {
+        self.config.allow_public_read_receipts
+    }
+
+    pub fn allow_receiving_read_receipts(&self) -> bool {
+        self.config.allow_receiving_read_receipts
+    }
+
    pub fn allow_room_creation(&self) -> bool {
        self.config.allow_room_creation
    }
@@ -250,6 +262,10 @@ impl Service {
        self.config.enable_lightning_bolt
    }

+    pub fn hierarchy_max_depth(&self) -> u64 {
+        self.config.hierarchy_max_depth
+    }
+
    pub fn trusted_servers(&self) -> &[OwnedServerName] {
        &self.config.trusted_servers
    }
@@ -286,6 +302,26 @@ impl Service {
        &self.config.emergency_password
    }

+    pub fn allow_presence(&self) -> bool {
+        self.config.allow_presence
+    }
+
+    pub fn presence_idle_timeout(&self) -> u64 {
+        self.config.presence_idle_timeout
+    }
+
+    pub fn presence_offline_timeout(&self) -> u64 {
+        self.config.presence_offline_timeout
+    }
+
+    pub fn presence_cleanup_period(&self) -> u64 {
+        self.config.presence_cleanup_period
+    }
+
+    pub fn presence_cleanup_limit(&self) -> u64 {
+        self.config.presence_cleanup_limit
+    }
+
    pub fn supported_room_versions(&self) -> Vec<RoomVersionId> {
        let mut room_versions: Vec<RoomVersionId> = vec![];
        room_versions.extend(self.stable_room_versions.clone());
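These getters read straight from the parsed config struct. As a rough sketch of how such options are typically declared with serde defaults (field names mirror the getters above, but the default values and the `toml` usage here are invented for illustration, not Conduit's actual config code):

```rust
use serde::Deserialize;

// Hypothetical config fragment; defaults are made up for the sketch.
#[derive(Debug, Deserialize)]
struct Config {
    #[serde(default = "default_true")]
    allow_presence: bool,
    #[serde(default = "default_presence_idle_timeout")]
    presence_idle_timeout: u64,
    #[serde(default = "default_presence_offline_timeout")]
    presence_offline_timeout: u64,
}

fn default_true() -> bool {
    true
}

fn default_presence_idle_timeout() -> u64 {
    60 * 1000 // 1 minute, in milliseconds
}

fn default_presence_offline_timeout() -> u64 {
    5 * 60 * 1000 // 5 minutes, in milliseconds
}

fn main() {
    // Unset fields fall back to their serde defaults.
    let config: Config = toml::from_str("allow_presence = false").unwrap();
    println!("{config:?}");
}
```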

View file

@@ -62,7 +62,7 @@ impl Services {
                auth_chain: rooms::auth_chain::Service { db },
                directory: rooms::directory::Service { db },
                edus: rooms::edus::Service {
-                    presence: rooms::edus::presence::Service { db },
+                    presence: rooms::edus::presence::Service::build(db)?,
                    read_receipt: rooms::edus::read_receipt::Service { db },
                    typing: rooms::edus::typing::Service { db },
                },
@@ -77,7 +77,12 @@ impl Services {
                search: rooms::search::Service { db },
                short: rooms::short::Service { db },
                state: rooms::state::Service { db },
-                state_accessor: rooms::state_accessor::Service { db },
+                state_accessor: rooms::state_accessor::Service {
+                    db,
+                    server_visibility_cache: Mutex::new(LruCache::new(
+                        (100.0 * config.conduit_cache_capacity_modifier) as usize,
+                    )),
+                },
                state_cache: rooms::state_cache::Service { db },
                state_compressor: rooms::state_compressor::Service {
                    db,

View file

@@ -281,6 +281,10 @@ impl state_res::Event for PduEvent {
        &self.sender
    }

+    fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch {
+        MilliSecondsSinceUnixEpoch(self.origin_server_ts)
+    }
+
    fn event_type(&self) -> &RoomEventType {
        &self.kind
    }
@@ -289,10 +293,6 @@ impl state_res::Event for PduEvent {
        &self.content
    }

-    fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch {
-        MilliSecondsSinceUnixEpoch(self.origin_server_ts)
-    }
-
    fn state_key(&self) -> Option<&str> {
        self.state_key.as_deref()
    }

View file

@@ -1,7 +1,8 @@
-use std::collections::HashMap;
-
use crate::Result;
use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId};
+use tokio::sync::mpsc;
+
+use super::PresenceIter;

pub trait Data: Send + Sync {
    /// Adds a presence event which will be saved until a new event replaces it.
@@ -16,23 +17,29 @@ pub trait Data: Send + Sync {
    ) -> Result<()>;

    /// Resets the presence timeout, so the user will stay in their current presence state.
-    fn ping_presence(&self, user_id: &UserId) -> Result<()>;
+    fn ping_presence(
+        &self,
+        user_id: &UserId,
+        update_count: bool,
+        update_timestamp: bool,
+    ) -> Result<()>;

    /// Returns the timestamp of the last presence update of this user in millis since the unix epoch.
-    fn last_presence_update(&self, user_id: &UserId) -> Result<Option<u64>>;
+    fn last_presence_update(&self, user_id: &UserId) -> Result<Option<(u64, u64)>>;

    /// Returns the presence event with correct last_active_ago.
    fn get_presence_event(
        &self,
        room_id: &RoomId,
        user_id: &UserId,
-        count: u64,
+        presence_timestamp: u64,
    ) -> Result<Option<PresenceEvent>>;

    /// Returns the most recent presence updates that happened after the event with id `since`.
-    fn presence_since(
-        &self,
-        room_id: &RoomId,
-        since: u64,
-    ) -> Result<HashMap<OwnedUserId, PresenceEvent>>;
+    fn presence_since<'a>(&'a self, room_id: &RoomId, since: u64) -> Result<PresenceIter<'a>>;
+
+    fn presence_maintain(&self, timer_receiver: mpsc::UnboundedReceiver<OwnedUserId>)
+        -> Result<()>;
+
+    fn presence_cleanup(&self) -> Result<()>;
}
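Reviewer note: switching `presence_since` from an eager `HashMap` to the boxed `PresenceIter` trades an up-front collection for a lazy iterator. The box is needed because, at this Rust version, trait methods cannot return `impl Iterator`, so implementations hide their concrete iterator behind one object type. A self-contained sketch of the pattern, with plain types standing in for Conduit's IDs and events:

```rust
// Plain types stand in for Conduit's user IDs and presence events.
type PresenceIter<'a> = Box<dyn Iterator<Item = (String, u64)> + 'a>;

trait Data {
    fn presence_since<'a>(&'a self, since: u64) -> PresenceIter<'a>;
}

struct MemoryData {
    updates: Vec<(String, u64)>, // (user id, update count)
}

impl Data for MemoryData {
    fn presence_since<'a>(&'a self, since: u64) -> PresenceIter<'a> {
        // The concrete filter/map iterator type is unnameable in the trait,
        // so it is boxed behind the alias instead.
        Box::new(
            self.updates
                .iter()
                .filter(move |&&(_, count)| count > since)
                .map(|(user, count)| (user.clone(), *count)),
        )
    }
}

fn main() {
    let data = MemoryData {
        updates: vec![
            ("@alice:example.org".to_owned(), 3),
            ("@bob:example.org".to_owned(), 7),
        ],
    };
    let newer: Vec<_> = data.presence_since(5).collect();
    assert_eq!(newer, vec![("@bob:example.org".to_owned(), 7)]);
}
```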

View file

@@ -1,16 +1,55 @@
mod data;

-use std::collections::HashMap;
-
pub use data::Data;
use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId};
+use tokio::sync::mpsc;

-use crate::Result;
+use crate::{services, Error, Result};
+
+pub(crate) type PresenceIter<'a> = Box<dyn Iterator<Item = (OwnedUserId, PresenceEvent)> + 'a>;

pub struct Service {
    pub db: &'static dyn Data,
+    // Presence timers
+    timer_sender: mpsc::UnboundedSender<OwnedUserId>,
}

impl Service {
+    /// Builds the service and initializes the presence_maintain task
+    pub fn build(db: &'static dyn Data) -> Result<Self> {
+        let (sender, receiver) = mpsc::unbounded_channel();
+        let service = Self {
+            db,
+            timer_sender: sender,
+        };
+
+        service.presence_maintain(receiver)?;
+        service.presence_cleanup()?;
+
+        Ok(service)
+    }
+
+    /// Resets the presence timeout, so the user will stay in their current presence state.
+    pub fn ping_presence(
+        &self,
+        user_id: &UserId,
+        update_count: bool,
+        update_timestamp: bool,
+        spawn_timer: bool,
+    ) -> Result<()> {
+        if !services().globals.allow_presence() {
+            return Ok(());
+        }
+
+        if spawn_timer {
+            self.spawn_timer(user_id)?;
+        }
+
+        self.db
+            .ping_presence(user_id, update_count, update_timestamp)
+    }
+
    /// Adds a presence event which will be saved until a new event replaces it.
    ///
    /// Note: This method takes a RoomId because presence updates are always bound to rooms to
@@ -20,103 +59,78 @@ impl Service {
        user_id: &UserId,
        room_id: &RoomId,
        presence: PresenceEvent,
+        spawn_timer: bool,
    ) -> Result<()> {
+        if !services().globals.allow_presence() {
+            return Ok(());
+        }
+
+        if spawn_timer {
+            self.spawn_timer(user_id)?;
+        }
+
        self.db.update_presence(user_id, room_id, presence)
    }

-    /// Resets the presence timeout, so the user will stay in their current presence state.
-    pub fn ping_presence(&self, user_id: &UserId) -> Result<()> {
-        self.db.ping_presence(user_id)
+    /// Returns the count and timestamp of the last presence update for the specified user.
+    pub fn last_presence_update(&self, user_id: &UserId) -> Result<Option<(u64, u64)>> {
+        if !services().globals.allow_presence() {
+            return Ok(None);
+        }
+
+        self.db.last_presence_update(user_id)
    }

-    pub fn get_last_presence_event(
+    /// Returns the saved presence event for this user with actual last_active_ago.
+    pub fn get_presence_event(
        &self,
        user_id: &UserId,
        room_id: &RoomId,
    ) -> Result<Option<PresenceEvent>> {
+        if !services().globals.allow_presence() {
+            return Ok(None);
+        }
+
        let last_update = match self.db.last_presence_update(user_id)? {
-            Some(last) => last,
+            Some(last) => last.1,
            None => return Ok(None),
        };

        self.db.get_presence_event(room_id, user_id, last_update)
    }

-    /* TODO
-    /// Sets all users to offline who have been quiet for too long.
-    fn _presence_maintain(
-        &self,
-        rooms: &super::Rooms,
-        globals: &super::super::globals::Globals,
-    ) -> Result<()> {
-        let current_timestamp = utils::millis_since_unix_epoch();
-
-        for (user_id_bytes, last_timestamp) in self
-            .userid_lastpresenceupdate
-            .iter()
-            .filter_map(|(k, bytes)| {
-                Some((
-                    k,
-                    utils::u64_from_bytes(&bytes)
-                        .map_err(|_| {
-                            Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.")
-                        })
-                        .ok()?,
-                ))
-            })
-            .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000)
-        // 5 Minutes
-        {
-            // Send new presence events to set the user offline
-            let count = globals.next_count()?.to_be_bytes();
-            let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes)
-                .map_err(|_| {
-                    Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.")
-                })?
-                .try_into()
-                .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?;
-            for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) {
-                let mut presence_id = room_id.as_bytes().to_vec();
-                presence_id.push(0xff);
-                presence_id.extend_from_slice(&count);
-                presence_id.push(0xff);
-                presence_id.extend_from_slice(&user_id_bytes);
-
-                self.presenceid_presence.insert(
-                    &presence_id,
-                    &serde_json::to_vec(&PresenceEvent {
-                        content: PresenceEventContent {
-                            avatar_url: None,
-                            currently_active: None,
-                            displayname: None,
-                            last_active_ago: Some(
-                                last_timestamp.try_into().expect("time is valid"),
-                            ),
-                            presence: PresenceState::Offline,
-                            status_msg: None,
-                        },
-                        sender: user_id.to_owned(),
-                    })
-                    .expect("PresenceEvent can be serialized"),
-                )?;
-            }
-
-            self.userid_lastpresenceupdate.insert(
-                user_id.as_bytes(),
-                &utils::millis_since_unix_epoch().to_be_bytes(),
-            )?;
-        }
-
-        Ok(())
-    }*/
-
    /// Returns the most recent presence updates that happened after the event with id `since`.
    #[tracing::instrument(skip(self, since, room_id))]
-    pub fn presence_since(
-        &self,
-        room_id: &RoomId,
-        since: u64,
-    ) -> Result<HashMap<OwnedUserId, PresenceEvent>> {
+    pub fn presence_since<'a>(&'a self, room_id: &RoomId, since: u64) -> Result<PresenceIter<'a>> {
+        if !services().globals.allow_presence() {
+            return Ok(Box::new(std::iter::empty()));
+        }
+
        self.db.presence_since(room_id, since)
    }
+
+    /// Spawns a task maintaining presence data
+    fn presence_maintain(
+        &self,
+        timer_receiver: mpsc::UnboundedReceiver<OwnedUserId>,
+    ) -> Result<()> {
+        self.db.presence_maintain(timer_receiver)
+    }
+
+    fn presence_cleanup(&self) -> Result<()> {
+        self.db.presence_cleanup()
+    }
+
+    /// Spawns a timer for the user used by the maintenance task
+    fn spawn_timer(&self, user_id: &UserId) -> Result<()> {
+        if !services().globals.allow_presence() {
+            return Ok(());
+        }
+
+        self.timer_sender
+            .send(user_id.into())
+            .map_err(|_| Error::bad_database("Sender errored out"))?;
+
+        Ok(())
+    }
}
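The shape here is worth spelling out: the service keeps only the sending half of an unbounded channel, while the database layer owns the long-running maintenance task listening on the receiving half; every ping re-arms that user's timer. A simplified, self-contained sketch of that shape, assuming the tokio crate (deliberately naive: it never cancels stale timers, which the real maintenance task has to account for):

```rust
use std::time::Duration;

use tokio::sync::mpsc;

// Stand-in for flipping a user's stored presence state to idle.
fn set_idle(user_id: &str) {
    println!("{user_id} is now idle");
}

#[tokio::main]
async fn main() {
    let (timer_sender, mut timer_receiver) = mpsc::unbounded_channel::<String>();

    // Maintenance task: each received user id arms a fresh idle timer.
    tokio::spawn(async move {
        while let Some(user_id) = timer_receiver.recv().await {
            tokio::spawn(async move {
                tokio::time::sleep(Duration::from_millis(100)).await; // minutes in reality
                set_idle(&user_id);
            });
        }
    });

    // A request handler "pings" presence by sending the user id down the channel.
    timer_sender.send("@alice:example.org".to_owned()).unwrap();

    // Keep the example alive long enough for the timer to fire.
    tokio::time::sleep(Duration::from_millis(200)).await;
}
```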

View file

@@ -25,8 +25,9 @@ pub trait Data: Send + Sync {
            > + 'a,
    >;

-    /// Sets a private read marker at `count`.
-    fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()>;
+    /// Sets a private read marker at `shorteventid`.
+    fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, shorteventid: u64)
+        -> Result<()>;

    /// Returns the private read marker.
    fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result<Option<u64>>;

View file

@@ -2,7 +2,7 @@ mod data;

pub use data::Data;

-use crate::Result;
+use crate::{services, Result};
use ruma::{events::receipt::ReceiptEvent, serde::Raw, OwnedUserId, RoomId, UserId};

pub struct Service {
@@ -36,10 +36,19 @@ impl Service {
        self.db.readreceipts_since(room_id, since)
    }

-    /// Sets a private read marker at `count`.
+    /// Sets a private read marker at `shorteventid`.
    #[tracing::instrument(skip(self))]
-    pub fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> {
-        self.db.private_read_set(room_id, user_id, count)
+    pub fn private_read_set(
+        &self,
+        room_id: &RoomId,
+        user_id: &UserId,
+        shorteventid: u64,
+    ) -> Result<()> {
+        self.db.private_read_set(room_id, user_id, shorteventid)?;
+
+        services()
+            .rooms
+            .user
+            .update_notification_counts(user_id, room_id)
    }

    /// Returns the private read marker.

View file

@@ -1224,7 +1224,7 @@ impl Service {
            .await
            .pop()
        {
-            if amount > 100 {
+            if amount > services().globals.max_fetch_prev_events() {
                // Max limit reached
                warn!("Max prev event limit reached!");
                graph.insert(prev_event_id.clone(), HashSet::new());

View file

@@ -4,7 +4,10 @@ use std::{
};

use async_trait::async_trait;
-use ruma::{events::StateEventType, EventId, RoomId};
+use ruma::{
+    events::{room::member::MembershipState, StateEventType},
+    EventId, RoomId, UserId,
+};

use crate::{PduEvent, Result};

@@ -35,9 +38,19 @@ pub trait Data: Send + Sync {
        state_key: &str,
    ) -> Result<Option<Arc<PduEvent>>>;

+    fn state_get_content(
+        &self,
+        shortstatehash: u64,
+        event_type: &StateEventType,
+        state_key: &str,
+    ) -> Result<Option<serde_json::Value>>;
+
    /// Returns the state hash for this pdu.
    fn pdu_shortstatehash(&self, event_id: &EventId) -> Result<Option<u64>>;

+    /// Get membership for given user in state
+    fn user_membership(&self, shortstatehash: u64, user_id: &UserId) -> Result<MembershipState>;
+
    /// Returns the full room state.
    async fn room_state_full(
        &self,

View file

@@ -1,16 +1,25 @@
mod data;
use std::{
    collections::{BTreeMap, HashMap},
-    sync::Arc,
+    sync::{Arc, Mutex},
};

pub use data::Data;
-use ruma::{events::StateEventType, EventId, RoomId};
+use lru_cache::LruCache;
+use ruma::{
+    events::{
+        room::{history_visibility::HistoryVisibility, member::MembershipState},
+        StateEventType,
+    },
+    EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
+};
+use tracing::warn;

use crate::{PduEvent, Result};

pub struct Service {
    pub db: &'static dyn Data,
+    pub server_visibility_cache: Mutex<LruCache<(OwnedServerName, u64), bool>>,
}

impl Service {
@@ -49,11 +58,95 @@ impl Service {
        self.db.state_get(shortstatehash, event_type, state_key)
    }

+    pub fn state_get_content(
+        &self,
+        shortstatehash: u64,
+        event_type: &StateEventType,
+        state_key: &str,
+    ) -> Result<Option<serde_json::Value>> {
+        self.db
+            .state_get_content(shortstatehash, event_type, state_key)
+    }
+
    /// Returns the state hash for this pdu.
    pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result<Option<u64>> {
        self.db.pdu_shortstatehash(event_id)
    }

+    /// Whether a server is allowed to see an event through federation, based on
+    /// the room's history_visibility at that event's state.
+    #[tracing::instrument(skip(self))]
+    pub fn server_can_see_event(
+        &self,
+        server_name: &ServerName,
+        current_server_members: &[OwnedUserId],
+        event_id: &EventId,
+    ) -> Result<bool> {
+        let shortstatehash = match self.pdu_shortstatehash(event_id) {
+            Ok(Some(shortstatehash)) => shortstatehash,
+            _ => return Ok(false),
+        };
+
+        if let Some(visibility) = self
+            .server_visibility_cache
+            .lock()
+            .unwrap()
+            .get_mut(&(server_name.to_owned(), shortstatehash))
+        {
+            return Ok(*visibility);
+        }
+
+        let history_visibility = self
+            .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "")?
+            .map(|content| match content.get("history_visibility") {
+                Some(visibility) => HistoryVisibility::from(visibility.as_str().unwrap_or("")),
+                None => HistoryVisibility::Shared,
+            });
+
+        let visibility = match history_visibility {
+            Some(HistoryVisibility::WorldReadable) => {
+                // Allow if event was sent while world readable
+                true
+            }
+            Some(HistoryVisibility::Invited) => {
+                // Allow if any member on requesting server was AT LEAST invited, else deny
+                current_server_members
+                    .iter()
+                    .any(|member| self.user_was_invited(shortstatehash, member))
+            }
+            _ => {
+                // Allow if any member on requesting server was joined, else deny
+                current_server_members
+                    .iter()
+                    .any(|member| self.user_was_joined(shortstatehash, member))
+            }
+        };
+
+        self.server_visibility_cache
+            .lock()
+            .unwrap()
+            .insert((server_name.to_owned(), shortstatehash), visibility);
+
+        Ok(visibility)
+    }
+
+    /// The user was a joined member at this state (potentially in the past)
+    fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> bool {
+        self.db
+            .user_membership(shortstatehash, user_id)
+            .map(|s| s == MembershipState::Join)
+            .unwrap_or_default() // Return sensible default, i.e. false
+    }
+
+    /// The user was an invited or joined room member at this state (potentially
+    /// in the past)
+    fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> bool {
+        self.db
+            .user_membership(shortstatehash, user_id)
+            .map(|s| s == MembershipState::Join || s == MembershipState::Invite)
+            .unwrap_or_default() // Return sensible default, i.e. false
+    }
+
    /// Returns the full room state.
    #[tracing::instrument(skip(self))]
    pub async fn room_state_full(
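`server_can_see_event` is a classic memoized predicate: check the cache under a mutex, otherwise compute the answer from room state and store the verdict keyed on `(server, shortstatehash)`. A stripped-down sketch of that exact pattern, with plain types standing in for Conduit's and a stubbed visibility check (assumes the same `lru-cache` crate used above, with its `new`/`get_mut`/`insert` API):

```rust
use std::sync::Mutex;

use lru_cache::LruCache;

struct VisibilityChecker {
    cache: Mutex<LruCache<(String, u64), bool>>,
}

impl VisibilityChecker {
    fn server_can_see(&self, server: &str, shortstatehash: u64) -> bool {
        // Fast path: answer already memoized for this (server, state) pair.
        if let Some(visible) = self
            .cache
            .lock()
            .unwrap()
            .get_mut(&(server.to_owned(), shortstatehash))
        {
            return *visible;
        }

        // Slow path: recompute (stubbed here) and memoize the verdict.
        let visible = expensive_visibility_check(server, shortstatehash);
        self.cache
            .lock()
            .unwrap()
            .insert((server.to_owned(), shortstatehash), visible);
        visible
    }
}

// Stand-in for walking history_visibility and membership state.
fn expensive_visibility_check(_server: &str, _shortstatehash: u64) -> bool {
    true
}

fn main() {
    let checker = VisibilityChecker {
        cache: Mutex::new(LruCache::new(100)),
    };
    assert!(checker.server_can_see("example.org", 42)); // computed
    assert!(checker.server_can_see("example.org", 42)); // served from cache
}
```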

View file

@@ -213,18 +213,17 @@ impl Service {
        );
        let insert_lock = mutex_insert.lock().unwrap();

-        let count1 = services().globals.next_count()?;
+        let _count1 = services().globals.next_count()?;
        // Mark as read first so the sending client doesn't get a notification even if appending
        // fails
-        services()
-            .rooms
-            .edus
-            .read_receipt
-            .private_read_set(&pdu.room_id, &pdu.sender, count1)?;
-        services()
-            .rooms
-            .user
-            .reset_notification_counts(&pdu.sender, &pdu.room_id)?;
+        services().rooms.edus.read_receipt.private_read_set(
+            &pdu.room_id,
+            &pdu.sender,
+            services()
+                .rooms
+                .short
+                .get_or_create_shorteventid(&pdu.event_id)?,
+        )?;

        let count2 = services().globals.next_count()?;
        let mut pdu_id = shortroomid.to_be_bytes().to_vec();

View file

@@ -2,7 +2,13 @@ use crate::Result;
use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId};

pub trait Data: Send + Sync {
-    fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>;
+    fn update_notification_counts(
+        &self,
+        user_id: &UserId,
+        room_id: &RoomId,
+        notification_count: u64,
+        highlight_count: u64,
+    ) -> Result<()>;

    fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result<u64>;

View file

@@ -1,17 +1,117 @@
mod data;

pub use data::Data;
-use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId};
+use ruma::{
+    events::{
+        push_rules::PushRulesEvent, room::power_levels::RoomPowerLevelsEventContent,
+        GlobalAccountDataEventType, StateEventType,
+    },
+    push::{Action, Ruleset, Tweak},
+    OwnedRoomId, OwnedUserId, RoomId, UserId,
+};

-use crate::Result;
+use crate::{services, Error, Result};

pub struct Service {
    pub db: &'static dyn Data,
}

impl Service {
-    pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
-        self.db.reset_notification_counts(user_id, room_id)
+    pub fn update_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
+        let power_levels: RoomPowerLevelsEventContent = services()
+            .rooms
+            .state_accessor
+            .room_state_get(room_id, &StateEventType::RoomPowerLevels, "")?
+            .map(|ev| {
+                serde_json::from_str(ev.content.get())
+                    .map_err(|_| Error::bad_database("invalid m.room.power_levels event"))
+            })
+            .transpose()?
+            .unwrap_or_default();
+        let read_event = services()
+            .rooms
+            .edus
+            .read_receipt
+            .private_read_get(room_id, user_id)
+            .unwrap_or(None)
+            .unwrap_or(0u64);
+        let mut notification_count = 0u64;
+        let mut highlight_count = 0u64;
+
+        services()
+            .rooms
+            .timeline
+            .pdus_since(user_id, room_id, read_event)?
+            .filter_map(|pdu| pdu.ok())
+            .map(|(_, pdu)| pdu)
+            .filter(|pdu| {
+                // Don't include user's own messages in notification counts
+                user_id != pdu.sender
+                    && services()
+                        .rooms
+                        .short
+                        .get_or_create_shorteventid(&pdu.event_id)
+                        .unwrap_or(0)
+                        != read_event
+            })
+            .filter_map(|pdu| {
+                let rules_for_user = services()
+                    .account_data
+                    .get(
+                        None,
+                        user_id,
+                        GlobalAccountDataEventType::PushRules.to_string().into(),
+                    )
+                    .ok()?
+                    .map(|event| {
+                        serde_json::from_str::<PushRulesEvent>(event.get())
+                            .map_err(|_| Error::bad_database("Invalid push rules event in db."))
+                    })
+                    .transpose()
+                    .ok()?
+                    .map(|ev: PushRulesEvent| ev.content.global)
+                    .unwrap_or_else(|| Ruleset::server_default(user_id));
+
+                let mut highlight = false;
+                let mut notify = false;
+
+                for action in services()
+                    .pusher
+                    .get_actions(
+                        user_id,
+                        &rules_for_user,
+                        &power_levels,
+                        &pdu.to_sync_room_event(),
+                        &pdu.room_id,
+                    )
+                    .ok()?
+                {
+                    match action {
+                        Action::DontNotify => notify = false,
+                        // TODO: Implement proper support for coalesce
+                        Action::Notify | Action::Coalesce => notify = true,
+                        Action::SetTweak(Tweak::Highlight(true)) => {
+                            highlight = true;
+                        }
+                        _ => {}
+                    };
+                }
+
+                if notify {
+                    notification_count += 1;
+                };
+
+                if highlight {
+                    highlight_count += 1;
+                };
+
+                Some(())
+            })
+            .for_each(|_| {});
+
+        self.db
+            .update_notification_counts(user_id, room_id, notification_count, highlight_count)
    }

    pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result<u64> {
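The counting loop above boils down to: for each unread event, fold its push-rule actions into two booleans (notify, highlight), then tally those across events. A toy version of that fold, with a simplified `Action` enum standing in for ruma's push action type:

```rust
// Simplified stand-in for ruma's push `Action`; Coalesce and tweak payloads
// other than highlight are omitted for brevity.
enum Action {
    DontNotify,
    Notify,
    SetTweakHighlight(bool),
}

// Fold one event's actions into (notify, highlight) flags, last-writer-wins
// for notify, sticky for highlight, as in the loop above.
fn count_event(actions: &[Action]) -> (bool, bool) {
    let (mut notify, mut highlight) = (false, false);
    for action in actions {
        match action {
            Action::DontNotify => notify = false,
            Action::Notify => notify = true,
            Action::SetTweakHighlight(true) => highlight = true,
            _ => {}
        }
    }
    (notify, highlight)
}

fn main() {
    let unread_events = vec![
        vec![Action::Notify],
        vec![Action::Notify, Action::SetTweakHighlight(true)],
        vec![Action::DontNotify],
    ];

    let (mut notification_count, mut highlight_count) = (0u64, 0u64);
    for actions in &unread_events {
        let (notify, highlight) = count_event(actions);
        notification_count += notify as u64;
        highlight_count += highlight as u64;
    }
    assert_eq!((notification_count, highlight_count), (2, 1));
}
```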

View file

@@ -24,7 +24,8 @@ use ruma::{
        federation::{
            self,
            transactions::edu::{
-                DeviceListUpdateContent, Edu, ReceiptContent, ReceiptData, ReceiptMap,
+                DeviceListUpdateContent, Edu, PresenceContent, PresenceUpdate, ReceiptContent,
+                ReceiptData, ReceiptMap,
            },
        },
        OutgoingRequest,
@@ -283,6 +284,34 @@ impl Service {
                .filter(|user_id| user_id.server_name() == services().globals.server_name()),
        );

+        // Look for presence updates in this room
+        let presence_updates: Vec<PresenceUpdate> = services()
+            .rooms
+            .edus
+            .presence
+            .presence_since(&room_id, since)?
+            .filter(|(user_id, _)| user_id.server_name() == services().globals.server_name())
+            .map(|(user_id, presence_event)| PresenceUpdate {
+                user_id,
+                presence: presence_event.content.presence,
+                status_msg: presence_event.content.status_msg,
+                last_active_ago: presence_event
+                    .content
+                    .last_active_ago
+                    .unwrap_or_else(|| uint!(0)),
+                currently_active: presence_event.content.currently_active.unwrap_or(false),
+            })
+            .collect();
+
+        let presence_content = PresenceContent {
+            push: presence_updates,
+        };
+
+        events.push(
+            serde_json::to_vec(&Edu::Presence(presence_content))
+                .expect("presence json can be serialized"),
+        );
+
        // Look for read receipts in this room
        for r in services()
            .rooms
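Note the `server_name()` filter above: only presence for local users is pushed out over federation, since each server is authoritative for its own users' presence. A tiny string-based sketch of that selection rule, purely illustrative:

```rust
// Illustrative: a Matrix user id is local when its server part matches ours.
fn is_local(user_id: &str, server_name: &str) -> bool {
    user_id.rsplit_once(':').map(|(_, s)| s) == Some(server_name)
}

fn main() {
    let updates = ["@alice:example.org", "@bob:matrix.org"];
    let local: Vec<_> = updates
        .iter()
        .filter(|u| is_local(u, "example.org"))
        .collect();
    assert_eq!(local, vec![&"@alice:example.org"]);
}
```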