refactor dyn KvTree out of services

Signed-off-by: Jason Volk <jason@zemos.net>
This commit is contained in:
Jason Volk 2024-06-28 22:51:39 +00:00
parent 57acc4f655
commit cb48e25783
69 changed files with 594 additions and 647 deletions

View file

@ -1,25 +1,26 @@
use std::{collections::HashMap, sync::Arc};
use conduit::{utils, warn, Error, Result};
use database::{Database, Map};
use ruma::{
api::client::error::ErrorKind,
events::{AnyEphemeralRoomEvent, RoomAccountDataEventType},
serde::Raw,
RoomId, UserId,
};
use tracing::warn;
use crate::{services, utils, Error, KeyValueDatabase, KvTree, Result};
use crate::services;
pub(super) struct Data {
roomuserdataid_accountdata: Arc<dyn KvTree>,
roomusertype_roomuserdataid: Arc<dyn KvTree>,
roomuserdataid_accountdata: Arc<Map>,
roomusertype_roomuserdataid: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
roomuserdataid_accountdata: db.roomuserdataid_accountdata.clone(),
roomusertype_roomuserdataid: db.roomusertype_roomuserdataid.clone(),
roomuserdataid_accountdata: db["roomuserdataid_accountdata"].clone(),
roomusertype_roomuserdataid: db["roomusertype_roomuserdataid"].clone(),
}
}

View file

@ -4,7 +4,7 @@ use std::{collections::HashMap, sync::Arc};
use conduit::{Result, Server};
use data::Data;
use database::KeyValueDatabase;
use database::Database;
use ruma::{
events::{AnyEphemeralRoomEvent, RoomAccountDataEventType},
serde::Raw,
@ -16,7 +16,7 @@ pub struct Service {
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,14 +1,12 @@
use conduit::Server;
use database::KeyValueDatabase;
pub mod console;
mod create;
mod grant;
use std::{future::Future, pin::Pin, sync::Arc};
use conduit::{utils::mutex_map, Error, Result};
use conduit::{error, utils::mutex_map, Error, Result, Server};
pub use create::create_admin_room;
use database::Database;
pub use grant::make_user_admin;
use loole::{Receiver, Sender};
use ruma::{
@ -20,7 +18,6 @@ use ruma::{
};
use serde_json::value::to_raw_value;
use tokio::{sync::Mutex, task::JoinHandle};
use tracing::error;
use crate::{pdu::PduBuilder, services, user_is_local, PduEvent};
@ -47,7 +44,7 @@ pub struct Command {
}
impl Service {
pub fn build(_server: &Arc<Server>, _db: &Arc<KeyValueDatabase>) -> Result<Arc<Self>> {
pub fn build(_server: &Arc<Server>, _db: &Arc<Database>) -> Result<Arc<Self>> {
let (sender, receiver) = loole::bounded(COMMAND_QUEUE_LIMIT);
Ok(Arc::new(Self {
sender,

View file

@ -1,18 +1,17 @@
use std::sync::Arc;
use database::KvTree;
use conduit::{utils, Error, Result};
use database::{Database, Map};
use ruma::api::appservice::Registration;
use crate::{utils, Error, KeyValueDatabase, Result};
pub struct Data {
id_appserviceregistrations: Arc<dyn KvTree>,
id_appserviceregistrations: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
id_appserviceregistrations: db.id_appserviceregistrations.clone(),
id_appserviceregistrations: db["id_appserviceregistrations"].clone(),
}
}

View file

@ -2,7 +2,9 @@ mod data;
use std::{collections::BTreeMap, sync::Arc};
pub use data::Data;
use conduit::{Result, Server};
use data::Data;
use database::Database;
use futures_util::Future;
use regex::RegexSet;
use ruma::{
@ -11,7 +13,7 @@ use ruma::{
};
use tokio::sync::RwLock;
use crate::{services, Result};
use crate::services;
/// Compiled regular expressions for a namespace
#[derive(Clone, Debug)]
@ -117,11 +119,8 @@ pub struct Service {
registration_info: RwLock<BTreeMap<String, RegistrationInfo>>,
}
use conduit::Server;
use database::KeyValueDatabase;
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
let mut registration_info = BTreeMap::new();
let db = Data::new(db);
// Inserting registrations into cache

View file

@ -2,7 +2,7 @@ use std::{sync::Arc, time::Duration};
use reqwest::redirect;
use crate::{service::globals::resolver, Config, Result};
use crate::{globals::resolver, Config, Result};
pub struct Client {
pub default: reqwest::Client,

View file

@ -3,7 +3,8 @@ use std::{
sync::Arc,
};
use database::{Cork, KeyValueDatabase, KvTree};
use conduit::{trace, utils, Error, Result};
use database::{Cork, Database, Map};
use futures_util::{stream::FuturesUnordered, StreamExt};
use lru_cache::LruCache;
use ruma::{
@ -11,46 +12,45 @@ use ruma::{
signatures::Ed25519KeyPair,
DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId,
};
use tracing::trace;
use crate::{services, utils, Error, Result};
use crate::services;
const COUNTER: &[u8] = b"c";
const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u";
pub struct Data {
global: Arc<dyn KvTree>,
todeviceid_events: Arc<dyn KvTree>,
userroomid_joined: Arc<dyn KvTree>,
userroomid_invitestate: Arc<dyn KvTree>,
userroomid_leftstate: Arc<dyn KvTree>,
userroomid_notificationcount: Arc<dyn KvTree>,
userroomid_highlightcount: Arc<dyn KvTree>,
pduid_pdu: Arc<dyn KvTree>,
keychangeid_userid: Arc<dyn KvTree>,
roomusertype_roomuserdataid: Arc<dyn KvTree>,
server_signingkeys: Arc<dyn KvTree>,
readreceiptid_readreceipt: Arc<dyn KvTree>,
userid_lastonetimekeyupdate: Arc<dyn KvTree>,
pub(super) db: Arc<KeyValueDatabase>,
global: Arc<Map>,
todeviceid_events: Arc<Map>,
userroomid_joined: Arc<Map>,
userroomid_invitestate: Arc<Map>,
userroomid_leftstate: Arc<Map>,
userroomid_notificationcount: Arc<Map>,
userroomid_highlightcount: Arc<Map>,
pduid_pdu: Arc<Map>,
keychangeid_userid: Arc<Map>,
roomusertype_roomuserdataid: Arc<Map>,
server_signingkeys: Arc<Map>,
readreceiptid_readreceipt: Arc<Map>,
userid_lastonetimekeyupdate: Arc<Map>,
pub(super) db: Arc<Database>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
global: db.global.clone(),
todeviceid_events: db.todeviceid_events.clone(),
userroomid_joined: db.userroomid_joined.clone(),
userroomid_invitestate: db.userroomid_invitestate.clone(),
userroomid_leftstate: db.userroomid_leftstate.clone(),
userroomid_notificationcount: db.userroomid_notificationcount.clone(),
userroomid_highlightcount: db.userroomid_highlightcount.clone(),
pduid_pdu: db.pduid_pdu.clone(),
keychangeid_userid: db.keychangeid_userid.clone(),
roomusertype_roomuserdataid: db.roomusertype_roomuserdataid.clone(),
server_signingkeys: db.server_signingkeys.clone(),
readreceiptid_readreceipt: db.readreceiptid_readreceipt.clone(),
userid_lastonetimekeyupdate: db.userid_lastonetimekeyupdate.clone(),
global: db["global"].clone(),
todeviceid_events: db["todeviceid_events"].clone(),
userroomid_joined: db["userroomid_joined"].clone(),
userroomid_invitestate: db["userroomid_invitestate"].clone(),
userroomid_leftstate: db["userroomid_leftstate"].clone(),
userroomid_notificationcount: db["userroomid_notificationcount"].clone(),
userroomid_highlightcount: db["userroomid_highlightcount"].clone(),
pduid_pdu: db["pduid_pdu"].clone(),
keychangeid_userid: db["keychangeid_userid"].clone(),
roomusertype_roomuserdataid: db["roomusertype_roomuserdataid"].clone(),
server_signingkeys: db["server_signingkeys"].clone(),
readreceiptid_readreceipt: db["readreceiptid_readreceipt"].clone(),
userid_lastonetimekeyupdate: db["userid_lastonetimekeyupdate"].clone(),
db: db.clone(),
}
}
@ -298,7 +298,6 @@ lasttimelinecount_cache: {lasttimelinecount_cache} / {max_lasttimelinecount_cach
/// for the server.
pub fn signing_keys_for(&self, origin: &ServerName) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
let signingkeys = self
.db
.server_signingkeys
.get(origin.as_bytes())?
.and_then(|bytes| serde_json::from_slice(&bytes).ok())

View file

@ -10,7 +10,7 @@ use std::{
};
use conduit::{debug, debug_info, debug_warn, error, info, utils, warn, Config, Error, Result};
use database::KeyValueDatabase;
use database::Database;
use itertools::Itertools;
use ruma::{
events::{push_rules::PushRulesEvent, room::member::MembershipState, GlobalAccountDataEventType},
@ -28,7 +28,7 @@ use crate::services;
/// equal or lesser version. These are expected to be backward-compatible.
const DATABASE_VERSION: u64 = 13;
pub(crate) async fn migrations(db: &KeyValueDatabase, config: &Config) -> Result<()> {
pub(crate) async fn migrations(db: &Arc<Database>, config: &Config) -> Result<()> {
// Matrix resource ownership is based on the server name; changing it
// requires recreating the database from scratch.
if services().users.count()? > 0 {
@ -49,13 +49,11 @@ pub(crate) async fn migrations(db: &KeyValueDatabase, config: &Config) -> Result
}
}
async fn fresh(db: &KeyValueDatabase, config: &Config) -> Result<()> {
async fn fresh(db: &Arc<Database>, config: &Config) -> Result<()> {
services().globals.bump_database_version(DATABASE_VERSION)?;
db.global
.insert(b"fix_bad_double_separator_in_state_cache", &[])?;
db.global
.insert(b"retroactively_fix_bad_data_from_roomuserid_joined", &[])?;
db["global"].insert(b"fix_bad_double_separator_in_state_cache", &[])?;
db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", &[])?;
// Create the admin room and server user on first run
crate::admin::create_admin_room().await?;
@ -69,7 +67,7 @@ async fn fresh(db: &KeyValueDatabase, config: &Config) -> Result<()> {
}
/// Apply any migrations
async fn migrate(db: &KeyValueDatabase, config: &Config) -> Result<()> {
async fn migrate(db: &Arc<Database>, config: &Config) -> Result<()> {
if services().globals.database_version()? < 1 {
db_lt_1(db, config).await?;
}
@ -124,22 +122,20 @@ async fn migrate(db: &KeyValueDatabase, config: &Config) -> Result<()> {
db_lt_13(db, config).await?;
}
if db.global.get(b"feat_sha256_media")?.is_none() {
if db["global"].get(b"feat_sha256_media")?.is_none() {
migrate_sha256_media(db, config).await?;
} else if config.media_startup_check {
checkup_sha256_media(db, config).await?;
}
if db
.global
if db["global"]
.get(b"fix_bad_double_separator_in_state_cache")?
.is_none()
{
fix_bad_double_separator_in_state_cache(db, config).await?;
}
if db
.global
if db["global"]
.get(b"retroactively_fix_bad_data_from_roomuserid_joined")?
.is_none()
{
@ -212,8 +208,10 @@ async fn migrate(db: &KeyValueDatabase, config: &Config) -> Result<()> {
Ok(())
}
async fn db_lt_1(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
for (roomserverid, _) in db.roomserverids.iter() {
async fn db_lt_1(db: &Arc<Database>, _config: &Config) -> Result<()> {
let roomserverids = &db["roomserverids"];
let serverroomids = &db["serverroomids"];
for (roomserverid, _) in roomserverids.iter() {
let mut parts = roomserverid.split(|&b| b == 0xFF);
let room_id = parts.next().expect("split always returns one element");
let Some(servername) = parts.next() else {
@ -224,7 +222,7 @@ async fn db_lt_1(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
serverroomid.push(0xFF);
serverroomid.extend_from_slice(room_id);
db.serverroomids.insert(&serverroomid, &[])?;
serverroomids.insert(&serverroomid, &[])?;
}
services().globals.bump_database_version(1)?;
@ -232,14 +230,15 @@ async fn db_lt_1(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
Ok(())
}
async fn db_lt_2(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
async fn db_lt_2(db: &Arc<Database>, _config: &Config) -> Result<()> {
// We accidentally inserted hashed versions of "" into the db instead of just ""
for (userid, password) in db.userid_password.iter() {
	let userid_password = &db["userid_password"];
for (userid, password) in userid_password.iter() {
let empty_pass = utils::hash::password("").expect("our own password to be properly hashed");
let password = std::str::from_utf8(&password).expect("password is valid utf-8");
let empty_hashed_password = utils::hash::verify_password(password, &empty_pass).is_ok();
if empty_hashed_password {
db.userid_password.insert(&userid, b"")?;
userid_password.insert(&userid, b"")?;
}
}
@ -248,9 +247,10 @@ async fn db_lt_2(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
Ok(())
}
async fn db_lt_3(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
async fn db_lt_3(db: &Arc<Database>, _config: &Config) -> Result<()> {
// Move media to filesystem
for (key, content) in db.mediaid_file.iter() {
let mediaid_file = &db["mediaid_file"];
for (key, content) in mediaid_file.iter() {
if content.is_empty() {
continue;
}
@ -259,7 +259,7 @@ async fn db_lt_3(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
let path = services().media.get_media_file(&key);
let mut file = fs::File::create(path)?;
file.write_all(&content)?;
db.mediaid_file.insert(&key, &[])?;
mediaid_file.insert(&key, &[])?;
}
services().globals.bump_database_version(3)?;
@ -267,7 +267,7 @@ async fn db_lt_3(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
Ok(())
}
async fn db_lt_4(_db: &KeyValueDatabase, config: &Config) -> Result<()> {
async fn db_lt_4(_db: &Arc<Database>, config: &Config) -> Result<()> {
// Add federated users to services() as deactivated
for our_user in services().users.iter() {
let our_user = our_user?;
@ -290,9 +290,11 @@ async fn db_lt_4(_db: &KeyValueDatabase, config: &Config) -> Result<()> {
Ok(())
}
async fn db_lt_5(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
async fn db_lt_5(db: &Arc<Database>, _config: &Config) -> Result<()> {
// Upgrade user data store
for (roomuserdataid, _) in db.roomuserdataid_accountdata.iter() {
let roomuserdataid_accountdata = &db["roomuserdataid_accountdata"];
let roomusertype_roomuserdataid = &db["roomusertype_roomuserdataid"];
for (roomuserdataid, _) in roomuserdataid_accountdata.iter() {
let mut parts = roomuserdataid.split(|&b| b == 0xFF);
let room_id = parts.next().unwrap();
let user_id = parts.next().unwrap();
@ -304,8 +306,7 @@ async fn db_lt_5(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
key.push(0xFF);
key.extend_from_slice(event_type);
db.roomusertype_roomuserdataid
.insert(&key, &roomuserdataid)?;
roomusertype_roomuserdataid.insert(&key, &roomuserdataid)?;
}
services().globals.bump_database_version(5)?;
@ -313,9 +314,10 @@ async fn db_lt_5(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
Ok(())
}
async fn db_lt_6(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
async fn db_lt_6(db: &Arc<Database>, _config: &Config) -> Result<()> {
// Set room member count
for (roomid, _) in db.roomid_shortstatehash.iter() {
let roomid_shortstatehash = &db["roomid_shortstatehash"];
for (roomid, _) in roomid_shortstatehash.iter() {
let string = utils::string_from_bytes(&roomid).unwrap();
let room_id = <&RoomId>::try_from(string.as_str()).unwrap();
services().rooms.state_cache.update_joined_count(room_id)?;
@ -326,7 +328,7 @@ async fn db_lt_6(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
Ok(())
}
async fn db_lt_7(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
async fn db_lt_7(db: &Arc<Database>, _config: &Config) -> Result<()> {
// Upgrade state store
let mut last_roomstates: HashMap<OwnedRoomId, u64> = HashMap::new();
let mut current_sstatehash: Option<u64> = None;
@ -399,7 +401,9 @@ async fn db_lt_7(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
Ok::<_, Error>(())
};
for (k, seventid) in db.db.open_tree("stateid_shorteventid")?.iter() {
let stateid_shorteventid = &db["stateid_shorteventid"];
let shorteventid_eventid = &db["shorteventid_eventid"];
for (k, seventid) in stateid_shorteventid.iter() {
let sstatehash = utils::u64_from_bytes(&k[0..size_of::<u64>()]).expect("number of bytes is correct");
let sstatekey = k[size_of::<u64>()..].to_vec();
if Some(sstatehash) != current_sstatehash {
@ -415,7 +419,7 @@ async fn db_lt_7(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
current_state = HashSet::new();
current_sstatehash = Some(sstatehash);
let event_id = db.shorteventid_eventid.get(&seventid).unwrap().unwrap();
let event_id = shorteventid_eventid.get(&seventid).unwrap().unwrap();
let string = utils::string_from_bytes(&event_id).unwrap();
let event_id = <&EventId>::try_from(string.as_str()).unwrap();
let pdu = services()
@ -449,15 +453,20 @@ async fn db_lt_7(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
Ok(())
}
async fn db_lt_8(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
async fn db_lt_8(db: &Arc<Database>, _config: &Config) -> Result<()> {
let roomid_shortstatehash = &db["roomid_shortstatehash"];
let roomid_shortroomid = &db["roomid_shortroomid"];
let pduid_pdu = &db["pduid_pdu"];
let eventid_pduid = &db["eventid_pduid"];
// Generate short room ids for all rooms
for (room_id, _) in db.roomid_shortstatehash.iter() {
for (room_id, _) in roomid_shortstatehash.iter() {
let shortroomid = services().globals.next_count()?.to_be_bytes();
db.roomid_shortroomid.insert(&room_id, &shortroomid)?;
roomid_shortroomid.insert(&room_id, &shortroomid)?;
info!("Migration: 8");
}
// Update pduids db layout
let mut batch = db.pduid_pdu.iter().filter_map(|(key, v)| {
let mut batch = pduid_pdu.iter().filter_map(|(key, v)| {
if !key.starts_with(b"!") {
return None;
}
@ -465,8 +474,7 @@ async fn db_lt_8(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
let room_id = parts.next().unwrap();
let count = parts.next().unwrap();
let short_room_id = db
.roomid_shortroomid
let short_room_id = roomid_shortroomid
.get(room_id)
.unwrap()
.expect("shortroomid should exist");
@ -477,9 +485,9 @@ async fn db_lt_8(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
Some((new_key, v))
});
db.pduid_pdu.insert_batch(&mut batch)?;
pduid_pdu.insert_batch(&mut batch)?;
let mut batch2 = db.eventid_pduid.iter().filter_map(|(k, value)| {
let mut batch2 = eventid_pduid.iter().filter_map(|(k, value)| {
if !value.starts_with(b"!") {
return None;
}
@ -487,8 +495,7 @@ async fn db_lt_8(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
let room_id = parts.next().unwrap();
let count = parts.next().unwrap();
let short_room_id = db
.roomid_shortroomid
let short_room_id = roomid_shortroomid
.get(room_id)
.unwrap()
.expect("shortroomid should exist");
@ -499,17 +506,19 @@ async fn db_lt_8(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
Some((k, new_value))
});
db.eventid_pduid.insert_batch(&mut batch2)?;
eventid_pduid.insert_batch(&mut batch2)?;
services().globals.bump_database_version(8)?;
info!("Migration: 7 -> 8 finished");
Ok(())
}
async fn db_lt_9(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
async fn db_lt_9(db: &Arc<Database>, _config: &Config) -> Result<()> {
let tokenids = &db["tokenids"];
let roomid_shortroomid = &db["roomid_shortroomid"];
// Update tokenids db layout
let mut iter = db
.tokenids
let mut iter = tokenids
.iter()
.filter_map(|(key, _)| {
if !key.starts_with(b"!") {
@ -521,8 +530,7 @@ async fn db_lt_9(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
let _pdu_id_room = parts.next().unwrap();
let pdu_id_count = parts.next().unwrap();
let short_room_id = db
.roomid_shortroomid
let short_room_id = roomid_shortroomid
.get(room_id)
.unwrap()
.expect("shortroomid should exist");
@ -535,14 +543,13 @@ async fn db_lt_9(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
.peekable();
while iter.peek().is_some() {
db.tokenids.insert_batch(&mut iter.by_ref().take(1000))?;
tokenids.insert_batch(&mut iter.by_ref().take(1000))?;
debug!("Inserted smaller batch");
}
info!("Deleting starts");
let batch2: Vec<_> = db
.tokenids
let batch2: Vec<_> = tokenids
.iter()
.filter_map(|(key, _)| {
if key.starts_with(b"!") {
@ -554,7 +561,7 @@ async fn db_lt_9(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
.collect();
for key in batch2 {
db.tokenids.remove(&key)?;
tokenids.remove(&key)?;
}
services().globals.bump_database_version(9)?;
@ -562,11 +569,13 @@ async fn db_lt_9(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
Ok(())
}
async fn db_lt_10(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
async fn db_lt_10(db: &Arc<Database>, _config: &Config) -> Result<()> {
let statekey_shortstatekey = &db["statekey_shortstatekey"];
let shortstatekey_statekey = &db["shortstatekey_statekey"];
// Add other direction for shortstatekeys
for (statekey, shortstatekey) in db.statekey_shortstatekey.iter() {
db.shortstatekey_statekey
.insert(&shortstatekey, &statekey)?;
for (statekey, shortstatekey) in statekey_shortstatekey.iter() {
shortstatekey_statekey.insert(&shortstatekey, &statekey)?;
}
// Force E2EE device list updates so we can send them over federation
@ -579,17 +588,16 @@ async fn db_lt_10(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
Ok(())
}
async fn db_lt_11(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
db.db
.open_tree("userdevicesessionid_uiaarequest")?
.clear()?;
async fn db_lt_11(db: &Arc<Database>, _config: &Config) -> Result<()> {
let _userdevicesessionid_uiaarequest = &db["userdevicesessionid_uiaarequest"];
//userdevicesessionid_uiaarequest.clear()?;
services().globals.bump_database_version(11)?;
info!("Migration: 10 -> 11 finished");
Ok(())
}
async fn db_lt_12(_db: &KeyValueDatabase, config: &Config) -> Result<()> {
async fn db_lt_12(_db: &Arc<Database>, config: &Config) -> Result<()> {
for username in services().users.list_local_users()? {
let user = match UserId::parse_with_server_name(username.clone(), &config.server_name) {
Ok(u) => u,
@ -657,7 +665,7 @@ async fn db_lt_12(_db: &KeyValueDatabase, config: &Config) -> Result<()> {
Ok(())
}
async fn db_lt_13(_db: &KeyValueDatabase, config: &Config) -> Result<()> {
async fn db_lt_13(_db: &Arc<Database>, config: &Config) -> Result<()> {
for username in services().users.list_local_users()? {
let user = match UserId::parse_with_server_name(username.clone(), &config.server_name) {
Ok(u) => u,
@ -697,12 +705,13 @@ async fn db_lt_13(_db: &KeyValueDatabase, config: &Config) -> Result<()> {
/// Migrates a media directory from legacy base64 file names to sha2 file names.
/// All errors are fatal. Upon success the database is keyed to not perform this
/// again.
async fn migrate_sha256_media(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
async fn migrate_sha256_media(db: &Arc<Database>, _config: &Config) -> Result<()> {
warn!("Migrating legacy base64 file names to sha256 file names");
let mediaid_file = &db["mediaid_file"];
// Move old media files to new names
let mut changes = Vec::<(PathBuf, PathBuf)>::new();
for (key, _) in db.mediaid_file.iter() {
for (key, _) in mediaid_file.iter() {
let old = services().media.get_media_file_b64(&key);
let new = services().media.get_media_file_sha256(&key);
debug!(?key, ?old, ?new, num = changes.len(), "change");
@ -722,7 +731,7 @@ async fn migrate_sha256_media(db: &KeyValueDatabase, _config: &Config) -> Result
services().globals.bump_database_version(13)?;
}
db.global.insert(b"feat_sha256_media", &[])?;
db["global"].insert(b"feat_sha256_media", &[])?;
info!("Finished applying sha256_media");
Ok(())
}
@ -731,10 +740,13 @@ async fn migrate_sha256_media(db: &KeyValueDatabase, _config: &Config) -> Result
/// - Going back and forth to non-sha256 legacy binaries (e.g. upstream).
/// - Deletion of artifacts in the media directory which will then fall out of
/// sync with the database.
async fn checkup_sha256_media(db: &KeyValueDatabase, config: &Config) -> Result<()> {
async fn checkup_sha256_media(db: &Arc<Database>, config: &Config) -> Result<()> {
use crate::media::encode_key;
debug!("Checking integrity of media directory");
let mediaid_file = &db["mediaid_file"];
let mediaid_user = &db["mediaid_user"];
let dbs = (mediaid_file, mediaid_user);
let media = &services().media;
let timer = Instant::now();
@ -746,7 +758,7 @@ async fn checkup_sha256_media(db: &KeyValueDatabase, config: &Config) -> Result<
for key in media.db.get_all_media_keys() {
let new_path = media.get_media_file_sha256(&key).into_os_string();
let old_path = media.get_media_file_b64(&key).into_os_string();
if let Err(e) = handle_media_check(db, config, &files, &key, &new_path, &old_path).await {
if let Err(e) = handle_media_check(&dbs, config, &files, &key, &new_path, &old_path).await {
error!(
media_id = ?encode_key(&key), ?new_path, ?old_path,
"Failed to resolve media check failure: {e}"
@ -763,9 +775,11 @@ async fn checkup_sha256_media(db: &KeyValueDatabase, config: &Config) -> Result<
}
async fn handle_media_check(
db: &KeyValueDatabase, config: &Config, files: &HashSet<OsString>, key: &[u8], new_path: &OsStr, old_path: &OsStr,
dbs: &(&Arc<database::Map>, &Arc<database::Map>), config: &Config, files: &HashSet<OsString>, key: &[u8],
new_path: &OsStr, old_path: &OsStr,
) -> Result<()> {
use crate::media::encode_key;
let (mediaid_file, mediaid_user) = dbs;
let old_exists = files.contains(old_path);
let new_exists = files.contains(new_path);
@ -775,8 +789,8 @@ async fn handle_media_check(
"Media is missing at all paths. Removing from database..."
);
db.mediaid_file.remove(key)?;
db.mediaid_user.remove(key)?;
mediaid_file.remove(key)?;
mediaid_user.remove(key)?;
}
if config.media_compat_file_link && !old_exists && new_exists {
@ -801,13 +815,13 @@ async fn handle_media_check(
Ok(())
}
async fn fix_bad_double_separator_in_state_cache(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
async fn fix_bad_double_separator_in_state_cache(db: &Arc<Database>, _config: &Config) -> Result<()> {
warn!("Fixing bad double separator in state_cache roomuserid_joined");
let roomuserid_joined = &db["roomuserid_joined"];
let _cork = database::Cork::new(&db.db, true, true);
let mut iter_count: usize = 0;
let _cork = db.db.cork();
for (mut key, value) in db.roomuserid_joined.iter() {
for (mut key, value) in roomuserid_joined.iter() {
iter_count = iter_count.saturating_add(1);
debug_info!(%iter_count);
let first_sep_index = key.iter().position(|&i| i == 0xFF).unwrap();
@ -820,24 +834,24 @@ async fn fix_bad_double_separator_in_state_cache(db: &KeyValueDatabase, _config:
== vec![0xFF, 0xFF]
{
debug_warn!("Found bad key: {key:?}");
db.roomuserid_joined.remove(&key)?;
roomuserid_joined.remove(&key)?;
key.remove(first_sep_index);
debug_warn!("Fixed key: {key:?}");
db.roomuserid_joined.insert(&key, &value)?;
roomuserid_joined.insert(&key, &value)?;
}
}
db.db.cleanup()?;
db.global
.insert(b"fix_bad_double_separator_in_state_cache", &[])?;
db["global"].insert(b"fix_bad_double_separator_in_state_cache", &[])?;
info!("Finished fixing");
Ok(())
}
async fn retroactively_fix_bad_data_from_roomuserid_joined(db: &KeyValueDatabase, _config: &Config) -> Result<()> {
async fn retroactively_fix_bad_data_from_roomuserid_joined(db: &Arc<Database>, _config: &Config) -> Result<()> {
warn!("Retroactively fixing bad data from broken roomuserid_joined");
let _cork = database::Cork::new(&db.db, true, true);
let room_ids = services()
.rooms
@ -846,8 +860,6 @@ async fn retroactively_fix_bad_data_from_roomuserid_joined(db: &KeyValueDatabase
.filter_map(Result::ok)
.collect_vec();
let _cork = db.db.cork();
for room_id in room_ids.clone() {
debug_info!("Fixing room {room_id}");
@ -910,8 +922,7 @@ async fn retroactively_fix_bad_data_from_roomuserid_joined(db: &KeyValueDatabase
}
db.db.cleanup()?;
db.global
.insert(b"retroactively_fix_bad_data_from_roomuserid_joined", &[])?;
db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", &[])?;
info!("Finished fixing");
Ok(())

View file

@ -1,5 +1,3 @@
use conduit::Server;
mod client;
mod data;
pub(super) mod emerg_access;
@ -13,8 +11,9 @@ use std::{
time::Instant,
};
use conduit::utils;
use conduit::{error, trace, utils::MutexMap, Config, Result, Server};
use data::Data;
use database::Database;
use hickory_resolver::TokioAsyncResolver;
use ipaddress::IPAddress;
use regex::RegexSet;
@ -31,11 +30,9 @@ use tokio::{
sync::{Mutex, RwLock},
task::JoinHandle,
};
use tracing::{error, trace};
use url::Url;
use utils::MutexMap;
use crate::{services, Config, KeyValueDatabase, Result};
use crate::services;
type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries
@ -64,7 +61,7 @@ pub struct Service {
}
impl Service {
pub fn build(server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
let config = &server.config;
let db = Data::new(db);
let keypair = db.load_keypair();

View file

@ -6,13 +6,13 @@ use std::{
time::Duration,
};
use conduit::{error, Config, Error};
use hickory_resolver::TokioAsyncResolver;
use reqwest::dns::{Addrs, Name, Resolve, Resolving};
use ruma::OwnedServerName;
use tokio::sync::RwLock;
use tracing::error;
use crate::{service::sending::FedDest, Config, Error};
use crate::sending::FedDest;
pub(crate) type WellKnownMap = HashMap<OwnedServerName, (FedDest, String)>;
type TlsNameMap = HashMap<String, (Vec<IpAddr>, u16)>;

View file

@ -1,5 +1,7 @@
use std::{collections::BTreeMap, sync::Arc};
use conduit::{utils, Error, Result};
use database::{Database, Map};
use ruma::{
api::client::{
backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup},
@ -9,20 +11,20 @@ use ruma::{
OwnedRoomId, RoomId, UserId,
};
use crate::{services, utils, Error, KeyValueDatabase, KvTree, Result};
use crate::services;
pub(crate) struct Data {
backupid_algorithm: Arc<dyn KvTree>,
backupid_etag: Arc<dyn KvTree>,
backupkeyid_backup: Arc<dyn KvTree>,
pub(super) struct Data {
backupid_algorithm: Arc<Map>,
backupid_etag: Arc<Map>,
backupkeyid_backup: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
backupid_algorithm: db.backupid_algorithm.clone(),
backupid_etag: db.backupid_etag.clone(),
backupkeyid_backup: db.backupkeyid_backup.clone(),
backupid_algorithm: db["backupid_algorithm"].clone(),
backupid_etag: db["backupid_etag"].clone(),
backupkeyid_backup: db["backupkeyid_backup"].clone(),
}
}

View file

@ -1,11 +1,10 @@
use conduit::Server;
mod data;
use std::{collections::BTreeMap, sync::Arc};
use conduit::Result;
use conduit::{Result, Server};
use data::Data;
use database::KeyValueDatabase;
use database::Database;
use ruma::{
api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup},
serde::Raw,
@ -13,11 +12,11 @@ use ruma::{
};
pub struct Service {
pub(super) db: Data,
db: Data,
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,24 +1,23 @@
use std::sync::Arc;
use conduit::debug_info;
use database::{KeyValueDatabase, KvTree};
use conduit::{debug, debug_info, Error, Result};
use database::{Database, Map};
use ruma::api::client::error::ErrorKind;
use tracing::debug;
use crate::{media::UrlPreviewData, utils::string_from_bytes, Error, Result};
use crate::{media::UrlPreviewData, utils::string_from_bytes};
pub struct Data {
mediaid_file: Arc<dyn KvTree>,
mediaid_user: Arc<dyn KvTree>,
url_previews: Arc<dyn KvTree>,
pub(crate) struct Data {
mediaid_file: Arc<Map>,
mediaid_user: Arc<Map>,
url_previews: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
mediaid_file: db.mediaid_file.clone(),
mediaid_user: db.mediaid_user.clone(),
url_previews: db.url_previews.clone(),
mediaid_file: db["mediaid_file"].clone(),
mediaid_user: db["mediaid_user"].clone(),
url_previews: db["url_previews"].clone(),
}
}

View file

@ -6,7 +6,7 @@ use std::{collections::HashMap, io::Cursor, path::PathBuf, sync::Arc, time::Syst
use base64::{engine::general_purpose, Engine as _};
use conduit::{debug, debug_error, error, utils, Error, Result, Server};
use data::Data;
use database::KeyValueDatabase;
use database::Database;
use image::imageops::FilterType;
use ruma::{OwnedMxcUri, OwnedUserId};
use serde::Serialize;
@ -44,12 +44,12 @@ pub struct UrlPreviewData {
pub struct Service {
server: Arc<Server>,
pub db: Data,
pub(crate) db: Data,
pub url_preview_mutex: RwLock<HashMap<String, Arc<Mutex<()>>>>,
}
impl Service {
pub fn build(server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
server: server.clone(),
db: Data::new(db),

View file

@ -1,6 +1,7 @@
#![cfg(test)]
#[tokio::test]
#[cfg(disable)] //TODO: fixme
async fn long_file_names_works() {
use std::path::PathBuf;

View file

@ -17,15 +17,17 @@ pub mod users;
extern crate conduit_core as conduit;
extern crate conduit_database as database;
use std::sync::{Arc, RwLock};
pub(crate) use conduit::{config, debug_error, debug_info, debug_warn, utils, Config, Error, PduCount, Result, Server};
pub(crate) use database::{KeyValueDatabase, KvTree};
pub use globals::{server_is_ours, user_is_local};
pub use pdu::PduEvent;
pub use services::Services;
use database::Database;
pub(crate) use crate as service;
pub use crate::{
globals::{server_is_ours, user_is_local},
pdu::PduEvent,
services::Services,
};
conduit::mod_ctor! {}
conduit::mod_dtor! {}
@ -34,7 +36,7 @@ static SERVICES: RwLock<Option<&Services>> = RwLock::new(None);
#[allow(clippy::let_underscore_must_use)]
pub async fn init(server: &Arc<Server>) -> Result<()> {
let d = Arc::new(KeyValueDatabase::load_or_create(server).await?);
let d = Arc::new(Database::open(server).await?);
let s = Box::new(Services::build(server.clone(), d.clone()).await?);
_ = SERVICES.write().expect("write locked").insert(Box::leak(s));

View file

@ -1,5 +1,6 @@
use std::{cmp::Ordering, collections::BTreeMap, sync::Arc};
use conduit::{warn, Error};
use ruma::{
canonical_json::redact_content_in_place,
events::{
@ -17,9 +18,8 @@ use serde_json::{
json,
value::{to_raw_value, RawValue as RawJsonValue},
};
use tracing::warn;
use crate::{services, Error};
use crate::services;
#[derive(Deserialize)]
struct ExtractRedactedBecause {

View file

@ -1,26 +1,21 @@
use std::sync::Arc;
use conduit::debug_warn;
use database::KvTree;
use conduit::{debug_warn, utils, Error, Result};
use database::{Database, Map};
use ruma::{events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, UInt, UserId};
use crate::{
presence::Presence,
services,
utils::{self, user_id_from_bytes},
Error, KeyValueDatabase, Result,
};
use crate::{presence::Presence, services};
pub struct Data {
presenceid_presence: Arc<dyn KvTree>,
userid_presenceid: Arc<dyn KvTree>,
presenceid_presence: Arc<Map>,
userid_presenceid: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
presenceid_presence: db.presenceid_presence.clone(),
userid_presenceid: db.userid_presenceid.clone(),
presenceid_presence: db["presenceid_presence"].clone(),
userid_presenceid: db["userid_presenceid"].clone(),
}
}
@ -135,7 +130,7 @@ fn presenceid_key(count: u64, user_id: &UserId) -> Vec<u8> {
#[inline]
fn presenceid_parse(key: &[u8]) -> Result<(u64, OwnedUserId)> {
let (count, user_id) = key.split_at(8);
let user_id = user_id_from_bytes(user_id)?;
let user_id = utils::user_id_from_bytes(user_id)?;
let count = utils::u64_from_bytes(count).unwrap();
Ok((count, user_id))

View file

@ -1,10 +1,10 @@
use conduit::Server;
mod data;
use std::{sync::Arc, time::Duration};
use conduit::{debug, error, utils, Error, Result, Server};
use data::Data;
use database::Database;
use futures_util::{stream::FuturesUnordered, StreamExt};
use ruma::{
events::presence::{PresenceEvent, PresenceEventContent},
@ -13,14 +13,8 @@ use ruma::{
};
use serde::{Deserialize, Serialize};
use tokio::{sync::Mutex, task::JoinHandle, time::sleep};
use tracing::{debug, error};
use crate::{
database::KeyValueDatabase,
services, user_is_local,
utils::{self},
Error, Result,
};
use crate::{services, user_is_local};
/// Represents data required to be kept in order to implement the presence
/// specification.
@ -88,7 +82,7 @@ pub struct Service {
}
impl Service {
pub fn build(server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Arc<Self>> {
pub fn build(server: &Arc<Server>, db: &Arc<Database>) -> Result<Arc<Self>> {
let config = &server.config;
let (timer_sender, timer_receiver) = loole::unbounded();
Ok(Arc::new(Self {

View file

@ -1,21 +1,20 @@
use std::sync::Arc;
use database::{KeyValueDatabase, KvTree};
use conduit::{utils, Error, Result};
use database::{Database, Map};
use ruma::{
api::client::push::{set_pusher, Pusher},
UserId,
};
use crate::{utils, Error, Result};
pub struct Data {
senderkey_pusher: Arc<dyn KvTree>,
pub(super) struct Data {
senderkey_pusher: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
senderkey_pusher: db.senderkey_pusher.clone(),
senderkey_pusher: db["senderkey_pusher"].clone(),
}
}

View file

@ -1,11 +1,11 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::{fmt::Debug, mem, sync::Arc};
use bytes::BytesMut;
use conduit::{debug_info, info, trace, warn, Error, Result, Server};
use data::Data;
use database::Database;
use ipaddress::IPAddress;
use ruma::{
api::{
@ -23,16 +23,15 @@ use ruma::{
serde::Raw,
uint, RoomId, UInt, UserId,
};
use tracing::{info, trace, warn};
use crate::{debug_info, services, Error, PduEvent, Result};
use crate::{services, PduEvent};
pub struct Service {
pub db: Data,
db: Data,
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,22 +1,23 @@
use std::sync::Arc;
use database::KvTree;
use conduit::{utils, Error, Result};
use database::{Database, Map};
use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, UserId};
use crate::{services, utils, Error, KeyValueDatabase, Result};
use crate::services;
pub struct Data {
alias_userid: Arc<dyn KvTree>,
alias_roomid: Arc<dyn KvTree>,
aliasid_alias: Arc<dyn KvTree>,
pub(super) struct Data {
alias_userid: Arc<Map>,
alias_roomid: Arc<Map>,
aliasid_alias: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
alias_userid: db.alias_userid.clone(),
alias_roomid: db.alias_roomid.clone(),
aliasid_alias: db.aliasid_alias.clone(),
alias_userid: db["alias_userid"].clone(),
alias_roomid: db["alias_roomid"].clone(),
aliasid_alias: db["aliasid_alias"].clone(),
}
}
@ -55,7 +56,7 @@ impl Data {
Ok(())
}
pub fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedRoomId>> {
pub(super) fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedRoomId>> {
self.alias_roomid
.get(alias.alias().as_bytes())?
.map(|bytes| {
@ -81,7 +82,7 @@ impl Data {
.transpose()
}
pub fn local_aliases_for_room<'a>(
pub(super) fn local_aliases_for_room<'a>(
&'a self, room_id: &RoomId,
) -> Box<dyn Iterator<Item = Result<OwnedRoomAliasId>> + 'a> {
let mut prefix = room_id.as_bytes().to_vec();
@ -95,7 +96,7 @@ impl Data {
}))
}
pub fn all_local_aliases<'a>(&'a self) -> Box<dyn Iterator<Item = Result<(OwnedRoomId, String)>> + 'a> {
pub(super) fn all_local_aliases<'a>(&'a self) -> Box<dyn Iterator<Item = Result<(OwnedRoomId, String)>> + 'a> {
Box::new(
self.alias_roomid
.iter()

View file

@ -1,11 +1,10 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::sync::Arc;
use conduit::{Error, Result, Server};
use data::Data;
use database::Database;
use ruma::{
api::client::error::ErrorKind,
events::{
@ -15,14 +14,14 @@ use ruma::{
OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, UserId,
};
use crate::{services, Error, Result};
use crate::services;
pub struct Service {
pub db: Data,
db: Data,
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,18 +1,17 @@
use std::{mem::size_of, sync::Arc};
use database::KvTree;
use crate::{utils, KeyValueDatabase, Result};
use conduit::{utils, Result};
use database::{Database, Map};
pub(super) struct Data {
shorteventid_authchain: Arc<dyn KvTree>,
db: Arc<KeyValueDatabase>,
shorteventid_authchain: Arc<Map>,
db: Arc<Database>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
shorteventid_authchain: db.shorteventid_authchain.clone(),
shorteventid_authchain: db["shorteventid_authchain"].clone(),
db: db.clone(),
}
}

View file

@ -1,24 +1,23 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::{
collections::{BTreeSet, HashSet},
sync::Arc,
};
use conduit::{debug, error, trace, warn, Error, Result, Server};
use data::Data;
use database::Database;
use ruma::{api::client::error::ErrorKind, EventId, RoomId};
use tracing::{debug, error, trace, warn};
use crate::{services, Error, Result};
use crate::services;
pub struct Service {
db: Data,
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,18 +1,17 @@
use std::sync::Arc;
use database::KvTree;
use conduit::{utils, Error, Result};
use database::{Database, Map};
use ruma::{OwnedRoomId, RoomId};
use crate::{utils, Error, KeyValueDatabase, Result};
pub(super) struct Data {
publicroomids: Arc<dyn KvTree>,
publicroomids: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
publicroomids: db.publicroomids.clone(),
publicroomids: db["publicroomids"].clone(),
}
}

View file

@ -1,11 +1,10 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::sync::Arc;
use conduit::Server;
use data::Data;
use database::Database;
use ruma::{OwnedRoomId, RoomId};
use crate::Result;
@ -15,7 +14,7 @@ pub struct Service {
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,6 +1,3 @@
use conduit::Server;
use database::KeyValueDatabase;
mod parse_incoming_pdu;
mod signing_keys;
@ -12,6 +9,8 @@ use std::{
time::{Duration, Instant},
};
use conduit::{debug_error, debug_info, Error, Result, Server};
use database::Database;
use futures_util::Future;
pub use parse_incoming_pdu::parse_incoming_pdu;
use ruma::{
@ -32,7 +31,7 @@ use tokio::sync::RwLock;
use tracing::{debug, error, info, trace, warn};
use super::state_compressor::CompressedStateEvent;
use crate::{debug_error, debug_info, pdu, services, Error, PduEvent, Result};
use crate::{pdu, services, PduEvent};
pub struct Service;
@ -45,7 +44,7 @@ type AsyncRecursiveCanonicalJsonResult<'a> =
AsyncRecursiveType<'a, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>>;
impl Service {
pub fn build(_server: &Arc<Server>, _db: &Arc<KeyValueDatabase>) -> Result<Self> { Ok(Self {}) }
pub fn build(_server: &Arc<Server>, _db: &Arc<Database>) -> Result<Self> { Ok(Self {}) }
/// When receiving an event one needs to:
/// 0. Check the server is in the room

View file

@ -1,8 +1,9 @@
use conduit::{Error, Result};
use ruma::{api::client::error::ErrorKind, CanonicalJsonObject, OwnedEventId, OwnedRoomId, RoomId};
use serde_json::value::RawValue as RawJsonValue;
use tracing::warn;
use crate::{service::pdu::gen_event_id_canonical_json, services, Error, Result};
use crate::{pdu::gen_event_id_canonical_json, services};
pub fn parse_incoming_pdu(pdu: &RawJsonValue) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> {
let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {

View file

@ -1,18 +1,17 @@
use std::sync::Arc;
use database::KvTree;
use conduit::Result;
use database::{Database, Map};
use ruma::{DeviceId, RoomId, UserId};
use crate::{KeyValueDatabase, Result};
pub struct Data {
lazyloadedids: Arc<dyn KvTree>,
pub(super) struct Data {
lazyloadedids: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
lazyloadedids: db.lazyloadedids.clone(),
lazyloadedids: db["lazyloadedids"].clone(),
}
}

View file

@ -1,27 +1,27 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::{
collections::{HashMap, HashSet},
sync::Arc,
};
use conduit::Server;
use data::Data;
use database::Database;
use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId};
use tokio::sync::Mutex;
use crate::{PduCount, Result};
pub struct Service {
pub db: Data,
db: Data,
#[allow(clippy::type_complexity)]
pub lazy_load_waiting: Mutex<HashMap<(OwnedUserId, OwnedDeviceId, OwnedRoomId, PduCount), HashSet<OwnedUserId>>>,
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
lazy_load_waiting: Mutex::new(HashMap::new()),

View file

@ -1,25 +1,25 @@
use std::sync::Arc;
use database::KvTree;
use conduit::{error, utils, Error, Result};
use database::{Database, Map};
use ruma::{OwnedRoomId, RoomId};
use tracing::error;
use crate::{services, utils, Error, KeyValueDatabase, Result};
use crate::services;
pub struct Data {
disabledroomids: Arc<dyn KvTree>,
bannedroomids: Arc<dyn KvTree>,
roomid_shortroomid: Arc<dyn KvTree>,
pduid_pdu: Arc<dyn KvTree>,
pub(super) struct Data {
disabledroomids: Arc<Map>,
bannedroomids: Arc<Map>,
roomid_shortroomid: Arc<Map>,
pduid_pdu: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
disabledroomids: db.disabledroomids.clone(),
bannedroomids: db.bannedroomids.clone(),
roomid_shortroomid: db.roomid_shortroomid.clone(),
pduid_pdu: db.pduid_pdu.clone(),
disabledroomids: db["disabledroomids"].clone(),
bannedroomids: db["bannedroomids"].clone(),
roomid_shortroomid: db["roomid_shortroomid"].clone(),
pduid_pdu: db["pduid_pdu"].clone(),
}
}

View file

@ -1,21 +1,18 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::sync::Arc;
use conduit::{Result, Server};
use data::Data;
use database::Database;
use ruma::{OwnedRoomId, RoomId};
use crate::Result;
pub struct Service {
pub db: Data,
db: Data,
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,18 +1,19 @@
use std::sync::Arc;
use database::KvTree;
use conduit::{Error, Result};
use database::{Database, Map};
use ruma::{CanonicalJsonObject, EventId};
use crate::{Error, KeyValueDatabase, PduEvent, Result};
use crate::PduEvent;
pub struct Data {
eventid_outlierpdu: Arc<dyn KvTree>,
pub(super) struct Data {
eventid_outlierpdu: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
eventid_outlierpdu: db.eventid_outlierpdu.clone(),
eventid_outlierpdu: db["eventid_outlierpdu"].clone(),
}
}

View file

@ -1,21 +1,20 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::sync::Arc;
use conduit::{Result, Server};
use data::Data;
use database::Database;
use ruma::{CanonicalJsonObject, EventId};
use crate::{PduEvent, Result};
use crate::PduEvent;
pub struct Service {
pub db: Data,
db: Data,
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,25 +1,26 @@
use std::{mem::size_of, sync::Arc};
use database::KvTree;
use conduit::{utils, Error, Result};
use database::{Database, Map};
use ruma::{EventId, RoomId, UserId};
use crate::{services, utils, Error, KeyValueDatabase, PduCount, PduEvent, Result};
use crate::{services, PduCount, PduEvent};
pub(super) struct Data {
tofrom_relation: Arc<dyn KvTree>,
referencedevents: Arc<dyn KvTree>,
softfailedeventids: Arc<dyn KvTree>,
tofrom_relation: Arc<Map>,
referencedevents: Arc<Map>,
softfailedeventids: Arc<Map>,
}
type PdusIterItem = Result<(PduCount, PduEvent)>;
type PdusIterator<'a> = Box<dyn Iterator<Item = PdusIterItem> + 'a>;
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
tofrom_relation: db.tofrom_relation.clone(),
referencedevents: db.referencedevents.clone(),
softfailedeventids: db.softfailedeventids.clone(),
tofrom_relation: db["tofrom_relation"].clone(),
referencedevents: db["referencedevents"].clone(),
softfailedeventids: db["softfailedeventids"].clone(),
}
}

View file

@ -1,11 +1,10 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::sync::Arc;
use conduit::{Result, Server};
use data::Data;
use database::Database;
use ruma::{
api::{client::relations::get_relating_events, Direction},
events::{relation::RelationType, TimelineEventType},
@ -13,7 +12,7 @@ use ruma::{
};
use serde::Deserialize;
use crate::{services, PduCount, PduEvent, Result};
use crate::{services, PduCount, PduEvent};
pub struct Service {
db: Data,
@ -30,7 +29,7 @@ struct ExtractRelatesToEventId {
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,29 +1,30 @@
use std::{mem::size_of, sync::Arc};
use database::KvTree;
use conduit::{utils, Error, Result};
use database::{Database, Map};
use ruma::{
events::{receipt::ReceiptEvent, AnySyncEphemeralRoomEvent},
serde::Raw,
CanonicalJsonObject, OwnedUserId, RoomId, UserId,
};
use crate::{services, utils, Error, KeyValueDatabase, Result};
use crate::services;
type AnySyncEphemeralRoomEventIter<'a> =
Box<dyn Iterator<Item = Result<(OwnedUserId, u64, Raw<AnySyncEphemeralRoomEvent>)>> + 'a>;
pub(super) struct Data {
roomuserid_privateread: Arc<dyn KvTree>,
roomuserid_lastprivatereadupdate: Arc<dyn KvTree>,
readreceiptid_readreceipt: Arc<dyn KvTree>,
roomuserid_privateread: Arc<Map>,
roomuserid_lastprivatereadupdate: Arc<Map>,
readreceiptid_readreceipt: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
roomuserid_privateread: db.roomuserid_privateread.clone(),
roomuserid_lastprivatereadupdate: db.roomuserid_lastprivatereadupdate.clone(),
readreceiptid_readreceipt: db.readreceiptid_readreceipt.clone(),
roomuserid_privateread: db["roomuserid_privateread"].clone(),
roomuserid_lastprivatereadupdate: db["roomuserid_lastprivatereadupdate"].clone(),
readreceiptid_readreceipt: db["readreceiptid_readreceipt"].clone(),
}
}

View file

@ -1,21 +1,20 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::sync::Arc;
use conduit::{Result, Server};
use data::Data;
use database::Database;
use ruma::{events::receipt::ReceiptEvent, serde::Raw, OwnedUserId, RoomId, UserId};
use crate::{services, Result};
use crate::services;
pub struct Service {
db: Data,
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,20 +1,21 @@
use std::sync::Arc;
use database::KvTree;
use conduit::{utils, Result};
use database::{Database, Map};
use ruma::RoomId;
use crate::{services, utils, KeyValueDatabase, Result};
use crate::services;
type SearchPdusResult<'a> = Result<Option<(Box<dyn Iterator<Item = Vec<u8>> + 'a>, Vec<String>)>>;
pub struct Data {
tokenids: Arc<dyn KvTree>,
pub(super) struct Data {
tokenids: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
tokenids: db.tokenids.clone(),
tokenids: db["tokenids"].clone(),
}
}

View file

@ -1,21 +1,18 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::sync::Arc;
use conduit::{Result, Server};
use data::Data;
use database::Database;
use ruma::RoomId;
use crate::Result;
pub struct Service {
pub db: Data,
db: Data,
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,29 +1,29 @@
use std::sync::Arc;
use conduit::warn;
use database::{KeyValueDatabase, KvTree};
use conduit::{utils, warn, Error, Result};
use database::{Database, Map};
use ruma::{events::StateEventType, EventId, RoomId};
use crate::{services, utils, Error, Result};
use crate::services;
pub(super) struct Data {
eventid_shorteventid: Arc<dyn KvTree>,
shorteventid_eventid: Arc<dyn KvTree>,
statekey_shortstatekey: Arc<dyn KvTree>,
shortstatekey_statekey: Arc<dyn KvTree>,
roomid_shortroomid: Arc<dyn KvTree>,
statehash_shortstatehash: Arc<dyn KvTree>,
eventid_shorteventid: Arc<Map>,
shorteventid_eventid: Arc<Map>,
statekey_shortstatekey: Arc<Map>,
shortstatekey_statekey: Arc<Map>,
roomid_shortroomid: Arc<Map>,
statehash_shortstatehash: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
eventid_shorteventid: db.eventid_shorteventid.clone(),
shorteventid_eventid: db.shorteventid_eventid.clone(),
statekey_shortstatekey: db.statekey_shortstatekey.clone(),
shortstatekey_statekey: db.shortstatekey_statekey.clone(),
roomid_shortroomid: db.roomid_shortroomid.clone(),
statehash_shortstatehash: db.statehash_shortstatehash.clone(),
eventid_shorteventid: db["eventid_shorteventid"].clone(),
shorteventid_eventid: db["shorteventid_eventid"].clone(),
statekey_shortstatekey: db["statekey_shortstatekey"].clone(),
shortstatekey_statekey: db["shortstatekey_statekey"].clone(),
roomid_shortroomid: db["roomid_shortroomid"].clone(),
statehash_shortstatehash: db["statehash_shortstatehash"].clone(),
}
}

View file

@ -1,20 +1,18 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::sync::Arc;
use conduit::{Result, Server};
use data::Data;
use database::Database;
use ruma::{events::StateEventType, EventId, RoomId};
use crate::Result;
pub struct Service {
db: Data,
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -4,8 +4,8 @@ use std::{
sync::Arc,
};
use conduit::Server;
use database::KeyValueDatabase;
use conduit::{debug_info, Error, Result, Server};
use database::Database;
use lru_cache::LruCache;
use ruma::{
api::{
@ -31,7 +31,7 @@ use ruma::{
use tokio::sync::Mutex;
use tracing::{debug, error, warn};
use crate::{debug_info, server_is_ours, services, Error, Result};
use crate::{server_is_ours, services};
pub struct CachedSpaceHierarchySummary {
summary: SpaceHierarchyParentSummary,
@ -333,7 +333,7 @@ impl From<CachedSpaceHierarchySummary> for SpaceHierarchyRoomsChunk {
}
impl Service {
pub fn build(server: &Arc<Server>, _db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(server: &Arc<Server>, _db: &Arc<Database>) -> Result<Self> {
let config = &server.config;
Ok(Self {
roomid_spacehierarchy_cache: Mutex::new(LruCache::new(

View file

@ -1,23 +1,22 @@
use std::{collections::HashSet, sync::Arc};
use conduit::utils::mutex_map;
use database::KvTree;
use conduit::{utils, Error, Result};
use database::{Database, Map};
use ruma::{EventId, OwnedEventId, RoomId};
use utils::mutex_map;
use crate::{utils, Error, KeyValueDatabase, Result};
pub struct Data {
shorteventid_shortstatehash: Arc<dyn KvTree>,
roomid_pduleaves: Arc<dyn KvTree>,
roomid_shortstatehash: Arc<dyn KvTree>,
pub(super) struct Data {
shorteventid_shortstatehash: Arc<Map>,
roomid_pduleaves: Arc<Map>,
roomid_shortstatehash: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
shorteventid_shortstatehash: db.shorteventid_shortstatehash.clone(),
roomid_pduleaves: db.roomid_pduleaves.clone(),
roomid_shortstatehash: db.roomid_shortstatehash.clone(),
shorteventid_shortstatehash: db["shorteventid_shortstatehash"].clone(),
roomid_pduleaves: db["roomid_pduleaves"].clone(),
roomid_shortstatehash: db["roomid_shortstatehash"].clone(),
}
}

View file

@ -1,14 +1,16 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::{
collections::{HashMap, HashSet},
sync::Arc,
};
use conduit::utils::mutex_map;
use conduit::{
utils::{calculate_hash, mutex_map},
warn, Error, Result, Server,
};
use data::Data;
use database::Database;
use ruma::{
api::client::error::ErrorKind,
events::{
@ -19,17 +21,16 @@ use ruma::{
state_res::{self, StateMap},
EventId, OwnedEventId, RoomId, RoomVersionId, UserId,
};
use tracing::warn;
use super::state_compressor::CompressedStateEvent;
use crate::{services, utils::calculate_hash, Error, PduEvent, Result};
use crate::{services, PduEvent};
pub struct Service {
pub db: Data,
db: Data,
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,20 +1,21 @@
use std::{collections::HashMap, sync::Arc};
use database::KvTree;
use conduit::{utils, Error, Result};
use database::{Database, Map};
use ruma::{events::StateEventType, EventId, RoomId};
use crate::{services, utils, Error, KeyValueDatabase, PduEvent, Result};
use crate::{services, PduEvent};
pub struct Data {
eventid_shorteventid: Arc<dyn KvTree>,
shorteventid_shortstatehash: Arc<dyn KvTree>,
pub(super) struct Data {
eventid_shorteventid: Arc<Map>,
shorteventid_shortstatehash: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
eventid_shorteventid: db.eventid_shorteventid.clone(),
shorteventid_shortstatehash: db.shorteventid_shortstatehash.clone(),
eventid_shorteventid: db["eventid_shorteventid"].clone(),
shorteventid_shortstatehash: db["shorteventid_shortstatehash"].clone(),
}
}

View file

@ -1,16 +1,13 @@
use std::sync::Mutex as StdMutex;
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::{
collections::HashMap,
sync::{Arc, Mutex},
sync::{Arc, Mutex as StdMutex, Mutex},
};
use conduit::utils::mutex_map;
use conduit::{error, utils::mutex_map, warn, Error, Result, Server};
use data::Data;
use database::Database;
use lru_cache::LruCache;
use ruma::{
events::{
@ -29,18 +26,17 @@ use ruma::{
EventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
};
use serde_json::value::to_raw_value;
use tracing::{error, warn};
use crate::{service::pdu::PduBuilder, services, Error, PduEvent, Result};
use crate::{pdu::PduBuilder, services, PduEvent};
pub struct Service {
pub db: Data,
db: Data,
pub server_visibility_cache: Mutex<LruCache<(OwnedServerName, u64), bool>>,
pub user_visibility_cache: Mutex<LruCache<(OwnedUserId, u64), bool>>,
}
impl Service {
pub fn build(server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
let config = &server.config;
Ok(Self {
db: Data::new(db),

View file

@ -1,5 +1,7 @@
use std::collections::HashSet;
use std::{collections::HashSet, sync::Arc};
use conduit::{utils, Error, Result};
use database::{Database, Map};
use itertools::Itertools;
use ruma::{
events::{AnyStrippedStateEvent, AnySyncStateEvent},
@ -7,51 +9,42 @@ use ruma::{
OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
};
use crate::{
appservice::RegistrationInfo,
services, user_is_local,
utils::{self},
Error, KeyValueDatabase, Result,
};
use crate::{appservice::RegistrationInfo, services, user_is_local};
type StrippedStateEventIter<'a> = Box<dyn Iterator<Item = Result<(OwnedRoomId, Vec<Raw<AnyStrippedStateEvent>>)>> + 'a>;
type AnySyncStateEventIter<'a> = Box<dyn Iterator<Item = Result<(OwnedRoomId, Vec<Raw<AnySyncStateEvent>>)>> + 'a>;
use std::sync::Arc;
use database::KvTree;
pub struct Data {
userroomid_joined: Arc<dyn KvTree>,
roomuserid_joined: Arc<dyn KvTree>,
userroomid_invitestate: Arc<dyn KvTree>,
roomuserid_invitecount: Arc<dyn KvTree>,
userroomid_leftstate: Arc<dyn KvTree>,
roomuserid_leftcount: Arc<dyn KvTree>,
roomid_inviteviaservers: Arc<dyn KvTree>,
roomuseroncejoinedids: Arc<dyn KvTree>,
roomid_joinedcount: Arc<dyn KvTree>,
roomid_invitedcount: Arc<dyn KvTree>,
roomserverids: Arc<dyn KvTree>,
serverroomids: Arc<dyn KvTree>,
db: Arc<KeyValueDatabase>,
pub(super) struct Data {
userroomid_joined: Arc<Map>,
roomuserid_joined: Arc<Map>,
userroomid_invitestate: Arc<Map>,
roomuserid_invitecount: Arc<Map>,
userroomid_leftstate: Arc<Map>,
roomuserid_leftcount: Arc<Map>,
roomid_inviteviaservers: Arc<Map>,
roomuseroncejoinedids: Arc<Map>,
roomid_joinedcount: Arc<Map>,
roomid_invitedcount: Arc<Map>,
roomserverids: Arc<Map>,
serverroomids: Arc<Map>,
db: Arc<Database>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
userroomid_joined: db.userroomid_joined.clone(),
roomuserid_joined: db.roomuserid_joined.clone(),
userroomid_invitestate: db.userroomid_invitestate.clone(),
roomuserid_invitecount: db.roomuserid_invitecount.clone(),
userroomid_leftstate: db.userroomid_leftstate.clone(),
roomuserid_leftcount: db.roomuserid_leftcount.clone(),
roomid_inviteviaservers: db.roomid_inviteviaservers.clone(),
roomuseroncejoinedids: db.roomuseroncejoinedids.clone(),
roomid_joinedcount: db.roomid_joinedcount.clone(),
roomid_invitedcount: db.roomid_invitedcount.clone(),
roomserverids: db.roomserverids.clone(),
serverroomids: db.serverroomids.clone(),
userroomid_joined: db["userroomid_joined"].clone(),
roomuserid_joined: db["roomuserid_joined"].clone(),
userroomid_invitestate: db["userroomid_invitestate"].clone(),
roomuserid_invitecount: db["roomuserid_invitecount"].clone(),
userroomid_leftstate: db["userroomid_leftstate"].clone(),
roomuserid_leftcount: db["roomuserid_leftcount"].clone(),
roomid_inviteviaservers: db["roomid_inviteviaservers"].clone(),
roomuseroncejoinedids: db["roomuseroncejoinedids"].clone(),
roomid_joinedcount: db["roomid_joinedcount"].clone(),
roomid_invitedcount: db["roomid_invitedcount"].clone(),
roomserverids: db["roomserverids"].clone(),
serverroomids: db["serverroomids"].clone(),
db: db.clone(),
}
}

View file

@ -1,8 +1,10 @@
mod data;
use std::sync::Arc;
use conduit::Server;
use conduit::{error, warn, Error, Result, Server};
use data::Data;
use database::KeyValueDatabase;
use database::Database;
use itertools::Itertools;
use ruma::{
events::{
@ -19,18 +21,15 @@ use ruma::{
serde::Raw,
OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
};
use tracing::{error, warn};
use crate::{service::appservice::RegistrationInfo, services, user_is_local, Error, Result};
mod data;
use crate::{appservice::RegistrationInfo, services, user_is_local};
pub struct Service {
pub db: Data,
db: Data,
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,9 +1,9 @@
use std::{collections::HashSet, mem::size_of, sync::Arc};
use database::KvTree;
use conduit::{utils, Error, Result};
use database::{Database, Map};
use super::CompressedStateEvent;
use crate::{utils, Error, KeyValueDatabase, Result};
pub(super) struct StateDiff {
pub(super) parent: Option<u64>,
@ -11,14 +11,14 @@ pub(super) struct StateDiff {
pub(super) removed: Arc<HashSet<CompressedStateEvent>>,
}
pub struct Data {
shortstatehash_statediff: Arc<dyn KvTree>,
pub(super) struct Data {
shortstatehash_statediff: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
shortstatehash_statediff: db.shortstatehash_statediff.clone(),
shortstatehash_statediff: db["shortstatehash_statediff"].clone(),
}
}

View file

@ -1,21 +1,19 @@
use std::sync::Mutex as StdMutex;
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::{
collections::HashSet,
mem::size_of,
sync::{Arc, Mutex},
sync::{Arc, Mutex as StdMutex, Mutex},
};
use conduit::{utils, Result, Server};
use data::Data;
use database::Database;
use lru_cache::LruCache;
use ruma::{EventId, RoomId};
use self::data::StateDiff;
use crate::{services, utils, Result};
use crate::services;
type StateInfoLruCache = Mutex<
LruCache<
@ -49,13 +47,13 @@ type HashSetCompressStateEvent = Result<(u64, Arc<HashSet<CompressedStateEvent>>
pub type CompressedStateEvent = [u8; 2 * size_of::<u64>()];
pub struct Service {
pub db: Data,
db: Data,
pub stateinfo_cache: StateInfoLruCache,
}
impl Service {
pub fn build(server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
let config = &server.config;
Ok(Self {
db: Data::new(db),

View file

@ -1,20 +1,21 @@
use std::{mem::size_of, sync::Arc};
use database::KvTree;
use conduit::{utils, Error, Result};
use database::{Database, Map};
use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId};
use crate::{services, utils, Error, KeyValueDatabase, PduEvent, Result};
use crate::{services, PduEvent};
type PduEventIterResult<'a> = Result<Box<dyn Iterator<Item = Result<(u64, PduEvent)>> + 'a>>;
pub struct Data {
threadid_userids: Arc<dyn KvTree>,
pub(super) struct Data {
threadid_userids: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
threadid_userids: db.threadid_userids.clone(),
threadid_userids: db["threadid_userids"].clone(),
}
}

View file

@ -1,11 +1,10 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::{collections::BTreeMap, sync::Arc};
use conduit::{Error, Result, Server};
use data::Data;
use database::Database;
use ruma::{
api::client::{error::ErrorKind, threads::get_threads::v1::IncludeThreads},
events::relation::BundledThread,
@ -13,14 +12,14 @@ use ruma::{
};
use serde_json::json;
use crate::{services, Error, PduEvent, Result};
use crate::{services, PduEvent};
pub struct Service {
pub db: Data,
db: Data,
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,32 +1,31 @@
use std::{collections::hash_map, mem::size_of, sync::Arc};
use database::KvTree;
use conduit::{error, utils, Error, Result};
use database::{Database, Map};
use ruma::{api::client::error::ErrorKind, CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId};
use tracing::error;
use super::PduCount;
use crate::{services, utils, Error, KeyValueDatabase, PduEvent, Result};
use crate::{services, PduCount, PduEvent};
pub struct Data {
eventid_pduid: Arc<dyn KvTree>,
pduid_pdu: Arc<dyn KvTree>,
eventid_outlierpdu: Arc<dyn KvTree>,
userroomid_notificationcount: Arc<dyn KvTree>,
userroomid_highlightcount: Arc<dyn KvTree>,
db: Arc<KeyValueDatabase>,
pub(super) struct Data {
eventid_pduid: Arc<Map>,
pduid_pdu: Arc<Map>,
eventid_outlierpdu: Arc<Map>,
userroomid_notificationcount: Arc<Map>,
userroomid_highlightcount: Arc<Map>,
db: Arc<Database>,
}
type PdusIterItem = Result<(PduCount, PduEvent)>;
type PdusIterator<'a> = Box<dyn Iterator<Item = PdusIterItem> + 'a>;
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
eventid_pduid: db.eventid_pduid.clone(),
pduid_pdu: db.pduid_pdu.clone(),
eventid_outlierpdu: db.eventid_outlierpdu.clone(),
userroomid_notificationcount: db.userroomid_notificationcount.clone(),
userroomid_highlightcount: db.userroomid_highlightcount.clone(),
eventid_pduid: db["eventid_pduid"].clone(),
pduid_pdu: db["pduid_pdu"].clone(),
eventid_outlierpdu: db["eventid_outlierpdu"].clone(),
userroomid_notificationcount: db["userroomid_notificationcount"].clone(),
userroomid_highlightcount: db["userroomid_highlightcount"].clone(),
db: db.clone(),
}
}

View file

@ -1,6 +1,3 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::{
@ -8,7 +5,9 @@ use std::{
sync::Arc,
};
use conduit::{debug, error, info, utils, utils::mutex_map, warn, Error, Result, Server};
use data::Data;
use database::Database;
use itertools::Itertools;
use rand::prelude::SliceRandom;
use ruma::{
@ -34,24 +33,13 @@ use ruma::{
use serde::Deserialize;
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
use tokio::sync::{Mutex, RwLock};
use tracing::{debug, error, info, warn};
use super::state_compressor::CompressedStateEvent;
use crate::{
admin,
server_is_ours,
//api::server_server,
service::{
appservice::NamespaceRegex,
pdu::{EventHash, PduBuilder},
rooms::event_handler::parse_incoming_pdu,
},
services,
utils::{self, mutex_map},
Error,
PduCount,
PduEvent,
Result,
appservice::NamespaceRegex,
pdu::{EventHash, PduBuilder},
rooms::{event_handler::parse_incoming_pdu, state_compressor::CompressedStateEvent},
server_is_ours, services, PduCount, PduEvent,
};
// Update Relationships
@ -77,13 +65,13 @@ struct ExtractBody {
}
pub struct Service {
pub db: Data,
db: Data,
pub lasttimelinecount_cache: Mutex<HashMap<OwnedRoomId, PduCount>>,
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
lasttimelinecount_cache: Mutex::new(HashMap::new()),

View file

@ -1,20 +1,15 @@
use std::{collections::BTreeMap, sync::Arc};
use conduit::Server;
use database::KeyValueDatabase;
use conduit::{debug_info, trace, utils, Result, Server};
use database::Database;
use ruma::{
api::federation::transactions::edu::{Edu, TypingContent},
events::SyncEphemeralRoomEvent,
OwnedRoomId, OwnedUserId, RoomId, UserId,
};
use tokio::sync::{broadcast, RwLock};
use tracing::trace;
use crate::{
debug_info, services, user_is_local,
utils::{self},
Result,
};
use crate::{services, user_is_local};
pub struct Service {
pub typing: RwLock<BTreeMap<OwnedRoomId, BTreeMap<OwnedUserId, u64>>>, // u64 is unix timestamp of timeout
@ -25,7 +20,7 @@ pub struct Service {
}
impl Service {
pub fn build(_server: &Arc<Server>, _db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, _db: &Arc<Database>) -> Result<Self> {
Ok(Self {
typing: RwLock::new(BTreeMap::new()),
last_typing_update: RwLock::new(BTreeMap::new()),

View file

@ -1,26 +1,27 @@
use std::sync::Arc;
use database::KvTree;
use conduit::{utils, Error, Result};
use database::{Database, Map};
use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId};
use crate::{services, utils, Error, KeyValueDatabase, Result};
use crate::services;
pub struct Data {
userroomid_notificationcount: Arc<dyn KvTree>,
userroomid_highlightcount: Arc<dyn KvTree>,
roomuserid_lastnotificationread: Arc<dyn KvTree>,
roomsynctoken_shortstatehash: Arc<dyn KvTree>,
userroomid_joined: Arc<dyn KvTree>,
pub(super) struct Data {
userroomid_notificationcount: Arc<Map>,
userroomid_highlightcount: Arc<Map>,
roomuserid_lastnotificationread: Arc<Map>,
roomsynctoken_shortstatehash: Arc<Map>,
userroomid_joined: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
userroomid_notificationcount: db.userroomid_notificationcount.clone(),
userroomid_highlightcount: db.userroomid_highlightcount.clone(),
roomuserid_lastnotificationread: db.roomuserid_lastnotificationread.clone(),
roomsynctoken_shortstatehash: db.roomsynctoken_shortstatehash.clone(),
userroomid_joined: db.userroomid_joined.clone(),
userroomid_notificationcount: db["userroomid_notificationcount"].clone(),
userroomid_highlightcount: db["userroomid_highlightcount"].clone(),
roomuserid_lastnotificationread: db["userroomid_highlightcount"].clone(), //< NOTE: known bug from conduit
roomsynctoken_shortstatehash: db["roomsynctoken_shortstatehash"].clone(),
userroomid_joined: db["userroomid_joined"].clone(),
}
}

View file

@ -1,21 +1,18 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::sync::Arc;
use conduit::{Result, Server};
use data::Data;
use database::Database;
use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId};
use crate::Result;
pub struct Service {
pub db: Data,
db: Data,
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,26 +1,28 @@
use std::sync::Arc;
use conduit::{utils, Error, Result};
use database::{Database, Map};
use ruma::{ServerName, UserId};
use super::{Destination, SendingEvent};
use crate::{services, utils, Error, KeyValueDatabase, KvTree, Result};
use crate::services;
type OutgoingSendingIter<'a> = Box<dyn Iterator<Item = Result<(Vec<u8>, Destination, SendingEvent)>> + 'a>;
type SendingEventIter<'a> = Box<dyn Iterator<Item = Result<(Vec<u8>, SendingEvent)>> + 'a>;
pub struct Data {
servercurrentevent_data: Arc<dyn KvTree>,
servernameevent_data: Arc<dyn KvTree>,
servername_educount: Arc<dyn KvTree>,
_db: Arc<KeyValueDatabase>,
servercurrentevent_data: Arc<Map>,
servernameevent_data: Arc<Map>,
servername_educount: Arc<Map>,
_db: Arc<Database>,
}
impl Data {
pub(super) fn new(db: Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: Arc<Database>) -> Self {
Self {
servercurrentevent_data: db.servercurrentevent_data.clone(),
servernameevent_data: db.servernameevent_data.clone(),
servername_educount: db.servername_educount.clone(),
servercurrentevent_data: db["servercurrentevent_data"].clone(),
servernameevent_data: db["servernameevent_data"].clone(),
servername_educount: db["servername_educount"].clone(),
_db: db,
}
}

View file

@ -1,5 +1,3 @@
use conduit::Server;
mod appservice;
mod data;
pub mod resolve;
@ -8,7 +6,9 @@ mod sender;
use std::{fmt::Debug, sync::Arc};
use conduit::{Error, Result, Server};
use data::Data;
use database::Database;
pub use resolve::FedDest;
use ruma::{
api::{appservice::Registration, OutgoingRequest},
@ -17,7 +17,7 @@ use ruma::{
use tokio::{sync::Mutex, task::JoinHandle};
use tracing::{error, warn};
use crate::{server_is_ours, services, Error, KeyValueDatabase, Result};
use crate::{server_is_ours, services};
pub struct Service {
pub db: Data,
@ -53,7 +53,7 @@ pub enum SendingEvent {
}
impl Service {
pub fn build(server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Arc<Self>> {
pub fn build(server: &Arc<Server>, db: &Arc<Database>) -> Result<Arc<Self>> {
let config = &server.config;
let (sender, receiver) = loole::unbounded();
Ok(Arc::new(Self {

View file

@ -23,7 +23,7 @@ use ruma::{
use tracing::{debug, error, warn};
use super::{appservice, send, Destination, Msg, SendingEvent, Service};
use crate::{service::presence::Presence, services, user_is_local, utils::calculate_hash, Error, PduEvent, Result};
use crate::{presence::Presence, services, user_is_local, utils::calculate_hash, Error, PduEvent, Result};
#[derive(Debug)]
enum TransactionStatus {

View file

@ -1,7 +1,7 @@
use std::sync::Arc;
use conduit::{debug_info, Result, Server};
use database::KeyValueDatabase;
use database::Database;
use tracing::{debug, info, trace};
use crate::{
@ -24,11 +24,11 @@ pub struct Services {
pub media: media::Service,
pub sending: Arc<sending::Service>,
pub server: Arc<Server>,
pub db: Arc<KeyValueDatabase>,
pub db: Arc<Database>,
}
impl Services {
pub async fn build(server: Arc<Server>, db: Arc<KeyValueDatabase>) -> Result<Self> {
pub async fn build(server: Arc<Server>, db: Arc<Database>) -> Result<Self> {
Ok(Self {
rooms: rooms::Service {
alias: rooms::alias::Service::build(&server, &db)?,

View file

@ -1,17 +1,17 @@
use std::sync::Arc;
use conduit::Result;
use database::{KeyValueDatabase, KvTree};
use database::{Database, Map};
use ruma::{DeviceId, TransactionId, UserId};
pub struct Data {
userdevicetxnid_response: Arc<dyn KvTree>,
userdevicetxnid_response: Arc<Map>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
userdevicetxnid_response: db.userdevicetxnid_response.clone(),
userdevicetxnid_response: db["userdevicetxnid_response"].clone(),
}
}

View file

@ -1,21 +1,18 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::sync::Arc;
use conduit::{Result, Server};
use data::Data;
use database::Database;
use ruma::{DeviceId, TransactionId, UserId};
use crate::Result;
pub struct Service {
pub db: Data,
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,21 +1,21 @@
use std::sync::Arc;
use conduit::{Error, Result};
use database::{KeyValueDatabase, KvTree};
use database::{Database, Map};
use ruma::{
api::client::{error::ErrorKind, uiaa::UiaaInfo},
CanonicalJsonValue, DeviceId, UserId,
};
pub struct Data {
userdevicesessionid_uiaainfo: Arc<dyn KvTree>,
db: Arc<KeyValueDatabase>,
userdevicesessionid_uiaainfo: Arc<Map>,
db: Arc<Database>,
}
impl Data {
pub(super) fn new(db: &Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: &Arc<Database>) -> Self {
Self {
userdevicesessionid_uiaainfo: db.userdevicesessionid_uiaainfo.clone(),
userdevicesessionid_uiaainfo: db["userdevicesessionid_uiaainfo"].clone(),
db: db.clone(),
}
}

View file

@ -1,12 +1,10 @@
use conduit::Server;
use database::KeyValueDatabase;
mod data;
use std::sync::Arc;
use conduit::{utils, utils::hash, Error, Result};
use conduit::{utils, utils::hash, Error, Result, Server};
use data::Data;
use database::Database;
use ruma::{
api::client::{
error::ErrorKind,
@ -25,7 +23,7 @@ pub struct Service {
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db),
})

View file

@ -1,5 +1,7 @@
use std::{collections::BTreeMap, mem::size_of, sync::Arc};
use conduit::{utils, warn, Error, Result};
use database::{Database, Map};
use ruma::{
api::client::{device::Device, error::ErrorKind, filter::FilterDefinition},
encryption::{CrossSigningKey, DeviceKeys, OneTimeKey},
@ -8,51 +10,50 @@ use ruma::{
uint, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedDeviceKeyId,
OwnedMxcUri, OwnedUserId, UInt, UserId,
};
use tracing::warn;
use crate::{services, users::clean_signatures, utils, Error, KeyValueDatabase, KvTree, Result};
use crate::{services, users::clean_signatures};
pub struct Data {
userid_password: Arc<dyn KvTree>,
token_userdeviceid: Arc<dyn KvTree>,
userid_displayname: Arc<dyn KvTree>,
userid_avatarurl: Arc<dyn KvTree>,
userid_blurhash: Arc<dyn KvTree>,
userid_devicelistversion: Arc<dyn KvTree>,
userdeviceid_token: Arc<dyn KvTree>,
userdeviceid_metadata: Arc<dyn KvTree>,
onetimekeyid_onetimekeys: Arc<dyn KvTree>,
userid_lastonetimekeyupdate: Arc<dyn KvTree>,
keyid_key: Arc<dyn KvTree>,
userid_masterkeyid: Arc<dyn KvTree>,
userid_selfsigningkeyid: Arc<dyn KvTree>,
userid_usersigningkeyid: Arc<dyn KvTree>,
keychangeid_userid: Arc<dyn KvTree>,
todeviceid_events: Arc<dyn KvTree>,
userfilterid_filter: Arc<dyn KvTree>,
_db: Arc<KeyValueDatabase>,
userid_password: Arc<Map>,
token_userdeviceid: Arc<Map>,
userid_displayname: Arc<Map>,
userid_avatarurl: Arc<Map>,
userid_blurhash: Arc<Map>,
userid_devicelistversion: Arc<Map>,
userdeviceid_token: Arc<Map>,
userdeviceid_metadata: Arc<Map>,
onetimekeyid_onetimekeys: Arc<Map>,
userid_lastonetimekeyupdate: Arc<Map>,
keyid_key: Arc<Map>,
userid_masterkeyid: Arc<Map>,
userid_selfsigningkeyid: Arc<Map>,
userid_usersigningkeyid: Arc<Map>,
keychangeid_userid: Arc<Map>,
todeviceid_events: Arc<Map>,
userfilterid_filter: Arc<Map>,
_db: Arc<Database>,
}
impl Data {
pub(super) fn new(db: Arc<KeyValueDatabase>) -> Self {
pub(super) fn new(db: Arc<Database>) -> Self {
Self {
userid_password: db.userid_password.clone(),
token_userdeviceid: db.token_userdeviceid.clone(),
userid_displayname: db.userid_displayname.clone(),
userid_avatarurl: db.userid_avatarurl.clone(),
userid_blurhash: db.userid_blurhash.clone(),
userid_devicelistversion: db.userid_devicelistversion.clone(),
userdeviceid_token: db.userdeviceid_token.clone(),
userdeviceid_metadata: db.userdeviceid_metadata.clone(),
onetimekeyid_onetimekeys: db.onetimekeyid_onetimekeys.clone(),
userid_lastonetimekeyupdate: db.userid_lastonetimekeyupdate.clone(),
keyid_key: db.keyid_key.clone(),
userid_masterkeyid: db.userid_masterkeyid.clone(),
userid_selfsigningkeyid: db.userid_selfsigningkeyid.clone(),
userid_usersigningkeyid: db.userid_usersigningkeyid.clone(),
keychangeid_userid: db.keychangeid_userid.clone(),
todeviceid_events: db.todeviceid_events.clone(),
userfilterid_filter: db.userfilterid_filter.clone(),
userid_password: db["userid_password"].clone(),
token_userdeviceid: db["token_userdeviceid"].clone(),
userid_displayname: db["userid_displayname"].clone(),
userid_avatarurl: db["userid_avatarurl"].clone(),
userid_blurhash: db["userid_blurhash"].clone(),
userid_devicelistversion: db["userid_devicelistversion"].clone(),
userdeviceid_token: db["userdeviceid_token"].clone(),
userdeviceid_metadata: db["userdeviceid_metadata"].clone(),
onetimekeyid_onetimekeys: db["onetimekeyid_onetimekeys"].clone(),
userid_lastonetimekeyupdate: db["userid_lastonetimekeyupdate"].clone(),
keyid_key: db["keyid_key"].clone(),
userid_masterkeyid: db["userid_masterkeyid"].clone(),
userid_selfsigningkeyid: db["userid_selfsigningkeyid"].clone(),
userid_usersigningkeyid: db["userid_usersigningkeyid"].clone(),
keychangeid_userid: db["keychangeid_userid"].clone(),
todeviceid_events: db["todeviceid_events"].clone(),
userfilterid_filter: db["userfilterid_filter"].clone(),
_db: db,
}
}

View file

@ -1,13 +1,14 @@
use conduit::Server;
mod data;
use std::{
collections::{BTreeMap, BTreeSet},
mem,
sync::{Arc, Mutex, Mutex as StdMutex},
};
use conduit::{Error, Result, Server};
use data::Data;
use database::Database;
use ruma::{
api::client::{
device::Device,
@ -24,7 +25,7 @@ use ruma::{
UInt, UserId,
};
use crate::{database::KeyValueDatabase, service, services, Error, Result};
use crate::services;
pub struct SlidingSyncCache {
lists: BTreeMap<String, SyncRequestList>,
@ -41,7 +42,7 @@ pub struct Service {
}
impl Service {
pub fn build(_server: &Arc<Server>, db: &Arc<KeyValueDatabase>) -> Result<Self> {
pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
Ok(Self {
db: Data::new(db.clone()),
connections: StdMutex::new(BTreeMap::new()),
@ -242,7 +243,7 @@ impl Service {
/// Check if a user is an admin
pub fn is_admin(&self, user_id: &UserId) -> Result<bool> {
if let Some(admin_room_id) = service::admin::Service::get_admin_room()? {
if let Some(admin_room_id) = crate::admin::Service::get_admin_room()? {
services()
.rooms
.state_cache