move remaining runtime caches into their respective service
Signed-off-by: Jason Volk <jason@zemos.net>
This commit is contained in: parent e1d1dac95e, commit 473b29d524
12 changed files with 108 additions and 85 deletions
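The theme across all twelve files: each in-memory cache leaves the shared `Database` struct and becomes a private field of the one service `Data` that actually uses it. A minimal sketch of the ownership change, using simplified stand-in types rather than the real ones:

```rust
use std::{collections::HashMap, sync::Mutex};

// Stand-in types for illustration; the real code uses ruma's
// OwnedRoomId and conduit's PduCount.
type OwnedRoomId = String;
type PduCount = u64;

// Before: the global Database struct owned every runtime cache, so the
// database crate depended on service-level types (and on lru-cache).
struct DatabaseBefore {
    lasttimelinecount_cache: Mutex<HashMap<OwnedRoomId, PduCount>>,
    // ...plus auth_chain_cache, appservice_in_room_cache, UIAA requests
}

// After: each service's Data owns exactly the cache it uses; other code
// reaches it only through accessor methods on the service.
struct TimelineData {
    lasttimelinecount_cache: Mutex<HashMap<OwnedRoomId, PduCount>>,
}
```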
Cargo.lock (generated)
@@ -696,7 +696,6 @@ dependencies = [
  "chrono",
  "conduit_core",
  "log",
- "lru-cache",
  "ruma",
  "rust-rocksdb-uwu",
  "tokio",
@@ -38,7 +38,6 @@ zstd_compression = [
 chrono.workspace = true
 conduit-core.workspace = true
 log.workspace = true
-lru-cache.workspace = true
 ruma.workspace = true
 rust-rocksdb.workspace = true
 tokio.workspace = true
@@ -1,41 +1,21 @@
-use std::{
-    collections::{BTreeMap, HashMap},
-    ops::Index,
-    sync::{Arc, Mutex, RwLock},
-};
+use std::{ops::Index, sync::Arc};

-use conduit::{PduCount, Result, Server};
-use lru_cache::LruCache;
-use ruma::{CanonicalJsonValue, OwnedDeviceId, OwnedRoomId, OwnedUserId};
+use conduit::{Result, Server};

 use crate::{cork::Cork, maps, maps::Maps, Engine, Map};

 pub struct Database {
     pub db: Arc<Engine>,
     pub map: Maps,
-
-    //TODO: not a database
-    pub userdevicesessionid_uiaarequest: RwLock<BTreeMap<(OwnedUserId, OwnedDeviceId, String), CanonicalJsonValue>>,
-    pub auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<[u64]>>>,
-    pub appservice_in_room_cache: RwLock<HashMap<OwnedRoomId, HashMap<String, bool>>>,
-    pub lasttimelinecount_cache: Mutex<HashMap<OwnedRoomId, PduCount>>,
 }

 impl Database {
     /// Load an existing database or create a new one.
     pub async fn open(server: &Arc<Server>) -> Result<Self> {
-        let config = &server.config;
         let db = Engine::open(server)?;
         Ok(Self {
             db: db.clone(),
             map: maps::open(&db)?,
-
-            userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()),
-            appservice_in_room_cache: RwLock::new(HashMap::new()),
-            lasttimelinecount_cache: Mutex::new(HashMap::new()),
-            auth_chain_cache: Mutex::new(LruCache::new(
-                (f64::from(config.auth_chain_cache_capacity) * config.conduit_cache_capacity_modifier) as usize,
-            )),
         })
     }
@@ -1,12 +1,11 @@
 use std::{
-    collections::{BTreeMap, HashMap},
+    collections::BTreeMap,
     sync::{Arc, RwLock},
 };

 use conduit::{trace, utils, Error, Result};
 use database::{Database, Map};
 use futures_util::{stream::FuturesUnordered, StreamExt};
-use lru_cache::LruCache;
 use ruma::{
     api::federation::discovery::{ServerSigningKeys, VerifyKey},
     signatures::Ed25519KeyPair,
@@ -210,36 +209,37 @@ impl Data {
     pub fn cleanup(&self) -> Result<()> { self.db.db.cleanup() }

     pub fn memory_usage(&self) -> String {
-        let auth_chain_cache = self.db.auth_chain_cache.lock().unwrap().len();
-        let appservice_in_room_cache = self.db.appservice_in_room_cache.read().unwrap().len();
-        let lasttimelinecount_cache = self.db.lasttimelinecount_cache.lock().unwrap().len();
-
-        let max_auth_chain_cache = self.db.auth_chain_cache.lock().unwrap().capacity();
-        let max_appservice_in_room_cache = self.db.appservice_in_room_cache.read().unwrap().capacity();
-        let max_lasttimelinecount_cache = self.db.lasttimelinecount_cache.lock().unwrap().capacity();
+        let (auth_chain_cache, max_auth_chain_cache) = services().rooms.auth_chain.get_cache_usage();
+        let (appservice_in_room_cache, max_appservice_in_room_cache) = services()
+            .rooms
+            .state_cache
+            .get_appservice_in_room_cache_usage();
+        let (lasttimelinecount_cache, max_lasttimelinecount_cache) = services()
+            .rooms
+            .timeline
+            .get_lasttimelinecount_cache_usage();

         format!(
-            "\
-auth_chain_cache: {auth_chain_cache} / {max_auth_chain_cache}
-appservice_in_room_cache: {appservice_in_room_cache} / {max_appservice_in_room_cache}
-lasttimelinecount_cache: {lasttimelinecount_cache} / {max_lasttimelinecount_cache}\n\n
-{}",
+            "auth_chain_cache: {auth_chain_cache} / {max_auth_chain_cache}\nappservice_in_room_cache: \
+             {appservice_in_room_cache} / {max_appservice_in_room_cache}\nlasttimelinecount_cache: \
+             {lasttimelinecount_cache} / {max_lasttimelinecount_cache}\n\n{}",
             self.db.db.memory_usage().unwrap_or_default()
         )
     }

+    #[allow(clippy::unused_self)]
     pub fn clear_caches(&self, amount: u32) {
         if amount > 1 {
-            let c = &mut *self.db.auth_chain_cache.lock().unwrap();
-            *c = LruCache::new(c.capacity());
+            services().rooms.auth_chain.clear_cache();
         }
         if amount > 2 {
-            let c = &mut *self.db.appservice_in_room_cache.write().unwrap();
-            *c = HashMap::new();
+            services()
+                .rooms
+                .state_cache
+                .clear_appservice_in_room_cache();
         }
         if amount > 3 {
-            let c = &mut *self.db.lasttimelinecount_cache.lock().unwrap();
-            *c = HashMap::new();
+            services().rooms.timeline.clear_lasttimelinecount_cache();
         }
     }
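The globals `Data` no longer owns any of these caches, so `memory_usage` and `clear_caches` become thin delegators. Every cache owner follows the same small reporting convention; a sketch of it, assuming an `lru-cache`-backed cache like the auth chain's:

```rust
use std::sync::{Arc, Mutex};

use lru_cache::LruCache;

struct CacheOwner {
    cache: Mutex<LruCache<Vec<u64>, Arc<[u64]>>>,
}

impl CacheOwner {
    // Report (current entries, capacity) for the admin memory report.
    fn get_cache_usage(&self) -> (usize, usize) {
        let cache = self.cache.lock().expect("locked");
        (cache.len(), cache.capacity())
    }

    // Drop all entries; LruCache::clear keeps the configured capacity.
    fn clear_cache(&self) { self.cache.lock().expect("locked").clear(); }
}
```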
@@ -595,8 +595,10 @@ async fn db_lt_10(db: &Arc<Database>, _config: &Config) -> Result<()> {
     Ok(())
 }

-async fn db_lt_11(db: &Arc<Database>, _config: &Config) -> Result<()> {
-    let _userdevicesessionid_uiaarequest = &db["userdevicesessionid_uiaarequest"];
+#[allow(unreachable_code)]
+async fn db_lt_11(_db: &Arc<Database>, _config: &Config) -> Result<()> {
+    todo!("Dropping a column to clear data is not implemented yet.");
+    //let userdevicesessionid_uiaarequest = &db["userdevicesessionid_uiaarequest"];
     //userdevicesessionid_uiaarequest.clear()?;

     services().globals.bump_database_version(11)?;
@@ -1,24 +1,31 @@
-use std::{mem::size_of, sync::Arc};
+use std::{
+    mem::size_of,
+    sync::{Arc, Mutex},
+};

-use conduit::{utils, Result};
+use conduit::{utils, Result, Server};
 use database::{Database, Map};
+use lru_cache::LruCache;

 pub(super) struct Data {
     shorteventid_authchain: Arc<Map>,
-    db: Arc<Database>,
+    pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<[u64]>>>,
 }

 impl Data {
-    pub(super) fn new(db: &Arc<Database>) -> Self {
+    pub(super) fn new(server: &Arc<Server>, db: &Arc<Database>) -> Self {
+        let config = &server.config;
+        let cache_size = f64::from(config.auth_chain_cache_capacity);
+        let cache_size = (cache_size * config.conduit_cache_capacity_modifier) as usize;
         Self {
             shorteventid_authchain: db["shorteventid_authchain"].clone(),
-            db: db.clone(),
+            auth_chain_cache: Mutex::new(LruCache::new(cache_size)),
         }
     }

     pub(super) fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result<Option<Arc<[u64]>>> {
         // Check RAM cache
-        if let Some(result) = self.db.auth_chain_cache.lock().unwrap().get_mut(key) {
+        if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) {
             return Ok(Some(Arc::clone(result)));
         }
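Note the capacity computation moves with the cache: what `Database::open` used to do inline now happens in the auth chain's `Data::new`. A sketch of the arithmetic, assuming the config fields shown in the diff (a `u32` entry count scaled by an `f64` modifier):

```rust
// Config-driven sizing, as in Data::new above: the configured entry
// count is scaled by the global cache modifier, then truncated.
fn scaled_cache_size(auth_chain_cache_capacity: u32, conduit_cache_capacity_modifier: f64) -> usize {
    let cache_size = f64::from(auth_chain_cache_capacity);
    (cache_size * conduit_cache_capacity_modifier) as usize
}

fn main() {
    // e.g. capacity 100_000 with modifier 1.5 yields room for 150_000 entries
    assert_eq!(scaled_cache_size(100_000, 1.5), 150_000);
}
```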
@@ -37,10 +44,9 @@ impl Data {

         if let Some(chain) = chain {
             // Cache in RAM
-            self.db
-                .auth_chain_cache
+            self.auth_chain_cache
                 .lock()
-                .unwrap()
+                .expect("locked")
                 .insert(vec![key[0]], Arc::clone(&chain));

             return Ok(Some(chain));
@@ -63,10 +69,9 @@ impl Data {
         }

         // Cache in RAM
-        self.db
-            .auth_chain_cache
+        self.auth_chain_cache
             .lock()
-            .unwrap()
+            .expect("locked")
             .insert(key, auth_chain);

         Ok(())
@@ -17,9 +17,9 @@ pub struct Service {
 }

 impl Service {
-    pub fn build(_server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
+    pub fn build(server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
         Ok(Self {
-            db: Data::new(db),
+            db: Data::new(server, db),
         })
     }
@@ -181,4 +181,11 @@ impl Service {
         self.db
             .cache_auth_chain(key, auth_chain.iter().copied().collect::<Arc<[u64]>>())
     }
+
+    pub fn get_cache_usage(&self) -> (usize, usize) {
+        let cache = self.db.auth_chain_cache.lock().expect("locked");
+        (cache.len(), cache.capacity())
+    }
+
+    pub fn clear_cache(&self) { self.db.auth_chain_cache.lock().expect("locked").clear(); }
 }
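With the cache private to the service, external callers (such as the admin memory report above) go through these two methods. A hypothetical helper written against the `Service` type in this file, purely to illustrate the call shape:

```rust
// Hypothetical: format one line of the admin memory report from the
// usage accessor added in this hunk.
fn report_auth_chain(service: &Service) -> String {
    let (len, cap) = service.get_cache_usage();
    format!("auth_chain_cache: {len} / {cap}")
}
```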
@@ -1,4 +1,7 @@
-use std::{collections::HashSet, sync::Arc};
+use std::{
+    collections::{HashMap, HashSet},
+    sync::{Arc, RwLock},
+};

 use conduit::{utils, Error, Result};
 use database::{Database, Map};
@@ -13,6 +16,7 @@ use crate::{appservice::RegistrationInfo, services, user_is_local};

 type StrippedStateEventIter<'a> = Box<dyn Iterator<Item = Result<(OwnedRoomId, Vec<Raw<AnyStrippedStateEvent>>)>> + 'a>;
 type AnySyncStateEventIter<'a> = Box<dyn Iterator<Item = Result<(OwnedRoomId, Vec<Raw<AnySyncStateEvent>>)>> + 'a>;
+type AppServiceInRoomCache = RwLock<HashMap<OwnedRoomId, HashMap<String, bool>>>;

 pub(super) struct Data {
     userroomid_joined: Arc<Map>,
@@ -27,7 +31,7 @@ pub(super) struct Data {
     roomid_invitedcount: Arc<Map>,
     roomserverids: Arc<Map>,
     serverroomids: Arc<Map>,
-    db: Arc<Database>,
+    pub(super) appservice_in_room_cache: AppServiceInRoomCache,
 }

 impl Data {
@@ -45,7 +49,7 @@ impl Data {
             roomid_invitedcount: db["roomid_invitedcount"].clone(),
             roomserverids: db["roomserverids"].clone(),
             serverroomids: db["serverroomids"].clone(),
-            db: db.clone(),
+            appservice_in_room_cache: RwLock::new(HashMap::new()),
         }
     }
@@ -201,8 +205,7 @@ impl Data {
             self.serverroomids.insert(&serverroom_id, &[])?;
         }

-        self.db
-            .appservice_in_room_cache
+        self.appservice_in_room_cache
             .write()
             .unwrap()
             .remove(room_id);
@@ -213,7 +216,6 @@ impl Data {
     #[tracing::instrument(skip(self, room_id, appservice))]
     pub(super) fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> Result<bool> {
         let maybe = self
-            .db
             .appservice_in_room_cache
             .read()
             .unwrap()
@@ -235,8 +237,7 @@ impl Data {
                 .room_members(room_id)
                 .any(|userid| userid.map_or(false, |userid| appservice.users.is_match(userid.as_str())));

-            self.db
-                .appservice_in_room_cache
+            self.appservice_in_room_cache
                 .write()
                 .unwrap()
                 .entry(room_id.to_owned())
@@ -441,4 +441,17 @@ impl Service {

         Ok(servers)
     }
+
+    pub fn get_appservice_in_room_cache_usage(&self) -> (usize, usize) {
+        let cache = self.db.appservice_in_room_cache.read().expect("locked");
+        (cache.len(), cache.capacity())
+    }
+
+    pub fn clear_appservice_in_room_cache(&self) {
+        self.db
+            .appservice_in_room_cache
+            .write()
+            .expect("locked")
+            .clear();
+    }
 }
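One subtlety worth noting: unlike the auth chain's `LruCache`, this cache (and the timeline's below) is a plain `HashMap`, so the `capacity()` half of the usage tuple is the map's current allocation, not a configured ceiling. A standalone illustration:

```rust
use std::collections::HashMap;

fn main() {
    let mut cache: HashMap<String, bool> = HashMap::new();
    cache.insert("!room:example.org".to_owned(), true);
    // HashMap::capacity() reports how many entries fit before the map
    // reallocates; it grows with the map rather than bounding it.
    println!("{} / {}", cache.len(), cache.capacity());
}
```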
@@ -1,8 +1,12 @@
-use std::{collections::hash_map, mem::size_of, sync::Arc};
+use std::{
+    collections::{hash_map, HashMap},
+    mem::size_of,
+    sync::{Arc, Mutex},
+};

 use conduit::{error, utils, Error, Result};
 use database::{Database, Map};
-use ruma::{api::client::error::ErrorKind, CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId};
+use ruma::{api::client::error::ErrorKind, CanonicalJsonObject, EventId, OwnedRoomId, OwnedUserId, RoomId, UserId};

 use crate::{services, PduCount, PduEvent};
@@ -12,11 +16,12 @@ pub(super) struct Data {
     eventid_outlierpdu: Arc<Map>,
     userroomid_notificationcount: Arc<Map>,
     userroomid_highlightcount: Arc<Map>,
-    db: Arc<Database>,
+    pub(super) lasttimelinecount_cache: LastTimelineCountCache,
 }

 type PdusIterItem = Result<(PduCount, PduEvent)>;
 type PdusIterator<'a> = Box<dyn Iterator<Item = PdusIterItem> + 'a>;
+type LastTimelineCountCache = Mutex<HashMap<OwnedRoomId, PduCount>>;

 impl Data {
     pub(super) fn new(db: &Arc<Database>) -> Self {
@@ -26,16 +31,15 @@ impl Data {
             eventid_outlierpdu: db["eventid_outlierpdu"].clone(),
             userroomid_notificationcount: db["userroomid_notificationcount"].clone(),
             userroomid_highlightcount: db["userroomid_highlightcount"].clone(),
-            db: db.clone(),
+            lasttimelinecount_cache: Mutex::new(HashMap::new()),
         }
     }

     pub(super) fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result<PduCount> {
         match self
-            .db
             .lasttimelinecount_cache
             .lock()
-            .unwrap()
+            .expect("locked")
             .entry(room_id.to_owned())
         {
             hash_map::Entry::Vacant(v) => {
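`last_timeline_count` keeps its read-through shape: `hash_map::Entry` lets it compute the count once on a miss and memoize it under the same lock. A minimal sketch of that pattern with simplified types (`u64` stands in for `PduCount`, `String` for the room id, and the closure for the database lookup):

```rust
use std::{
    collections::{hash_map, HashMap},
    sync::Mutex,
};

// Read-through lookup: fill the entry on a miss, reuse it on a hit.
fn last_count(cache: &Mutex<HashMap<String, u64>>, room_id: &str, fetch: impl FnOnce() -> u64) -> u64 {
    match cache.lock().expect("locked").entry(room_id.to_owned()) {
        hash_map::Entry::Vacant(v) => *v.insert(fetch()),
        hash_map::Entry::Occupied(o) => *o.get(),
    }
}

fn main() {
    let cache = Mutex::new(HashMap::new());
    assert_eq!(last_count(&cache, "!a:example.org", || 7), 7); // miss: fetched
    assert_eq!(last_count(&cache, "!a:example.org", || 99), 7); // hit: cached
}
```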
@@ -162,10 +166,9 @@ impl Data {
             &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"),
         )?;

-        self.db
-            .lasttimelinecount_cache
+        self.lasttimelinecount_cache
             .lock()
-            .unwrap()
+            .expect("locked")
             .insert(pdu.room_id.clone(), PduCount::Normal(count));

         self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?;
@@ -1241,6 +1241,19 @@ impl Service {
         debug!("Prepended backfill pdu");
         Ok(())
     }
+
+    pub fn get_lasttimelinecount_cache_usage(&self) -> (usize, usize) {
+        let cache = self.db.lasttimelinecount_cache.lock().expect("locked");
+        (cache.len(), cache.capacity())
+    }
+
+    pub fn clear_lasttimelinecount_cache(&self) {
+        self.db
+            .lasttimelinecount_cache
+            .lock()
+            .expect("locked")
+            .clear();
+    }
 }

 #[cfg(test)]
@@ -1,30 +1,32 @@
-use std::sync::Arc;
+use std::{
+    collections::BTreeMap,
+    sync::{Arc, RwLock},
+};

 use conduit::{Error, Result};
 use database::{Database, Map};
 use ruma::{
     api::client::{error::ErrorKind, uiaa::UiaaInfo},
-    CanonicalJsonValue, DeviceId, UserId,
+    CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedUserId, UserId,
 };

 pub struct Data {
+    userdevicesessionid_uiaarequest: RwLock<BTreeMap<(OwnedUserId, OwnedDeviceId, String), CanonicalJsonValue>>,
     userdevicesessionid_uiaainfo: Arc<Map>,
-    db: Arc<Database>,
 }

 impl Data {
     pub(super) fn new(db: &Arc<Database>) -> Self {
         Self {
+            userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()),
             userdevicesessionid_uiaainfo: db["userdevicesessionid_uiaainfo"].clone(),
-            db: db.clone(),
         }
     }

     pub(super) fn set_uiaa_request(
         &self, user_id: &UserId, device_id: &DeviceId, session: &str, request: &CanonicalJsonValue,
     ) -> Result<()> {
-        self.db
-            .userdevicesessionid_uiaarequest
+        self.userdevicesessionid_uiaarequest
             .write()
             .unwrap()
             .insert(
||||||
|
@ -38,8 +40,7 @@ impl Data {
|
||||||
pub(super) fn get_uiaa_request(
|
pub(super) fn get_uiaa_request(
|
||||||
&self, user_id: &UserId, device_id: &DeviceId, session: &str,
|
&self, user_id: &UserId, device_id: &DeviceId, session: &str,
|
||||||
) -> Option<CanonicalJsonValue> {
|
) -> Option<CanonicalJsonValue> {
|
||||||
self.db
|
self.userdevicesessionid_uiaarequest
|
||||||
.userdevicesessionid_uiaarequest
|
|
||||||
.read()
|
.read()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned()))
|
.get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned()))
|
||||||
|
|
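The UIAA request map was the cache the old `//TODO: not a database` comment pointed at: in-memory state keyed by (user, device, session), which is why it can live inside the service untouched by the database layer. A self-contained sketch of the same shape, with `String` stand-ins for ruma's owned id types and for the JSON payload:

```rust
use std::{collections::BTreeMap, sync::RwLock};

// (OwnedUserId, OwnedDeviceId, session) simplified to plain strings.
type Key = (String, String, String);

struct UiaaRequests {
    map: RwLock<BTreeMap<Key, String>>,
}

impl UiaaRequests {
    fn set(&self, user: &str, device: &str, session: &str, request: &str) {
        let key: Key = (user.to_owned(), device.to_owned(), session.to_owned());
        self.map.write().expect("locked").insert(key, request.to_owned());
    }

    fn get(&self, user: &str, device: &str, session: &str) -> Option<String> {
        let key: Key = (user.to_owned(), device.to_owned(), session.to_owned());
        self.map.read().expect("locked").get(&key).cloned()
    }
}

fn main() {
    let reqs = UiaaRequests { map: RwLock::new(BTreeMap::new()) };
    reqs.set("@u:example.org", "DEV", "sess1", "{\"auth\":{}}");
    assert!(reqs.get("@u:example.org", "DEV", "sess1").is_some());
}
```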