partial impl of query_trusted_key_servers_first option

65% finished

Signed-off-by: strawberry <strawberry@puppygock.gay>
parent 2832d8cb93
commit 9fd521f041

4 changed files with 146 additions and 69 deletions
@@ -22,10 +22,14 @@
# YOU NEED TO EDIT THIS
#server_name = "your.server.name"

# Servers listed here will be used to gather public keys of other servers.
# Generally, copying this exactly should be enough. (Currently, conduwuit doesn't
# support batched key requests, so this list should only contain Synapse
# servers.) Defaults to `matrix.org`
# Servers listed here will be used to gather public keys of other servers (notary trusted key servers).
#
# The default behaviour for conduwuit is to attempt to query trusted key servers before querying the individual servers.
# This is done for performance reasons, but if you would like to query individual servers before the notary servers
# configured below, set `query_trusted_key_servers_first` to `false`.
#
# (Currently, conduwuit doesn't support batched key requests, so this list should only contain Synapse servers)
# Defaults to `matrix.org`
# trusted_servers = ["matrix.org"]
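For orientation, the notary query that these `trusted_servers` answer is keyed by the same map shape used by the Rust code later in this diff: which servers we still need keys for, and which key IDs (with optional validity criteria) we want from each. A minimal sketch of that shape; the exact import path for `QueryCriteria` is an assumption and not shown in this commit:

use std::collections::BTreeMap;

use ruma::{
    api::federation::discovery::get_remote_server_keys_batch::v2::QueryCriteria,
    OwnedServerName, OwnedServerSigningKeyId,
};

// Shape of a batched notary key query: server -> key ID -> query criteria.
// This mirrors the `servers` map threaded through the key-fetching code below.
type KeyQuery = BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>;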
@@ -228,6 +232,22 @@ allow_check_for_updates = true
# Defaults to "🏳️⚧️" (trans pride flag)
#new_user_displayname_suffix = "🏳️⚧️"

# Option to control whether conduwuit will query your list of trusted notary key servers (`trusted_servers`) for
# remote homeserver signing keys it doesn't know *first*, or query the individual servers first before falling back to the trusted
# key servers.
#
# The former/default behaviour makes federated/remote room joins generally faster because we're querying a single (or list of) server
# that we know works, is reasonably fast, and is reliable for just about all the homeserver signing keys in the room. Querying individual
# servers may take longer depending on the general infrastructure of everyone in the room, how many dead servers there are, etc.
#
# However, this does create an increased reliance on a single entity (or a few large entities), as `trusted_servers` should generally
# contain long-term, large servers that know a very large number of homeservers.
#
# If you don't know what any of this means, leave this and `trusted_servers` alone to their defaults.
#
# Defaults to true as this is the fastest option for federation.
#query_trusted_key_servers_first = true

# Set this to any float value to multiply conduwuit's in-memory LRU caches with.
# May be useful if you have significant memory to spare to increase performance.
# Defaults to 1.0.
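As a rough illustration of the lookup order that `query_trusted_key_servers_first` selects, here is a minimal sketch with placeholder helpers; this is not the actual implementation (that is the fetch_join_signing_keys change further down in this diff):

// Minimal sketch of the two query orders the option describes.
async fn fetch_signing_keys_sketch(query_trusted_key_servers_first: bool) {
    if query_trusted_key_servers_first {
        // Default: one batched request to the notary/trusted servers first,
        // then ask each remaining homeserver individually.
        ask_trusted_servers().await;
        ask_individual_servers().await;
    } else {
        // Reverse order: individual homeservers first, notaries as fallback.
        ask_individual_servers().await;
        ask_trusted_servers().await;
    }
}

// Placeholder stand-ins for the two query paths, named for this sketch only.
async fn ask_trusted_servers() {}
async fn ask_individual_servers() {}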
@@ -92,6 +92,8 @@ pub struct Config {
    pub jwt_secret: Option<String>,
    #[serde(default = "default_trusted_servers")]
    pub trusted_servers: Vec<OwnedServerName>,
    #[serde(default = "true_fn")]
    pub query_trusted_key_servers_first: bool,
    #[serde(default = "default_log")]
    pub log: String,
    #[serde(default)]
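For context, the `serde(default = "…")` attributes in the hunk above point at small helper functions defined elsewhere in the config module. They presumably look roughly like this; these definitions are a sketch and are not part of this commit:

use ruma::OwnedServerName;

// Assumed shapes of the default helpers referenced by the serde attributes above.
fn true_fn() -> bool {
    true
}

fn default_trusted_servers() -> Vec<OwnedServerName> {
    vec![OwnedServerName::try_from("matrix.org").unwrap()]
}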
@@ -319,6 +321,10 @@ impl fmt::Display for Config {
                }
                &lst.join(", ")
            }),
            (
                "Query Trusted Key Servers First",
                &self.query_trusted_key_servers_first.to_string(),
            ),
            (
                "TURN username",
                if self.turn_username.is_empty() {
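The added entry follows the same (label, value) tuple pattern the rest of this Display impl uses. A small self-contained sketch of that pattern, with illustrative names rather than the real Config type:

use std::fmt;

struct Example {
    query_trusted_key_servers_first: bool,
}

impl fmt::Display for Example {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Same (label, value) tuple pattern as the Config Display impl above.
        let lines = [(
            "Query Trusted Key Servers First",
            self.query_trusted_key_servers_first.to_string(),
        )];
        for (name, value) in &lines {
            writeln!(f, "{name}: {value}")?;
        }
        Ok(())
    }
}

fn main() {
    print!("{}", Example { query_trusted_key_servers_first: true });
}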
@@ -1,30 +1,3 @@
mod data;
use argon2::Argon2;
pub use data::Data;
use regex::RegexSet;
use ruma::{
    serde::Base64, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName,
    OwnedServerSigningKeyId, OwnedUserId,
};

use sha2::Digest;

use crate::api::server_server::FedDest;

use crate::{services, Config, Error, Result};
use futures_util::FutureExt;
use hyper::{
    client::connect::dns::{GaiResolver, Name},
    service::Service as HyperService,
};
use reqwest::dns::{Addrs, Resolve, Resolving};
use ruma::{
    api::{
        client::sync::sync_events,
        federation::discovery::{ServerSigningKeys, VerifyKey},
    },
    DeviceId, RoomVersionId, ServerName, UserId,
};
use std::{
    collections::{BTreeMap, HashMap},
    error::Error as StdError,
@@ -39,11 +12,38 @@ use std::{
    },
    time::{Duration, Instant},
};

use argon2::Argon2;
use base64::{engine::general_purpose, Engine as _};
use futures_util::FutureExt;
use hyper::{
    client::connect::dns::{GaiResolver, Name},
    service::Service as HyperService,
};
use regex::RegexSet;
use reqwest::dns::{Addrs, Resolve, Resolving};
use ruma::{
    api::{
        client::sync::sync_events,
        federation::discovery::{ServerSigningKeys, VerifyKey},
    },
    DeviceId, RoomVersionId, ServerName, UserId,
};
use ruma::{
    serde::Base64, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName,
    OwnedServerSigningKeyId, OwnedUserId,
};
use sha2::Digest;
use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore};
use tracing::{error, info};
use trust_dns_resolver::TokioAsyncResolver;

use base64::{engine::general_purpose, Engine as _};
pub use data::Data;

use crate::api::server_server::FedDest;
use crate::{services, Config, Error, Result};

mod data;

type WellKnownMap = HashMap<OwnedServerName, (FedDest, String)>;
type TlsNameMap = HashMap<String, (Vec<IpAddr>, u16)>;
@@ -363,6 +363,10 @@ impl Service<'_> {
        &self.config.trusted_servers
    }

    pub fn query_trusted_key_servers_first(&self) -> bool {
        self.config.query_trusted_key_servers_first
    }

    pub fn dns_resolver(&self) -> &TokioAsyncResolver {
        &self.dns_resolver
    }
@@ -1544,39 +1544,11 @@ impl Service {
        Ok(())
    }

    pub(crate) async fn fetch_join_signing_keys(
    async fn batch_request_signing_keys(
        &self,
        event: &create_join_event::v2::Response,
        room_version: &RoomVersionId,
        mut servers: BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>,
        pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
    ) -> Result<()> {
        let mut servers: BTreeMap<
            OwnedServerName,
            BTreeMap<OwnedServerSigningKeyId, QueryCriteria>,
        > = BTreeMap::new();

        {
            let mut pkm = pub_key_map
                .write()
                .map_err(|_| Error::bad_database("RwLock is poisoned."))?;

            // Try to fetch keys, failure is okay
            // Servers we couldn't find in the cache will be added to `servers`
            for pdu in &event.room_state.state {
                let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm);
            }
            for pdu in &event.room_state.auth_chain {
                let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm);
            }

            drop(pkm);
        };

        if servers.is_empty() {
            info!("server is empty, we had all keys locally, not fetching any keys");
            return Ok(());
        }

        for server in services().globals.trusted_servers() {
            info!("Asking batch signing keys from trusted server {}", server);
            if let Ok(keys) = services()
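The body of this loop is unchanged context and therefore cut off by the hunk. Based on the upstream implementation this builds on, the trusted-server request presumably continues roughly as sketched below; the endpoint and field names here are assumptions, not part of this diff:

            // Sketch (assumed, not shown in this diff): one batched notary request
            // per trusted server via ruma's get_remote_server_keys_batch endpoint.
            if let Ok(keys) = services()
                .sending
                .send_federation_request(
                    server,
                    get_remote_server_keys_batch::v2::Request {
                        server_keys: servers.clone(),
                    },
                )
                .await
            {
                // Each returned key removes its server from `servers` and is written
                // into `pub_key_map`; the tail of this loop appears in the next hunk.
            }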
@@ -1619,13 +1591,16 @@ impl Service {
                    pkm.insert(k.server_name.to_string(), result);
                }
            }

            if servers.is_empty() {
                info!("Trusted server supplied all signing keys, no more keys to fetch");
                return Ok(());
            }
        }

        Ok(())
    }

    async fn request_signing_keys(
        &self,
        servers: BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>,
        pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
    ) -> Result<()> {
        info!("Asking individual servers for signing keys: {servers:?}");
        let mut futures: FuturesUnordered<_> = servers
            .into_keys()
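The `.map(…)` closure between this hunk and the next is unchanged context that the diff omits; it presumably sends one key request per server, roughly as sketched below. This is an assumption about the elided context, not part of the change itself:

            // Sketch of the omitted closure (assumed): request each server's keys,
            // returning the response paired with the origin server name.
            .map(|server| async move {
                (
                    services()
                        .sending
                        .send_federation_request(&server, get_server_keys::v2::Request::new())
                        .await,
                    server,
                )
            })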
@@ -1641,7 +1616,7 @@ impl Service {
            .collect();

        while let Some(result) = futures.next().await {
            info!("Received new result");
            debug!("Received new Future result");
            if let (Ok(get_keys_response), origin) = result {
                info!("Result is from {origin}");
                if let Ok(key) = get_keys_response.server_key.deserialize() {
@@ -1657,11 +1632,83 @@ impl Service {
                        .insert(origin.to_string(), result);
                }
            }
            info!("Done handling result");
            debug!("Done handling Future result");
        }

        Ok(())
    }

    pub(crate) async fn fetch_join_signing_keys(
        &self,
        event: &create_join_event::v2::Response,
        room_version: &RoomVersionId,
        pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
    ) -> Result<()> {
        let mut servers: BTreeMap<
            OwnedServerName,
            BTreeMap<OwnedServerSigningKeyId, QueryCriteria>,
        > = BTreeMap::new();

        {
            let mut pkm = pub_key_map
                .write()
                .map_err(|_| Error::bad_database("RwLock is poisoned."))?;

            // Try to fetch keys, failure is okay
            // Servers we couldn't find in the cache will be added to `servers`
            for pdu in &event.room_state.state {
                let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm);
            }
            for pdu in &event.room_state.auth_chain {
                let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm);
            }

            drop(pkm);
        };

        if servers.is_empty() {
            info!("We had all keys cached locally, not fetching any keys from remote servers");
            return Ok(());
        }

        if services().globals.query_trusted_key_servers_first() {
            info!("query_trusted_key_servers_first is set to true, querying notary trusted key servers first for homeserver signing keys.");

            self.batch_request_signing_keys(servers.clone(), pub_key_map)
                .await?;

            if servers.is_empty() {
                info!("Trusted server supplied all signing keys, no more keys to fetch");
                return Ok(());
            }

            info!("Remaining servers left that the notary/trusted servers did not provide: {servers:?}");

            self.request_signing_keys(servers.clone(), pub_key_map)
                .await?;
        } else {
            info!("query_trusted_key_servers_first is set to false, querying individual homeservers first");

            self.request_signing_keys(servers.clone(), pub_key_map)
                .await?;

            if servers.is_empty() {
                info!("Individual homeservers supplied all signing keys, no more keys to fetch");
                return Ok(());
            }

            info!("Remaining servers left that the individual homeservers did not provide: {servers:?}");

            self.batch_request_signing_keys(servers.clone(), pub_key_map)
                .await?;
        }

        info!("Search for signing keys done");

        /*if servers.is_empty() {
            warn!("Failed to find homeserver signing keys for the remaining servers: {servers:?}");
        }*/

        Ok(())
    }
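Note that the commented-out check above would only fire when `servers` is empty, which contradicts its own warning message. One possible way to finish it, once the remaining work in this partial implementation lands, might look like this; a sketch only, not part of this commit:

        // Sketch (not in this commit): warn only when servers actually remain
        // without signing keys after both query passes.
        if !servers.is_empty() {
            warn!("Failed to find homeserver signing keys for the remaining servers: {servers:?}");
        }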