Compare commits

1 commit

Author: Matthias Ahouansou
SHA1: 41e56baf60
Message: refactor: all the clippy lints
Date: 2024-05-06 21:58:34 +01:00

101 changed files with 2785 additions and 4458 deletions


@ -103,11 +103,6 @@ artifacts:
- ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
- cp result/bin/conduit aarch64-unknown-linux-musl
- mkdir -p target/aarch64-unknown-linux-musl/release
- cp result/bin/conduit target/aarch64-unknown-linux-musl/release
- direnv exec . cargo deb --no-strip --no-build --target aarch64-unknown-linux-musl
- mv target/aarch64-unknown-linux-musl/debian/*.deb aarch64-unknown-linux-musl.deb
- ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl
- cp result oci-image-arm64v8.tar.gz
@ -119,7 +114,6 @@ artifacts:
- x86_64-unknown-linux-musl
- aarch64-unknown-linux-musl
- x86_64-unknown-linux-musl.deb
- aarch64-unknown-linux-musl.deb
- oci-image-amd64.tar.gz
- oci-image-arm64v8.tar.gz
- public

Cargo.lock (generated, 1307 changed lines)

File diff suppressed because it is too large.


@ -3,23 +3,54 @@ explicit_outlives_requirements = "warn"
unused_qualifications = "warn"
[workspace.lints.clippy]
# Restrictions
cloned_instead_of_copied = "warn"
create_dir = "warn"
dbg_macro = "warn"
get_unwrap = "warn"
rc_mutex = "warn"
str_to_string = "warn"
string_lit_chars_any = "warn"
# Groups
complexity = "warn"
perf = "warn"
style = "warn"
suspicious = "warn"
# To allow us to override certain lints
cargo = { level = "warn", priority = -1 }
pedantic = { level = "warn", priority = -1 }
# lints we want to allow
cast_possible_truncation = "allow" # Not really a good way to handle this issue
missing_errors_doc = "allow" # Becomes highly redundant for functions accessing the database
missing_panics_doc = "allow" # We *should* only ever use unwraps/expects where infallible
module_name_repetitions = "allow" # Many things are re-exported, and in many cases removing repetitions just makes things more confusing
multiple_crate_versions = "allow" # Would require quite a lot of effort for minimal gain
must_use_candidate = "allow" # Basically useless, even the docs say it's "Not bad at all"
redundant_closure_for_method_calls = "allow" # `.ok()` is nicer than `std::result::Result::ok` (which needs to be done where we use our own `Result`)
struct_excessive_bools = "allow" # Very easy to spot, and the behaviour it encourages to use is much worse than not using this at all
too_many_lines = "allow" # Some functions are complicated, and won't really be re-used in other places
# fixed in !670
doc_markdown = "allow"
[package]
authors = ["timokoesters <timo@koesters.xyz>"]
categories = ["web-programming::http-server"]
description = "A Matrix homeserver written in Rust"
edition = "2021"
homepage = "https://conduit.rs"
keywords = ["homeserver", "matrix", "selfhost"]
license = "Apache-2.0"
name = "conduit"
readme = "README.md"
repository = "https://gitlab.com/famedly/conduit"
version = "0.10.0-alpha"
version = "0.8.0-alpha"
# See also `rust-toolchain.toml`
rust-version = "1.79.0"
rust-version = "1.78.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@ -28,24 +59,42 @@ workspace = true
[dependencies]
# Web framework
axum = { version = "0.7", default-features = false, features = [
axum = { version = "0.6.18", default-features = false, features = [
"form",
"headers",
"http1",
"http2",
"json",
"matched-path",
], optional = true }
axum-extra = { version = "0.9", features = ["typed-header"] }
axum-server = { version = "0.6", features = ["tls-rustls"] }
axum-server = { version = "0.5.1", features = ["tls-rustls"] }
tower = { version = "0.4.13", features = ["util"] }
tower-http = { version = "0.5", features = [
tower-http = { version = "0.4.1", features = [
"add-extension",
"cors",
"sensitive-headers",
"trace",
"util",
] }
tower-service = "0.3"
# Used for matrix spec type definitions and helpers
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
ruma = { git = "https://github.com/ruma/ruma", rev = "5495b85aa311c2805302edb0a7de40399e22b397", features = [
"appservice-api-c",
"client-api",
"compat",
"federation-api",
"push-gateway-api-c",
"rand",
"ring-compat",
"state-res",
"unstable-exhaustive-types",
"unstable-msc2448",
"unstable-msc3575",
"unstable-unspecified",
] }
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
# Async runtime and utilities
tokio = { version = "1.28.1", features = ["fs", "macros", "signal", "sync"] }
@ -56,7 +105,7 @@ persy = { version = "1.4.4", optional = true, features = ["background_ops"] }
# Used for the http request / response body type for Ruma endpoints used with reqwest
bytes = "1.4.0"
http = "1"
http = "0.2.9"
# Used to find data directory for default db path
directories = "5"
# Used for ruma wrapper
@ -70,14 +119,8 @@ rand = "0.8.5"
# Used to hash passwords
rust-argon2 = "2"
# Used to send requests
hyper = "1.1"
hyper-util = { version = "0.1", features = [
"client",
"client-legacy",
"http1",
"http2",
] }
reqwest = { version = "0.12", default-features = false, features = [
hyper = "0.14.26"
reqwest = { version = "0.11.18", default-features = false, features = [
"rustls-tls-native-roots",
"socks",
] }
@ -100,13 +143,11 @@ regex = "1.8.1"
# jwt jsonwebtokens
jsonwebtoken = "9.2.0"
# Performance measurements
opentelemetry = "0.22"
opentelemetry-jaeger-propagator = "0.1"
opentelemetry-otlp = "0.15"
opentelemetry_sdk = { version = "0.22", features = ["rt-tokio"] }
tracing = "0.1.37"
opentelemetry = { version = "0.18.0", features = ["rt-tokio"] }
opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] }
tracing = { version = "0.1.37", features = [] }
tracing-flame = "0.2.0"
tracing-opentelemetry = "0.23"
tracing-opentelemetry = "0.18.0"
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
lru-cache = "0.1.2"
@ -147,25 +188,6 @@ tikv-jemallocator = { version = "0.5.0", features = [
sd-notify = { version = "0.4.1", optional = true }
# Used for matrix spec type definitions and helpers
[dependencies.ruma]
features = [
"appservice-api-c",
"client-api",
"compat",
"federation-api",
"push-gateway-api-c",
"rand",
"ring-compat",
"server-util",
"state-res",
"unstable-exhaustive-types",
"unstable-msc2448",
"unstable-msc3575",
"unstable-unspecified",
]
git = "https://github.com/ruma/ruma"
[dependencies.rocksdb]
features = ["lz4", "multi-threaded-cf", "zstd"]
optional = true
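
The `[workspace.lints.clippy]` table near the top of this file sets the `cargo` and `pedantic` groups to `"warn"` with `priority = -1`; individually named lints default to priority `0`, so they are applied after the groups and can override them, which is what the "To allow us to override certain lints" comment refers to. The same kind of override also works at item level in code. A minimal sketch with a hypothetical function that is not part of this diff:

```rust
// Hypothetical example: `clippy::similar_names` belongs to the `pedantic`
// group warned on in the workspace config; an item-level `allow` still
// silences it locally without touching the group setting.
#[allow(clippy::similar_names)]
fn add_counts(foo_bar: u64, foo_baz: u64) -> u64 {
    foo_bar + foo_baz
}

fn main() {
    println!("{}", add_counts(2, 3));
}
```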


@ -56,13 +56,6 @@ If you have any questions, feel free to
- Send a direct message to `@timokoesters:fachschaften.org` on Matrix
- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)
#### Security
If you believe you have found a security issue, please send a message to [Timo](https://matrix.to/#/@timo:conduit.rs)
and/or [Matthias](https://matrix.to/#/@matthias:ahouansou.cz) on Matrix, or send an email to
[conduit@koesters.xyz](mailto:conduit@koesters.xyz). Please do not disclose details about the issue to anyone else before
a fix is released publicly.
#### Thanks to
Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project.


@ -1,4 +1,4 @@
FROM rust:1.79.0
FROM rust:1.78.0
WORKDIR /workdir


@ -6,8 +6,6 @@
> **Note:** If you update the configuration file, you must restart Conduit for the changes to take effect
> **Note:** You can also configure Conduit by using `CONDUIT_{field_name}` environment variables. To set values inside a table, use `CONDUIT_{table_name}__{field_name}`. Example: `CONDUIT_SERVER_NAME="example.org"`
Conduit's configuration file is divided into the following sections:
- [Global](#global)
@ -58,8 +56,7 @@ The `global` section contains the following fields:
| `turn_secret` | `string` | The TURN secret | `""` |
| `turn_ttl` | `integer` | The TURN TTL in seconds | `86400` |
| `emergency_password` | `string` | Set a password to login as the `conduit` user in case of emergency | N/A |
| `well_known_client` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |
| `well_known_server` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |
| `well_known` | `table` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |
### TLS


@ -16,18 +16,18 @@ are connected to the server running Conduit using something like a VPN.
> **Note**: this will automatically allow you to use [sliding sync][0] without any extra configuration
To configure it, use the following options:
To configure it, use the following options in the `global.well_known` table:
| Field | Type | Description | Default |
| --- | --- | --- | --- |
| `well_known_client` | `String` | The URL that clients should use to connect to Conduit | `https://<server_name>` |
| `well_known_server` | `String` | The hostname and port servers should use to connect to Conduit | `<server_name>:443` |
| `client` | `String` | The URL that clients should use to connect to Conduit | `https://<server_name>` |
| `server` | `String` | The hostname and port servers should use to connect to Conduit | `<server_name>:443` |
### Example
```toml
[global]
well_known_client = "https://matrix.example.org"
well_known_server = "matrix.example.org:443"
[global.well_known]
client = "https://matrix.example.org"
server = "matrix.example.org:443"
```
## Manual


@ -64,7 +64,6 @@ docker run -d -p 8448:6167 \
-e CONDUIT_MAX_REQUEST_SIZE="20000000" \
-e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
-e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
-e CONDUIT_PORT="6167" \
--name conduit <link>
```


@ -17,7 +17,6 @@ You may simply download the binary that fits your machine. Run `uname -m` to see
| Target | Type | Download |
|-|-|-|
| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl.deb?job=artifacts) |
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl?job=artifacts) |
| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/oci-image-amd64.tar.gz?job=artifacts) |
@ -31,7 +30,6 @@ If you use a system with an older glibc version (e.g. RHEL8), you might need to
| Target | Type | Download |
|-|-|-|
| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl.deb?job=artifacts) |
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=artifacts) |
| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=artifacts) |


@ -35,7 +35,3 @@ Here is an example:
Not really. You can reuse the domain of your current server with Conduit, but you will not be able to migrate accounts automatically.
Rooms that were federated can be re-joined via the other participating servers, however media and the like may be deleted from remote servers after some time, and hence might not be recoverable.
## How do I make someone an admin?
Simply invite them to the admin room. Once joined, they can administer the server by interacting with the `@conduit:<server_name>` user.


@ -2,7 +2,7 @@
## General instructions
* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/element-hq/synapse/blob/develop/docs/turn-howto.md).
* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md).
## Edit/Add a few settings to your existing conduit.toml


@ -59,7 +59,7 @@
file = ./rust-toolchain.toml;
# See also `rust-toolchain.toml`
sha256 = "sha256-Ngiz76YP4HTY75GGdH2P+APE/DEIx2R/Dn+BwwOyzZU=";
sha256 = "sha256-opUgs6ckUQCyDxcB9Wy51pqhd0MPGHUVbwRKKPGiwZU=";
};
});
in


@ -23,7 +23,7 @@ mkShell {
};
# Development tools
nativeBuildInputs = [
nativeBuildInputs = default.nativeBuildInputs ++ [
# Always use nightly rustfmt because most of its options are unstable
#
# This needs to come before `toolchain` in this list, otherwise
@ -57,5 +57,5 @@ mkShell {
# Useful for editing the book locally
mdbook
] ++ default.nativeBuildInputs ;
];
}


@ -2,6 +2,7 @@
#
# Other files that need upkeep when this changes:
#
# * `.gitlab-ci.yml`
# * `Cargo.toml`
# * `flake.nix`
#
@ -9,7 +10,7 @@
# If you're having trouble making the relevant changes, bug a maintainer.
[toolchain]
channel = "1.79.0"
channel = "1.78.0"
components = [
# For rust-analyzer
"rust-src",


@ -17,11 +17,8 @@ pub(crate) async fn send_request<T>(
where
T: OutgoingRequest + Debug,
{
let destination = match registration.url {
Some(url) => url,
None => {
return Ok(None);
}
let Some(destination) = registration.url else {
return Ok(None);
};
let hs_token = registration.hs_token.as_str();
@ -33,7 +30,7 @@ where
&[MatrixVersion::V1_0],
)
.unwrap()
.map(|body| body.freeze());
.map(BytesMut::freeze);
let mut parts = http_request.uri().clone().into_parts();
let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned();
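
One side of the hunk above unwraps an `Option` with a `match` whose `None` arm only returns early; the other expresses the same thing with a `let ... else` binding, a pattern that recurs in many of the files below. A self-contained sketch of the two equivalent forms, using hypothetical types rather than the crate's own:

```rust
struct Registration {
    url: Option<String>,
}

// Form 1: a match that only exists to unwrap the Option.
fn destination_via_match(reg: &Registration) -> Option<&str> {
    let destination = match &reg.url {
        Some(url) => url,
        None => return None,
    };
    Some(destination.as_str())
}

// Form 2: the same early return expressed with `let ... else`.
fn destination_via_let_else(reg: &Registration) -> Option<&str> {
    let Some(destination) = &reg.url else {
        return None;
    };
    Some(destination.as_str())
}

fn main() {
    let reg = Registration {
        url: Some("https://appservice.example".to_owned()),
    };
    assert_eq!(destination_via_match(&reg), destination_via_let_else(&reg));
}
```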


@ -69,15 +69,15 @@ pub async fn get_register_available_route(
/// to check if the user id is valid and available.
///
/// - Only works if registration is enabled
/// - If type is guest: ignores all parameters except initial_device_display_name
/// - If type is guest: ignores all parameters except `initial_device_display_name`
/// - If sender is not appservice: Requires UIAA (but we only use a dummy stage)
/// - If type is not guest and no username is given: Always fails after UIAA check
/// - Creates a new account and populates it with default account data
/// - If `inhibit_login` is false: Creates a device and returns device id and access_token
/// - If `inhibit_login` is false: Creates a device and returns device id and `access_token`
pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<register::v3::Response> {
if !services().globals.allow_registration().await && body.appservice_info.is_none() {
if !services().globals.allow_registration() && body.appservice_info.is_none() {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Registration has been disabled.",
));
}
@ -149,7 +149,7 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
stages: vec![AuthType::RegistrationToken],
}],
completed: Vec::new(),
params: Default::default(),
params: Box::default(),
session: None,
auth_error: None,
};
@ -161,7 +161,7 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
stages: vec![AuthType::Dummy],
}],
completed: Vec::new(),
params: Default::default(),
params: Box::default(),
session: None,
auth_error: None,
};
@ -307,7 +307,7 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
/// - The password hash is calculated using argon2 with 32 character salt, the plain password is
/// not saved
///
/// If logout_devices is true it does the following for each device except the sender device:
/// If `logout_devices` is true it does the following for each device except the sender device:
/// - Invalidates access token
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets to-device events
@ -315,11 +315,7 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
pub async fn change_password_route(
body: Ruma<change_password::v3::Request>,
) -> Result<change_password::v3::Response> {
let sender_user = body
.sender_user
.as_ref()
// In the future password changes could be performed with UIA with 3PIDs, but we don't support that currently
.ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let mut uiaainfo = UiaaInfo {
@ -327,7 +323,7 @@ pub async fn change_password_route(
stages: vec![AuthType::Password],
}],
completed: Vec::new(),
params: Default::default(),
params: Box::default(),
session: None,
auth_error: None,
};
@ -379,12 +375,12 @@ pub async fn change_password_route(
/// # `GET _matrix/client/r0/account/whoami`
///
/// Get user_id of the sender user.
/// Get `user_id` of the sender user.
///
/// Note: Also works for Application Services
pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let device_id = body.sender_device.as_ref().cloned();
let device_id = body.sender_device.clone();
Ok(whoami::v3::Response {
user_id: sender_user.clone(),
@ -406,11 +402,7 @@ pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3:
pub async fn deactivate_route(
body: Ruma<deactivate::v3::Request>,
) -> Result<deactivate::v3::Response> {
let sender_user = body
.sender_user
.as_ref()
// In the future password changes could be performed with UIA with SSO, but we don't support that currently
.ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let mut uiaainfo = UiaaInfo {
@ -418,7 +410,7 @@ pub async fn deactivate_route(
stages: vec![AuthType::Password],
}],
completed: Vec::new(),
params: Default::default(),
params: Box::default(),
session: None,
auth_error: None,
};
@ -483,7 +475,7 @@ pub async fn request_3pid_management_token_via_email_route(
) -> Result<request_3pid_management_token_via_email::v3::Response> {
Err(Error::BadRequest(
ErrorKind::ThreepidDenied,
"Third party identifiers are currently unsupported by this server implementation",
"Third party identifier is not allowed",
))
}
@ -497,6 +489,6 @@ pub async fn request_3pid_management_token_via_msisdn_route(
) -> Result<request_3pid_management_token_via_msisdn::v3::Response> {
Err(Error::BadRequest(
ErrorKind::ThreepidDenied,
"Third party identifiers are currently unsupported by this server implementation",
"Third party identifier is not allowed",
))
}
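
Several hunks above pair `params: Default::default()` with `params: Box::default()` for the boxed `params` field of the UIAA info. Naming the concrete container instead of leaving it entirely to inference is the form clippy's pedantic `default_trait_access` lint prefers (the lint attribution is my assumption; the diff does not name it). A minimal sketch with stand-in types:

```rust
#[derive(Debug, Default)]
struct Params {
    json: String,
}

#[derive(Debug)]
struct UiaaLike {
    // Stand-in for the boxed `params` field in the real struct.
    params: Box<Params>,
}

fn main() {
    // Both lines build the same value; the second spells out the container type.
    let inferred = UiaaLike { params: Default::default() };
    let explicit = UiaaLike { params: Box::default() };
    println!("{inferred:?} vs {explicit:?}");
}
```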


@ -18,8 +18,6 @@ use ruma::{
pub async fn create_alias_route(
body: Ruma<create_alias::v3::Request>,
) -> Result<create_alias::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if body.room_alias.server_name() != services().globals.server_name() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
@ -57,7 +55,7 @@ pub async fn create_alias_route(
services()
.rooms
.alias
.set_alias(&body.room_alias, &body.room_id, sender_user)?;
.set_alias(&body.room_alias, &body.room_id)?;
Ok(create_alias::v3::Response::new())
}
@ -66,12 +64,11 @@ pub async fn create_alias_route(
///
/// Deletes a room alias from this server.
///
/// - TODO: additional access control checks
/// - TODO: Update canonical alias event
pub async fn delete_alias_route(
body: Ruma<delete_alias::v3::Request>,
) -> Result<delete_alias::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if body.room_alias.server_name() != services().globals.server_name() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
@ -97,10 +94,7 @@ pub async fn delete_alias_route(
));
}
services()
.rooms
.alias
.remove_alias(&body.room_alias, sender_user)?;
services().rooms.alias.remove_alias(&body.room_alias)?;
// TODO: update alt_aliases?
@ -127,7 +121,7 @@ pub(crate) async fn get_alias_helper(
.send_federation_request(
room_alias.server_name(),
federation::query::get_room_information::v1::Request {
room_alias: room_alias.to_owned(),
room_alias: room_alias.clone(),
},
)
.await?;
@ -172,14 +166,11 @@ pub(crate) async fn get_alias_helper(
}
};
let room_id = match room_id {
Some(room_id) => room_id,
None => {
return Err(Error::BadRequest(
ErrorKind::NotFound,
"Room with alias not found.",
))
}
let Some(room_id) = room_id else {
return Err(Error::BadRequest(
ErrorKind::NotFound,
"Room with alias not found.",
));
};
Ok(get_alias::v3::Response::new(


@ -86,7 +86,7 @@ pub async fn get_backup_info_route(
etag: services()
.key_backups
.get_etag(sender_user, &body.version)?,
version: body.version.to_owned(),
version: body.version.clone(),
})
}
@ -139,7 +139,7 @@ pub async fn add_backup_keys_route(
room_id,
session_id,
key_data,
)?
)?;
}
}
@ -185,7 +185,7 @@ pub async fn add_backup_keys_for_room_route(
&body.room_id,
session_id,
key_data,
)?
)?;
}
Ok(add_backup_keys_for_room::v3::Response {


@ -11,7 +11,7 @@ use tracing::error;
/// Allows loading room history around an event.
///
/// - Only works if the user is joined (TODO: always allow, but only show events if the user was
/// joined, depending on history_visibility)
/// joined, depending on `history_visibility`)
pub async fn get_context_route(
body: Ruma<get_context::v3::Request>,
) -> Result<get_context::v3::Response> {
@ -22,7 +22,7 @@ pub async fn get_context_route(
LazyLoadOptions::Enabled {
include_redundant_members,
} => (true, *include_redundant_members),
_ => (false, false),
LazyLoadOptions::Disabled => (false, false),
};
let mut lazy_loaded = HashSet::new();
@ -54,7 +54,7 @@ pub async fn get_context_route(
.user_can_see_event(sender_user, &room_id, &body.event_id)?
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"You don't have permission to view this event.",
));
}
@ -103,8 +103,7 @@ pub async fn get_context_route(
let start_token = events_before
.last()
.map(|(count, _)| count.stringify())
.unwrap_or_else(|| base_token.stringify());
.map_or_else(|| base_token.stringify(), |(count, _)| count.stringify());
let events_before: Vec<_> = events_before
.into_iter()
@ -159,8 +158,7 @@ pub async fn get_context_route(
let end_token = events_after
.last()
.map(|(count, _)| count.stringify())
.unwrap_or_else(|| base_token.stringify());
.map_or_else(|| base_token.stringify(), |(count, _)| count.stringify());
let events_after: Vec<_> = events_after
.into_iter()
@ -176,21 +174,15 @@ pub async fn get_context_route(
.get_statekey_from_short(shortstatekey)?;
if event_type != StateEventType::RoomMember {
let pdu = match services().rooms.timeline.get_pdu(&id)? {
Some(pdu) => pdu,
None => {
error!("Pdu in state not found: {}", id);
continue;
}
let Some(pdu) = services().rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};
state.push(pdu.to_state_event());
} else if !lazy_load_enabled || lazy_loaded.contains(&state_key) {
let pdu = match services().rooms.timeline.get_pdu(&id)? {
Some(pdu) => pdu,
None => {
error!("Pdu in state not found: {}", id);
continue;
}
let Some(pdu) = services().rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};
state.push(pdu.to_state_event());
}
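
The two token computations above show `.map(...).unwrap_or_else(...)` alongside the equivalent single `.map_or_else(...)`; collapsing the pair is what clippy's `map_unwrap_or` lint suggests (lint name is my assumption). A standalone sketch of the equivalence:

```rust
fn main() {
    let events_before: Vec<u64> = vec![3, 5, 8];
    let base_token = 100u64;

    // map the last count, falling back to the base token
    let start_a = events_before
        .last()
        .map(|count| count.to_string())
        .unwrap_or_else(|| base_token.to_string());

    // the same logic as a single map_or_else, as in the hunk above
    let start_b = events_before
        .last()
        .map_or_else(|| base_token.to_string(), |count| count.to_string());

    assert_eq!(start_a, start_b);
}
```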


@ -83,7 +83,7 @@ pub async fn delete_device_route(
stages: vec![AuthType::Password],
}],
completed: Vec::new(),
params: Default::default(),
params: Box::default(),
session: None,
auth_error: None,
};
@ -137,7 +137,7 @@ pub async fn delete_devices_route(
stages: vec![AuthType::Password],
}],
completed: Vec::new(),
params: Default::default(),
params: Box::default(),
session: None,
auth_error: None,
};
@ -162,7 +162,7 @@ pub async fn delete_devices_route(
}
for device_id in &body.devices {
services().users.remove_device(sender_user, device_id)?
services().users.remove_device(sender_user, device_id)?;
}
Ok(delete_devices::v3::Response {})


@ -13,9 +13,8 @@ pub async fn get_filter_route(
body: Ruma<get_filter::v3::Request>,
) -> Result<get_filter::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let filter = match services().users.get_filter(sender_user, &body.filter_id)? {
Some(filter) => filter,
None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")),
let Some(filter) = services().users.get_filter(sender_user, &body.filter_id)? else {
return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found."));
};
Ok(get_filter::v3::Response::new(filter))


@ -106,7 +106,7 @@ pub async fn upload_signing_keys_route(
stages: vec![AuthType::Password],
}],
completed: Vec::new(),
params: Default::default(),
params: Box::default(),
session: None,
auth_error: None,
};
@ -173,7 +173,6 @@ pub async fn upload_signatures_route(
"Invalid signature.",
))?
.clone()
.into_iter()
{
// Signature validation?
let signature = (
@ -401,48 +400,45 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
.collect();
while let Some((server, response)) = futures.next().await {
match response {
Ok(Ok(response)) => {
for (user, masterkey) in response.master_keys {
let (master_key_id, mut master_key) =
services().users.parse_master_key(&user, &masterkey)?;
if let Ok(Ok(response)) = response {
for (user, masterkey) in response.master_keys {
let (master_key_id, mut master_key) =
services().users.parse_master_key(&user, &masterkey)?;
if let Some(our_master_key) = services().users.get_key(
&master_key_id,
sender_user,
&user,
&allowed_signatures,
)? {
let (_, our_master_key) =
services().users.parse_master_key(&user, &our_master_key)?;
master_key.signatures.extend(our_master_key.signatures);
}
let json = serde_json::to_value(master_key).expect("to_value always works");
let raw = serde_json::from_value(json).expect("Raw::from_value always works");
services().users.add_cross_signing_keys(
&user, &raw, &None, &None,
false, // Dont notify. A notification would trigger another key request resulting in an endless loop
)?;
master_keys.insert(user, raw);
if let Some(our_master_key) = services().users.get_key(
&master_key_id,
sender_user,
&user,
&allowed_signatures,
)? {
let (_, our_master_key) =
services().users.parse_master_key(&user, &our_master_key)?;
master_key.signatures.extend(our_master_key.signatures);
}
self_signing_keys.extend(response.self_signing_keys);
device_keys.extend(response.device_keys);
let json = serde_json::to_value(master_key).expect("to_value always works");
let raw = serde_json::from_value(json).expect("Raw::from_value always works");
services().users.add_cross_signing_keys(
&user, &raw, &None, &None,
false, // Dont notify. A notification would trigger another key request resulting in an endless loop
)?;
master_keys.insert(user, raw);
}
_ => {
back_off(server.to_owned()).await;
failures.insert(server.to_string(), json!({}));
}
self_signing_keys.extend(response.self_signing_keys);
device_keys.extend(response.device_keys);
} else {
back_off(server.to_owned()).await;
failures.insert(server.to_string(), json!({}));
}
}
Ok(get_keys::v3::Response {
failures,
device_keys,
master_keys,
self_signing_keys,
user_signing_keys,
device_keys,
failures,
})
}


@ -1,24 +1,12 @@
// Unauthenticated media is deprecated
#![allow(deprecated)]
use std::time::Duration;
use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma};
use http::header::{CONTENT_DISPOSITION, CONTENT_TYPE};
use ruma::{
api::{
client::{
authenticated_media::{
get_content, get_content_as_filename, get_content_thumbnail, get_media_config,
},
error::ErrorKind,
media::{self, create_content},
},
federation::authenticated_media::{self as federation_media, FileOrLocation},
use ruma::api::client::{
error::ErrorKind,
media::{
create_content, get_content, get_content_as_filename, get_content_thumbnail,
get_media_config,
},
http_headers::{ContentDisposition, ContentDispositionType},
media::Method,
ServerName, UInt,
};
const MXC_LENGTH: usize = 32;
@ -27,22 +15,19 @@ const MXC_LENGTH: usize = 32;
///
/// Returns max upload size.
pub async fn get_media_config_route(
_body: Ruma<media::get_media_config::v3::Request>,
) -> Result<media::get_media_config::v3::Response> {
Ok(media::get_media_config::v3::Response {
_body: Ruma<get_media_config::v3::Request>,
) -> Result<get_media_config::v3::Response> {
Ok(get_media_config::v3::Response {
upload_size: services().globals.max_request_size().into(),
})
}
/// # `GET /_matrix/client/v1/media/config`
///
/// Returns max upload size.
pub async fn get_media_config_auth_route(
_body: Ruma<get_media_config::v1::Request>,
) -> Result<get_media_config::v1::Response> {
Ok(get_media_config::v1::Response {
upload_size: services().globals.max_request_size().into(),
})
fn sanitize_content_type(content_type: String) -> String {
if content_type == "image/jpeg" || content_type == "image/png" {
content_type
} else {
"application/octet-stream".to_owned()
}
}
/// # `POST /_matrix/media/r0/upload`
@ -64,10 +49,10 @@ pub async fn create_content_route(
.media
.create(
mxc.clone(),
Some(
ContentDisposition::new(ContentDispositionType::Inline)
.with_filename(body.filename.clone()),
),
body.filename
.as_ref()
.map(|filename| "inline; filename=".to_owned() + filename)
.as_deref(),
body.content_type.as_deref(),
&body.file,
)
@ -81,67 +66,28 @@ pub async fn create_content_route(
pub async fn get_remote_content(
mxc: &str,
server_name: &ServerName,
server_name: &ruma::ServerName,
media_id: String,
) -> Result<get_content::v1::Response, Error> {
let content_response = match services()
) -> Result<get_content::v3::Response, Error> {
let content_response = services()
.sending
.send_federation_request(
server_name,
federation_media::get_content::v1::Request {
media_id: media_id.clone(),
get_content::v3::Request {
allow_remote: false,
server_name: server_name.to_owned(),
media_id,
timeout_ms: Duration::from_secs(20),
allow_redirect: false,
},
)
.await
{
Ok(federation_media::get_content::v1::Response {
metadata: _,
content: FileOrLocation::File(content),
}) => get_content::v1::Response {
file: content.file,
content_type: content.content_type,
content_disposition: content.content_disposition,
},
Ok(federation_media::get_content::v1::Response {
metadata: _,
content: FileOrLocation::Location(url),
}) => get_location_content(url).await?,
Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => {
let media::get_content::v3::Response {
file,
content_type,
content_disposition,
..
} = services()
.sending
.send_federation_request(
server_name,
media::get_content::v3::Request {
server_name: server_name.to_owned(),
media_id,
timeout_ms: Duration::from_secs(20),
allow_remote: false,
allow_redirect: true,
},
)
.await?;
get_content::v1::Response {
file,
content_type,
content_disposition,
}
}
Err(e) => return Err(e),
};
.await?;
services()
.media
.create(
mxc.to_owned(),
content_response.content_disposition.clone(),
content_response.content_disposition.as_deref(),
content_response.content_type.as_deref(),
&content_response.file,
)
@ -156,57 +102,31 @@ pub async fn get_remote_content(
///
/// - Only allows federation if `allow_remote` is true
pub async fn get_content_route(
body: Ruma<media::get_content::v3::Request>,
) -> Result<media::get_content::v3::Response> {
let get_content::v1::Response {
file,
body: Ruma<get_content::v3::Request>,
) -> Result<get_content::v3::Response> {
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
if let Some(FileMeta {
content_disposition,
content_type,
} = get_content(&body.server_name, body.media_id.clone(), body.allow_remote).await?;
Ok(media::get_content::v3::Response {
file,
content_type,
content_disposition,
cross_origin_resource_policy: Some("cross-origin".to_owned()),
})
}
/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}`
///
/// Load media from our server or over federation.
pub async fn get_content_auth_route(
body: Ruma<get_content::v1::Request>,
) -> Result<get_content::v1::Response> {
get_content(&body.server_name, body.media_id.clone(), true).await
}
async fn get_content(
server_name: &ServerName,
media_id: String,
allow_remote: bool,
) -> Result<get_content::v1::Response, Error> {
let mxc = format!("mxc://{}/{}", server_name, media_id);
if let Ok(Some(FileMeta {
content_disposition,
content_type,
file,
})) = services().media.get(mxc.clone()).await
..
}) = services().media.get(mxc.clone()).await?
{
Ok(get_content::v1::Response {
Ok(get_content::v3::Response {
file,
content_type,
content_disposition: Some(content_disposition),
content_type: Some("application/octet-stream".to_owned()),
content_disposition,
cross_origin_resource_policy: Some("cross-origin".to_owned()),
})
} else if server_name != services().globals.server_name() && allow_remote {
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
let remote_content_response =
get_remote_content(&mxc, server_name, media_id.clone()).await?;
get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
Ok(get_content::v1::Response {
Ok(get_content::v3::Response {
content_disposition: remote_content_response.content_disposition,
content_type: remote_content_response.content_type,
content_type: Some("application/octet-stream".to_owned()),
file: remote_content_response.file,
cross_origin_resource_policy: Some("cross-origin".to_owned()),
})
} else {
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
@ -219,74 +139,26 @@ async fn get_content(
///
/// - Only allows federation if `allow_remote` is true
pub async fn get_content_as_filename_route(
body: Ruma<media::get_content_as_filename::v3::Request>,
) -> Result<media::get_content_as_filename::v3::Response> {
let get_content_as_filename::v1::Response {
file,
content_type,
content_disposition,
} = get_content_as_filename(
&body.server_name,
body.media_id.clone(),
body.filename.clone(),
body.allow_remote,
)
.await?;
body: Ruma<get_content_as_filename::v3::Request>,
) -> Result<get_content_as_filename::v3::Response> {
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
Ok(media::get_content_as_filename::v3::Response {
file,
content_type,
content_disposition,
cross_origin_resource_policy: Some("cross-origin".to_owned()),
})
}
/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}/{fileName}`
///
/// Load media from our server or over federation, permitting desired filename.
pub async fn get_content_as_filename_auth_route(
body: Ruma<get_content_as_filename::v1::Request>,
) -> Result<get_content_as_filename::v1::Response, Error> {
get_content_as_filename(
&body.server_name,
body.media_id.clone(),
body.filename.clone(),
true,
)
.await
}
async fn get_content_as_filename(
server_name: &ServerName,
media_id: String,
filename: String,
allow_remote: bool,
) -> Result<get_content_as_filename::v1::Response, Error> {
let mxc = format!("mxc://{}/{}", server_name, media_id);
if let Ok(Some(FileMeta {
file, content_type, ..
})) = services().media.get(mxc.clone()).await
{
Ok(get_content_as_filename::v1::Response {
if let Some(FileMeta { file, .. }) = services().media.get(mxc.clone()).await? {
Ok(get_content_as_filename::v3::Response {
file,
content_type,
content_disposition: Some(
ContentDisposition::new(ContentDispositionType::Inline)
.with_filename(Some(filename.clone())),
),
content_type: Some("application/octet-stream".to_owned()),
content_disposition: Some(format!("inline; filename={}", body.filename)),
cross_origin_resource_policy: Some("cross-origin".to_owned()),
})
} else if server_name != services().globals.server_name() && allow_remote {
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
let remote_content_response =
get_remote_content(&mxc, server_name, media_id.clone()).await?;
get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
Ok(get_content_as_filename::v1::Response {
content_disposition: Some(
ContentDisposition::new(ContentDispositionType::Inline)
.with_filename(Some(filename.clone())),
),
content_type: remote_content_response.content_type,
Ok(get_content_as_filename::v3::Response {
content_disposition: Some(format!("inline: filename={}", body.filename)),
content_type: Some("application/octet-stream".to_owned()),
file: remote_content_response.file,
cross_origin_resource_policy: Some("cross-origin".to_owned()),
})
} else {
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
@ -299,169 +171,66 @@ async fn get_content_as_filename(
///
/// - Only allows federation if `allow_remote` is true
pub async fn get_content_thumbnail_route(
body: Ruma<media::get_content_thumbnail::v3::Request>,
) -> Result<media::get_content_thumbnail::v3::Response> {
let get_content_thumbnail::v1::Response { file, content_type } = get_content_thumbnail(
&body.server_name,
body.media_id.clone(),
body.height,
body.width,
body.method.clone(),
body.animated,
body.allow_remote,
)
.await?;
body: Ruma<get_content_thumbnail::v3::Request>,
) -> Result<get_content_thumbnail::v3::Response> {
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
Ok(media::get_content_thumbnail::v3::Response {
file,
content_type,
cross_origin_resource_policy: Some("cross-origin".to_owned()),
})
}
/// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}`
///
/// Load media thumbnail from our server or over federation.
pub async fn get_content_thumbnail_auth_route(
body: Ruma<get_content_thumbnail::v1::Request>,
) -> Result<get_content_thumbnail::v1::Response> {
get_content_thumbnail(
&body.server_name,
body.media_id.clone(),
body.height,
body.width,
body.method.clone(),
body.animated,
true,
)
.await
}
async fn get_content_thumbnail(
server_name: &ServerName,
media_id: String,
height: UInt,
width: UInt,
method: Option<Method>,
animated: Option<bool>,
allow_remote: bool,
) -> Result<get_content_thumbnail::v1::Response, Error> {
let mxc = format!("mxc://{}/{}", server_name, media_id);
if let Ok(Some(FileMeta {
if let Some(FileMeta {
file, content_type, ..
})) = services()
}) = services()
.media
.get_thumbnail(
mxc.clone(),
width
body.width
.try_into()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
height
body.height
.try_into()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Height is invalid."))?,
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
)
.await
.await?
{
Ok(get_content_thumbnail::v1::Response { file, content_type })
} else if server_name != services().globals.server_name() && allow_remote {
let thumbnail_response = match services()
Ok(get_content_thumbnail::v3::Response {
file,
content_type: content_type.map(sanitize_content_type),
cross_origin_resource_policy: Some("cross-origin".to_owned()),
})
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
let mut get_thumbnail_response = services()
.sending
.send_federation_request(
server_name,
federation_media::get_content_thumbnail::v1::Request {
height,
width,
method: method.clone(),
media_id: media_id.clone(),
&body.server_name,
get_content_thumbnail::v3::Request {
allow_remote: false,
height: body.height,
width: body.width,
method: body.method.clone(),
server_name: body.server_name.clone(),
media_id: body.media_id.clone(),
timeout_ms: Duration::from_secs(20),
animated,
allow_redirect: false,
},
)
.await
{
Ok(federation_media::get_content_thumbnail::v1::Response {
metadata: _,
content: FileOrLocation::File(content),
}) => get_content_thumbnail::v1::Response {
file: content.file,
content_type: content.content_type,
},
Ok(federation_media::get_content_thumbnail::v1::Response {
metadata: _,
content: FileOrLocation::Location(url),
}) => {
let get_content::v1::Response {
file, content_type, ..
} = get_location_content(url).await?;
get_content_thumbnail::v1::Response { file, content_type }
}
Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => {
let media::get_content_thumbnail::v3::Response {
file, content_type, ..
} = services()
.sending
.send_federation_request(
server_name,
media::get_content_thumbnail::v3::Request {
height,
width,
method: method.clone(),
server_name: server_name.to_owned(),
media_id: media_id.clone(),
timeout_ms: Duration::from_secs(20),
allow_redirect: false,
animated,
allow_remote: false,
},
)
.await?;
get_content_thumbnail::v1::Response { file, content_type }
}
Err(e) => return Err(e),
};
.await?;
services()
.media
.upload_thumbnail(
mxc,
thumbnail_response.content_type.as_deref(),
width.try_into().expect("all UInts are valid u32s"),
height.try_into().expect("all UInts are valid u32s"),
&thumbnail_response.file,
None,
get_thumbnail_response.content_type.as_deref(),
body.width.try_into().expect("all UInts are valid u32s"),
body.height.try_into().expect("all UInts are valid u32s"),
&get_thumbnail_response.file,
)
.await?;
Ok(thumbnail_response)
get_thumbnail_response.content_type = get_thumbnail_response
.content_type
.map(sanitize_content_type);
Ok(get_thumbnail_response)
} else {
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
}
}
async fn get_location_content(url: String) -> Result<get_content::v1::Response, Error> {
let client = services().globals.default_client();
let response = client.get(url).send().await?;
let headers = response.headers();
let content_type = headers
.get(CONTENT_TYPE)
.and_then(|header| header.to_str().ok())
.map(ToOwned::to_owned);
let content_disposition = headers
.get(CONTENT_DISPOSITION)
.map(|header| header.as_bytes())
.map(TryFrom::try_from)
.and_then(Result::ok);
let file = response.bytes().await?.to_vec();
Ok(get_content::v1::Response {
file,
content_type,
content_disposition,
})
}
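
For reference, the `sanitize_content_type` helper shown earlier in this file only lets JPEG and PNG content types through; anything else is served as `application/octet-stream`. A standalone sketch of that behaviour (same shape as the helper, hypothetical usage):

```rust
fn sanitize_content_type(content_type: String) -> String {
    // Whitelist the two image types; everything else becomes a generic octet stream.
    if content_type == "image/jpeg" || content_type == "image/png" {
        content_type
    } else {
        "application/octet-stream".to_owned()
    }
}

fn main() {
    assert_eq!(sanitize_content_type("image/png".to_owned()), "image/png");
    assert_eq!(
        sanitize_content_type("image/svg+xml".to_owned()),
        "application/octet-stream"
    );
}
```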

File diff suppressed because it is too large.


@ -43,7 +43,7 @@ pub async fn send_message_event_route(
&& !services().globals.allow_encryption()
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Encryption has been disabled",
));
}
@ -84,11 +84,6 @@ pub async fn send_message_event_route(
unsigned: Some(unsigned),
state_key: None,
redacts: None,
timestamp: if body.appservice_info.is_some() {
body.timestamp
} else {
None
},
},
sender_user,
&body.room_id,
@ -115,7 +110,7 @@ pub async fn send_message_event_route(
/// Allows paginating through room history.
///
/// - Only works if the user is joined (TODO: always allow, but only show events where the user was
/// joined, depending on history_visibility)
/// joined, depending on `history_visibility`)
pub async fn get_message_events_route(
body: Ruma<get_message_events::v3::Request>,
) -> Result<get_message_events::v3::Response> {


@ -11,7 +11,6 @@ mod keys;
mod media;
mod membership;
mod message;
mod openid;
mod presence;
mod profile;
mod push;
@ -48,7 +47,6 @@ pub use keys::*;
pub use media::*;
pub use membership::*;
pub use message::*;
pub use openid::*;
pub use presence::*;
pub use profile::*;
pub use push::*;


@ -1,23 +0,0 @@
use std::time::Duration;
use ruma::{api::client::account, authentication::TokenType};
use crate::{services, Result, Ruma};
/// # `POST /_matrix/client/r0/user/{userId}/openid/request_token`
///
/// Request an OpenID token to verify identity with third-party services.
///
/// - The token generated is only valid for the OpenID API.
pub async fn create_openid_token_route(
body: Ruma<account::request_openid_token::v3::Request>,
) -> Result<account::request_openid_token::v3::Response> {
let (access_token, expires_in) = services().users.create_openid_token(&body.user_id)?;
Ok(account::request_openid_token::v3::Response {
access_token,
token_type: TokenType::Bearer,
matrix_server_name: services().globals.server_name().to_owned(),
expires_in: Duration::from_secs(expires_in),
})
}


@ -65,7 +65,6 @@ pub async fn set_displayname_route(
unsigned: None,
state_key: Some(sender_user.to_string()),
redacts: None,
timestamp: None,
},
room_id,
))
@ -148,7 +147,7 @@ pub async fn get_displayname_route(
/// # `PUT /_matrix/client/r0/profile/{userId}/avatar_url`
///
/// Updates the avatar_url and blurhash.
/// Updates the `avatar_url` and blurhash.
///
/// - Also makes sure other users receive the update using presence EDUs
pub async fn set_avatar_url_route(
@ -201,7 +200,6 @@ pub async fn set_avatar_url_route(
unsigned: None,
state_key: Some(sender_user.to_string()),
redacts: None,
timestamp: None,
},
room_id,
))
@ -254,9 +252,9 @@ pub async fn set_avatar_url_route(
/// # `GET /_matrix/client/r0/profile/{userId}/avatar_url`
///
/// Returns the avatar_url and blurhash of the user.
/// Returns the `avatar_url` and blurhash of the user.
///
/// - If user is on another server: Fetches avatar_url and blurhash over federation
/// - If user is on another server: Fetches `avatar_url` and blurhash over federation
pub async fn get_avatar_url_route(
body: Ruma<get_avatar_url::v3::Request>,
) -> Result<get_avatar_url::v3::Response> {
@ -286,7 +284,7 @@ pub async fn get_avatar_url_route(
/// # `GET /_matrix/client/r0/profile/{userId}`
///
/// Returns the displayname, avatar_url and blurhash of the user.
/// Returns the displayname, `avatar_url` and blurhash of the user.
///
/// - If user is on another server: Fetches profile over federation
pub async fn get_profile_route(
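
The doc-comment edits in this file show identifiers such as `avatar_url` and `history_visibility` in both plain and backticked form; backticking identifiers is the style clippy's `doc_markdown` lint asks for. A minimal sketch with a hypothetical function, not taken from this diff:

```rust
/// Returns the `avatar_url` for the given user.
///
/// Backticks around identifiers keep `clippy::doc_markdown` (and rustdoc
/// rendering) happy; this function exists only as an illustration.
fn describe_avatar_url(user_id: &str) -> String {
    format!("avatar_url requested for {user_id}")
}

fn main() {
    println!("{}", describe_avatar_url("@alice:example.org"));
}
```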


@ -281,7 +281,7 @@ pub async fn get_pushrule_enabled_route(
let global = account_data.content.global;
let enabled = global
.get(body.kind.clone(), &body.rule_id)
.map(|r| r.enabled())
.map(ruma::push::AnyPushRuleRef::enabled)
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"Push rule not found.",

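The hunk above swaps the closure `|r| r.enabled()` for the method path `ruma::push::AnyPushRuleRef::enabled`. A self-contained sketch of the same refactor using only standard-library types:

```rust
fn main() {
    let rule_ids = vec!["roomnotif".to_owned(), "tombstone".to_owned()];

    // Closure form.
    let lens_a: Vec<usize> = rule_ids.iter().map(|id| id.len()).collect();

    // Method-path form, matching the style used in the hunk above.
    let lens_b: Vec<usize> = rule_ids.iter().map(String::len).collect();

    assert_eq!(lens_a, lens_b);
}
```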

@ -140,7 +140,7 @@ pub async fn create_receipt_route(
receipts.insert(ReceiptType::Read, user_receipts);
let mut receipt_content = BTreeMap::new();
receipt_content.insert(body.event_id.to_owned(), receipts);
receipt_content.insert(body.event_id.clone(), receipts);
services().rooms.edus.read_receipt.readreceipt_update(
sender_user,


@ -44,7 +44,6 @@ pub async fn redact_event_route(
unsigned: None,
state_key: None,
redacts: Some(body.event_id.into()),
timestamp: None,
},
sender_user,
&body.room_id,


@ -3,7 +3,7 @@ use ruma::api::client::relations::{
get_relating_events_with_rel_type_and_event_type,
};
use crate::{services, Result, Ruma};
use crate::{service::rooms::timeline::PduCount, services, Result, Ruma};
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`
pub async fn get_relating_events_with_rel_type_and_event_type_route(
@ -11,6 +11,27 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
) -> Result<get_relating_events_with_rel_type_and_event_type::v1::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let from = match body.from.clone() {
Some(from) => PduCount::try_from_string(&from)?,
None => match ruma::api::Direction::Backward {
// TODO: fix ruma so `body.dir` exists
ruma::api::Direction::Forward => PduCount::min(),
ruma::api::Direction::Backward => PduCount::max(),
},
};
let to = body
.to
.as_ref()
.and_then(|t| PduCount::try_from_string(t).ok());
// Use limit or else 10, with maximum 100
let limit = body
.limit
.and_then(|u| u32::try_from(u).ok())
.map_or(10_usize, |u| u as usize)
.min(100);
let res = services()
.rooms
.pdu_metadata
@ -18,13 +39,11 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
sender_user,
&body.room_id,
&body.event_id,
Some(body.event_type.clone()),
Some(body.rel_type.clone()),
body.from.clone(),
body.to.clone(),
body.limit,
body.recurse,
&body.dir,
&Some(body.event_type.clone()),
&Some(body.rel_type.clone()),
from,
to,
limit,
)?;
Ok(
@ -32,7 +51,6 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
chunk: res.chunk,
next_batch: res.next_batch,
prev_batch: res.prev_batch,
recursion_depth: res.recursion_depth,
},
)
}
@ -43,6 +61,27 @@ pub async fn get_relating_events_with_rel_type_route(
) -> Result<get_relating_events_with_rel_type::v1::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let from = match body.from.clone() {
Some(from) => PduCount::try_from_string(&from)?,
None => match ruma::api::Direction::Backward {
// TODO: fix ruma so `body.dir` exists
ruma::api::Direction::Forward => PduCount::min(),
ruma::api::Direction::Backward => PduCount::max(),
},
};
let to = body
.to
.as_ref()
.and_then(|t| PduCount::try_from_string(t).ok());
// Use limit or else 10, with maximum 100
let limit = body
.limit
.and_then(|u| u32::try_from(u).ok())
.map_or(10_usize, |u| u as usize)
.min(100);
let res = services()
.rooms
.pdu_metadata
@ -50,20 +89,17 @@ pub async fn get_relating_events_with_rel_type_route(
sender_user,
&body.room_id,
&body.event_id,
None,
Some(body.rel_type.clone()),
body.from.clone(),
body.to.clone(),
body.limit,
body.recurse,
&body.dir,
&None,
&Some(body.rel_type.clone()),
from,
to,
limit,
)?;
Ok(get_relating_events_with_rel_type::v1::Response {
chunk: res.chunk,
next_batch: res.next_batch,
prev_batch: res.prev_batch,
recursion_depth: res.recursion_depth,
})
}
@ -73,6 +109,27 @@ pub async fn get_relating_events_route(
) -> Result<get_relating_events::v1::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let from = match body.from.clone() {
Some(from) => PduCount::try_from_string(&from)?,
None => match ruma::api::Direction::Backward {
// TODO: fix ruma so `body.dir` exists
ruma::api::Direction::Forward => PduCount::min(),
ruma::api::Direction::Backward => PduCount::max(),
},
};
let to = body
.to
.as_ref()
.and_then(|t| PduCount::try_from_string(t).ok());
// Use limit or else 10, with maximum 100
let limit = body
.limit
.and_then(|u| u32::try_from(u).ok())
.map_or(10_usize, |u| u as usize)
.min(100);
services()
.rooms
.pdu_metadata
@ -80,12 +137,10 @@ pub async fn get_relating_events_route(
sender_user,
&body.room_id,
&body.event_id,
None,
None,
body.from.clone(),
body.to.clone(),
body.limit,
body.recurse,
&body.dir,
&None,
&None,
from,
to,
limit,
)
}
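
All three relation routes in this file share the same paging-limit computation: take the client-supplied limit if it fits in a `u32`, default to 10 otherwise, and cap the result at 100. A standalone sketch of that computation (the helper name is hypothetical):

```rust
fn effective_limit(requested: Option<u64>) -> usize {
    // Mirrors the hunks above: use the requested limit or else 10, capped at 100.
    requested
        .and_then(|value| u32::try_from(value).ok())
        .map_or(10_usize, |value| value as usize)
        .min(100)
}

fn main() {
    assert_eq!(effective_limit(None), 10);
    assert_eq!(effective_limit(Some(25)), 25);
    assert_eq!(effective_limit(Some(1_000)), 100);
    // Values that do not fit in a u32 fall back to the default of 10.
    assert_eq!(effective_limit(Some(u64::MAX)), 10);
}
```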


@ -14,14 +14,11 @@ pub async fn report_event_route(
) -> Result<report_content::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let pdu = match services().rooms.timeline.get_pdu(&body.event_id)? {
Some(pdu) => pdu,
_ => {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid Event ID",
))
}
let Some(pdu) = services().rooms.timeline.get_pdu(&body.event_id)? else {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid Event ID",
));
};
if let Some(true) = body.score.map(|s| s > int!(0) || s < int!(-100)) {


@ -34,7 +34,7 @@ use tracing::{info, warn};
/// Creates a new room.
///
/// - Room ID is randomly generated
/// - Create alias if room_alias_name is set
/// - Create alias if `room_alias_name` is set
/// - Send create event
/// - Join sender user
/// - Send power levels event
@ -72,7 +72,7 @@ pub async fn create_room_route(
&& !services().users.is_admin(sender_user)?
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Room creation has been disabled.",
));
}
@ -228,9 +228,8 @@ pub async fn create_room_route(
event_type: TimelineEventType::RoomCreate,
content: to_raw_value(&content).expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
sender_user,
&room_id,
@ -259,7 +258,6 @@ pub async fn create_room_route(
unsigned: None,
state_key: Some(sender_user.to_string()),
redacts: None,
timestamp: None,
},
sender_user,
&room_id,
@ -271,9 +269,10 @@ pub async fn create_room_route(
// Figure out preset. We need it for preset specific events
let preset = body.preset.clone().unwrap_or(match &body.visibility {
room::Visibility::Private => RoomPreset::PrivateChat,
room::Visibility::Public => RoomPreset::PublicChat,
_ => RoomPreset::PrivateChat, // Room visibility should not be custom
// Room visibility is set to private, or custom
// Room visibility should not be custom
_ => RoomPreset::PrivateChat,
});
let mut users = BTreeMap::new();
@ -311,9 +310,8 @@ pub async fn create_room_route(
content: to_raw_value(&power_levels_content)
.expect("to_raw_value always works on serde_json::Value"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
sender_user,
&room_id,
@ -335,9 +333,8 @@ pub async fn create_room_route(
})
.expect("We checked that alias earlier, it must be fine"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
sender_user,
&room_id,
@ -362,9 +359,8 @@ pub async fn create_room_route(
}))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
sender_user,
&room_id,
@ -384,9 +380,8 @@ pub async fn create_room_route(
))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
sender_user,
&room_id,
@ -407,9 +402,8 @@ pub async fn create_room_route(
}))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
sender_user,
&room_id,
@ -425,7 +419,7 @@ pub async fn create_room_route(
})?;
// Implicit state key defaults to ""
pdu_builder.state_key.get_or_insert_with(|| "".to_owned());
pdu_builder.state_key.get_or_insert_with(String::new);
// Silently skip encryption events if they are not allowed
if pdu_builder.event_type == TimelineEventType::RoomEncryption
@ -452,9 +446,8 @@ pub async fn create_room_route(
content: to_raw_value(&RoomNameEventContent::new(name.clone()))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
sender_user,
&room_id,
@ -475,9 +468,8 @@ pub async fn create_room_route(
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
sender_user,
&room_id,
@ -494,10 +486,7 @@ pub async fn create_room_route(
// Homeserver specific stuff
if let Some(alias) = alias {
services()
.rooms
.alias
.set_alias(&alias, &room_id, sender_user)?;
services().rooms.alias.set_alias(&alias, &room_id)?;
}
if body.visibility == room::Visibility::Public {
@ -534,7 +523,7 @@ pub async fn get_room_event_route(
&body.event_id,
)? {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"You don't have permission to view this event.",
));
}
@ -551,7 +540,7 @@ pub async fn get_room_event_route(
///
/// Lists all aliases of the room.
///
/// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable
/// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if `history_visibility` is world readable
pub async fn get_room_aliases_route(
body: Ruma<aliases::v3::Request>,
) -> Result<aliases::v3::Response> {
@ -563,7 +552,7 @@ pub async fn get_room_aliases_route(
.is_joined(sender_user, &body.room_id)?
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"You don't have permission to view this room.",
));
}
@ -636,9 +625,8 @@ pub async fn upgrade_room_route(
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
sender_user,
&body.room_id,
@ -738,9 +726,8 @@ pub async fn upgrade_room_route(
content: to_raw_value(&create_event_content)
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
sender_user,
&replacement_room,
@ -769,7 +756,6 @@ pub async fn upgrade_room_route(
unsigned: None,
state_key: Some(sender_user.to_string()),
redacts: None,
timestamp: None,
},
sender_user,
&replacement_room,
@ -810,9 +796,8 @@ pub async fn upgrade_room_route(
event_type: event_type.to_string().into(),
content: event_content,
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
sender_user,
&replacement_room,
@ -831,7 +816,7 @@ pub async fn upgrade_room_route(
services()
.rooms
.alias
.set_alias(&alias, &replacement_room, sender_user)?;
.set_alias(&alias, &replacement_room)?;
}
// Get the old room power levels
@ -861,9 +846,8 @@ pub async fn upgrade_room_route(
content: to_raw_value(&power_levels_event_content)
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
sender_user,
&body.room_id,
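The recurring change in this file is mechanical: empty-string state keys move from `"".to_owned()` to `String::new()`, and the closure passed to `get_or_insert_with` becomes a plain function reference. A minimal sketch of both idioms outside of Conduit's types (the lint names are my best guess for what the pedantic clippy groups flag here, presumably `manual_string_new` and `redundant_closure`):

```rust
// Illustration only, not Conduit code: the two spellings are equivalent, but
// `String::new()` states the intent directly and avoids the `ToOwned` detour,
// and passing `String::new` itself avoids a redundant closure.
fn main() {
    let mut state_key: Option<String> = None;

    // Instead of `state_key.get_or_insert_with(|| "".to_owned())`:
    state_key.get_or_insert_with(String::new);

    assert_eq!(state_key.as_deref(), Some(""));
}
```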

View file

@ -43,7 +43,7 @@ pub async fn search_events_route(
.is_joined(sender_user, &room_id)?
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"You don't have permission to view this room.",
));
}
@ -89,12 +89,11 @@ pub async fn search_events_route(
.get_pdu_from_id(result)
.ok()?
.filter(|pdu| {
!pdu.is_redacted()
&& services()
.rooms
.state_accessor
.user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id)
.unwrap_or(false)
services()
.rooms
.state_accessor
.user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id)
.unwrap_or(false)
})
.map(|pdu| pdu.to_room_event())
})

View file

@ -3,7 +3,13 @@ use crate::{services, utils, Error, Result, Ruma};
use ruma::{
api::client::{
error::ErrorKind,
session::{get_login_types, login, logout, logout_all},
session::{
get_login_types::{
self,
v3::{ApplicationServiceLoginType, PasswordLoginType},
},
login, logout, logout_all,
},
uiaa::UserIdentifier,
},
UserId,
@ -25,8 +31,8 @@ pub async fn get_login_types_route(
_body: Ruma<get_login_types::v3::Request>,
) -> Result<get_login_types::v3::Response> {
Ok(get_login_types::v3::Response::new(vec![
get_login_types::v3::LoginType::Password(Default::default()),
get_login_types::v3::LoginType::ApplicationService(Default::default()),
get_login_types::v3::LoginType::Password(PasswordLoginType::default()),
get_login_types::v3::LoginType::ApplicationService(ApplicationServiceLoginType::default()),
]))
}
@ -63,7 +69,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
UserId::parse(user)
} else {
warn!("Bad login type: {:?}", &body.login_info);
return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type."));
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
}
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
@ -78,7 +84,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
.users
.password_hash(&user_id)?
.ok_or(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Wrong username or password.",
))?;
@ -93,7 +99,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
if !hash_matches {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Wrong username or password.",
));
}
@ -143,7 +149,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
UserId::parse(user)
} else {
warn!("Bad login type: {:?}", &body.login_info);
return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type."));
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
}
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
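The login-types hunk swaps `Default::default()` for concrete constructors and pulls `PasswordLoginType` and `ApplicationServiceLoginType` into the import list. A small stand-alone sketch of why the explicit form reads better; the types below are local stand-ins, not ruma's, and the lint involved is presumably clippy's `default_trait_access`:

```rust
// Stand-in types for the example; the real ones live in
// `ruma::api::client::session::get_login_types::v3`.
#[derive(Default, Debug)]
struct PasswordLoginType {/* fields elided */}

#[derive(Debug)]
enum LoginType {
    Password(PasswordLoginType),
}

fn main() {
    // `LoginType::Password(Default::default())` compiles too, but hides which
    // type is constructed; the explicit call keeps it visible at the call site.
    let login = LoginType::Password(PasswordLoginType::default());
    println!("{login:?}");
}
```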

View file

@ -1,7 +1,7 @@
use crate::{services, Result, Ruma};
use ruma::api::client::space::get_hierarchy;
/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy``
/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy`
///
/// Paginates over the space tree in a depth-first manner to locate child rooms of a given space.
pub async fn get_hierarchy_route(

View file

@ -10,7 +10,7 @@ use ruma::{
room::canonical_alias::RoomCanonicalAliasEventContent, AnyStateEventContent, StateEventType,
},
serde::Raw,
EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId,
EventId, RoomId, UserId,
};
use tracing::log::warn;
@ -20,7 +20,7 @@ use tracing::log::warn;
///
/// - The only requirement for the content is that it has to be valid json
/// - Tries to send the event into the room, auth rules will determine if it is allowed
/// - If event is new canonical_alias: Rejects if alias is incorrect
/// - If event is new `canonical_alias`: Rejects if alias is incorrect
pub async fn send_state_event_for_key_route(
body: Ruma<send_state_event::v3::Request>,
) -> Result<send_state_event::v3::Response> {
@ -31,12 +31,7 @@ pub async fn send_state_event_for_key_route(
&body.room_id,
&body.event_type,
&body.body.body, // Yes, I hate it too
body.state_key.to_owned(),
if body.appservice_info.is_some() {
body.timestamp
} else {
None
},
body.state_key.clone(),
)
.await?;
@ -50,7 +45,7 @@ pub async fn send_state_event_for_key_route(
///
/// - The only requirement for the content is that it has to be valid json
/// - Tries to send the event into the room, auth rules will determine if it is allowed
/// - If event is new canonical_alias: Rejects if alias is incorrect
/// - If event is new `canonical_alias`: Rejects if alias is incorrect
pub async fn send_state_event_for_empty_key_route(
body: Ruma<send_state_event::v3::Request>,
) -> Result<RumaResponse<send_state_event::v3::Response>> {
@ -59,7 +54,7 @@ pub async fn send_state_event_for_empty_key_route(
// Forbid m.room.encryption if encryption is disabled
if body.event_type == StateEventType::RoomEncryption && !services().globals.allow_encryption() {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Encryption has been disabled",
));
}
@ -69,12 +64,7 @@ pub async fn send_state_event_for_empty_key_route(
&body.room_id,
&body.event_type.to_string().into(),
&body.body.body,
body.state_key.to_owned(),
if body.appservice_info.is_some() {
body.timestamp
} else {
None
},
body.state_key.clone(),
)
.await?;
@ -98,7 +88,7 @@ pub async fn get_state_events_route(
.user_can_see_state_events(sender_user, &body.room_id)?
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"You don't have permission to view the room state.",
));
}
@ -131,7 +121,7 @@ pub async fn get_state_events_for_key_route(
.user_can_see_state_events(sender_user, &body.room_id)?
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"You don't have permission to view the room state.",
));
}
@ -170,7 +160,7 @@ pub async fn get_state_events_for_empty_key_route(
.user_can_see_state_events(sender_user, &body.room_id)?
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"You don't have permission to view the room state.",
));
}
@ -200,7 +190,6 @@ async fn send_state_event_for_key_helper(
event_type: &StateEventType,
json: &Raw<AnyStateEventContent>,
state_key: String,
timestamp: Option<MilliSecondsSinceUnixEpoch>,
) -> Result<Arc<EventId>> {
let sender_user = sender;
@ -225,7 +214,7 @@ async fn send_state_event_for_key_helper(
.is_none()
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"You are only allowed to send canonical_alias \
events when it's aliases already exists",
));
@ -254,7 +243,6 @@ async fn send_state_event_for_key_helper(
unsigned: None,
state_key: Some(state_key),
redacts: None,
timestamp,
},
sender_user,
room_id,

View file

@ -12,7 +12,7 @@ use ruma::{
Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom,
LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice,
},
v4::{SlidingOp, SlidingSyncRoomHero},
v4::SlidingOp,
DeviceLists, UnreadNotificationsCount,
},
uiaa::UiaaResponse,
@ -62,7 +62,7 @@ use tracing::{error, info};
/// - If the user was invited after `since`: A subset of the state of the room at the point of the invite
///
/// For left rooms:
/// - If the user left after `since`: prev_batch token, empty state (TODO: subset of the state at the point of the leave)
/// - If the user left after `since`: `prev_batch` token, empty state (TODO: subset of the state at the point of the leave)
///
/// - Sync is handled in an async task, multiple requests from the same device with the same
/// `since` will be cached
@ -83,7 +83,7 @@ pub async fn sync_events_route(
Entry::Vacant(v) => {
let (tx, rx) = tokio::sync::watch::channel(None);
v.insert((body.since.to_owned(), rx.clone()));
v.insert((body.since.clone(), rx.clone()));
tokio::spawn(sync_helper_wrapper(
sender_user.clone(),
@ -95,7 +95,9 @@ pub async fn sync_events_route(
rx
}
Entry::Occupied(mut o) => {
if o.get().0 != body.since {
if o.get().0 == body.since {
o.get().1.clone()
} else {
let (tx, rx) = tokio::sync::watch::channel(None);
o.insert((body.since.clone(), rx.clone()));
@ -110,8 +112,6 @@ pub async fn sync_events_route(
));
rx
} else {
o.get().1.clone()
}
}
};
@ -198,7 +198,7 @@ async fn sync_helper(
LazyLoadOptions::Enabled {
include_redundant_members: redundant,
} => (true, redundant),
_ => (false, false),
LazyLoadOptions::Disabled => (false, false),
};
let full_state = body.full_state;
@ -376,28 +376,23 @@ async fn sync_helper(
None => HashMap::new(),
};
let left_event_id = match services().rooms.state_accessor.room_state_get_id(
let Some(left_event_id) = services().rooms.state_accessor.room_state_get_id(
&room_id,
&StateEventType::RoomMember,
sender_user.as_str(),
)? {
Some(e) => e,
None => {
error!("Left room but no left state event");
continue;
}
)?
else {
error!("Left room but no left state event");
continue;
};
let left_shortstatehash = match services()
let Some(left_shortstatehash) = services()
.rooms
.state_accessor
.pdu_shortstatehash(&left_event_id)?
{
Some(s) => s,
None => {
error!("Leave event has no state");
continue;
}
else {
error!("Leave event has no state");
continue;
};
let mut left_state_ids = services()
@ -425,12 +420,9 @@ async fn sync_helper(
// TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565
|| *sender_user == state_key
{
let pdu = match services().rooms.timeline.get_pdu(&id)? {
Some(pdu) => pdu,
None => {
error!("Pdu in state not found: {}", id);
continue;
}
let Some(pdu) = services().rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};
left_state_events.push(pdu.to_sync_state_event());
@ -648,13 +640,11 @@ async fn load_joined_room(
// Database queries:
let current_shortstatehash =
if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
s
} else {
error!("Room {} has no state", room_id);
return Err(Error::BadDatabase("Room has no state"));
};
let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)?
else {
error!("Room {} has no state", room_id);
return Err(Error::BadDatabase("Room has no state"));
};
let since_shortstatehash = services()
.rooms
@ -716,7 +706,7 @@ async fn load_joined_room(
.state_cache
.is_invited(&user_id, room_id)?)
{
Ok::<_, Error>(Some(user_id))
Ok::<_, Error>(Some(state_key.clone()))
} else {
Ok(None)
}
@ -788,12 +778,9 @@ async fn load_joined_room(
.get_statekey_from_short(shortstatekey)?;
if event_type != StateEventType::RoomMember {
let pdu = match services().rooms.timeline.get_pdu(&id)? {
Some(pdu) => pdu,
None => {
error!("Pdu in state not found: {}", id);
continue;
}
let Some(pdu) = services().rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};
state_events.push(pdu);
@ -807,12 +794,9 @@ async fn load_joined_room(
// TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565
|| *sender_user == state_key
{
let pdu = match services().rooms.timeline.get_pdu(&id)? {
Some(pdu) => pdu,
None => {
error!("Pdu in state not found: {}", id);
continue;
}
let Some(pdu) = services().rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};
// This check is in case a bad user ID made it into the database
@ -877,12 +861,9 @@ async fn load_joined_room(
for (key, id) in current_state_ids {
if full_state || since_state_ids.get(&key) != Some(&id) {
let pdu = match services().rooms.timeline.get_pdu(&id)? {
Some(pdu) => pdu,
None => {
error!("Pdu in state not found: {}", id);
continue;
}
let Some(pdu) = services().rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};
if pdu.kind == TimelineEventType::RoomMember {
@ -1248,7 +1229,7 @@ pub async fn sync_events_v4_route(
sender_user.clone(),
sender_device.clone(),
conn_id.clone(),
)
);
}
}
@ -1286,13 +1267,12 @@ pub async fn sync_events_v4_route(
);
for room_id in &all_joined_rooms {
let current_shortstatehash =
if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
s
} else {
error!("Room {} has no state", room_id);
continue;
};
let Some(current_shortstatehash) =
services().rooms.state.get_room_shortstatehash(room_id)?
else {
error!("Room {} has no state", room_id);
continue;
};
let since_shortstatehash = services()
.rooms
@ -1354,12 +1334,9 @@ pub async fn sync_events_v4_route(
for (key, id) in current_state_ids {
if since_state_ids.get(&key) != Some(&id) {
let pdu = match services().rooms.timeline.get_pdu(&id)? {
Some(pdu) => pdu,
None => {
error!("Pdu in state not found: {}", id);
continue;
}
let Some(pdu) = services().rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};
if pdu.kind == TimelineEventType::RoomMember {
if let Some(state_key) = &pdu.state_key {
@ -1572,7 +1549,7 @@ pub async fn sync_events_v4_route(
sender_user.clone(),
sender_device.clone(),
conn_id.clone(),
body.room_subscriptions.clone(),
body.room_subscriptions,
);
}
@ -1599,10 +1576,10 @@ pub async fn sync_events_v4_route(
}))
})?
.or_else(|| {
if roomsince != &0 {
Some(roomsince.to_string())
} else {
if roomsince == &0 {
None
} else {
Some(roomsince.to_string())
}
});
@ -1613,7 +1590,7 @@ pub async fn sync_events_v4_route(
let required_state = required_state_request
.iter()
.flat_map(|state| {
.filter_map(|state| {
services()
.rooms
.state_accessor
@ -1631,44 +1608,40 @@ pub async fn sync_events_v4_route(
.room_members(room_id)
.filter_map(|r| r.ok())
.filter(|member| member != &sender_user)
.flat_map(|member| {
.filter_map(|member| {
services()
.rooms
.state_accessor
.get_member(room_id, &member)
.ok()
.flatten()
.map(|memberevent| SlidingSyncRoomHero {
user_id: member,
name: memberevent.displayname,
avatar: memberevent.avatar_url,
.map(|memberevent| {
(
memberevent
.displayname
.unwrap_or_else(|| member.to_string()),
memberevent.avatar_url,
)
})
})
.take(5)
.collect::<Vec<_>>();
let name = match &heroes[..] {
[] => None,
[only] => Some(
only.name
.clone()
.unwrap_or_else(|| only.user_id.to_string()),
),
[only] => Some(only.0.clone()),
[firsts @ .., last] => Some(
firsts
.iter()
.map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string()))
.map(|h| h.0.clone())
.collect::<Vec<_>>()
.join(", ")
+ " and "
+ &last
.name
.clone()
.unwrap_or_else(|| last.user_id.to_string()),
+ &last.0,
),
};
let avatar = if let [only] = &heroes[..] {
only.avatar.clone()
only.1.clone()
} else {
None
};
@ -1729,16 +1702,6 @@ pub async fn sync_events_v4_route(
),
num_live: None, // Count events in timeline greater than global sync counter
timestamp: None,
heroes: if body
.room_subscriptions
.get(room_id)
.map(|sub| sub.include_heroes.unwrap_or_default())
.unwrap_or_default()
{
Some(heroes)
} else {
None
},
},
);
}
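Most of the churn in this sync handler has the same shape: a `match` whose `None` arm logged an error and then `continue`d (or returned) collapses into `let ... else`. A minimal, self-contained sketch of the pattern with invented names (`lookup`, `pdu-{id}`), not Conduit's real services:

```rust
// `let ... else` keeps the happy path unindented; the else block must diverge
// (here via `continue`), which is exactly what the old `match` arms did.
fn lookup(id: u64) -> Option<String> {
    (id % 2 == 0).then(|| format!("pdu-{id}"))
}

fn main() {
    for id in 0..4u64 {
        // Old shape: `let pdu = match lookup(id) { Some(p) => p, None => { ...; continue; } };`
        let Some(pdu) = lookup(id) else {
            eprintln!("Pdu in state not found: {id}");
            continue;
        };
        println!("processing {pdu}");
    }
}
```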

View file

@ -24,18 +24,19 @@ pub async fn update_tag_route(
RoomAccountDataEventType::Tag,
)?;
let mut tags_event = event
.map(|e| {
serde_json::from_str(e.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))
})
.unwrap_or_else(|| {
let mut tags_event = event.map_or_else(
|| {
Ok(TagEvent {
content: TagEventContent {
tags: BTreeMap::new(),
},
})
})?;
},
|e| {
serde_json::from_str(e.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))
},
)?;
tags_event
.content
@ -68,18 +69,19 @@ pub async fn delete_tag_route(
RoomAccountDataEventType::Tag,
)?;
let mut tags_event = event
.map(|e| {
serde_json::from_str(e.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))
})
.unwrap_or_else(|| {
let mut tags_event = event.map_or_else(
|| {
Ok(TagEvent {
content: TagEventContent {
tags: BTreeMap::new(),
},
})
})?;
},
|e| {
serde_json::from_str(e.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))
},
)?;
tags_event.content.tags.remove(&body.tag.clone().into());
@ -107,18 +109,19 @@ pub async fn get_tags_route(body: Ruma<get_tags::v3::Request>) -> Result<get_tag
RoomAccountDataEventType::Tag,
)?;
let tags_event = event
.map(|e| {
serde_json::from_str(e.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))
})
.unwrap_or_else(|| {
let tags_event = event.map_or_else(
|| {
Ok(TagEvent {
content: TagEventContent {
tags: BTreeMap::new(),
},
})
})?;
},
|e| {
serde_json::from_str(e.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))
},
)?;
Ok(get_tags::v3::Response {
tags: tags_event.content.tags,
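All three tag routes replace a `.map(..).unwrap_or_else(..)` chain with `Option::map_or_else`, which takes the fallback closure first and the mapping closure second. A hedged sketch with invented types; `parse` below merely stands in for deserializing the stored account-data event, and the motivating lint is likely clippy's `map_unwrap_or`:

```rust
#[derive(Default)]
struct TagEvent {
    tags: Vec<String>,
}

// Stand-in for `serde_json::from_str` on the stored account-data event.
fn parse(raw: &str) -> Result<TagEvent, String> {
    if raw.is_empty() {
        Err("Invalid account data event in db.".to_owned())
    } else {
        Ok(TagEvent { tags: vec![raw.to_owned()] })
    }
}

fn load(event: Option<&str>) -> Result<TagEvent, String> {
    // Fallback (no stored event means empty tags) first, then the parsing closure.
    event.map_or_else(|| Ok(TagEvent::default()), parse)
}

fn main() {
    assert!(load(None).is_ok());
    assert_eq!(load(Some("u.work")).unwrap().tags.len(), 1);
    assert!(load(Some("")).is_err());
}
```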

View file

@ -63,7 +63,7 @@ pub async fn send_event_to_device_route(
event.deserialize_as().map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
})?,
)?
)?;
}
DeviceIdOrAllDevices::AllDevices => {

View file

@ -17,7 +17,7 @@ pub async fn create_typing_event_route(
.is_joined(sender_user, &body.room_id)?
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"You are not in this room.",
));
}

View file

@ -27,10 +27,7 @@ pub async fn get_supported_versions_route(
"v1.4".to_owned(),
"v1.5".to_owned(),
],
unstable_features: BTreeMap::from_iter([
("org.matrix.e2e_cross_signing".to_owned(), true),
("org.matrix.msc3916.stable".to_owned(), true),
]),
unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
};
Ok(resp)

View file

@ -17,7 +17,12 @@ pub async fn turn_server_route(
let turn_secret = services().globals.turn_secret().clone();
let (username, password) = if !turn_secret.is_empty() {
let (username, password) = if turn_secret.is_empty() {
(
services().globals.turn_username().clone(),
services().globals.turn_password().clone(),
)
} else {
let expiry = SecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(services().globals.turn_ttl()),
)
@ -32,11 +37,6 @@ pub async fn turn_server_route(
let password: String = general_purpose::STANDARD.encode(mac.finalize().into_bytes());
(username, password)
} else {
(
services().globals.turn_username().clone(),
services().globals.turn_password().clone(),
)
};
Ok(get_turn_server_info::v3::Response {

View file

@ -2,22 +2,20 @@ use std::{collections::BTreeMap, iter::FromIterator, str};
use axum::{
async_trait,
body::Body,
extract::{FromRequest, Path},
body::{Full, HttpBody},
extract::{rejection::TypedHeaderRejectionReason, FromRequest, Path, TypedHeader},
headers::{
authorization::{Bearer, Credentials},
Authorization,
},
response::{IntoResponse, Response},
RequestExt, RequestPartsExt,
BoxError, RequestExt, RequestPartsExt,
};
use axum_extra::{
headers::{authorization::Bearer, Authorization},
typed_header::TypedHeaderRejectionReason,
TypedHeader,
};
use bytes::{BufMut, BytesMut};
use bytes::{Buf, BufMut, Bytes, BytesMut};
use http::{Request, StatusCode};
use ruma::{
api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
server_util::authorization::XMatrix,
CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedUserId, UserId,
CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
};
use serde::Deserialize;
use tracing::{debug, error, warn};
@ -33,33 +31,37 @@ enum Token {
}
#[async_trait]
impl<T, S> FromRequest<S> for Ruma<T>
impl<T, S, B> FromRequest<S, B> for Ruma<T>
where
T: IncomingRequest,
B: HttpBody + Send + 'static,
B::Data: Send,
B::Error: Into<BoxError>,
{
type Rejection = Error;
async fn from_request(req: Request<Body>, _state: &S) -> Result<Self, Self::Rejection> {
async fn from_request(req: Request<B>, _state: &S) -> Result<Self, Self::Rejection> {
#[derive(Deserialize)]
struct QueryParams {
access_token: Option<String>,
user_id: Option<String>,
}
let (mut parts, mut body) = {
let limited_req = req.with_limited_body();
let (parts, body) = limited_req.into_parts();
let body = axum::body::to_bytes(
body,
services()
.globals
.max_request_size()
.try_into()
.unwrap_or(usize::MAX),
)
.await
.map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
(parts, body)
let (mut parts, mut body) = match req.with_limited_body() {
Ok(limited_req) => {
let (parts, body) = limited_req.into_parts();
let body = to_bytes(body)
.await
.map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
(parts, body)
}
Err(original_req) => {
let (parts, body) = original_req.into_parts();
let body = to_bytes(body)
.await
.map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
(parts, body)
}
};
let metadata = T::METADATA;
@ -100,15 +102,10 @@ where
let (sender_user, sender_device, sender_servername, appservice_info) =
match (metadata.authentication, token) {
(_, Token::Invalid) => {
// OpenID endpoint uses a query param with the same name, drop this once query params for user auth are removed from the spec
if query_params.access_token.is_some() {
(None, None, None, None)
} else {
return Err(Error::BadRequest(
ErrorKind::UnknownToken { soft_logout: false },
"Unknown access token.",
));
}
return Err(Error::BadRequest(
ErrorKind::UnknownToken { soft_logout: false },
"Unknown access token.",
))
}
(AuthScheme::AccessToken, Token::Appservice(info)) => {
let user_id = query_params
@ -135,7 +132,7 @@ where
if !services().users.exists(&user_id)? {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"User does not exist.",
));
}
@ -175,7 +172,7 @@ where
_ => "Unknown header-related error",
};
Error::BadRequest(ErrorKind::forbidden(), msg)
Error::BadRequest(ErrorKind::Forbidden, msg)
})?;
if let Some(dest) = x_matrix.destination {
@ -189,17 +186,12 @@ where
let origin_signatures = BTreeMap::from_iter([(
x_matrix.key.clone(),
CanonicalJsonValue::String(x_matrix.sig.to_string()),
CanonicalJsonValue::String(x_matrix.sig),
)]);
let signatures = BTreeMap::from_iter([(
x_matrix.origin.as_str().to_owned(),
CanonicalJsonValue::Object(
origin_signatures
.into_iter()
.map(|(k, v)| (k.to_string(), v))
.collect(),
),
CanonicalJsonValue::Object(origin_signatures),
)]);
let mut request_map = BTreeMap::from_iter([
@ -234,7 +226,7 @@ where
let keys_result = services()
.rooms
.event_handler
.fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_string()], false)
.fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.clone()])
.await;
let keys = match keys_result {
@ -242,25 +234,14 @@ where
Err(e) => {
warn!("Failed to fetch signing keys: {}", e);
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Failed to fetch signing keys.",
));
}
};
// Only verify_keys that are currently valid should be used for validating requests
// as per MSC4029
let pub_key_map = BTreeMap::from_iter([(
x_matrix.origin.as_str().to_owned(),
if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() {
keys.verify_keys
.into_iter()
.map(|(id, key)| (id, key.key))
.collect()
} else {
BTreeMap::new()
},
)]);
let pub_key_map =
BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]);
match ruma::signatures::verify_json(&pub_key_map, &request_map) {
Ok(()) => (None, None, Some(x_matrix.origin), None),
@ -279,7 +260,7 @@ where
}
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Failed to verify X-Matrix signatures.",
));
}
@ -353,17 +334,130 @@ where
sender_user,
sender_device,
sender_servername,
appservice_info,
json_body,
appservice_info,
})
}
}
struct XMatrix {
destination: Option<OwnedServerName>,
origin: OwnedServerName,
key: String, // KeyName?
sig: String,
}
impl Credentials for XMatrix {
const SCHEME: &'static str = "X-Matrix";
fn decode(value: &http::HeaderValue) -> Option<Self> {
debug_assert!(
value.as_bytes().starts_with(b"X-Matrix "),
"HeaderValue to decode should start with \"X-Matrix ..\", received = {value:?}",
);
let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..])
.ok()?
.trim_start();
let mut origin = None;
let mut key = None;
let mut sig = None;
let mut destination = None;
for entry in parameters.split_terminator(',') {
let (name, value) = entry.split_once('=')?;
// It's not at all clear why some fields are quoted and others not in the spec,
// let's simply accept either form for every field.
let value = value
.strip_prefix('"')
.and_then(|rest| rest.strip_suffix('"'))
.unwrap_or(value);
// FIXME: Catch multiple fields of the same name
match name {
"origin" => origin = Some(value.try_into().ok()?),
"key" => key = Some(value.to_owned()),
"sig" => sig = Some(value.to_owned()),
"destination" => destination = Some(value.try_into().ok()?),
_ => debug!(
"Unexpected field `{}` in X-Matrix Authorization header",
name
),
}
}
Some(Self {
destination,
origin: origin?,
key: key?,
sig: sig?,
})
}
fn encode(&self) -> http::HeaderValue {
todo!()
}
}
impl<T: OutgoingResponse> IntoResponse for RumaResponse<T> {
fn into_response(self) -> Response {
match self.0.try_into_http_response::<BytesMut>() {
Ok(res) => res.map(BytesMut::freeze).map(Body::from).into_response(),
Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(),
Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
}
}
}
// copied from hyper under the following license:
// Copyright (c) 2014-2021 Sean McArthur
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
pub(crate) async fn to_bytes<T>(body: T) -> Result<Bytes, T::Error>
where
T: HttpBody,
{
futures_util::pin_mut!(body);
// If there's only 1 chunk, we can just return Buf::to_bytes()
let mut first = if let Some(buf) = body.data().await {
buf?
} else {
return Ok(Bytes::new());
};
let second = if let Some(buf) = body.data().await {
buf?
} else {
return Ok(first.copy_to_bytes(first.remaining()));
};
// With more than 1 buf, we gotta flatten into a Vec first.
let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize;
let mut vec = Vec::with_capacity(cap);
vec.put(first);
vec.put(second);
while let Some(buf) = body.data().await {
vec.put(buf?);
}
Ok(vec.into())
}

File diff suppressed because it is too large

View file

@ -5,13 +5,13 @@ use clap::Parser;
/// Returns the current version of the crate with extra info if supplied
///
/// Set the environment variable `CONDUIT_VERSION_EXTRA` to any UTF-8 string to
/// include it in parenthesis after the SemVer version. A common value are git
/// include it in parenthesis after the `SemVer` version. A common value are git
/// commit hashes.
fn version() -> String {
let cargo_pkg_version = env!("CARGO_PKG_VERSION");
match option_env!("CONDUIT_VERSION_EXTRA") {
Some(x) => format!("{} ({})", cargo_pkg_version, x),
Some(x) => format!("{cargo_pkg_version} ({x})"),
None => cargo_pkg_version.to_owned(),
}
}

View file

@ -47,8 +47,6 @@ pub struct Config {
#[serde(default = "false_fn")]
pub allow_registration: bool,
pub registration_token: Option<String>,
#[serde(default = "default_openid_token_ttl")]
pub openid_token_ttl: u64,
#[serde(default = "true_fn")]
pub allow_encryption: bool,
#[serde(default = "false_fn")]
@ -59,7 +57,7 @@ pub struct Config {
pub allow_unstable_room_versions: bool,
#[serde(default = "default_default_room_version")]
pub default_room_version: RoomVersionId,
#[serde(default, flatten)]
#[serde(default)]
pub well_known: WellKnownConfig,
#[serde(default = "false_fn")]
pub allow_jaeger: bool,
@ -86,6 +84,7 @@ pub struct Config {
pub emergency_password: Option<String>,
#[serde(flatten)]
#[allow(clippy::zero_sized_map_values)]
pub catchall: BTreeMap<String, IgnoredAny>,
}
@ -97,9 +96,7 @@ pub struct TlsConfig {
#[derive(Clone, Debug, Deserialize, Default)]
pub struct WellKnownConfig {
#[serde(rename = "well_known_client")]
pub client: Option<Url>,
#[serde(rename = "well_known_server")]
pub server: Option<OwnedServerName>,
}
@ -137,7 +134,7 @@ impl Config {
Some(server_name) => server_name.to_owned(),
None => {
if self.server_name.port().is_some() {
self.server_name.to_owned()
self.server_name.clone()
} else {
format!("{}:443", self.server_name.host())
.try_into()
@ -306,10 +303,6 @@ fn default_turn_ttl() -> u64 {
60 * 60 * 24
}
fn default_openid_token_ttl() -> u64 {
60 * 60
}
// I know, it's a great name
pub fn default_default_room_version() -> RoomVersionId {
RoomVersionId::V10
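One of the config hunks toggles `flatten` on the `well_known` field. A rough serde sketch of what that means for the accepted TOML shape; the struct and field names mirror the hunk, but `Option<String>` stands in for the real `Url` and `OwnedServerName` types, and the `toml` crate is assumed to be available:

```rust
use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct Config {
    server_name: String,
    // With `flatten`, the renamed keys below are read from the top level of the
    // config file; without it they would sit in a nested `[well_known]` table.
    #[serde(default, flatten)]
    well_known: WellKnownConfig,
}

#[derive(Deserialize, Debug, Default)]
struct WellKnownConfig {
    #[serde(rename = "well_known_client")]
    client: Option<String>,
    #[serde(rename = "well_known_server")]
    server: Option<String>,
}

fn main() {
    let cfg: Config = toml::from_str(
        "server_name = \"example.org\"\nwell_known_client = \"https://example.org\"\n",
    )
    .expect("valid config");
    println!("{cfg:?}");
}
```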

View file

@ -67,7 +67,7 @@ impl PartialProxyConfig {
let mut excluded_because = None; // most specific reason it was excluded
if self.include.is_empty() {
// treat empty include list as `*`
included_because = Some(&WildCardedDomain::WildCard)
included_because = Some(&WildCardedDomain::WildCard);
}
for wc_domain in &self.include {
if wc_domain.matches(domain) {
@ -127,7 +127,7 @@ impl std::str::FromStr for WildCardedDomain {
Ok(if s.starts_with("*.") {
WildCardedDomain::WildCarded(s[1..].to_owned())
} else if s == "*" {
WildCardedDomain::WildCarded("".to_owned())
WildCardedDomain::WildCarded(String::new())
} else {
WildCardedDomain::Exact(s.to_owned())
})

View file

@ -32,7 +32,7 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O
let mut db_opts = rocksdb::Options::default();
db_opts.set_block_based_table_factory(&block_based_options);
db_opts.create_if_missing(true);
db_opts.increase_parallelism(num_cpus::get() as i32);
db_opts.increase_parallelism(num_cpus::get().try_into().unwrap_or(i32::MAX));
db_opts.set_max_open_files(max_open_files);
db_opts.set_compression_type(rocksdb::DBCompressionType::Lz4);
db_opts.set_bottommost_compression_type(rocksdb::DBCompressionType::Zstd);
@ -41,7 +41,7 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O
// https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning
db_opts.set_level_compaction_dynamic_level_bytes(true);
db_opts.set_max_background_jobs(6);
db_opts.set_bytes_per_sync(1048576);
db_opts.set_bytes_per_sync(1_048_576);
// https://github.com/facebook/rocksdb/issues/849
db_opts.set_keep_log_file_num(100);
@ -112,6 +112,7 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
fn memory_usage(&self) -> Result<String> {
let stats =
rocksdb::perf::get_memory_usage_stats(Some(&[&self.rocks]), Some(&[&self.cache]))?;
#[allow(clippy::cast_precision_loss)]
Ok(format!(
"Approximate memory usage of all the mem-tables: {:.3} MB\n\
Approximate memory usage of un-flushed mem-tables: {:.3} MB\n\
@ -219,7 +220,7 @@ impl KvTree for RocksDbEngineTree<'_> {
let lock = self.write_lock.write().unwrap();
let old = self.db.rocks.get_cf_opt(&self.cf(), key, &readoptions)?;
let new = utils::increment(old.as_deref()).unwrap();
let new = utils::increment(old.as_deref());
self.db
.rocks
.put_cf_opt(&self.cf(), key, &new, &writeoptions)?;
@ -236,7 +237,7 @@ impl KvTree for RocksDbEngineTree<'_> {
for key in iter {
let old = self.db.rocks.get_cf_opt(&self.cf(), &key, &readoptions)?;
let new = utils::increment(old.as_deref()).unwrap();
let new = utils::increment(old.as_deref());
self.db
.rocks
.put_cf_opt(&self.cf(), key, new, &writeoptions)?;
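The RocksDB options hunk swaps a raw `as i32` cast of the CPU count for a checked conversion (`increase_parallelism` wants an `i32`). A tiny sketch of the saturating form, independent of RocksDB:

```rust
// `as` would silently truncate or wrap an out-of-range value; `try_into`
// makes the fallback explicit and saturates at `i32::MAX` instead.
fn parallelism(cpus: usize) -> i32 {
    cpus.try_into().unwrap_or(i32::MAX)
}

fn main() {
    assert_eq!(parallelism(8), 8);
    assert_eq!(parallelism(usize::MAX), i32::MAX);
}
```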

View file

@ -88,6 +88,7 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
// 1. convert MB to KiB
// 2. divide by permanent connections + permanent iter connections + write connection
// 3. round down to nearest integer
#[allow(clippy::cast_precision_loss)]
let cache_size_per_thread: u32 = ((config.db_cache_capacity_mb * 1024.0)
/ ((num_cpus::get().max(1) * 2) + 1) as f64)
as u32;
@ -217,8 +218,7 @@ impl KvTree for SqliteTable {
guard.execute("BEGIN", [])?;
for key in iter {
let old = self.get_with_guard(&guard, &key)?;
let new = crate::utils::increment(old.as_deref())
.expect("utils::increment always returns Some");
let new = crate::utils::increment(old.as_deref());
self.insert_with_guard(&guard, &key, &new)?;
}
guard.execute("COMMIT", [])?;
@ -308,8 +308,7 @@ impl KvTree for SqliteTable {
let old = self.get_with_guard(&guard, key)?;
let new =
crate::utils::increment(old.as_deref()).expect("utils::increment always returns Some");
let new = crate::utils::increment(old.as_deref());
self.insert_with_guard(&guard, key, &new)?;

View file

@ -20,7 +20,7 @@ impl service::account_data::Data for KeyValueDatabase {
data: &serde_json::Value,
) -> Result<()> {
let mut prefix = room_id
.map(|r| r.to_string())
.map(ToString::to_string)
.unwrap_or_default()
.as_bytes()
.to_vec();
@ -70,7 +70,7 @@ impl service::account_data::Data for KeyValueDatabase {
kind: RoomAccountDataEventType,
) -> Result<Option<Box<serde_json::value::RawValue>>> {
let mut key = room_id
.map(|r| r.to_string())
.map(ToString::to_string)
.unwrap_or_default()
.as_bytes()
.to_vec();
@ -105,7 +105,7 @@ impl service::account_data::Data for KeyValueDatabase {
let mut userdata = HashMap::new();
let mut prefix = room_id
.map(|r| r.to_string())
.map(ToString::to_string)
.unwrap_or_default()
.as_bytes()
.to_vec();

View file

@ -1,19 +1,15 @@
use std::collections::HashMap;
use std::collections::{BTreeMap, HashMap};
use async_trait::async_trait;
use futures_util::{stream::FuturesUnordered, StreamExt};
use lru_cache::LruCache;
use ruma::{
api::federation::discovery::{OldVerifyKey, ServerSigningKeys},
api::federation::discovery::{ServerSigningKeys, VerifyKey},
signatures::Ed25519KeyPair,
DeviceId, ServerName, UserId,
DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId,
};
use crate::{
database::KeyValueDatabase,
service::{self, globals::SigningKeys},
services, utils, Error, Result,
};
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
pub const COUNTER: &[u8] = b"c";
pub const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u";
@ -140,7 +136,7 @@ impl service::globals::Data for KeyValueDatabase {
}
fn cleanup(&self) -> Result<()> {
self._db.cleanup()
self.db.cleanup()
}
fn memory_usage(&self) -> String {
@ -164,7 +160,7 @@ our_real_users_cache: {our_real_users_cache}
appservice_in_room_cache: {appservice_in_room_cache}
lasttimelinecount_cache: {lasttimelinecount_cache}\n"
);
if let Ok(db_stats) = self._db.memory_usage() {
if let Ok(db_stats) = self.db.memory_usage() {
response += &db_stats;
}
@ -213,7 +209,7 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
self.global.insert(b"keypair", &keypair)?;
Ok::<_, Error>(keypair)
},
|s| Ok(s.to_vec()),
|s| Ok(s.clone()),
)?;
let mut parts = keypair_bytes.splitn(2, |&b| b == 0xff);
@ -241,97 +237,63 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
self.global.remove(b"keypair")
}
fn add_signing_key_from_trusted_server(
fn add_signing_key(
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
) -> Result<SigningKeys> {
let prev_keys = self.server_signingkeys.get(origin.as_bytes())?;
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
// Not atomic, but this is not critical
let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;
Ok(
if let Some(mut prev_keys) =
prev_keys.and_then(|keys| serde_json::from_slice::<ServerSigningKeys>(&keys).ok())
{
let ServerSigningKeys {
verify_keys,
old_verify_keys,
..
} = new_keys;
let mut keys = signingkeys
.and_then(|keys| serde_json::from_slice(&keys).ok())
.unwrap_or_else(|| {
// Just insert "now", it doesn't matter
ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now())
});
prev_keys.verify_keys.extend(verify_keys);
prev_keys.old_verify_keys.extend(old_verify_keys);
prev_keys.valid_until_ts = new_keys.valid_until_ts;
let ServerSigningKeys {
verify_keys,
old_verify_keys,
..
} = new_keys;
self.server_signingkeys.insert(
origin.as_bytes(),
&serde_json::to_vec(&prev_keys).expect("serversigningkeys can be serialized"),
)?;
keys.verify_keys.extend(verify_keys);
keys.old_verify_keys.extend(old_verify_keys);
prev_keys.into()
} else {
self.server_signingkeys.insert(
origin.as_bytes(),
&serde_json::to_vec(&new_keys).expect("serversigningkeys can be serialized"),
)?;
self.server_signingkeys.insert(
origin.as_bytes(),
&serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"),
)?;
new_keys.into()
},
)
}
let mut tree = keys.verify_keys;
tree.extend(
keys.old_verify_keys
.into_iter()
.map(|old| (old.0, VerifyKey::new(old.1.key))),
);
fn add_signing_key_from_origin(
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
) -> Result<SigningKeys> {
let prev_keys = self.server_signingkeys.get(origin.as_bytes())?;
Ok(
if let Some(mut prev_keys) =
prev_keys.and_then(|keys| serde_json::from_slice::<ServerSigningKeys>(&keys).ok())
{
let ServerSigningKeys {
verify_keys,
old_verify_keys,
..
} = new_keys;
// Moving `verify_keys` no longer present to `old_verify_keys`
for (key_id, key) in prev_keys.verify_keys {
if !verify_keys.contains_key(&key_id) {
prev_keys
.old_verify_keys
.insert(key_id, OldVerifyKey::new(prev_keys.valid_until_ts, key.key));
}
}
prev_keys.verify_keys = verify_keys;
prev_keys.old_verify_keys.extend(old_verify_keys);
prev_keys.valid_until_ts = new_keys.valid_until_ts;
self.server_signingkeys.insert(
origin.as_bytes(),
&serde_json::to_vec(&prev_keys).expect("serversigningkeys can be serialized"),
)?;
prev_keys.into()
} else {
self.server_signingkeys.insert(
origin.as_bytes(),
&serde_json::to_vec(&new_keys).expect("serversigningkeys can be serialized"),
)?;
new_keys.into()
},
)
Ok(tree)
}
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>> {
fn signing_keys_for(
&self,
origin: &ServerName,
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
let signingkeys = self
.server_signingkeys
.get(origin.as_bytes())?
.and_then(|bytes| serde_json::from_slice::<SigningKeys>(&bytes).ok());
.and_then(|bytes| serde_json::from_slice(&bytes).ok())
.map_or_else(BTreeMap::new, |keys: ServerSigningKeys| {
let mut tree = keys.verify_keys;
tree.extend(
keys.old_verify_keys
.into_iter()
.map(|old| (old.0, VerifyKey::new(old.1.key))),
);
tree
});
Ok(signingkeys)
}
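One side of this hunk folds a server's `old_verify_keys` back into the `verify_keys` map before returning it. A rough stand-alone sketch of that merge with invented types; ruma's `VerifyKey` and `OldVerifyKey` carry Base64 key material and, for old keys, an expiry timestamp, while plain `String`s stand in here:

```rust
use std::collections::BTreeMap;

struct VerifyKey {
    key: String,
}

struct OldVerifyKey {
    key: String, // the real type also records when the key expired
}

// Merge current and expired keys into one map keyed by key ID, as the
// signing-key lookup above does before handing the result to verification.
fn flatten(
    verify_keys: BTreeMap<String, VerifyKey>,
    old_verify_keys: BTreeMap<String, OldVerifyKey>,
) -> BTreeMap<String, VerifyKey> {
    let mut tree = verify_keys;
    tree.extend(
        old_verify_keys
            .into_iter()
            .map(|(id, old)| (id, VerifyKey { key: old.key })),
    );
    tree
}

fn main() {
    let mut current = BTreeMap::new();
    current.insert("ed25519:a".to_owned(), VerifyKey { key: "current".to_owned() });
    let mut old = BTreeMap::new();
    old.insert("ed25519:b".to_owned(), OldVerifyKey { key: "rotated".to_owned() });

    let merged = flatten(current, old);
    assert_eq!(merged.len(), 2);
    assert_eq!(merged["ed25519:b"].key, "rotated");
}
```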

View file

@ -1,4 +1,4 @@
use ruma::{api::client::error::ErrorKind, http_headers::ContentDisposition};
use ruma::api::client::error::ErrorKind;
use crate::{database::KeyValueDatabase, service, utils, Error, Result};
@ -8,7 +8,7 @@ impl service::media::Data for KeyValueDatabase {
mxc: String,
width: u32,
height: u32,
content_disposition: &ContentDisposition,
content_disposition: Option<&str>,
content_type: Option<&str>,
) -> Result<Vec<u8>> {
let mut key = mxc.as_bytes().to_vec();
@ -16,7 +16,12 @@ impl service::media::Data for KeyValueDatabase {
key.extend_from_slice(&width.to_be_bytes());
key.extend_from_slice(&height.to_be_bytes());
key.push(0xff);
key.extend_from_slice(content_disposition.to_string().as_bytes());
key.extend_from_slice(
content_disposition
.as_ref()
.map(|f| f.as_bytes())
.unwrap_or_default(),
);
key.push(0xff);
key.extend_from_slice(
content_type
@ -35,7 +40,7 @@ impl service::media::Data for KeyValueDatabase {
mxc: String,
width: u32,
height: u32,
) -> Result<(ContentDisposition, Option<String>, Vec<u8>)> {
) -> Result<(Option<String>, Option<String>, Vec<u8>)> {
let mut prefix = mxc.as_bytes().to_vec();
prefix.push(0xff);
prefix.extend_from_slice(&width.to_be_bytes());
@ -63,9 +68,15 @@ impl service::media::Data for KeyValueDatabase {
.next()
.ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;
let content_disposition = content_disposition_bytes.try_into().unwrap_or_else(|_| {
ContentDisposition::new(ruma::http_headers::ContentDispositionType::Inline)
});
let content_disposition = if content_disposition_bytes.is_empty() {
None
} else {
Some(
utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
Error::bad_database("Content Disposition in mediaid_file is invalid unicode.")
})?,
)
};
Ok((content_disposition, content_type, key))
}
}

View file

@ -22,10 +22,7 @@ impl service::pusher::Data for KeyValueDatabase {
let mut key = sender.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(ids.pushkey.as_bytes());
self.senderkey_pusher
.remove(&key)
.map(|_| ())
.map_err(Into::into)
self.senderkey_pusher.remove(&key).map_err(Into::into)
}
}
}

View file

@ -1,15 +1,9 @@
use ruma::{
api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId,
UserId,
};
use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
impl service::rooms::alias::Data for KeyValueDatabase {
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> {
// Comes first as we don't want a stuck alias
self.alias_userid
.insert(alias.alias().as_bytes(), user_id.as_bytes())?;
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> {
self.alias_roomid
.insert(alias.alias().as_bytes(), room_id.as_bytes())?;
let mut aliasid = room_id.as_bytes().to_vec();
@ -21,20 +15,20 @@ impl service::rooms::alias::Data for KeyValueDatabase {
fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> {
if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? {
let mut prefix = room_id.to_vec();
let mut prefix = room_id.clone();
prefix.push(0xff);
for (key, _) in self.aliasid_alias.scan_prefix(prefix) {
self.aliasid_alias.remove(&key)?;
}
self.alias_roomid.remove(alias.alias().as_bytes())?;
self.alias_userid.remove(alias.alias().as_bytes())
} else {
Err(Error::BadRequest(
return Err(Error::BadRequest(
ErrorKind::NotFound,
"Alias does not exist.",
))
));
}
Ok(())
}
fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedRoomId>> {
@ -63,16 +57,4 @@ impl service::rooms::alias::Data for KeyValueDatabase {
.map_err(|_| Error::bad_database("Invalid alias in aliasid_alias."))
}))
}
fn who_created_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedUserId>> {
self.alias_userid
.get(alias.alias().as_bytes())?
.map(|bytes| {
UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("User ID in alias_userid is invalid unicode.")
})?)
.map_err(|_| Error::bad_database("User ID in alias_roomid is invalid."))
})
.transpose()
}
}

View file

@ -2,46 +2,24 @@ use ruma::RoomId;
use crate::{database::KeyValueDatabase, service, services, utils, Result};
/// Splits a string into tokens used as keys in the search inverted index
///
/// This may be used to tokenize both message bodies (for indexing) or search
/// queries (for querying).
fn tokenize(body: &str) -> impl Iterator<Item = String> + '_ {
body.split_terminator(|c: char| !c.is_alphanumeric())
.filter(|s| !s.is_empty())
.filter(|word| word.len() <= 50)
.map(str::to_lowercase)
}
impl service::rooms::search::Data for KeyValueDatabase {
fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
let mut batch = tokenize(message_body).map(|word| {
let mut key = shortroomid.to_be_bytes().to_vec();
key.extend_from_slice(word.as_bytes());
key.push(0xff);
key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here
(key, Vec::new())
});
let mut batch = message_body
.split_terminator(|c: char| !c.is_alphanumeric())
.filter(|s| !s.is_empty())
.filter(|word| word.len() <= 50)
.map(str::to_lowercase)
.map(|word| {
let mut key = shortroomid.to_be_bytes().to_vec();
key.extend_from_slice(word.as_bytes());
key.push(0xff);
key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here
(key, Vec::new())
});
self.tokenids.insert_batch(&mut batch)
}
fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
let batch = tokenize(message_body).map(|word| {
let mut key = shortroomid.to_be_bytes().to_vec();
key.extend_from_slice(word.as_bytes());
key.push(0xFF);
key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here
key
});
for token in batch {
self.tokenids.remove(&token)?;
}
Ok(())
}
fn search_pdus<'a>(
&'a self,
room_id: &RoomId,
@ -55,7 +33,11 @@ impl service::rooms::search::Data for KeyValueDatabase {
.to_be_bytes()
.to_vec();
let words: Vec<_> = tokenize(search_string).collect();
let words: Vec<_> = search_string
.split_terminator(|c: char| !c.is_alphanumeric())
.filter(|s| !s.is_empty())
.map(str::to_lowercase)
.collect();
let iterators = words.clone().into_iter().map(move |word| {
let mut prefix2 = prefix.clone();
@ -72,12 +54,11 @@ impl service::rooms::search::Data for KeyValueDatabase {
.map(move |(key, _)| key[prefix3.len()..].to_vec())
});
let common_elements = match utils::common_elements(iterators, |a, b| {
let Some(common_elements) = utils::common_elements(iterators, |a, b| {
// We compare b with a because we reversed the iterator earlier
b.cmp(a)
}) {
Some(it) => it,
None => return Ok(None),
}) else {
return Ok(None);
};
Ok(Some((Box::new(common_elements), words)))
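One side of this hunk factors the splitting and lowercasing logic into a `tokenize` helper shared by indexing, de-indexing, and querying. Copying it out with a small usage example (the helper body matches the hunk; the example input is mine):

```rust
/// Splits a string into the tokens used as keys in the search inverted index,
/// exactly as the hunk above does: split on non-alphanumerics, drop empty and
/// overlong words, lowercase the rest.
fn tokenize(body: &str) -> impl Iterator<Item = String> + '_ {
    body.split_terminator(|c: char| !c.is_alphanumeric())
        .filter(|s| !s.is_empty())
        .filter(|word| word.len() <= 50)
        .map(str::to_lowercase)
}

fn main() {
    let tokens: Vec<_> = tokenize("Hello, Matrix world!").collect();
    assert_eq!(tokens, ["hello", "matrix", "world"]);
}
```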

View file

@ -10,18 +10,18 @@ impl service::rooms::short::Data for KeyValueDatabase {
return Ok(*short);
}
let short = match self.eventid_shorteventid.get(event_id.as_bytes())? {
Some(shorteventid) => utils::u64_from_bytes(&shorteventid)
.map_err(|_| Error::bad_database("Invalid shorteventid in db."))?,
None => {
let short =
if let Some(shorteventid) = self.eventid_shorteventid.get(event_id.as_bytes())? {
utils::u64_from_bytes(&shorteventid)
.map_err(|_| Error::bad_database("Invalid shorteventid in db."))?
} else {
let shorteventid = services().globals.next_count()?;
self.eventid_shorteventid
.insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?;
self.shorteventid_eventid
.insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?;
shorteventid
}
};
};
self.eventidshort_cache
.lock()
@ -45,13 +45,13 @@ impl service::rooms::short::Data for KeyValueDatabase {
return Ok(Some(*short));
}
let mut statekey = event_type.to_string().as_bytes().to_vec();
statekey.push(0xff);
statekey.extend_from_slice(state_key.as_bytes());
let mut state_key_bytes = event_type.to_string().as_bytes().to_vec();
state_key_bytes.push(0xff);
state_key_bytes.extend_from_slice(state_key.as_bytes());
let short = self
.statekey_shortstatekey
.get(&statekey)?
.get(&state_key_bytes)?
.map(|shortstatekey| {
utils::u64_from_bytes(&shortstatekey)
.map_err(|_| Error::bad_database("Invalid shortstatekey in db."))
@ -82,22 +82,22 @@ impl service::rooms::short::Data for KeyValueDatabase {
return Ok(*short);
}
let mut statekey = event_type.to_string().as_bytes().to_vec();
statekey.push(0xff);
statekey.extend_from_slice(state_key.as_bytes());
let mut state_key_bytes = event_type.to_string().as_bytes().to_vec();
state_key_bytes.push(0xff);
state_key_bytes.extend_from_slice(state_key.as_bytes());
let short = match self.statekey_shortstatekey.get(&statekey)? {
Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey)
.map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?,
None => {
let short =
if let Some(shortstatekey) = self.statekey_shortstatekey.get(&state_key_bytes)? {
utils::u64_from_bytes(&shortstatekey)
.map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?
} else {
let shortstatekey = services().globals.next_count()?;
self.statekey_shortstatekey
.insert(&statekey, &shortstatekey.to_be_bytes())?;
.insert(&state_key_bytes, &shortstatekey.to_be_bytes())?;
self.shortstatekey_statekey
.insert(&shortstatekey.to_be_bytes(), &statekey)?;
.insert(&shortstatekey.to_be_bytes(), &state_key_bytes)?;
shortstatekey
}
};
};
self.statekeyshort_cache
.lock()
@ -175,21 +175,22 @@ impl service::rooms::short::Data for KeyValueDatabase {
Ok(result)
}
/// Returns (shortstatehash, already_existed)
/// Returns (shortstatehash, `already_existed`)
fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)> {
Ok(match self.statehash_shortstatehash.get(state_hash)? {
Some(shortstatehash) => (
utils::u64_from_bytes(&shortstatehash)
.map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?,
true,
),
None => {
Ok(
if let Some(shortstatehash) = self.statehash_shortstatehash.get(state_hash)? {
(
utils::u64_from_bytes(&shortstatehash)
.map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?,
true,
)
} else {
let shortstatehash = services().globals.next_count()?;
self.statehash_shortstatehash
.insert(state_hash, &shortstatehash.to_be_bytes())?;
(shortstatehash, false)
}
})
},
)
}
fn get_shortroomid(&self, room_id: &RoomId) -> Result<Option<u64>> {
@ -203,15 +204,16 @@ impl service::rooms::short::Data for KeyValueDatabase {
}
fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result<u64> {
Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? {
Some(short) => utils::u64_from_bytes(&short)
.map_err(|_| Error::bad_database("Invalid shortroomid in db."))?,
None => {
Ok(
if let Some(short) = self.roomid_shortroomid.get(room_id.as_bytes())? {
utils::u64_from_bytes(&short)
.map_err(|_| Error::bad_database("Invalid shortroomid in db."))?
} else {
let short = services().globals.next_count()?;
self.roomid_shortroomid
.insert(room_id.as_bytes(), &short.to_be_bytes())?;
short
}
})
},
)
}
}
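The `get_or_create_*` hunks in this file all share one shape: look the value up in a tree, and if it is missing, allocate the next global counter and store it. A much-simplified sketch with a `HashMap` standing in for the `KvTree` (the `Shorter` type is invented; the real code also maintains an in-memory cache and the reverse mapping):

```rust
use std::collections::HashMap;

struct Shorter {
    next: u64,
    map: HashMap<String, u64>,
}

impl Shorter {
    fn get_or_create(&mut self, key: &str) -> u64 {
        // `.copied()` ends the borrow of the map before the else branch mutates it.
        if let Some(short) = self.map.get(key).copied() {
            short
        } else {
            let short = self.next;
            self.next += 1;
            self.map.insert(key.to_owned(), short);
            short
        }
    }
}

fn main() {
    let mut s = Shorter { next: 1, map: HashMap::new() };
    assert_eq!(s.get_or_create("$event_a"), 1);
    assert_eq!(s.get_or_create("$event_b"), 2);
    assert_eq!(s.get_or_create("$event_a"), 1);
}
```

With a plain `HashMap` the entry API would be the idiomatic choice; the database code cannot use it because the forward and reverse mappings live in separate key-value trees.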

View file

@ -63,7 +63,7 @@ impl service::rooms::state::Data for KeyValueDatabase {
}
for event_id in event_ids {
let mut key = prefix.to_owned();
let mut key = prefix.clone();
key.extend_from_slice(event_id.as_bytes());
self.roomid_pduleaves.insert(&key, event_id.as_bytes())?;
}

View file

@ -79,13 +79,12 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
event_type: &StateEventType,
state_key: &str,
) -> Result<Option<Arc<EventId>>> {
let shortstatekey = match services()
let Some(shortstatekey) = services()
.rooms
.short
.get_shortstatekey(event_type, state_key)?
{
Some(s) => s,
None => return Ok(None),
else {
return Ok(None);
};
let full_state = services()
.rooms

View file

@ -31,11 +31,10 @@ impl service::rooms::user::Data for KeyValueDatabase {
self.userroomid_notificationcount
.get(&userroom_id)?
.map(|bytes| {
.map_or(Ok(0), |bytes| {
utils::u64_from_bytes(&bytes)
.map_err(|_| Error::bad_database("Invalid notification count in db."))
})
.unwrap_or(Ok(0))
}
fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result<u64> {
@ -45,11 +44,10 @@ impl service::rooms::user::Data for KeyValueDatabase {
self.userroomid_highlightcount
.get(&userroom_id)?
.map(|bytes| {
.map_or(Ok(0), |bytes| {
utils::u64_from_bytes(&bytes)
.map_err(|_| Error::bad_database("Invalid highlight count in db."))
})
.unwrap_or(Ok(0))
}
fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> Result<u64> {

View file

@ -67,9 +67,9 @@ impl service::sending::Data for KeyValueDatabase {
for (outgoing_kind, event) in requests {
let mut key = outgoing_kind.get_prefix();
if let SendingEventType::Pdu(value) = &event {
key.extend_from_slice(value)
key.extend_from_slice(value);
} else {
key.extend_from_slice(&services().globals.next_count()?.to_be_bytes())
key.extend_from_slice(&services().globals.next_count()?.to_be_bytes());
}
let value = if let SendingEventType::Edu(value) = &event {
&**value

View file

@ -12,7 +12,7 @@ impl service::transaction_ids::Data for KeyValueDatabase {
) -> Result<()> {
let mut key = user_id.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default());
key.extend_from_slice(device_id.map(DeviceId::as_bytes).unwrap_or_default());
key.push(0xff);
key.extend_from_slice(txn_id.as_bytes());
@ -29,7 +29,7 @@ impl service::transaction_ids::Data for KeyValueDatabase {
) -> Result<Option<Vec<u8>>> {
let mut key = user_id.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default());
key.extend_from_slice(device_id.map(DeviceId::as_bytes).unwrap_or_default());
key.push(0xff);
key.extend_from_slice(txn_id.as_bytes());

View file

@ -34,7 +34,7 @@ impl service::uiaa::Data for KeyValueDatabase {
.read()
.unwrap()
.get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned()))
.map(|j| j.to_owned())
.map(ToOwned::to_owned)
}
fn update_uiaa_session(
@ -80,7 +80,7 @@ impl service::uiaa::Data for KeyValueDatabase {
.userdevicesessionid_uiaainfo
.get(&userdevicesessionid)?
.ok_or(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"UIAA session does not exist.",
))?,
)

View file

@ -11,7 +11,6 @@ use ruma::{
use tracing::warn;
use crate::{
api::client_server::TOKEN_LENGTH,
database::KeyValueDatabase,
service::{self, users::clean_signatures},
services, utils, Error, Result,
@ -142,7 +141,7 @@ impl service::users::Data for KeyValueDatabase {
Ok(())
}
/// Get the avatar_url of a user.
/// Get the `avatar_url` of a user.
fn avatar_url(&self, user_id: &UserId) -> Result<Option<OwnedMxcUri>> {
self.userid_avatarurl
.get(user_id.as_bytes())?
@ -154,7 +153,7 @@ impl service::users::Data for KeyValueDatabase {
.transpose()
}
/// Sets a new avatar_url or removes it if avatar_url is None.
/// Sets a new `avatar_url` or removes it if `avatar_url` is None.
fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option<OwnedMxcUri>) -> Result<()> {
if let Some(avatar_url) = avatar_url {
self.userid_avatarurl
@ -179,7 +178,7 @@ impl service::users::Data for KeyValueDatabase {
.transpose()
}
/// Sets a new avatar_url or removes it if avatar_url is None.
/// Sets a new `avatar_url` or removes it if `avatar_url` is None.
fn set_blurhash(&self, user_id: &UserId, blurhash: Option<String>) -> Result<()> {
if let Some(blurhash) = blurhash {
self.userid_blurhash
@ -344,12 +343,11 @@ impl service::users::Data for KeyValueDatabase {
fn last_one_time_keys_update(&self, user_id: &UserId) -> Result<u64> {
self.userid_lastonetimekeyupdate
.get(user_id.as_bytes())?
.map(|bytes| {
.map_or(Ok(0), |bytes| {
utils::u64_from_bytes(&bytes).map_err(|_| {
Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.")
})
})
.unwrap_or(Ok(0))
}
fn take_one_time_key(
@ -944,59 +942,13 @@ impl service::users::Data for KeyValueDatabase {
Ok(None)
}
}
// Creates an OpenID token, which can be used to prove that a user has access to an account (primarily for integrations)
fn create_openid_token(&self, user_id: &UserId) -> Result<(String, u64)> {
let token = utils::random_string(TOKEN_LENGTH);
let expires_in = services().globals.config.openid_token_ttl;
let expires_at = utils::millis_since_unix_epoch()
.checked_add(expires_in * 1000)
.expect("time is valid");
let mut value = expires_at.to_be_bytes().to_vec();
value.extend_from_slice(user_id.as_bytes());
self.openidtoken_expiresatuserid
.insert(token.as_bytes(), value.as_slice())?;
Ok((token, expires_in))
}
/// Find out which user an OpenID access token belongs to.
fn find_from_openid_token(&self, token: &str) -> Result<Option<OwnedUserId>> {
let Some(value) = self.openidtoken_expiresatuserid.get(token.as_bytes())? else {
return Ok(None);
};
let (expires_at_bytes, user_bytes) = value.split_at(0u64.to_be_bytes().len());
let expires_at = u64::from_be_bytes(
expires_at_bytes
.try_into()
.map_err(|_| Error::bad_database("expires_at in openid_userid is invalid u64."))?,
);
if expires_at < utils::millis_since_unix_epoch() {
self.openidtoken_expiresatuserid.remove(token.as_bytes())?;
return Ok(None);
}
Some(
UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| {
Error::bad_database("User ID in openid_userid is invalid unicode.")
})?)
.map_err(|_| Error::bad_database("User ID in openid_userid is invalid.")),
)
.transpose()
}
}
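The removed OpenID hunk above stores each token's value as an 8-byte big-endian expiry timestamp followed by the raw user-ID bytes, and `find_from_openid_token` splits that back apart. A small sketch of just the byte layout, independent of the `KvTree` storage (function names here are hypothetical, not Conduit's):

```rust
// Illustrative encode/decode of the value layout used by the removed OpenID
// token hunk above: 8-byte big-endian expiry timestamp, then raw user-ID bytes.
// Function names are made up; only the byte layout mirrors the diff.
fn encode_openid_value(expires_at_ms: u64, user_id: &str) -> Vec<u8> {
    let mut value = expires_at_ms.to_be_bytes().to_vec();
    value.extend_from_slice(user_id.as_bytes());
    value
}

fn decode_openid_value(value: &[u8]) -> Option<(u64, &str)> {
    let (expiry, user) = value.split_at(std::mem::size_of::<u64>());
    let expires_at_ms = u64::from_be_bytes(expiry.try_into().ok()?);
    Some((expires_at_ms, std::str::from_utf8(user).ok()?))
}

fn main() {
    let value = encode_openid_value(1_700_000_000_000, "@alice:example.org");
    assert_eq!(
        decode_openid_value(&value),
        Some((1_700_000_000_000, "@alice:example.org"))
    );
}
```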
impl KeyValueDatabase {}
/// Will only return with Some(username) if the password was not empty and the
/// username could be successfully parsed.
/// If utils::string_from_bytes(...) returns an error that username will be skipped
/// If `utils::string_from_bytes`(...) returns an error that username will be skipped
/// and the error will be logged.
fn get_username_with_valid_password(username: &[u8], password: &[u8]) -> Option<String> {
// A valid password is not empty

View file

@ -6,7 +6,6 @@ use crate::{
SERVICES,
};
use abstraction::{KeyValueDatabaseEngine, KvTree};
use base64::{engine::general_purpose, Engine};
use directories::ProjectDirs;
use lru_cache::LruCache;
@ -35,7 +34,7 @@ use tokio::time::interval;
use tracing::{debug, error, info, warn};
pub struct KeyValueDatabase {
_db: Arc<dyn KeyValueDatabaseEngine>,
db: Arc<dyn KeyValueDatabaseEngine>,
//pub globals: globals::Globals,
pub(super) global: Arc<dyn KvTree>,
@ -58,7 +57,6 @@ pub struct KeyValueDatabase {
pub(super) userid_masterkeyid: Arc<dyn KvTree>,
pub(super) userid_selfsigningkeyid: Arc<dyn KvTree>,
pub(super) userid_usersigningkeyid: Arc<dyn KvTree>,
pub(super) openidtoken_expiresatuserid: Arc<dyn KvTree>, // expiresatuserid = expiresat + userid
pub(super) userfilterid_filter: Arc<dyn KvTree>, // UserFilterId = UserId + FilterId
@ -102,8 +100,6 @@ pub struct KeyValueDatabase {
pub(super) userroomid_leftstate: Arc<dyn KvTree>,
pub(super) roomuserid_leftcount: Arc<dyn KvTree>,
pub(super) alias_userid: Arc<dyn KvTree>, // User who created the alias
pub(super) disabledroomids: Arc<dyn KvTree>, // Rooms where incoming federation handling is disabled
pub(super) lazyloadedids: Arc<dyn KvTree>, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId
@ -117,7 +113,7 @@ pub struct KeyValueDatabase {
pub(super) roomsynctoken_shortstatehash: Arc<dyn KvTree>,
/// Remember the state hash at events in the past.
pub(super) shorteventid_shortstatehash: Arc<dyn KvTree>,
/// StateKey = EventType + StateKey, ShortStateKey = Count
/// `StateKey` = `EventType` + `StateKey`, `ShortStateKey` = Count
pub(super) statekey_shortstatekey: Arc<dyn KvTree>,
pub(super) shortstatekey_statekey: Arc<dyn KvTree>,
@ -131,14 +127,14 @@ pub struct KeyValueDatabase {
pub(super) shorteventid_authchain: Arc<dyn KvTree>,
/// RoomId + EventId -> outlier PDU.
/// `RoomId` + `EventId` -> outlier PDU.
/// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn.
pub(super) eventid_outlierpdu: Arc<dyn KvTree>,
pub(super) softfailedeventids: Arc<dyn KvTree>,
/// ShortEventId + ShortEventId -> ().
/// `ShortEventId` + `ShortEventId` -> ().
pub(super) tofrom_relation: Arc<dyn KvTree>,
/// RoomId + EventId -> Parent PDU EventId.
/// `RoomId` + `EventId` -> Parent PDU `EventId`.
pub(super) referencedevents: Arc<dyn KvTree>,
//pub account_data: account_data::AccountData,
@ -245,6 +241,8 @@ impl KeyValueDatabase {
.map_err(|_| Error::BadConfig("Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). Please create the database folder yourself."))?;
}
// Databases which are disabled will trigger this
#[allow(clippy::match_same_arms)]
let builder: Arc<dyn KeyValueDatabaseEngine> = match &*config.database_backend {
"sqlite" => {
#[cfg(not(feature = "sqlite"))]
@ -278,7 +276,7 @@ impl KeyValueDatabase {
}
let db_raw = Box::new(Self {
_db: builder.clone(),
db: builder.clone(),
userid_password: builder.open_tree("userid_password")?,
userid_displayname: builder.open_tree("userid_displayname")?,
userid_avatarurl: builder.open_tree("userid_avatarurl")?,
@ -294,7 +292,6 @@ impl KeyValueDatabase {
userid_masterkeyid: builder.open_tree("userid_masterkeyid")?,
userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?,
userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?,
openidtoken_expiresatuserid: builder.open_tree("openidtoken_expiresatuserid")?,
userfilterid_filter: builder.open_tree("userfilterid_filter")?,
todeviceid_events: builder.open_tree("todeviceid_events")?,
@ -330,8 +327,6 @@ impl KeyValueDatabase {
userroomid_leftstate: builder.open_tree("userroomid_leftstate")?,
roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?,
alias_userid: builder.open_tree("alias_userid")?,
disabledroomids: builder.open_tree("disabledroomids")?,
lazyloadedids: builder.open_tree("lazyloadedids")?,
@ -411,9 +406,11 @@ impl KeyValueDatabase {
// Matrix resource ownership is based on the server name; changing it
// requires recreating the database from scratch.
if services().users.count()? > 0 {
let conduit_user = services().globals.server_user();
let conduit_user =
UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("@conduit:server_name is valid");
if !services().users.exists(conduit_user)? {
if !services().users.exists(&conduit_user)? {
error!(
"The {} server user does not exist, and the database is not new.",
conduit_user
@ -425,7 +422,7 @@ impl KeyValueDatabase {
}
// If the database has any data, perform data migrations before starting
let latest_database_version = 16;
let latest_database_version = 13;
if services().users.count()? > 0 {
// MIGRATIONS
@ -433,12 +430,9 @@ impl KeyValueDatabase {
for (roomserverid, _) in db.roomserverids.iter() {
let mut parts = roomserverid.split(|&b| b == 0xff);
let room_id = parts.next().expect("split always returns one element");
let servername = match parts.next() {
Some(s) => s,
None => {
error!("Migration: Invalid roomserverid in db.");
continue;
}
let Some(servername) = parts.next() else {
error!("Migration: Invalid roomserverid in db.");
continue;
};
let mut serverroomid = servername.to_vec();
serverroomid.push(0xff);
@ -625,7 +619,7 @@ impl KeyValueDatabase {
Ok::<_, Error>(())
};
for (k, seventid) in db._db.open_tree("stateid_shorteventid")?.iter() {
for (k, seventid) in db.db.open_tree("stateid_shorteventid")?.iter() {
let sstatehash = utils::u64_from_bytes(&k[0..size_of::<u64>()])
.expect("number of bytes is correct");
let sstatekey = k[size_of::<u64>()..].to_vec();
@ -798,7 +792,7 @@ impl KeyValueDatabase {
}
// Force E2EE device list updates so we can send them over federation
for user_id in services().users.iter().filter_map(|r| r.ok()) {
for user_id in services().users.iter().filter_map(std::result::Result::ok) {
services().users.mark_device_key_update(&user_id)?;
}
@ -808,7 +802,7 @@ impl KeyValueDatabase {
}
if services().globals.database_version()? < 11 {
db._db
db.db
.open_tree("userdevicesessionid_uiaarequest")?
.clear()?;
services().globals.bump_database_version(11)?;
@ -942,86 +936,6 @@ impl KeyValueDatabase {
warn!("Migration: 12 -> 13 finished");
}
if services().globals.database_version()? < 16 {
// Reconstruct all media using the filesystem
db.mediaid_file.clear().unwrap();
for file in fs::read_dir(services().globals.get_media_folder()).unwrap() {
let file = file.unwrap();
let mediaid = general_purpose::URL_SAFE_NO_PAD
.decode(file.file_name().into_string().unwrap())
.unwrap();
let mut parts = mediaid.rsplit(|&b| b == 0xff);
let mut removed_bytes = 0;
let content_type_bytes = parts.next().unwrap();
removed_bytes += content_type_bytes.len() + 1;
let content_disposition_bytes = parts
.next()
.ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;
removed_bytes += content_disposition_bytes.len();
let mut content_disposition =
utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
Error::bad_database("Content Disposition in mediaid_file is invalid.")
})?;
if content_disposition.contains("filename=")
&& !content_disposition.contains("filename=\"")
{
println!("{}", &content_disposition);
content_disposition =
content_disposition.replacen("filename=", "filename=\"", 1);
content_disposition.push('"');
println!("{}", &content_disposition);
let mut new_key = mediaid[..(mediaid.len() - removed_bytes)].to_vec();
assert!(*new_key.last().unwrap() == 0xff);
let mut shorter_key = new_key.clone();
shorter_key.extend(
ruma::http_headers::ContentDisposition::new(
ruma::http_headers::ContentDispositionType::Inline,
)
.to_string()
.as_bytes(),
);
shorter_key.push(0xff);
shorter_key.extend_from_slice(content_type_bytes);
new_key.extend_from_slice(content_disposition.to_string().as_bytes());
new_key.push(0xff);
new_key.extend_from_slice(content_type_bytes);
// Some file names are too long. Ignore those.
match fs::rename(
services().globals.get_media_file(&mediaid),
services().globals.get_media_file(&new_key),
) {
Ok(_) => {
db.mediaid_file.insert(&new_key, &[])?;
}
Err(_) => {
fs::rename(
services().globals.get_media_file(&mediaid),
services().globals.get_media_file(&shorter_key),
)
.unwrap();
db.mediaid_file.insert(&shorter_key, &[])?;
}
}
} else {
db.mediaid_file.insert(&mediaid, &[])?;
}
}
services().globals.bump_database_version(16)?;
warn!("Migration: 13 -> 16 finished");
}
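The removed 13 -> 16 migration above rebuilds `mediaid_file` from the files on disk: each key is a sequence of `0xff`-separated fields, with the content type last and the content disposition just before it. A minimal sketch of that split, assuming the same layout (the helper name is made up for illustration):

```rust
// Minimal sketch of how the removed 13 -> 16 migration splits a media key:
// fields are 0xff-separated, the content type is the last field and the
// content disposition sits just before it. `split_media_key` is illustrative.
fn split_media_key(mediaid: &[u8]) -> Option<(&[u8], &[u8])> {
    let mut parts = mediaid.rsplit(|&b| b == 0xff);
    let content_type = parts.next()?;
    let content_disposition = parts.next()?;
    Some((content_disposition, content_type))
}

fn main() {
    let key = b"mxc-part\xffinline\xffimage/png";
    let (disposition, content_type) = split_media_key(key).unwrap();
    assert_eq!(disposition, b"inline");
    assert_eq!(content_type, b"image/png");
}
```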
assert_eq!(
services().globals.database_version().unwrap(),
latest_database_version
@ -1064,7 +978,7 @@ impl KeyValueDatabase {
error!(
"Could not set the configured emergency password for the conduit user: {}",
e
)
);
}
};
@ -1082,7 +996,7 @@ impl KeyValueDatabase {
pub fn flush(&self) -> Result<()> {
let start = std::time::Instant::now();
let res = self._db.flush();
let res = self.db.flush();
debug!("flush: took {:?}", start.elapsed());
@ -1102,17 +1016,10 @@ impl KeyValueDatabase {
}
async fn try_handle_updates() -> Result<()> {
let response = services()
.globals
.default_client()
.get("https://conduit.rs/check-for-updates/stable")
.send()
.await?;
#[derive(Deserialize)]
struct CheckForUpdatesResponseEntry {
id: u64,
date: String,
id: u64,
message: String,
}
#[derive(Deserialize)]
@ -1120,6 +1027,13 @@ impl KeyValueDatabase {
updates: Vec<CheckForUpdatesResponseEntry>,
}
let response = services()
.globals
.default_client()
.get("https://conduit.rs/check-for-updates/stable")
.send()
.await?;
let response = serde_json::from_str::<CheckForUpdatesResponse>(&response.text().await?)
.map_err(|_| Error::BadServerResponse("Bad version check response"))?;
@ -1133,7 +1047,7 @@ impl KeyValueDatabase {
.send_message(RoomMessageEventContent::text_plain(format!(
"@room: The following is a message from the Conduit developers. It was sent on '{}':\n\n{}",
update.date, update.message
)))
)));
}
}
services()
@ -1151,7 +1065,7 @@ impl KeyValueDatabase {
use std::time::{Duration, Instant};
let timer_interval =
Duration::from_secs(services().globals.config.cleanup_second_interval as u64);
Duration::from_secs(u64::from(services().globals.config.cleanup_second_interval));
tokio::spawn(async move {
let mut i = interval(timer_interval);
@ -1187,21 +1101,22 @@ impl KeyValueDatabase {
/// Sets the emergency password and push rules for the @conduit account in case an emergency password is set
fn set_emergency_access() -> Result<bool> {
let conduit_user = services().globals.server_user();
let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("@conduit:server_name is a valid UserId");
services().users.set_password(
conduit_user,
&conduit_user,
services().globals.emergency_password().as_deref(),
)?;
let (ruleset, res) = match services().globals.emergency_password() {
Some(_) => (Ruleset::server_default(conduit_user), Ok(true)),
Some(_) => (Ruleset::server_default(&conduit_user), Ok(true)),
None => (Ruleset::new(), Ok(false)),
};
services().account_data.update(
None,
conduit_user,
&conduit_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(&GlobalAccountDataEvent {
content: PushRulesEventContent { global: ruleset },

View file

@ -1,7 +1,15 @@
// All API endpoints must be async
#[allow(clippy::unused_async)]
// We expect request users and servers (probably shouldn't tho)
#[allow(clippy::missing_panics_doc)]
pub mod api;
pub mod clap;
mod config;
// Results in large capacity if set to a negative number, user's fault really :P
#[allow(clippy::cast_sign_loss)]
mod database;
// `self` is required for easy access to methods
#[allow(clippy::unused_self)]
mod service;
mod utils;
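The hunk above attaches `#[allow(..)]` attributes to whole modules, with a comment explaining each exception. As a quick illustration of the scoping involved, an attribute placed before `mod` covers every item inside that module (the module and function below are made up, not from Conduit):

```rust
// Hedged illustration of module-scoped lint allows: the attribute applies to
// the whole module it precedes, so one line covers every item inside.
#[allow(clippy::cast_sign_loss)]
mod database {
    // Negative input wraps to a huge capacity; the allow documents that this
    // is accepted behaviour rather than an oversight.
    pub fn capacity_from_config(configured: i64) -> usize {
        configured as usize
    }
}

fn main() {
    assert_eq!(database::capacity_from_config(8), 8);
}
```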

View file

@ -1,10 +1,8 @@
use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration};
use axum::{
body::Body,
extract::{DefaultBodyLimit, FromRequestParts, MatchedPath},
middleware::map_response,
response::{IntoResponse, Response},
response::IntoResponse,
routing::{any, get, on, MethodFilter},
Router,
};
@ -15,7 +13,7 @@ use figment::{
Figment,
};
use http::{
header::{self, HeaderName, CONTENT_SECURITY_POLICY},
header::{self, HeaderName},
Method, StatusCode, Uri,
};
use ruma::api::{
@ -57,7 +55,7 @@ async fn main() {
))
.nested(),
)
.merge(Env::prefixed("CONDUIT_").global().split("__"));
.merge(Env::prefixed("CONDUIT_").global());
let config = match raw_config.extract::<Config>() {
Ok(s) => s,
@ -70,13 +68,11 @@ async fn main() {
config.warn_deprecated();
if config.allow_jaeger {
opentelemetry::global::set_text_map_propagator(
opentelemetry_jaeger_propagator::Propagator::new(),
);
let tracer = opentelemetry_otlp::new_pipeline()
.tracing()
.with_exporter(opentelemetry_otlp::new_exporter().tonic())
.install_batch(opentelemetry_sdk::runtime::Tokio)
opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new());
let tracer = opentelemetry_jaeger::new_agent_pipeline()
.with_auto_split_batch(true)
.with_service_name("conduit")
.install_batch(opentelemetry::runtime::Tokio)
.unwrap();
let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
@ -145,13 +141,6 @@ async fn main() {
}
}
/// Adds additional headers to prevent any potential XSS attacks via the media repo
async fn set_csp_header(response: Response) -> impl IntoResponse {
(
[(CONTENT_SECURITY_POLICY, "sandbox; default-src 'none'; script-src 'none'; plugin-types application/pdf; style-src 'unsafe-inline'; object-src 'self';")], response
)
}
async fn run_server() -> io::Result<()> {
let config = &services().globals.config;
let addr = SocketAddr::from((config.address, config.port));
@ -192,7 +181,6 @@ async fn run_server() -> io::Result<()> {
])
.max_age(Duration::from_secs(86400)),
)
.layer(map_response(set_csp_header))
.layer(DefaultBodyLimit::max(
config
.max_request_size
@ -213,7 +201,7 @@ async fn run_server() -> io::Result<()> {
#[cfg(feature = "systemd")]
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]);
server.await?
server.await?;
}
None => {
let server = bind(addr).handle(handle).serve(app);
@ -221,17 +209,17 @@ async fn run_server() -> io::Result<()> {
#[cfg(feature = "systemd")]
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]);
server.await?
server.await?;
}
}
Ok(())
}
async fn spawn_task(
req: http::Request<Body>,
next: axum::middleware::Next,
) -> std::result::Result<Response, StatusCode> {
async fn spawn_task<B: Send + 'static>(
req: http::Request<B>,
next: axum::middleware::Next<B>,
) -> std::result::Result<axum::response::Response, StatusCode> {
if services().globals.shutdown.load(atomic::Ordering::Relaxed) {
return Err(StatusCode::SERVICE_UNAVAILABLE);
}
@ -240,10 +228,10 @@ async fn spawn_task(
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
}
async fn unrecognized_method(
req: http::Request<Body>,
next: axum::middleware::Next,
) -> std::result::Result<Response, StatusCode> {
async fn unrecognized_method<B: Send>(
req: http::Request<B>,
next: axum::middleware::Next<B>,
) -> std::result::Result<axum::response::Response, StatusCode> {
let method = req.method().clone();
let uri = req.uri().clone();
let inner = next.run(req).await;
@ -289,7 +277,6 @@ fn routes(config: &Config) -> Router {
.ruma_route(client_server::get_room_aliases_route)
.ruma_route(client_server::get_filter_route)
.ruma_route(client_server::create_filter_route)
.ruma_route(client_server::create_openid_token_route)
.ruma_route(client_server::set_global_account_data_route)
.ruma_route(client_server::set_room_account_data_route)
.ruma_route(client_server::get_global_account_data_route)
@ -379,14 +366,10 @@ fn routes(config: &Config) -> Router {
.ruma_route(client_server::turn_server_route)
.ruma_route(client_server::send_event_to_device_route)
.ruma_route(client_server::get_media_config_route)
.ruma_route(client_server::get_media_config_auth_route)
.ruma_route(client_server::create_content_route)
.ruma_route(client_server::get_content_route)
.ruma_route(client_server::get_content_auth_route)
.ruma_route(client_server::get_content_as_filename_route)
.ruma_route(client_server::get_content_as_filename_auth_route)
.ruma_route(client_server::get_content_thumbnail_route)
.ruma_route(client_server::get_content_thumbnail_auth_route)
.ruma_route(client_server::get_devices_route)
.ruma_route(client_server::get_device_route)
.ruma_route(client_server::update_device_route)
@ -444,13 +427,10 @@ fn routes(config: &Config) -> Router {
.ruma_route(server_server::create_join_event_v2_route)
.ruma_route(server_server::create_invite_route)
.ruma_route(server_server::get_devices_route)
.ruma_route(server_server::get_content_route)
.ruma_route(server_server::get_content_thumbnail_route)
.ruma_route(server_server::get_room_information_route)
.ruma_route(server_server::get_profile_information_route)
.ruma_route(server_server::get_keys_route)
.ruma_route(server_server::claim_keys_route)
.ruma_route(server_server::get_openid_userinfo_route)
.ruma_route(server_server::well_known_server)
} else {
router
@ -481,8 +461,8 @@ async fn shutdown_signal(handle: ServerHandle) {
let sig: &str;
tokio::select! {
_ = ctrl_c => { sig = "Ctrl+C"; },
_ = terminate => { sig = "SIGTERM"; },
() = ctrl_c => { sig = "Ctrl+C"; },
() = terminate => { sig = "SIGTERM"; },
}
warn!("Received {}, shutting down...", sig);

View file

@ -1,4 +1,9 @@
use std::{collections::BTreeMap, convert::TryFrom, sync::Arc, time::Instant};
use std::{
collections::BTreeMap,
convert::{TryFrom, TryInto},
sync::Arc,
time::Instant,
};
use clap::Parser;
use regex::Regex;
@ -19,8 +24,7 @@ use ruma::{
},
TimelineEventType,
},
EventId, MilliSecondsSinceUnixEpoch, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId,
RoomVersionId, ServerName, UserId,
EventId, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
};
use serde_json::value::to_raw_value;
use tokio::sync::{mpsc, Mutex, RwLock};
@ -73,12 +77,6 @@ enum AdminCommand {
/// List all rooms we are currently handling an incoming pdu from
IncomingFederation,
/// Removes an alias from the server
RemoveAlias {
/// The alias to be removed
alias: Box<RoomAliasId>,
},
/// Deactivate a user
///
/// User will not be removed from all rooms by default.
@ -112,7 +110,7 @@ enum AdminCommand {
force: bool,
},
/// Get the auth_chain of a PDU
/// Get the `auth_chain` of a PDU
GetAuthChain {
/// An event ID (the $ character followed by the base64 reference hash)
event_id: Box<EventId>,
@ -162,23 +160,24 @@ enum AdminCommand {
password: Option<String>,
},
/// Temporarily toggle user registration by passing either true or false as an argument; does not persist between restarts
AllowRegistration { status: Option<bool> },
/// Disables incoming federation handling for a room.
DisableRoom { room_id: Box<RoomId> },
/// Enables incoming federation handling for a room again.
EnableRoom { room_id: Box<RoomId> },
/// Sign a json object using Conduit's signing keys, putting the json in a codeblock
/// Verify json signatures
/// [commandbody]()
/// # ```
/// # json here
/// # ```
SignJson,
/// Verify json signatures, putting the json in a codeblock
/// Verify json signatures
/// [commandbody]()
/// # ```
/// # json here
/// # ```
VerifyJson,
/// Parses a JSON object as an event, then hashes and signs it, taking a room
/// version as an argument and the json in a codeblock
HashAndSignEvent { room_version_id: RoomVersionId },
}
#[derive(Debug)]
@ -213,7 +212,8 @@ impl Service {
// TODO: Use futures when we have long admin commands
//let mut futures = FuturesUnordered::new();
let conduit_user = services().globals.server_user();
let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name()))
.expect("@conduit:server_name is valid");
if let Ok(Some(conduit_room)) = services().admin.get_admin_room() {
loop {
@ -229,7 +229,7 @@ impl Service {
.roomid_mutex_state
.write()
.await
.entry(conduit_room.to_owned())
.entry(conduit_room.clone())
.or_default(),
);
@ -246,9 +246,8 @@ impl Service {
unsigned: None,
state_key: None,
redacts: None,
timestamp: None,
},
conduit_user,
&conduit_user,
&conduit_room,
&state_lock,
)
@ -661,24 +660,6 @@ impl Service {
"Created user with user_id: {user_id} and password: {password}"
))
}
AdminCommand::AllowRegistration { status } => {
if let Some(status) = status {
services().globals.set_registration(status).await;
RoomMessageEventContent::text_plain(if status {
"Registration is now enabled"
} else {
"Registration is now disabled"
})
} else {
RoomMessageEventContent::text_plain(
if services().globals.allow_registration().await {
"Registration is currently enabled"
} else {
"Registration is currently disabled"
},
)
}
}
AdminCommand::DisableRoom { room_id } => {
services().rooms.metadata.disable_room(&room_id, true)?;
RoomMessageEventContent::text_plain("Room disabled.")
@ -730,11 +711,11 @@ impl Service {
match <&UserId>::try_from(user) {
Ok(user_id) => {
if user_id.server_name() != services().globals.server_name() {
remote_ids.push(user_id)
remote_ids.push(user_id);
} else if !services().users.exists(user_id)? {
non_existant_ids.push(user_id)
non_existant_ids.push(user_id);
} else {
user_ids.push(user_id)
user_ids.push(user_id);
}
}
Err(_) => {
@ -789,20 +770,21 @@ impl Service {
if !force {
user_ids.retain(|&user_id| match services().users.is_admin(user_id) {
Ok(is_admin) => match is_admin {
true => {
Ok(is_admin) => {
if is_admin {
admins.push(user_id.localpart());
false
} else {
true
}
false => true,
},
}
Err(_) => false,
})
});
}
for &user_id in &user_ids {
if services().users.deactivate_account(user_id).is_ok() {
deactivation_count += 1
deactivation_count += 1;
}
}
@ -860,46 +842,15 @@ impl Service {
services()
.rooms
.event_handler
// Generally we shouldn't be checking against expired keys unless required, so in the admin
// room it might be best to not allow expired keys
.fetch_required_signing_keys(&value, &pub_key_map)
.await?;
let mut expired_key_map = BTreeMap::new();
let mut valid_key_map = BTreeMap::new();
for (server, keys) in pub_key_map.into_inner().into_iter() {
if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() {
valid_key_map.insert(
server,
keys.verify_keys
.into_iter()
.map(|(id, key)| (id, key.key))
.collect(),
);
} else {
expired_key_map.insert(
server,
keys.verify_keys
.into_iter()
.map(|(id, key)| (id, key.key))
.collect(),
);
}
}
if ruma::signatures::verify_json(&valid_key_map, &value).is_ok() {
RoomMessageEventContent::text_plain("Signature correct")
} else if let Err(e) =
ruma::signatures::verify_json(&expired_key_map, &value)
{
RoomMessageEventContent::text_plain(format!(
let pub_key_map = pub_key_map.read().await;
match ruma::signatures::verify_json(&pub_key_map, &value) {
Ok(()) => RoomMessageEventContent::text_plain("Signature correct"),
Err(e) => RoomMessageEventContent::text_plain(format!(
"Signature verification failed: {e}"
))
} else {
RoomMessageEventContent::text_plain(
"Signature correct (with expired keys)",
)
)),
}
}
Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")),
@ -910,61 +861,6 @@ impl Service {
)
}
}
AdminCommand::HashAndSignEvent { room_version_id } => {
if body.len() > 2
// Language may be specified as part of the codeblock (e.g. "```json")
&& body[0].trim().starts_with("```")
&& body.last().unwrap().trim() == "```"
{
let string = body[1..body.len() - 1].join("\n");
match serde_json::from_str(&string) {
Ok(mut value) => {
if let Err(e) = ruma::signatures::hash_and_sign_event(
services().globals.server_name().as_str(),
services().globals.keypair(),
&mut value,
&room_version_id,
) {
RoomMessageEventContent::text_plain(format!("Invalid event: {e}"))
} else {
let json_text = serde_json::to_string_pretty(&value)
.expect("canonical json is valid json");
RoomMessageEventContent::text_plain(json_text)
}
}
Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")),
}
} else {
RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
)
}
}
AdminCommand::RemoveAlias { alias } => {
if alias.server_name() != services().globals.server_name() {
RoomMessageEventContent::text_plain(
"Cannot remove alias which is not from this server",
)
} else if services()
.rooms
.alias
.resolve_local_alias(&alias)?
.is_none()
{
RoomMessageEventContent::text_plain("No such alias exists")
} else {
// We execute this as the server user for two reasons
// 1. If the user can execute commands in the admin room, they can always remove the alias.
// 2. In the future, we are likely going to be able to allow users to execute commands via
// other methods, such as IPC, which would lead to us not knowing their user id
services()
.rooms
.alias
.remove_alias(&alias, services().globals.server_user())?;
RoomMessageEventContent::text_plain("Alias removed sucessfully")
}
}
};
Ok(reply_message_content)
@ -1014,8 +910,7 @@ impl Service {
while text_lines
.get(line_index)
.map(|line| line.starts_with('#'))
.unwrap_or(false)
.is_some_and(|line| line.starts_with('#'))
{
command_body += if text_lines[line_index].starts_with("# ") {
&text_lines[line_index][2..]
@ -1072,9 +967,11 @@ impl Service {
let state_lock = mutex_state.lock().await;
// Create a user for the server
let conduit_user = services().globals.server_user();
let conduit_user =
UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("@conduit:server_name is valid");
services().users.create(conduit_user, None)?;
services().users.create(&conduit_user, None)?;
let room_version = services().globals.default_room_version();
let mut content = match room_version {
@ -1087,7 +984,7 @@ impl Service {
| RoomVersionId::V7
| RoomVersionId::V8
| RoomVersionId::V9
| RoomVersionId::V10 => RoomCreateEventContent::new_v1(conduit_user.to_owned()),
| RoomVersionId::V10 => RoomCreateEventContent::new_v1(conduit_user.clone()),
RoomVersionId::V11 => RoomCreateEventContent::new_v11(),
_ => unreachable!("Validity of room version already checked"),
};
@ -1104,11 +1001,10 @@ impl Service {
event_type: TimelineEventType::RoomCreate,
content: to_raw_value(&content).expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
conduit_user,
&conduit_user,
&room_id,
&state_lock,
)
@ -1135,9 +1031,8 @@ impl Service {
unsigned: None,
state_key: Some(conduit_user.to_string()),
redacts: None,
timestamp: None,
},
conduit_user,
&conduit_user,
&room_id,
&state_lock,
)
@ -1145,7 +1040,7 @@ impl Service {
// 3. Power levels
let mut users = BTreeMap::new();
users.insert(conduit_user.to_owned(), 100.into());
users.insert(conduit_user.clone(), 100.into());
services()
.rooms
@ -1159,11 +1054,10 @@ impl Service {
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
conduit_user,
&conduit_user,
&room_id,
&state_lock,
)
@ -1179,11 +1073,10 @@ impl Service {
content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
conduit_user,
&conduit_user,
&room_id,
&state_lock,
)
@ -1201,11 +1094,10 @@ impl Service {
))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
conduit_user,
&conduit_user,
&room_id,
&state_lock,
)
@ -1223,11 +1115,10 @@ impl Service {
))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
conduit_user,
&conduit_user,
&room_id,
&state_lock,
)
@ -1244,11 +1135,10 @@ impl Service {
content: to_raw_value(&RoomNameEventContent::new(room_name))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
conduit_user,
&conduit_user,
&room_id,
&state_lock,
)
@ -1265,18 +1155,19 @@ impl Service {
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
conduit_user,
&conduit_user,
&room_id,
&state_lock,
)
.await?;
// 6. Room alias
let alias: OwnedRoomAliasId = services().globals.admin_alias().to_owned();
let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");
services()
.rooms
@ -1290,20 +1181,16 @@ impl Service {
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
conduit_user,
&conduit_user,
&room_id,
&state_lock,
)
.await?;
services()
.rooms
.alias
.set_alias(&alias, &room_id, conduit_user)?;
services().rooms.alias.set_alias(&alias, &room_id)?;
Ok(())
}
@ -1312,10 +1199,15 @@ impl Service {
///
/// Errors are propagated from the database, and will have None if there is no admin room
pub(crate) fn get_admin_room(&self) -> Result<Option<OwnedRoomId>> {
let admin_room_alias: Box<RoomAliasId> =
format!("#admins:{}", services().globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");
services()
.rooms
.alias
.resolve_local_alias(services().globals.admin_alias())
.resolve_local_alias(&admin_room_alias)
}
/// Invite the user to the conduit admin room.
@ -1339,7 +1231,9 @@ impl Service {
let state_lock = mutex_state.lock().await;
// Use the server user to grant the new admin's power level
let conduit_user = services().globals.server_user();
let conduit_user =
UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("@conduit:server_name is valid");
// Invite and join the real user
services()
@ -1362,9 +1256,8 @@ impl Service {
unsigned: None,
state_key: Some(user_id.to_string()),
redacts: None,
timestamp: None,
},
conduit_user,
&conduit_user,
&room_id,
&state_lock,
)
@ -1389,7 +1282,6 @@ impl Service {
unsigned: None,
state_key: Some(user_id.to_string()),
redacts: None,
timestamp: None,
},
user_id,
&room_id,
@ -1399,7 +1291,7 @@ impl Service {
// Set power level
let mut users = BTreeMap::new();
users.insert(conduit_user.to_owned(), 100.into());
users.insert(conduit_user.clone(), 100.into());
users.insert(user_id.to_owned(), 100.into());
services()
@ -1414,11 +1306,10 @@ impl Service {
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
state_key: Some(String::new()),
redacts: None,
timestamp: None,
},
conduit_user,
&conduit_user,
&room_id,
&state_lock,
)
@ -1436,24 +1327,14 @@ impl Service {
unsigned: None,
state_key: None,
redacts: None,
timestamp: None,
},
conduit_user,
&conduit_user,
&room_id,
&state_lock,
).await?;
}
Ok(())
}
/// Checks whether a given user is an admin of this server
pub fn user_is_admin(&self, user_id: &UserId) -> Result<bool> {
let Some(admin_room) = self.get_admin_room()? else {
return Ok(false);
};
services().rooms.state_cache.is_joined(user_id, &admin_room)
}
}
#[cfg(test)]

View file

@ -1,71 +1,13 @@
use std::{
collections::BTreeMap,
time::{Duration, SystemTime},
};
use std::collections::BTreeMap;
use crate::{services, Result};
use async_trait::async_trait;
use ruma::{
api::federation::discovery::{OldVerifyKey, ServerSigningKeys, VerifyKey},
serde::Base64,
api::federation::discovery::{ServerSigningKeys, VerifyKey},
signatures::Ed25519KeyPair,
DeviceId, MilliSecondsSinceUnixEpoch, ServerName, UserId,
DeviceId, OwnedServerSigningKeyId, ServerName, UserId,
};
use serde::Deserialize;
/// Similar to ServerSigningKeys, but drops a few fields that are unnecessary post-validation
#[derive(Deserialize, Debug, Clone)]
pub struct SigningKeys {
pub verify_keys: BTreeMap<String, VerifyKey>,
pub old_verify_keys: BTreeMap<String, OldVerifyKey>,
pub valid_until_ts: MilliSecondsSinceUnixEpoch,
}
impl SigningKeys {
/// Creates the SigningKeys struct, using the keys of the current server
pub fn load_own_keys() -> Self {
let mut keys = Self {
verify_keys: BTreeMap::new(),
old_verify_keys: BTreeMap::new(),
valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(7 * 86400),
)
.expect("Should be valid until year 500,000,000"),
};
keys.verify_keys.insert(
format!("ed25519:{}", services().globals.keypair().version()),
VerifyKey {
key: Base64::new(services().globals.keypair.public_key().to_vec()),
},
);
keys
}
}
impl From<ServerSigningKeys> for SigningKeys {
fn from(value: ServerSigningKeys) -> Self {
let ServerSigningKeys {
verify_keys,
old_verify_keys,
valid_until_ts,
..
} = value;
Self {
verify_keys: verify_keys
.into_iter()
.map(|(id, key)| (id.to_string(), key))
.collect(),
old_verify_keys: old_verify_keys
.into_iter()
.map(|(id, key)| (id.to_string(), key))
.collect(),
valid_until_ts,
}
}
}
use crate::Result;
#[async_trait]
pub trait Data: Send + Sync {
@ -79,23 +21,17 @@ pub trait Data: Send + Sync {
fn clear_caches(&self, amount: u32);
fn load_keypair(&self) -> Result<Ed25519KeyPair>;
fn remove_keypair(&self) -> Result<()>;
/// Only extends the cached keys, without moving any verify_keys to old_verify_keys, so that if we suddenly
/// receive requests from the origin server, we are still able to accept them
fn add_signing_key_from_trusted_server(
fn add_signing_key(
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
) -> Result<SigningKeys>;
/// Extends cached keys, as well as moving verify_keys that are not present in these new keys to
/// old_verify_keys, so that potentially compromised keys cannot be used to make requests
fn add_signing_key_from_origin(
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
) -> Result<SigningKeys>;
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>>;
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>>;
fn signing_keys_for(
&self,
origin: &ServerName,
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>>;
fn database_version(&self) -> Result<u64>;
fn bump_database_version(&self, new_version: u64) -> Result<()>;
}

View file

@ -1,19 +1,25 @@
mod data;
pub use data::{Data, SigningKeys};
pub use data::Data;
use ruma::{
serde::Base64, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedEventId, OwnedRoomAliasId,
OwnedRoomId, OwnedServerName, OwnedUserId, RoomAliasId,
serde::Base64, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName,
OwnedServerSigningKeyId, OwnedUserId,
};
use crate::api::server_server::DestinationResponse;
use crate::api::server_server::FedDest;
use crate::{services, Config, Error, Result};
use futures_util::FutureExt;
use hickory_resolver::TokioAsyncResolver;
use hyper_util::client::legacy::connect::dns::{GaiResolver, Name as HyperName};
use reqwest::dns::{Addrs, Name, Resolve, Resolving};
use hyper::{
client::connect::dns::{GaiResolver, Name},
service::Service as HyperService,
};
use reqwest::dns::{Addrs, Resolve, Resolving};
use ruma::{
api::{client::sync::sync_events, federation::discovery::ServerSigningKeys},
api::{
client::sync::sync_events,
federation::discovery::{ServerSigningKeys, VerifyKey},
},
DeviceId, RoomVersionId, ServerName, UserId,
};
use std::{
@ -24,7 +30,6 @@ use std::{
iter,
net::{IpAddr, SocketAddr},
path::PathBuf,
str::FromStr,
sync::{
atomic::{self, AtomicBool},
Arc, RwLock as StdRwLock,
@ -32,12 +37,11 @@ use std::{
time::{Duration, Instant},
};
use tokio::sync::{broadcast, watch::Receiver, Mutex, RwLock, Semaphore};
use tower_service::Service as TowerService;
use tracing::{error, info};
use base64::{engine::general_purpose, Engine as _};
type WellKnownMap = HashMap<OwnedServerName, DestinationResponse>;
type WellKnownMap = HashMap<OwnedServerName, (FedDest, String)>;
type TlsNameMap = HashMap<String, (Vec<IpAddr>, u16)>;
type RateLimitState = (Instant, u32); // Time of last failed try, number of failed tries
type SyncHandle = (
@ -51,7 +55,6 @@ pub struct Service {
pub actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host
pub tls_name_override: Arc<StdRwLock<TlsNameMap>>,
pub config: Config,
allow_registration: RwLock<bool>,
keypair: Arc<ruma::signatures::Ed25519KeyPair>,
dns_resolver: TokioAsyncResolver,
jwt_decoding_key: Option<jsonwebtoken::DecodingKey>,
@ -68,8 +71,6 @@ pub struct Service {
pub roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>, // this lock will be held longer
pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
server_user: OwnedUserId,
admin_alias: OwnedRoomAliasId,
pub stateres_mutex: Arc<Mutex<()>>,
pub rotate: RotationHandler,
@ -136,19 +137,11 @@ impl Resolve for Resolver {
})
.unwrap_or_else(|| {
let this = &mut self.inner.clone();
Box::pin(
TowerService::<HyperName>::call(
this,
// Beautiful hack, please remove this in the future.
HyperName::from_str(name.as_str())
.expect("reqwest Name is just wrapper for hyper-util Name"),
)
.map(|result| {
result
.map(|addrs| -> Addrs { Box::new(addrs) })
.map_err(|err| -> Box<dyn StdError + Send + Sync> { Box::new(err) })
}),
)
Box::pin(HyperService::<Name>::call(this, name).map(|result| {
result
.map(|addrs| -> Addrs { Box::new(addrs) })
.map_err(|err| -> Box<dyn StdError + Send + Sync> { Box::new(err) })
}))
})
}
}
@ -191,11 +184,6 @@ impl Service {
let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5];
let mut s = Self {
allow_registration: RwLock::new(config.allow_registration),
admin_alias: RoomAliasId::parse(format!("#admins:{}", &config.server_name))
.expect("#admins:server_name is a valid alias name"),
server_user: UserId::parse(format!("@conduit:{}", &config.server_name))
.expect("@conduit:server_name is valid"),
db,
config,
keypair: Arc::new(keypair),
@ -289,14 +277,6 @@ impl Service {
self.config.server_name.as_ref()
}
pub fn server_user(&self) -> &UserId {
self.server_user.as_ref()
}
pub fn admin_alias(&self) -> &RoomAliasId {
self.admin_alias.as_ref()
}
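The `server_user()` / `admin_alias()` accessors in this hunk cache the parsed `@conduit:server_name` user and `#admins:server_name` alias on the service instead of re-parsing them at every call site (compare the repeated `UserId::parse_with_server_name` calls elsewhere in this diff). A minimal sketch of that construct-once pattern with plain `String` stand-ins rather than Conduit's ruma types:

```rust
// Minimal sketch of the caching pattern behind the removed server_user() /
// admin_alias() accessors: parse once at construction, hand out borrows
// afterwards. `ServiceIds` and its fields are illustrative stand-ins.
struct ServiceIds {
    server_user: String,
    admin_alias: String,
}

impl ServiceIds {
    fn new(server_name: &str) -> Self {
        Self {
            server_user: format!("@conduit:{server_name}"),
            admin_alias: format!("#admins:{server_name}"),
        }
    }

    fn server_user(&self) -> &str {
        &self.server_user
    }

    fn admin_alias(&self) -> &str {
        &self.admin_alias
    }
}

fn main() {
    let ids = ServiceIds::new("example.org");
    assert_eq!(ids.server_user(), "@conduit:example.org");
    assert_eq!(ids.admin_alias(), "#admins:example.org");
}
```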
pub fn max_request_size(&self) -> u32 {
self.config.max_request_size
}
@ -305,15 +285,8 @@ impl Service {
self.config.max_fetch_prev_events
}
/// Allows for the temporary (non-persistent) toggling of registration
pub async fn set_registration(&self, status: bool) {
let mut lock = self.allow_registration.write().await;
*lock = status;
}
/// Checks whether user registration is allowed
pub async fn allow_registration(&self) -> bool {
*self.allow_registration.read().await
pub fn allow_registration(&self) -> bool {
self.config.allow_registration
}
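The removed `allow_registration` plumbing above keeps the runtime flag in a `tokio::sync::RwLock<bool>`, so the admin command can flip registration on and off without persisting anything. A self-contained sketch of that toggle, assuming only the tokio dependency (the `Registration` struct is illustrative):

```rust
// Standalone sketch of a non-persistent registration toggle kept behind a
// tokio RwLock, as in the removed hunk. Struct and field names are made up.
use tokio::sync::RwLock;

struct Registration {
    allowed: RwLock<bool>,
}

impl Registration {
    async fn set(&self, status: bool) {
        *self.allowed.write().await = status;
    }

    async fn allowed(&self) -> bool {
        *self.allowed.read().await
    }
}

#[tokio::main]
async fn main() {
    let reg = Registration { allowed: RwLock::new(true) };
    reg.set(false).await;
    assert!(!reg.allowed().await);
}
```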
pub fn allow_encryption(&self) -> bool {
@ -389,89 +362,36 @@ impl Service {
room_versions
}
/// TODO: the key valid until timestamp is only honored in room version > 4
/// Remove the outdated keys and insert the new ones.
///
/// This doesn't actually check that the keys provided are newer than the old set.
pub fn add_signing_key_from_trusted_server(
pub fn add_signing_key(
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
) -> Result<SigningKeys> {
self.db
.add_signing_key_from_trusted_server(origin, new_keys)
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
self.db.add_signing_key(origin, new_keys)
}
/// Same as from_trusted_server, except it will move active keys not present in `new_keys` to old_signing_keys
pub fn add_signing_key_from_origin(
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
pub fn signing_keys_for(
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
) -> Result<SigningKeys> {
self.db.add_signing_key_from_origin(origin, new_keys)
}
/// This returns Ok(None) when there are no keys found for the server.
pub fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>> {
Ok(self.db.signing_keys_for(origin)?.or_else(|| {
if origin == self.server_name() {
Some(SigningKeys::load_own_keys())
} else {
None
}
}))
}
/// Filters the key map of multiple servers down to keys that should be accepted given the expiry time,
/// room version, and timestamp of the parameters
pub fn filter_keys_server_map(
&self,
keys: BTreeMap<String, SigningKeys>,
timestamp: MilliSecondsSinceUnixEpoch,
room_version_id: &RoomVersionId,
) -> BTreeMap<String, BTreeMap<String, Base64>> {
keys.into_iter()
.filter_map(|(server, keys)| {
self.filter_keys_single_server(keys, timestamp, room_version_id)
.map(|keys| (server, keys))
})
.collect()
}
/// Filters the keys of a single server down to keys that should be accepted given the expiry time,
/// room version, and timestamp of the parameters
pub fn filter_keys_single_server(
&self,
keys: SigningKeys,
timestamp: MilliSecondsSinceUnixEpoch,
room_version_id: &RoomVersionId,
) -> Option<BTreeMap<String, Base64>> {
if keys.valid_until_ts > timestamp
// valid_until_ts MUST be ignored in room versions 1, 2, 3, and 4.
// https://spec.matrix.org/v1.10/server-server-api/#get_matrixkeyv2server
|| matches!(room_version_id, RoomVersionId::V1
| RoomVersionId::V2
| RoomVersionId::V4
| RoomVersionId::V3)
{
// Given that either the room version allows stale keys, or the valid_until_ts is
// in the future, all verify_keys are valid
let mut map: BTreeMap<_, _> = keys
.verify_keys
.into_iter()
.map(|(id, key)| (id, key.key))
.collect();
map.extend(keys.old_verify_keys.into_iter().filter_map(|(id, key)| {
// Even on old room versions, we don't allow old keys if they are expired
if key.expired_ts > timestamp {
Some((id, key.key))
} else {
None
}
}));
Some(map)
} else {
None
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
let mut keys = self.db.signing_keys_for(origin)?;
if origin == self.server_name() {
keys.insert(
format!("ed25519:{}", services().globals.keypair().version())
.try_into()
.expect("found invalid server signing keys in DB"),
VerifyKey {
key: Base64::new(self.keypair.public_key().to_vec()),
},
);
}
Ok(keys)
}
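The removed `filter_keys_single_server` above encodes the acceptance rule for federation signing keys: a key set is usable if its `valid_until_ts` is still in the future, or if the room version is 1-4, which ignore that field per the spec; expired entries in `old_verify_keys` are dropped either way. A plain-types sketch of just the first part of that rule (simplified stand-ins, not ruma's types):

```rust
// Plain-types sketch of the removed key-filtering rule: a verify-key set is
// usable if the server's valid_until_ts is still in the future, or if the
// room version is one of 1-4, which ignore that field.
fn key_set_is_usable(valid_until_ts_ms: u64, now_ms: u64, room_version: u8) -> bool {
    valid_until_ts_ms > now_ms || matches!(room_version, 1..=4)
}

fn main() {
    assert!(key_set_is_usable(2_000, 1_000, 11)); // still valid
    assert!(!key_set_is_usable(500, 1_000, 11)); // expired, strict room version
    assert!(key_set_is_usable(500, 1_000, 3)); // expired, but v3 ignores expiry
}
```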
pub fn database_version(&self) -> Result<u64> {

View file

@ -1,5 +1,3 @@
use ruma::http_headers::ContentDisposition;
use crate::Result;
pub trait Data: Send + Sync {
@ -8,15 +6,15 @@ pub trait Data: Send + Sync {
mxc: String,
width: u32,
height: u32,
content_disposition: &ContentDisposition,
content_disposition: Option<&str>,
content_type: Option<&str>,
) -> Result<Vec<u8>>;
/// Returns content_disposition, content_type and the metadata key.
/// Returns `content_disposition`, `content_type` and the metadata key.
fn search_file_metadata(
&self,
mxc: String,
width: u32,
height: u32,
) -> Result<(ContentDisposition, Option<String>, Vec<u8>)>;
) -> Result<(Option<String>, Option<String>, Vec<u8>)>;
}

View file

@ -2,7 +2,6 @@ mod data;
use std::io::Cursor;
pub use data::Data;
use ruma::http_headers::{ContentDisposition, ContentDispositionType};
use crate::{services, Result};
use image::imageops::FilterType;
@ -13,7 +12,7 @@ use tokio::{
};
pub struct FileMeta {
pub content_disposition: ContentDisposition,
pub content_disposition: Option<String>,
pub content_type: Option<String>,
pub file: Vec<u8>,
}
@ -27,17 +26,14 @@ impl Service {
pub async fn create(
&self,
mxc: String,
content_disposition: Option<ContentDisposition>,
content_disposition: Option<&str>,
content_type: Option<&str>,
file: &[u8],
) -> Result<()> {
let content_disposition =
content_disposition.unwrap_or(ContentDisposition::new(ContentDispositionType::Inline));
// Width, Height = 0 if it's not a thumbnail
let key = self
.db
.create_file_metadata(mxc, 0, 0, &content_disposition, content_type)?;
.create_file_metadata(mxc, 0, 0, content_disposition, content_type)?;
let path = services().globals.get_media_file(&key);
let mut f = File::create(path).await?;
@ -50,18 +46,15 @@ impl Service {
pub async fn upload_thumbnail(
&self,
mxc: String,
content_disposition: Option<&str>,
content_type: Option<&str>,
width: u32,
height: u32,
file: &[u8],
) -> Result<()> {
let key = self.db.create_file_metadata(
mxc,
width,
height,
&ContentDisposition::new(ContentDispositionType::Inline),
content_type,
)?;
let key =
self.db
.create_file_metadata(mxc, width, height, content_disposition, content_type)?;
let path = services().globals.get_media_file(&key);
let mut f = File::create(path).await?;
@ -135,7 +128,7 @@ impl Service {
Ok(Some(FileMeta {
content_disposition,
content_type,
file: file.to_vec(),
file: file.clone(),
}))
} else if let Ok((content_disposition, content_type, key)) =
self.db.search_file_metadata(mxc.clone(), 0, 0)
@ -152,7 +145,7 @@ impl Service {
return Ok(Some(FileMeta {
content_disposition,
content_type,
file: file.to_vec(),
file: file.clone(),
}));
}
@ -173,20 +166,22 @@ impl Service {
/ u64::from(original_height)
};
if use_width {
if intermediate <= u64::from(u32::MAX) {
if intermediate <= u64::from(::std::u32::MAX) {
(width, intermediate as u32)
} else {
(
(u64::from(width) * u64::from(u32::MAX) / intermediate) as u32,
u32::MAX,
(u64::from(width) * u64::from(::std::u32::MAX) / intermediate)
as u32,
::std::u32::MAX,
)
}
} else if intermediate <= u64::from(u32::MAX) {
} else if intermediate <= u64::from(::std::u32::MAX) {
(intermediate as u32, height)
} else {
(
u32::MAX,
(u64::from(height) * u64::from(u32::MAX) / intermediate) as u32,
::std::u32::MAX,
(u64::from(height) * u64::from(::std::u32::MAX) / intermediate)
as u32,
)
}
};
@ -205,7 +200,7 @@ impl Service {
mxc,
width,
height,
&content_disposition,
content_disposition.as_deref(),
content_type.as_deref(),
)?;
@ -216,14 +211,14 @@ impl Service {
Ok(Some(FileMeta {
content_disposition,
content_type,
file: thumbnail_bytes.to_vec(),
file: thumbnail_bytes.clone(),
}))
} else {
// Couldn't parse file to generate thumbnail, send original
Ok(Some(FileMeta {
content_disposition,
content_type,
file: file.to_vec(),
file: file.clone(),
}))
}
} else {

View file

@ -39,6 +39,8 @@ pub struct Services {
}
impl Services {
// Results in large capacity if set to a negative number, user's fault really :P
#[allow(clippy::cast_sign_loss)]
pub fn build<
D: appservice::Data
+ pusher::Data

View file

@ -1,6 +1,5 @@
use crate::Error;
use ruma::{
api::client::error::ErrorKind,
canonical_json::redact_content_in_place,
events::{
room::{member::RoomMemberEventContent, redaction::RoomRedactionEventContent},
@ -73,23 +72,6 @@ impl PduEvent {
Ok(())
}
pub fn is_redacted(&self) -> bool {
#[derive(Deserialize)]
struct ExtractRedactedBecause {
redacted_because: Option<serde::de::IgnoredAny>,
}
let Some(unsigned) = &self.unsigned else {
return false;
};
let Ok(unsigned) = ExtractRedactedBecause::deserialize(&**unsigned) else {
return false;
};
unsigned.redacted_because.is_some()
}
pub fn remove_transaction_id(&mut self) -> crate::Result<()> {
if let Some(unsigned) = &self.unsigned {
let mut unsigned: BTreeMap<String, Box<RawJsonValue>> =
@ -444,7 +426,7 @@ pub(crate) fn gen_event_id_canonical_json(
"${}",
// Anything higher than version3 behaves the same
ruma::signatures::reference_hash(&value, room_version_id)
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))?
.expect("ruma can calculate reference hashes")
)
.try_into()
.expect("ruma's reference hashes are valid event ids");
@ -461,8 +443,4 @@ pub struct PduBuilder {
pub unsigned: Option<BTreeMap<String, serde_json::Value>>,
pub state_key: Option<String>,
pub redacts: Option<Arc<EventId>>,
/// For timestamped messaging, should only be used for appservices
///
/// Will be set to current time if None
pub timestamp: Option<MilliSecondsSinceUnixEpoch>,
}

View file

@ -64,7 +64,7 @@ impl Service {
warn!("Failed to find destination {}: {}", destination, e);
Error::BadServerResponse("Invalid destination")
})?
.map(|body| body.freeze());
.map(BytesMut::freeze);
let reqwest_request = reqwest::Request::try_from(http_request)?;
@ -252,7 +252,7 @@ impl Service {
.iter()
.any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_)))
{
notifi.prio = NotificationPriority::High
notifi.prio = NotificationPriority::High;
}
if event_id_only {
@ -279,7 +279,6 @@ impl Service {
Ok(())
}
// TODO: Handle email
PusherKind::Email(_) => Ok(()),
_ => Ok(()),
}
}

View file

@ -1,12 +1,9 @@
use crate::Result;
use ruma::{OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, UserId};
use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
pub trait Data: Send + Sync {
/// Creates or updates the alias to the given room id.
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()>;
/// Finds the user who assigned the given alias to a room
fn who_created_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedUserId>>;
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()>;
/// Forgets about an alias. Returns an error if the alias did not exist.
fn remove_alias(&self, alias: &RoomAliasId) -> Result<()>;

View file

@ -1,17 +1,9 @@
mod data;
pub use data::Data;
use tracing::error;
use crate::{services, Error, Result};
use ruma::{
api::client::error::ErrorKind,
events::{
room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
StateEventType,
},
OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, UserId,
};
use crate::Result;
use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
pub struct Service {
pub db: &'static dyn Data,
@ -19,71 +11,13 @@ pub struct Service {
impl Service {
#[tracing::instrument(skip(self))]
pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> {
if alias == services().globals.admin_alias() && user_id != services().globals.server_user()
{
Err(Error::BadRequest(
ErrorKind::forbidden(),
"Only the server user can set this alias",
))
} else {
self.db.set_alias(alias, room_id, user_id)
}
pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> {
self.db.set_alias(alias, room_id)
}
#[tracing::instrument(skip(self))]
fn user_can_remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result<bool> {
let Some(room_id) = self.resolve_local_alias(alias)? else {
return Err(Error::BadRequest(ErrorKind::NotFound, "Alias not found."));
};
// The creator of an alias can remove it
if self
.db
.who_created_alias(alias)?
.map(|user| user == user_id)
.unwrap_or_default()
// Server admins can remove any local alias
|| services().admin.user_is_admin(user_id)?
// Always allow the Conduit user to remove the alias, since there may not be an admin room
|| services().globals.server_user ()== user_id
{
Ok(true)
// Checking whether the user is able to change canonical aliases of the room
} else if let Some(event) = services().rooms.state_accessor.room_state_get(
&room_id,
&StateEventType::RoomPowerLevels,
"",
)? {
serde_json::from_str(event.content.get())
.map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels"))
.map(|content: RoomPowerLevelsEventContent| {
RoomPowerLevels::from(content)
.user_can_send_state(user_id, StateEventType::RoomCanonicalAlias)
})
// If there is no power levels event, only the room creator can change canonical aliases
} else if let Some(event) = services().rooms.state_accessor.room_state_get(
&room_id,
&StateEventType::RoomCreate,
"",
)? {
Ok(event.sender == user_id)
} else {
error!("Room {} has no m.room.create event (VERY BAD)!", room_id);
Err(Error::bad_database("Room has no m.room.create event"))
}
}
#[tracing::instrument(skip(self))]
pub fn remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result<()> {
if self.user_can_remove_alias(alias, user_id)? {
self.db.remove_alias(alias)
} else {
Err(Error::BadRequest(
ErrorKind::forbidden(),
"User is not permitted to remove this alias.",
))
}
pub fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> {
self.db.remove_alias(alias)
}
#[tracing::instrument(skip(self))]

View file

@ -133,10 +133,7 @@ impl Service {
match services().rooms.timeline.get_pdu(&event_id) {
Ok(Some(pdu)) => {
if pdu.room_id != room_id {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"Evil event in db",
));
return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db"));
}
for auth_event in &pdu.auth_events {
let sauthevent = services()

View file

@ -6,7 +6,7 @@ use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId};
pub trait Data: Send + Sync {
/// Adds a presence event which will be saved until a new event replaces it.
///
/// Note: This method takes a RoomId because presence updates are always bound to rooms to
/// Note: This method takes a `RoomId` because presence updates are always bound to rooms to
/// make sure users outside these rooms can't see them.
fn update_presence(
&self,
@ -21,7 +21,7 @@ pub trait Data: Send + Sync {
/// Returns the timestamp of the last presence update of this user in millis since the unix epoch.
fn last_presence_update(&self, user_id: &UserId) -> Result<Option<u64>>;
/// Returns the presence event with correct last_active_ago.
/// Returns the presence event with correct `last_active_ago`.
fn get_presence_event(
&self,
room_id: &RoomId,

View file

@ -10,10 +10,13 @@ pub struct Service {
pub db: &'static dyn Data,
}
// TODO: remove when presence is implemented
#[allow(clippy::unnecessary_wraps)]
#[allow(clippy::needless_pass_by_value)]
impl Service {
/// Adds a presence event which will be saved until a new event replaces it.
///
/// Note: This method takes a RoomId because presence updates are always bound to rooms to
/// Note: This method takes a `RoomId` because presence updates are always bound to rooms to
/// make sure users outside these rooms can't see them.
pub fn update_presence(
&self,

View file

@ -10,7 +10,7 @@ pub trait Data: Send + Sync {
event: ReceiptEvent,
) -> Result<()>;
/// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`.
/// Returns an iterator over the most recent `read_receipts` in a room that happened after the event with id `since`.
#[allow(clippy::type_complexity)]
fn readreceipts_since<'a>(
&'a self,

View file

@ -11,7 +11,7 @@ pub struct Service {
}
impl Service {
/// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is
/// Sets a user as typing until the timeout timestamp is reached or `roomtyping_remove` is
/// called.
pub async fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> {
self.typing

View file

@ -9,7 +9,6 @@ use std::{
};
use futures_util::{stream::FuturesUnordered, Future, StreamExt};
use globals::SigningKeys;
use ruma::{
api::{
client::error::ErrorKind,
@ -31,6 +30,7 @@ use ruma::{
StateEventType, TimelineEventType,
},
int,
serde::Base64,
state_res::{self, RoomVersion, StateMap},
uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch,
OwnedServerName, OwnedServerSigningKeyId, RoomId, RoomVersionId, ServerName,
@ -39,7 +39,7 @@ use serde_json::value::RawValue as RawJsonValue;
use tokio::sync::{RwLock, RwLockWriteGuard, Semaphore};
use tracing::{debug, error, info, trace, warn};
use crate::{service::*, services, Error, PduEvent, Result};
use crate::{service::pdu, services, Error, PduEvent, Result};
use super::state_compressor::CompressedStateEvent;
@ -78,7 +78,7 @@ impl Service {
room_id: &'a RoomId,
value: BTreeMap<String, CanonicalJsonValue>,
is_timeline_event: bool,
pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>,
pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<Option<Vec<u8>>> {
// 0. Check the server is in the room
if !services().rooms.metadata.exists(room_id)? {
@ -90,7 +90,7 @@ impl Service {
if services().rooms.metadata.is_disabled(room_id)? {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Federation of this room is currently disabled on this server.",
));
}
@ -99,7 +99,7 @@ impl Service {
// 1. Skip the PDU if we already have it as a timeline event
if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? {
return Ok(Some(pdu_id.to_vec()));
return Ok(Some(pdu_id.clone()));
}
let create_event = services()
@ -162,7 +162,7 @@ impl Service {
// Check for disabled again because it might have changed
if services().rooms.metadata.is_disabled(room_id)? {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Federation of this room is currently disabled on this server.",
));
}
@ -199,7 +199,7 @@ impl Service {
e.insert((Instant::now(), 1));
}
hash_map::Entry::Occupied(mut e) => {
*e.get_mut() = (Instant::now(), e.get().1 + 1)
*e.get_mut() = (Instant::now(), e.get().1 + 1);
}
}
continue;
@ -243,7 +243,7 @@ impl Service {
e.insert((Instant::now(), 1));
}
hash_map::Entry::Occupied(mut e) => {
*e.get_mut() = (Instant::now(), e.get().1 + 1)
*e.get_mut() = (Instant::now(), e.get().1 + 1);
}
}
}
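The retry bookkeeping that this file keeps touching is a plain `hash_map::Entry` pattern: a map from event id to `(Instant, retry_count)` that is inserted on first failure and bumped on every later one. A minimal self-contained sketch of that pattern, with illustrative names rather than Conduit's actual fields:

use std::collections::{hash_map::Entry, HashMap};
use std::time::Instant;

// Record one more failed attempt for `key`, refreshing the timestamp.
fn record_failure(attempts: &mut HashMap<String, (Instant, u32)>, key: String) {
    match attempts.entry(key) {
        Entry::Vacant(e) => {
            e.insert((Instant::now(), 1));
        }
        Entry::Occupied(mut e) => {
            // Same shape as the hunks above: keep counting, reset the clock.
            *e.get_mut() = (Instant::now(), e.get().1 + 1);
        }
    }
}

fn main() {
    let mut attempts = HashMap::new();
    record_failure(&mut attempts, "$event_id".to_owned());
    record_failure(&mut attempts, "$event_id".to_owned());
    assert_eq!(attempts["$event_id"].1, 2);
}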
@ -304,12 +304,19 @@ impl Service {
room_id: &'a RoomId,
mut value: BTreeMap<String, CanonicalJsonValue>,
auth_events_known: bool,
pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>,
pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> AsyncRecursiveType<'a, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>> {
Box::pin(async move {
// 1.1. Remove unsigned field
value.remove("unsigned");
// TODO: For RoomVersion6 we must check that Raw<..> is canonical. Do we do that anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json
// We go through all the signatures we see on the value and fetch the corresponding signing
// keys
self.fetch_required_signing_keys(&value, pub_key_map)
.await?;
// 2. Check signatures, otherwise drop
// 3. check content hash, redact if doesn't match
let create_event_content: RoomCreateEventContent =
@ -322,80 +329,38 @@ impl Service {
let room_version =
RoomVersion::new(room_version_id).expect("room version is supported");
// TODO: For RoomVersion6 we must check that Raw<..> is canonical. Do we do that anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json
// We go through all the signatures we see on the value and fetch the corresponding signing
// keys
self.fetch_required_signing_keys(&value, pub_key_map)
.await?;
let origin_server_ts = value.get("origin_server_ts").ok_or_else(|| {
error!("Invalid PDU, no origin_server_ts field");
Error::BadRequest(
ErrorKind::MissingParam,
"Invalid PDU, no origin_server_ts field",
)
})?;
let origin_server_ts: MilliSecondsSinceUnixEpoch = {
let ts = origin_server_ts.as_integer().ok_or_else(|| {
Error::BadRequest(
ErrorKind::InvalidParam,
"origin_server_ts must be an integer",
)
})?;
MilliSecondsSinceUnixEpoch(i64::from(ts).try_into().map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Time must be after the unix epoch")
})?)
};
let guard = pub_key_map.read().await;
let pkey_map = (*guard).clone();
// Removing all the expired keys, unless the room version allows stale keys
let filtered_keys = services().globals.filter_keys_server_map(
pkey_map,
origin_server_ts,
room_version_id,
);
let mut val =
match ruma::signatures::verify_event(&filtered_keys, &value, room_version_id) {
Err(e) => {
// Drop
warn!("Dropping bad event {}: {}", event_id, e,);
let mut val = match ruma::signatures::verify_event(&guard, &value, room_version_id) {
Err(e) => {
// Drop
warn!("Dropping bad event {}: {}", event_id, e,);
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Signature verification failed",
));
}
Ok(ruma::signatures::Verified::Signatures) => {
// Redact
warn!("Calculated hash does not match: {}", event_id);
let Ok(obj) = ruma::canonical_json::redact(value, room_version_id, None) else {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Signature verification failed",
"Redaction failed",
));
};
// Skip the PDU if it is redacted and we already have it as an outlier event
if services().rooms.timeline.get_pdu_json(event_id)?.is_some() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Event was redacted and we already knew about it",
));
}
Ok(ruma::signatures::Verified::Signatures) => {
// Redact
warn!("Calculated hash does not match: {}", event_id);
let obj = match ruma::canonical_json::redact(value, room_version_id, None) {
Ok(obj) => obj,
Err(_) => {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Redaction failed",
))
}
};
// Skip the PDU if it is redacted and we already have it as an outlier event
if services().rooms.timeline.get_pdu_json(event_id)?.is_some() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Event was redacted and we already knew about it",
));
}
obj
}
Ok(ruma::signatures::Verified::All) => value,
};
obj
}
Ok(ruma::signatures::Verified::All) => value,
};
drop(guard);
@ -441,12 +406,9 @@ impl Service {
// Build map of auth events
let mut auth_events = HashMap::new();
for id in &incoming_pdu.auth_events {
let auth_event = match services().rooms.timeline.get_pdu(id)? {
Some(e) => e,
None => {
warn!("Could not find auth event {}", id);
continue;
}
let Some(auth_event) = services().rooms.timeline.get_pdu(id)? else {
warn!("Could not find auth event {}", id);
continue;
};
self.check_room_id(room_id, &auth_event)?;
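Several hunks in this file replace `match ... { Some(x) => x, None => { ...; continue } }` with `let ... else`, as in the auth-event lookup just above. A tiny standalone illustration of the two equivalent spellings; `lookup` here is a stand-in for a fallible database call, not Conduit's API:

fn lookup(id: &str) -> Option<u32> {
    // Stand-in for something like a get_pdu(id) database lookup.
    id.parse().ok()
}

fn sum_known(ids: &[&str]) -> u32 {
    let mut total = 0;
    for id in ids {
        // Before: let value = match lookup(id) { Some(v) => v, None => continue };
        let Some(value) = lookup(id) else {
            continue;
        };
        total += value;
    }
    total
}

fn main() {
    assert_eq!(sum_known(&["1", "x", "2"]), 3);
}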
@ -473,8 +435,8 @@ impl Service {
// The original create event must be in the auth events
if !matches!(
auth_events
.get(&(StateEventType::RoomCreate, "".to_owned()))
.map(|a| a.as_ref()),
.get(&(StateEventType::RoomCreate, String::new()))
.map(AsRef::as_ref),
Some(_) | None
) {
return Err(Error::BadRequest(
@ -519,7 +481,7 @@ impl Service {
create_event: &PduEvent,
origin: &ServerName,
room_id: &RoomId,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<Option<Vec<u8>>> {
// Skip the PDU if we already have it as a timeline event
if let Ok(Some(pduid)) = services().rooms.timeline.get_pdu_id(&incoming_pdu.event_id) {
@ -606,21 +568,16 @@ impl Service {
let mut okay = true;
for prev_eventid in &incoming_pdu.prev_events {
let prev_event =
if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(prev_eventid) {
pdu
} else {
okay = false;
break;
};
let Ok(Some(prev_event)) = services().rooms.timeline.get_pdu(prev_eventid) else {
okay = false;
break;
};
let sstatehash = if let Ok(Some(s)) = services()
let Ok(Some(sstatehash)) = services()
.rooms
.state_accessor
.pdu_shortstatehash(prev_eventid)
{
s
} else {
else {
okay = false;
break;
};
@ -770,7 +727,7 @@ impl Service {
.get_shortstatekey(&StateEventType::RoomCreate, "")?
.expect("Room exists");
if state.get(&create_shortstatekey).map(|id| id.as_ref())
if state.get(&create_shortstatekey).map(AsRef::as_ref)
!= Some(&create_event.event_id)
{
return Err(Error::bad_database(
@ -1078,16 +1035,12 @@ impl Service {
};
let lock = services().globals.stateres_mutex.lock();
let state = match state_res::resolve(
room_version_id,
&fork_states,
auth_chain_sets,
fetch_event,
) {
Ok(new_state) => new_state,
Err(_) => {
return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization"));
}
let Ok(state) =
state_res::resolve(room_version_id, &fork_states, auth_chain_sets, fetch_event)
else {
return Err(Error::bad_database(
"State resolution failed, either an event could not be found or deserialization",
));
};
drop(lock);
@ -1129,7 +1082,7 @@ impl Service {
create_event: &'a PduEvent,
room_id: &'a RoomId,
room_version_id: &'a RoomVersionId,
pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>,
pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>>
{
Box::pin(async move {
@ -1145,7 +1098,7 @@ impl Service {
e.insert((Instant::now(), 1));
}
hash_map::Entry::Occupied(mut e) => {
*e.get_mut() = (Instant::now(), e.get().1 + 1)
*e.get_mut() = (Instant::now(), e.get().1 + 1);
}
}
};
@ -1204,7 +1157,7 @@ impl Service {
}
info!("Fetching {} over federation.", next_id);
match services()
if let Ok(res) = services()
.sending
.send_federation_request(
origin,
@ -1214,46 +1167,41 @@ impl Service {
)
.await
{
Ok(res) => {
info!("Got {} over federation", next_id);
let (calculated_event_id, value) =
match pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) {
Ok(t) => t,
Err(_) => {
back_off((*next_id).to_owned()).await;
continue;
}
};
if calculated_event_id != *next_id {
warn!("Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}",
next_id, calculated_event_id, &res.pdu);
}
if let Some(auth_events) =
value.get("auth_events").and_then(|c| c.as_array())
{
for auth_event in auth_events {
if let Ok(auth_event) =
serde_json::from_value(auth_event.clone().into())
{
let a: Arc<EventId> = auth_event;
todo_auth_events.push(a);
} else {
warn!("Auth event id is not valid");
}
}
} else {
warn!("Auth event list invalid");
}
events_in_reverse_order.push((next_id.clone(), value));
events_all.insert(next_id);
}
Err(_) => {
warn!("Failed to fetch event: {}", next_id);
info!("Got {} over federation", next_id);
let Ok((calculated_event_id, value)) =
pdu::gen_event_id_canonical_json(&res.pdu, room_version_id)
else {
back_off((*next_id).to_owned()).await;
continue;
};
if calculated_event_id != *next_id {
warn!("Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}",
next_id, calculated_event_id, &res.pdu);
}
if let Some(auth_events) =
value.get("auth_events").and_then(|c| c.as_array())
{
for auth_event in auth_events {
if let Ok(auth_event) =
serde_json::from_value(auth_event.clone().into())
{
let a: Arc<EventId> = auth_event;
todo_auth_events.push(a);
} else {
warn!("Auth event id is not valid");
}
}
} else {
warn!("Auth event list invalid");
}
events_in_reverse_order.push((next_id.clone(), value));
events_all.insert(next_id);
} else {
warn!("Failed to fetch event: {}", next_id);
back_off((*next_id).to_owned()).await;
}
}
@ -1312,7 +1260,7 @@ impl Service {
create_event: &PduEvent,
room_id: &RoomId,
room_version_id: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
initial_set: Vec<Arc<EventId>>,
) -> Result<(
Vec<Arc<EventId>>,
@ -1410,7 +1358,7 @@ impl Service {
pub(crate) async fn fetch_required_signing_keys(
&self,
event: &BTreeMap<String, CanonicalJsonValue>,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<()> {
let signatures = event
.get("signatures")
@ -1439,16 +1387,12 @@ impl Service {
)
})?,
signature_ids,
true,
)
.await;
let keys = match fetch_res {
Ok(keys) => keys,
Err(_) => {
warn!("Signature verification failed: Could not fetch signing key.",);
continue;
}
let Ok(keys) = fetch_res else {
warn!("Signature verification failed: Could not fetch signing key.",);
continue;
};
pub_key_map
@ -1467,7 +1411,7 @@ impl Service {
pdu: &RawJsonValue,
servers: &mut BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>,
room_version: &RoomVersionId,
pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, SigningKeys>>,
pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<()> {
let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);
@ -1477,7 +1421,7 @@ impl Service {
let event_id = format!(
"${}",
ruma::signatures::reference_hash(&value, room_version)
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))?
.expect("ruma can calculate reference hashes")
);
let event_id = <&EventId>::try_from(event_id.as_str())
.expect("ruma's reference hashes are valid event ids");
@ -1518,18 +1462,8 @@ impl Service {
let signature_ids = signature_object.keys().cloned().collect::<Vec<_>>();
let contains_all_ids = |keys: &SigningKeys| {
signature_ids.iter().all(|id| {
keys.verify_keys
.keys()
.map(ToString::to_string)
.any(|key_id| id == &key_id)
|| keys
.old_verify_keys
.keys()
.map(ToString::to_string)
.any(|key_id| id == &key_id)
})
let contains_all_ids = |keys: &BTreeMap<String, Base64>| {
signature_ids.iter().all(|id| keys.contains_key(id))
};
let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| {
@ -1542,14 +1476,19 @@ impl Service {
trace!("Loading signing keys for {}", origin);
if let Some(result) = services().globals.signing_keys_for(origin)? {
if !contains_all_ids(&result) {
trace!("Signing key not loaded for {}", origin);
servers.insert(origin.to_owned(), BTreeMap::new());
}
let result: BTreeMap<_, _> = services()
.globals
.signing_keys_for(origin)?
.into_iter()
.map(|(k, v)| (k.to_string(), v.key))
.collect();
pub_key_map.insert(origin.to_string(), result);
if !contains_all_ids(&result) {
trace!("Signing key not loaded for {}", origin);
servers.insert(origin.to_owned(), BTreeMap::new());
}
pub_key_map.insert(origin.to_string(), result);
}
Ok(())
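The two `contains_all_ids` closures interleaved above differ only in the shape of the key map: one side checks a flat `BTreeMap<String, Base64>`, the other checks both `verify_keys` and `old_verify_keys` of a `SigningKeys` struct. A self-contained sketch of both checks, with plain `String` values standing in for ruma's `Base64` and typed key ids:

use std::collections::BTreeMap;

// Simplified stand-in for the structured variant.
struct SigningKeys {
    verify_keys: BTreeMap<String, String>,
    old_verify_keys: BTreeMap<String, String>,
}

// Flat variant: every requested signature id must be a key of the one map.
fn contains_all_ids_flat(keys: &BTreeMap<String, String>, signature_ids: &[String]) -> bool {
    signature_ids.iter().all(|id| keys.contains_key(id))
}

// Structured variant: an id may live in either the current or the old key set.
fn contains_all_ids_structured(keys: &SigningKeys, signature_ids: &[String]) -> bool {
    signature_ids
        .iter()
        .all(|id| keys.verify_keys.contains_key(id) || keys.old_verify_keys.contains_key(id))
}

fn main() {
    let ids = vec!["ed25519:a_key_id".to_owned()];
    let mut old = BTreeMap::new();
    old.insert("ed25519:a_key_id".to_owned(), "<base64 key>".to_owned());
    let keys = SigningKeys { verify_keys: BTreeMap::new(), old_verify_keys: old };
    // The flat check misses a key that only survives in old_verify_keys.
    assert!(!contains_all_ids_flat(&keys.verify_keys, &ids));
    assert!(contains_all_ids_structured(&keys, &ids));
}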
@ -1559,7 +1498,7 @@ impl Service {
&self,
event: &create_join_event::v2::Response,
room_version: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<()> {
let mut servers: BTreeMap<
OwnedServerName,
@ -1622,7 +1561,10 @@ impl Service {
let result = services()
.globals
.add_signing_key_from_trusted_server(&k.server_name, k.clone())?;
.add_signing_key(&k.server_name, k.clone())?
.into_iter()
.map(|(k, v)| (k.to_string(), v.key))
.collect::<BTreeMap<_, _>>();
pkm.insert(k.server_name.to_string(), result);
}
@ -1653,9 +1595,12 @@ impl Service {
if let (Ok(get_keys_response), origin) = result {
info!("Result is from {origin}");
if let Ok(key) = get_keys_response.server_key.deserialize() {
let result = services()
let result: BTreeMap<_, _> = services()
.globals
.add_signing_key_from_origin(&origin, key)?;
.add_signing_key(&origin, key)?
.into_iter()
.map(|(k, v)| (k.to_string(), v.key))
.collect();
pub_key_map.write().await.insert(origin.to_string(), result);
}
}
@ -1669,24 +1614,28 @@ impl Service {
/// Returns Ok if the acl allows the server
pub fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Result<()> {
let acl_event = match services().rooms.state_accessor.room_state_get(
let Some(acl_event) = services().rooms.state_accessor.room_state_get(
room_id,
&StateEventType::RoomServerAcl,
"",
)? {
Some(acl) => acl,
None => return Ok(()),
)?
else {
return Ok(());
};
let acl_event_content: RoomServerAclEventContent =
match serde_json::from_str(acl_event.content.get()) {
Ok(content) => content,
Err(_) => {
warn!("Invalid ACL event");
return Ok(());
}
if let Ok(content) = serde_json::from_str(acl_event.content.get()) {
content
} else {
warn!("Invalid ACL event");
return Ok(());
};
if acl_event_content.allow.is_empty() {
// Ignore broken acl events
return Ok(());
}
if acl_event_content.is_allowed(server_name) {
Ok(())
} else {
@ -1695,7 +1644,7 @@ impl Service {
server_name, room_id
);
Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Server was denied by room ACL",
))
}
@ -1708,23 +1657,9 @@ impl Service {
&self,
origin: &ServerName,
signature_ids: Vec<String>,
// Whether to ask for keys from trusted servers. Should be false when getting
// keys for validating requests, as per MSC4029
query_via_trusted_servers: bool,
) -> Result<SigningKeys> {
let contains_all_ids = |keys: &SigningKeys| {
signature_ids.iter().all(|id| {
keys.verify_keys
.keys()
.map(ToString::to_string)
.any(|key_id| id == &key_id)
|| keys
.old_verify_keys
.keys()
.map(ToString::to_string)
.any(|key_id| id == &key_id)
})
};
) -> Result<BTreeMap<String, Base64>> {
let contains_all_ids =
|keys: &BTreeMap<String, Base64>| signature_ids.iter().all(|id| keys.contains_key(id));
let permit = services()
.globals
@ -1734,18 +1669,17 @@ impl Service {
.get(origin)
.map(|s| Arc::clone(s).acquire_owned());
let permit = match permit {
Some(p) => p,
None => {
let mut write = services().globals.servername_ratelimiter.write().await;
let s = Arc::clone(
write
.entry(origin.to_owned())
.or_insert_with(|| Arc::new(Semaphore::new(1))),
);
let permit = if let Some(permit) = permit {
permit
} else {
let mut write = services().globals.servername_ratelimiter.write().await;
let s = Arc::clone(
write
.entry(origin.to_owned())
.or_insert_with(|| Arc::new(Semaphore::new(1))),
);
s.acquire_owned()
}
s.acquire_owned()
}
.await;
@ -1785,172 +1719,94 @@ impl Service {
trace!("Loading signing keys for {}", origin);
let result = services().globals.signing_keys_for(origin)?;
let mut result: BTreeMap<_, _> = services()
.globals
.signing_keys_for(origin)?
.into_iter()
.map(|(k, v)| (k.to_string(), v.key))
.collect();
let mut expires_soon_or_has_expired = false;
if let Some(result) = result.clone() {
let ts_threshold = MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(30 * 60),
)
.expect("Should be valid until year 500,000,000");
debug!(
"The treshhold is {:?}, found time is {:?} for server {}",
ts_threshold, result.valid_until_ts, origin
);
if contains_all_ids(&result) {
// We want to ensure that the keys remain valid by the time the other functions that handle signatures reach them
if result.valid_until_ts > ts_threshold {
debug!(
"Keys for {} are deemed as valid, as they expire at {:?}",
&origin, &result.valid_until_ts
);
return Ok(result);
}
expires_soon_or_has_expired = true;
}
if contains_all_ids(&result) {
return Ok(result);
}
let mut keys = result.unwrap_or_else(|| SigningKeys {
verify_keys: BTreeMap::new(),
old_verify_keys: BTreeMap::new(),
valid_until_ts: MilliSecondsSinceUnixEpoch::now(),
});
// We want to set this to the max, and then lower it whenever we see older keys
keys.valid_until_ts = MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(7 * 86400),
)
.expect("Should be valid until year 500,000,000");
debug!("Fetching signing keys for {} over federation", origin);
if let Some(mut server_key) = services()
if let Some(server_key) = services()
.sending
.send_federation_request(origin, get_server_keys::v2::Request::new())
.await
.ok()
.and_then(|resp| resp.server_key.deserialize().ok())
{
// Keys should only be valid for a maximum of seven days
server_key.valid_until_ts = server_key.valid_until_ts.min(
MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(7 * 86400),
)
.expect("Should be valid until year 500,000,000"),
);
services()
.globals
.add_signing_key_from_origin(origin, server_key.clone())?;
.add_signing_key(origin, server_key.clone())?;
if keys.valid_until_ts > server_key.valid_until_ts {
keys.valid_until_ts = server_key.valid_until_ts;
}
keys.verify_keys.extend(
result.extend(
server_key
.verify_keys
.into_iter()
.map(|(id, key)| (id.to_string(), key)),
.map(|(k, v)| (k.to_string(), v.key)),
);
keys.old_verify_keys.extend(
result.extend(
server_key
.old_verify_keys
.into_iter()
.map(|(id, key)| (id.to_string(), key)),
.map(|(k, v)| (k.to_string(), v.key)),
);
if contains_all_ids(&keys) {
return Ok(keys);
if contains_all_ids(&result) {
return Ok(result);
}
}
if query_via_trusted_servers {
for server in services().globals.trusted_servers() {
debug!("Asking {} for {}'s signing key", server, origin);
if let Some(server_keys) = services()
.sending
.send_federation_request(
server,
get_remote_server_keys::v2::Request::new(
origin.to_owned(),
MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now()
.checked_add(Duration::from_secs(3600))
.expect("SystemTime to large"),
)
.expect("time is valid"),
),
)
.await
.ok()
.map(|resp| {
resp.server_keys
for server in services().globals.trusted_servers() {
debug!("Asking {} for {}'s signing key", server, origin);
if let Some(server_keys) = services()
.sending
.send_federation_request(
server,
get_remote_server_keys::v2::Request::new(
origin.to_owned(),
MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now()
.checked_add(Duration::from_secs(3600))
.expect("SystemTime to large"),
)
.expect("time is valid"),
),
)
.await
.ok()
.map(|resp| {
resp.server_keys
.into_iter()
.filter_map(|e| e.deserialize().ok())
.collect::<Vec<_>>()
})
{
trace!("Got signing keys: {:?}", server_keys);
for k in server_keys {
services().globals.add_signing_key(origin, k.clone())?;
result.extend(
k.verify_keys
.into_iter()
.filter_map(|e| e.deserialize().ok())
.collect::<Vec<_>>()
})
{
trace!("Got signing keys: {:?}", server_keys);
for mut k in server_keys {
if k.valid_until_ts
// Half an hour should give plenty of time for the server to respond with keys that are still
// valid, given we requested keys which are valid at least an hour from now
< MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(30 * 60),
)
.expect("Should be valid until year 500,000,000")
{
// Keys should only be valid for a maximum of seven days
k.valid_until_ts = k.valid_until_ts.min(
MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(7 * 86400),
)
.expect("Should be valid until year 500,000,000"),
);
.map(|(k, v)| (k.to_string(), v.key)),
);
result.extend(
k.old_verify_keys
.into_iter()
.map(|(k, v)| (k.to_string(), v.key)),
);
}
if keys.valid_until_ts > k.valid_until_ts {
keys.valid_until_ts = k.valid_until_ts;
}
services()
.globals
.add_signing_key_from_trusted_server(origin, k.clone())?;
keys.verify_keys.extend(
k.verify_keys
.into_iter()
.map(|(id, key)| (id.to_string(), key)),
);
keys.old_verify_keys.extend(
k.old_verify_keys
.into_iter()
.map(|(id, key)| (id.to_string(), key)),
);
} else {
warn!(
"Server {} gave us keys older than we requested, valid until: {:?}",
origin, k.valid_until_ts
);
}
if contains_all_ids(&keys) {
return Ok(keys);
}
}
if contains_all_ids(&result) {
return Ok(result);
}
}
}
// We should return these keys if fresher keys were not found
if expires_soon_or_has_expired {
info!("Returning stale keys for {}", origin);
return Ok(keys);
}
drop(permit);
back_off(signature_ids).await;
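Both versions of `fetch_signing_keys` shown above juggle the same two time windows: a fetched key is never trusted for more than seven days, and it only counts as fresh if it will still be valid roughly half an hour from now, so that later signature checks do not race against its expiry. A compact sketch of that clamping and freshness logic using plain `SystemTime`; the struct and function names are illustrative, not the actual Conduit API:

use std::time::{Duration, SystemTime};

const FRESHNESS_MARGIN: Duration = Duration::from_secs(30 * 60); // half an hour
const MAX_KEY_LIFETIME: Duration = Duration::from_secs(7 * 86_400); // seven days

struct FetchedKey {
    valid_until: SystemTime,
}

// Cap a server-provided expiry so keys are never trusted for more than a week.
fn clamp_expiry(key: &mut FetchedKey) {
    let cap = SystemTime::now() + MAX_KEY_LIFETIME;
    if key.valid_until > cap {
        key.valid_until = cap;
    }
}

// A key is fresh enough if it will still be valid half an hour from now.
fn is_fresh(key: &FetchedKey) -> bool {
    key.valid_until > SystemTime::now() + FRESHNESS_MARGIN
}

fn main() {
    let mut key = FetchedKey {
        valid_until: SystemTime::now() + Duration::from_secs(365 * 86_400),
    };
    clamp_expiry(&mut key);
    assert!(is_fresh(&key));
}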

View file

@ -3,9 +3,9 @@ use std::sync::Arc;
pub use data::Data;
use ruma::{
api::{client::relations::get_relating_events, Direction},
api::client::relations::get_relating_events,
events::{relation::RelationType, TimelineEventType},
EventId, RoomId, UInt, UserId,
EventId, RoomId, UserId,
};
use serde::Deserialize;
@ -46,59 +46,39 @@ impl Service {
sender_user: &UserId,
room_id: &RoomId,
target: &EventId,
filter_event_type: Option<TimelineEventType>,
filter_rel_type: Option<RelationType>,
from: Option<String>,
to: Option<String>,
limit: Option<UInt>,
recurse: bool,
dir: &Direction,
filter_event_type: &Option<TimelineEventType>,
filter_rel_type: &Option<RelationType>,
from: PduCount,
to: Option<PduCount>,
limit: usize,
) -> Result<get_relating_events::v1::Response> {
let from = match from {
Some(from) => PduCount::try_from_string(&from)?,
None => match dir {
Direction::Forward => PduCount::min(),
Direction::Backward => PduCount::max(),
},
};
let to = to.as_ref().and_then(|t| PduCount::try_from_string(t).ok());
// Use limit or else 10, with maximum 100
let limit = limit
.and_then(|u| u32::try_from(u).ok())
.map_or(10_usize, |u| u as usize)
.min(100);
let next_token;
// Spec (v1.10) recommends depth of at least 3
let depth: u8 = if recurse { 3 } else { 1 };
match dir {
Direction::Forward => {
let relations_until = &services().rooms.pdu_metadata.relations_until(
sender_user,
room_id,
target,
from,
depth,
)?;
let events_after: Vec<_> = relations_until // TODO: should be relations_after
.iter()
.filter(|(_, pdu)| {
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
&& if let Ok(content) =
serde_json::from_str::<ExtractRelatesToEventId>(pdu.content.get())
{
filter_rel_type
.as_ref()
.map_or(true, |r| &content.relates_to.rel_type == r)
} else {
false
}
//TODO: Fix ruma: match body.dir {
match ruma::api::Direction::Backward {
ruma::api::Direction::Forward => {
let events_after: Vec<_> = services()
.rooms
.pdu_metadata
.relations_until(sender_user, room_id, target, from)? // TODO: should be relations_after
.filter(|r| {
r.as_ref().map_or(true, |(_, pdu)| {
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
&& if let Ok(content) =
serde_json::from_str::<ExtractRelatesToEventId>(
pdu.content.get(),
)
{
filter_rel_type
.as_ref()
.map_or(true, |r| &content.relates_to.rel_type == r)
} else {
false
}
})
})
.take(limit)
.filter_map(|r| r.ok()) // Filter out buggy events
.filter(|(_, pdu)| {
services()
.rooms
@ -106,7 +86,7 @@ impl Service {
.user_can_see_event(sender_user, room_id, &pdu.event_id)
.unwrap_or(false)
})
.take_while(|(k, _)| Some(k) != to.as_ref()) // Stop at `to`
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
.collect();
next_token = events_after.last().map(|(count, _)| count).copied();
@ -121,32 +101,31 @@ impl Service {
chunk: events_after,
next_batch: next_token.map(|t| t.stringify()),
prev_batch: Some(from.stringify()),
recursion_depth: if recurse { Some(depth.into()) } else { None },
})
}
Direction::Backward => {
let relations_until = &services().rooms.pdu_metadata.relations_until(
sender_user,
room_id,
target,
from,
depth,
)?;
let events_before: Vec<_> = relations_until
.iter()
.filter(|(_, pdu)| {
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
&& if let Ok(content) =
serde_json::from_str::<ExtractRelatesToEventId>(pdu.content.get())
{
filter_rel_type
.as_ref()
.map_or(true, |r| &content.relates_to.rel_type == r)
} else {
false
}
ruma::api::Direction::Backward => {
let events_before: Vec<_> = services()
.rooms
.pdu_metadata
.relations_until(sender_user, room_id, target, from)?
.filter(|r| {
r.as_ref().map_or(true, |(_, pdu)| {
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
&& if let Ok(content) =
serde_json::from_str::<ExtractRelatesToEventId>(
pdu.content.get(),
)
{
filter_rel_type
.as_ref()
.map_or(true, |r| &content.relates_to.rel_type == r)
} else {
false
}
})
})
.take(limit)
.filter_map(|r| r.ok()) // Filter out buggy events
.filter(|(_, pdu)| {
services()
.rooms
@ -154,7 +133,7 @@ impl Service {
.user_can_see_event(sender_user, room_id, &pdu.event_id)
.unwrap_or(false)
})
.take_while(|&(k, _)| Some(k) != to.as_ref()) // Stop at `to`
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
.collect();
next_token = events_before.last().map(|(count, _)| count).copied();
@ -168,7 +147,6 @@ impl Service {
chunk: events_before,
next_batch: next_token.map(|t| t.stringify()),
prev_batch: Some(from.stringify()),
recursion_depth: if recurse { Some(depth.into()) } else { None },
})
}
}
@ -180,44 +158,14 @@ impl Service {
room_id: &'a RoomId,
target: &'a EventId,
until: PduCount,
max_depth: u8,
) -> Result<Vec<(PduCount, PduEvent)>> {
) -> Result<impl Iterator<Item = Result<(PduCount, PduEvent)>> + 'a> {
let room_id = services().rooms.short.get_or_create_shortroomid(room_id)?;
let target = match services().rooms.timeline.get_pdu_count(target)? {
Some(PduCount::Normal(c)) => c,
// TODO: Support backfilled relations
_ => 0, // This will result in an empty iterator
};
self.db
.relations_until(user_id, room_id, target, until)
.map(|mut relations| {
let mut pdus: Vec<_> = (*relations).into_iter().filter_map(Result::ok).collect();
let mut stack: Vec<_> =
pdus.clone().iter().map(|pdu| (pdu.to_owned(), 1)).collect();
while let Some(stack_pdu) = stack.pop() {
let target = match stack_pdu.0 .0 {
PduCount::Normal(c) => c,
// TODO: Support backfilled relations
PduCount::Backfilled(_) => 0, // This will result in an empty iterator
};
if let Ok(relations) = self.db.relations_until(user_id, room_id, target, until)
{
for relation in relations.flatten() {
if stack_pdu.1 < max_depth {
stack.push((relation.clone(), stack_pdu.1 + 1));
}
pdus.push(relation);
}
}
}
pdus.sort_by(|a, b| a.0.cmp(&b.0));
pdus
})
self.db.relations_until(user_id, room_id, target, until)
}
#[tracing::instrument(skip(self, room_id, event_ids))]
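The interleaved `relations_until` hunk above is easier to follow as a standalone sketch: starting from the target event, a stack walks related events outward, tracking how deep each hop is and stopping at `max_depth`, then the results are sorted. The adjacency map below stands in for the database iterator, and the depth bookkeeping is simplified relative to the diff:

use std::collections::HashMap;

// event id -> ids of events that relate to it (stand-in for the db lookup)
type Relations = HashMap<u64, Vec<u64>>;

fn collect_relations(relations: &Relations, target: u64, max_depth: u8) -> Vec<u64> {
    let mut found = Vec::new();
    // Walk outward from the target, remembering how deep each event was found.
    let mut stack: Vec<(u64, u8)> = vec![(target, 0)];

    while let Some((event, depth)) = stack.pop() {
        if depth == max_depth {
            continue;
        }
        for &child in relations.get(&event).map(Vec::as_slice).unwrap_or(&[]) {
            found.push(child);
            stack.push((child, depth + 1));
        }
    }

    found.sort_unstable();
    found
}

fn main() {
    let mut rel = Relations::new();
    rel.insert(1, vec![2, 3]); // events 2 and 3 relate to event 1
    rel.insert(2, vec![4]);    // event 4 relates to event 2
    assert_eq!(collect_relations(&rel, 1, 1), vec![2, 3]);
    assert_eq!(collect_relations(&rel, 1, 3), vec![2, 3, 4]);
}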

View file

@ -4,8 +4,6 @@ use ruma::RoomId;
pub trait Data: Send + Sync {
fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;
fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;
#[allow(clippy::type_complexity)]
fn search_pdus<'a>(
&'a self,

View file

@ -15,16 +15,6 @@ impl Service {
self.db.index_pdu(shortroomid, pdu_id, message_body)
}
#[tracing::instrument(skip(self))]
pub fn deindex_pdu<'a>(
&self,
shortroomid: u64,
pdu_id: &[u8],
message_body: &str,
) -> Result<()> {
self.db.deindex_pdu(shortroomid, pdu_id, message_body)
}
#[tracing::instrument(skip(self))]
pub fn search_pdus<'a>(
&'a self,

View file

@ -22,7 +22,7 @@ pub trait Data: Send + Sync {
fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)>;
/// Returns (shortstatehash, already_existed)
/// Returns (shortstatehash, `already_existed`)
fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)>;
fn get_shortroomid(&self, room_id: &RoomId) -> Result<Option<u64>>;

View file

@ -39,7 +39,7 @@ impl Service {
self.db.get_statekey_from_short(shortstatekey)
}
/// Returns (shortstatehash, already_existed)
/// Returns (shortstatehash, `already_existed`)
pub fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)> {
self.db.get_or_create_shortstatehash(state_hash)
}

View file

@ -63,13 +63,13 @@ impl Service {
let mut results = Vec::new();
while let Some(current_room) = {
while stack.last().map_or(false, |s| s.is_empty()) {
while stack.last().map_or(false, Vec::is_empty) {
stack.pop();
}
if !stack.is_empty() {
stack.last_mut().and_then(|s| s.pop())
} else {
if stack.is_empty() {
None
} else {
stack.last_mut().and_then(Vec::pop)
}
} {
rooms_in_path.push(current_room.clone());
@ -81,7 +81,7 @@ impl Service {
.roomid_spacechunk_cache
.lock()
.await
.get_mut(&current_room.to_owned())
.get_mut(&current_room.clone())
.as_ref()
{
if let Some(cached) = cached {
@ -202,7 +202,7 @@ impl Service {
.send_federation_request(
server,
federation::space::get_hierarchy::v1::Request {
room_id: current_room.to_owned(),
room_id: current_room.clone(),
suggested_only,
},
)
@ -408,7 +408,7 @@ impl Service {
debug!("User is not allowed to see room {room_id}");
// This error will be caught later
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"User is not allowed to see the room",
));
}
@ -453,8 +453,7 @@ impl Service {
room_id: &RoomId,
) -> Result<bool> {
let allowed = match join_rule {
SpaceRoomJoinRule::Public => true,
SpaceRoomJoinRule::Knock => true,
SpaceRoomJoinRule::Knock | SpaceRoomJoinRule::Public => true,
SpaceRoomJoinRule::Invite => services()
.rooms
.state_cache

View file

@ -7,7 +7,7 @@ pub trait Data: Send + Sync {
/// Returns the last state hash key added to the db for the given room.
fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result<Option<u64>>;
/// Set the state hash to a new version, but does not update state_cache.
/// Set the state hash to a new version, but does not update `state_cache`.
fn set_room_state(
&self,
room_id: &RoomId,
@ -18,7 +18,7 @@ pub trait Data: Send + Sync {
/// Associates a state with an event.
fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) -> Result<()>;
/// Returns all events we would send as the prev_events of the next event.
/// Returns all events we would send as the `prev_events` of the next event.
fn get_forward_extremities(&self, room_id: &RoomId) -> Result<HashSet<Arc<EventId>>>;
/// Replace the forward extremities of the room.

View file

@ -45,9 +45,8 @@ impl Service {
.ok()
.map(|(_, id)| id)
}) {
let pdu = match services().rooms.timeline.get_pdu_json(&event_id)? {
Some(pdu) => pdu,
None => continue,
let Some(pdu) = services().rooms.timeline.get_pdu_json(&event_id)? else {
continue;
};
let pdu: PduEvent = match serde_json::from_str(
@ -70,14 +69,12 @@ impl Service {
Err(_) => continue,
};
let state_key = match pdu.state_key {
Some(k) => k,
None => continue,
let Some(state_key) = pdu.state_key else {
continue;
};
let user_id = match UserId::parse(state_key) {
Ok(id) => id,
Err(_) => continue,
let Ok(user_id) = UserId::parse(state_key) else {
continue;
};
services().rooms.state_cache.update_membership(
@ -374,11 +371,7 @@ impl Service {
state_key: Option<&str>,
content: &serde_json::value::RawValue,
) -> Result<StateMap<Arc<PduEvent>>> {
let shortstatehash = if let Some(current_shortstatehash) =
services().rooms.state.get_room_shortstatehash(room_id)?
{
current_shortstatehash
} else {
let Some(shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? else {
return Ok(HashMap::new());
};

View file

@ -85,16 +85,15 @@ impl Service {
/// The user was a joined member at this state (potentially in the past)
fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> bool {
self.user_membership(shortstatehash, user_id)
.map(|s| s == MembershipState::Join)
.unwrap_or_default() // Return sensible default, i.e. false
.is_ok_and(|s| s == MembershipState::Join) // Return sensible default, i.e. false
}
/// The user was an invited or joined room member at this state (potentially
/// in the past)
fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> bool {
self.user_membership(shortstatehash, user_id)
.map(|s| s == MembershipState::Join || s == MembershipState::Invite)
.unwrap_or_default() // Return sensible default, i.e. false
.is_ok_and(|s| s == MembershipState::Join || s == MembershipState::Invite)
// Return sensible default, i.e. false
}
/// Whether a server is allowed to see an event through federation, based on
@ -106,9 +105,8 @@ impl Service {
room_id: &RoomId,
event_id: &EventId,
) -> Result<bool> {
let shortstatehash = match self.pdu_shortstatehash(event_id)? {
Some(shortstatehash) => shortstatehash,
None => return Ok(true),
let Some(shortstatehash) = self.pdu_shortstatehash(event_id)? else {
return Ok(true);
};
if let Some(visibility) = self
@ -170,9 +168,8 @@ impl Service {
room_id: &RoomId,
event_id: &EventId,
) -> Result<bool> {
let shortstatehash = match self.pdu_shortstatehash(event_id)? {
Some(shortstatehash) => shortstatehash,
None => return Ok(true),
let Some(shortstatehash) = self.pdu_shortstatehash(event_id)? else {
return Ok(true);
};
if let Some(visibility) = self
@ -305,13 +302,13 @@ impl Service {
})
}
pub async fn user_can_invite(
pub fn user_can_invite(
&self,
room_id: &RoomId,
sender: &UserId,
target_user: &UserId,
state_lock: &MutexGuard<'_, ()>,
) -> Result<bool> {
) -> bool {
let content = to_raw_value(&RoomMemberEventContent::new(MembershipState::Invite))
.expect("Event content always serializes");
@ -321,14 +318,13 @@ impl Service {
unsigned: None,
state_key: Some(target_user.into()),
redacts: None,
timestamp: None,
};
Ok(services()
services()
.rooms
.timeline
.create_hash_and_sign_event(new_event, sender, room_id, state_lock)
.is_ok())
.is_ok()
}
pub fn get_member(
@ -359,41 +355,46 @@ impl Service {
federation: bool,
) -> Result<bool> {
self.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")?
.map(|e| {
serde_json::from_str(e.content.get())
.map(|c: RoomPowerLevelsEventContent| c.into())
.map(|e: RoomPowerLevels| {
e.user_can_redact_event_of_other(sender)
|| e.user_can_redact_own_event(sender)
&& if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(redacts)
{
if federation {
pdu.sender().server_name() == sender.server_name()
.map_or_else(
// Falling back on m.room.create to judge power levels
|| {
if let Some(pdu) =
self.room_state_get(room_id, &StateEventType::RoomCreate, "")?
{
Ok(pdu.sender == sender
|| if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(redacts) {
pdu.sender == sender
} else {
false
})
} else {
Err(Error::bad_database(
"No m.room.power_levels or m.room.create events in database for room",
))
}
},
|e| {
serde_json::from_str(e.content.get())
.map(|c: RoomPowerLevelsEventContent| c.into())
.map(|e: RoomPowerLevels| {
e.user_can_redact_event_of_other(sender)
|| e.user_can_redact_own_event(sender)
&& if let Ok(Some(pdu)) =
services().rooms.timeline.get_pdu(redacts)
{
if federation {
pdu.sender().server_name() == sender.server_name()
} else {
pdu.sender == sender
}
} else {
pdu.sender == sender
false
}
} else {
false
}
})
.map_err(|_| {
Error::bad_database("Invalid m.room.power_levels event in database")
})
})
// Falling back on m.room.create to judge power levels
.unwrap_or_else(|| {
if let Some(pdu) = self.room_state_get(room_id, &StateEventType::RoomCreate, "")? {
Ok(pdu.sender == sender
|| if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(redacts) {
pdu.sender == sender
} else {
false
})
} else {
Err(Error::bad_database(
"No m.room.power_levels or m.room.create events in database for room",
))
}
})
.map_err(|_| {
Error::bad_database("Invalid m.room.power_levels event in database")
})
},
)
}
}
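The two `user_can_redact` bodies interleaved above implement the same decision tree: prefer `m.room.power_levels` when it exists, otherwise fall back to `m.room.create`. Pulled out of the nested closures, the logic reads roughly like the sketch below; the boolean fields stand in for the power-level and PDU lookups, and the federation-specific server-name comparison is folded into `sender_is_event_sender` for brevity:

// Inputs of the redaction check, with lookups already resolved to plain data.
struct RedactCheck {
    has_power_levels: bool,
    can_redact_others: bool,
    can_redact_own: bool,
    sender_is_event_sender: bool,
    sender_is_room_creator: bool,
}

fn user_can_redact(c: &RedactCheck) -> bool {
    if c.has_power_levels {
        // m.room.power_levels decides: either blanket permission, or
        // "own events only" combined with actually being the sender.
        c.can_redact_others || (c.can_redact_own && c.sender_is_event_sender)
    } else {
        // Fallback when only m.room.create exists: the room creator may
        // redact, and so may the sender of the event itself.
        c.sender_is_room_creator || c.sender_is_event_sender
    }
}

fn main() {
    let moderator = RedactCheck {
        has_power_levels: true,
        can_redact_others: true,
        can_redact_own: true,
        sender_is_event_sender: false,
        sender_is_room_creator: false,
    };
    assert!(user_can_redact(&moderator));
}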

View file

@ -1,5 +1,7 @@
mod data;
use std::collections::BTreeMap;
pub use data::Data;
use ruma::{
api::client::{error::ErrorKind, threads::get_threads::v1::IncludeThreads},
@ -56,7 +58,7 @@ impl Service {
if let CanonicalJsonValue::Object(unsigned) = root_pdu_json
.entry("unsigned".to_owned())
.or_insert_with(|| CanonicalJsonValue::Object(Default::default()))
.or_insert_with(|| CanonicalJsonValue::Object(BTreeMap::default()))
{
if let Some(mut relations) = unsigned
.get("m.relations")

View file

@ -21,9 +21,10 @@ use ruma::{
GlobalAccountDataEventType, StateEventType, TimelineEventType,
},
push::{Action, Ruleset, Tweak},
serde::Base64,
state_res::{self, Event, RoomVersion},
uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch,
OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, RoomVersionId, ServerName, UserId,
uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
OwnedServerName, RoomId, RoomVersionId, ServerName, UserId,
};
use serde::Deserialize;
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
@ -32,10 +33,7 @@ use tracing::{error, info, warn};
use crate::{
api::server_server,
service::{
globals::SigningKeys,
pdu::{EventHash, PduBuilder},
},
service::pdu::{EventHash, PduBuilder},
services, utils, Error, PduEvent, Result,
};
@ -206,6 +204,23 @@ impl Service {
leaves: Vec<OwnedEventId>,
state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
) -> Result<Vec<u8>> {
// Update Relationships
#[derive(Deserialize)]
struct ExtractRelatesTo {
#[serde(rename = "m.relates_to")]
relates_to: Relation,
}
#[derive(Clone, Debug, Deserialize)]
struct ExtractEventId {
event_id: OwnedEventId,
}
#[derive(Clone, Debug, Deserialize)]
struct ExtractRelatesToEventId {
#[serde(rename = "m.relates_to")]
relates_to: ExtractEventId,
}
let shortroomid = services()
.rooms
.short
@ -218,7 +233,7 @@ impl Service {
if let Some(state_key) = &pdu.state_key {
if let CanonicalJsonValue::Object(unsigned) = pdu_json
.entry("unsigned".to_owned())
.or_insert_with(|| CanonicalJsonValue::Object(Default::default()))
.or_insert_with(|| CanonicalJsonValue::Object(BTreeMap::default()))
{
if let Some(shortstatehash) = services()
.rooms
@ -342,8 +357,10 @@ impl Service {
.map_err(|_| Error::bad_database("Invalid push rules event in db."))
})
.transpose()?
.map(|ev: PushRulesEvent| ev.content.global)
.unwrap_or_else(|| Ruleset::server_default(user));
.map_or_else(
|| Ruleset::server_default(user),
|ev: PushRulesEvent| ev.content.global,
);
let mut highlight = false;
let mut notify = false;
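The push-rules lookup above is one of several spots where `.map(f).unwrap_or_else(default)` becomes a single `.map_or_else(default, f)`. A tiny standalone example of the two equivalent spellings, with a plain string default standing in for `Ruleset::server_default`:

fn global_rules(stored: Option<&str>) -> String {
    // Equivalent to: stored.map(str::to_owned).unwrap_or_else(|| "server-default".to_owned())
    stored.map_or_else(|| "server-default".to_owned(), str::to_owned)
}

fn main() {
    assert_eq!(global_rules(None), "server-default");
    assert_eq!(global_rules(Some("custom")), "custom");
}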
@ -401,7 +418,7 @@ impl Service {
&pdu.room_id,
false,
)? {
self.redact_pdu(redact_id, pdu, shortroomid)?;
self.redact_pdu(redact_id, pdu)?;
}
}
}
@ -418,7 +435,7 @@ impl Service {
&pdu.room_id,
false,
)? {
self.redact_pdu(redact_id, pdu, shortroomid)?;
self.redact_pdu(redact_id, pdu)?;
}
}
}
@ -485,27 +502,20 @@ impl Service {
.search
.index_pdu(shortroomid, &pdu_id, &body)?;
let server_user = services().globals.server_user();
let server_user = format!("@conduit:{}", services().globals.server_name());
let to_conduit = body.starts_with(&format!("{server_user}: "))
|| body.starts_with(&format!("{server_user} "))
|| body == format!("{server_user}:")
|| body == server_user.as_str();
|| body == server_user;
// This will evaluate to false if the emergency password is set up so that
// the administrator can execute commands as conduit
let from_conduit = pdu.sender == *server_user
let from_conduit = pdu.sender == server_user
&& services().globals.emergency_password().is_none();
if let Some(admin_room) = services().admin.get_admin_room()? {
if to_conduit
&& !from_conduit
&& admin_room == pdu.room_id
&& services()
.rooms
.state_cache
.is_joined(server_user, &admin_room)?
{
if to_conduit && !from_conduit && admin_room == pdu.room_id {
services().admin.process_message(body);
}
}
@ -514,23 +524,6 @@ impl Service {
_ => {}
}
// Update Relationships
#[derive(Deserialize)]
struct ExtractRelatesTo {
#[serde(rename = "m.relates_to")]
relates_to: Relation,
}
#[derive(Clone, Debug, Deserialize)]
struct ExtractEventId {
event_id: OwnedEventId,
}
#[derive(Clone, Debug, Deserialize)]
struct ExtractRelatesToEventId {
#[serde(rename = "m.relates_to")]
relates_to: ExtractEventId,
}
if let Ok(content) = serde_json::from_str::<ExtractRelatesToEventId>(pdu.content.get()) {
if let Some(related_pducount) = services()
.rooms
@ -665,7 +658,6 @@ impl Service {
unsigned,
state_key,
redacts,
timestamp,
} = pdu_builder;
let prev_events: Vec<_> = services()
@ -735,9 +727,9 @@ impl Service {
event_id: ruma::event_id!("$thiswillbefilledinlater").into(),
room_id: room_id.to_owned(),
sender: sender.to_owned(),
origin_server_ts: timestamp
.map(|ts| ts.get())
.unwrap_or_else(|| MilliSecondsSinceUnixEpoch::now().get()),
origin_server_ts: utils::millis_since_unix_epoch()
.try_into()
.expect("time is valid"),
kind: event_type,
content,
state_key,
@ -772,7 +764,7 @@ impl Service {
if !auth_check {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Event is not authorized.",
));
}
@ -796,7 +788,7 @@ impl Service {
&mut pdu_json,
&room_version_id,
) {
Ok(_) => {}
Ok(()) => {}
Err(e) => {
return match e {
ruma::signatures::Error::PduSize => Err(Error::BadRequest(
@ -815,7 +807,7 @@ impl Service {
pdu.event_id = EventId::parse_arc(format!(
"${}",
ruma::signatures::reference_hash(&pdu_json, &room_version_id)
.expect("Event format validated when event was hashed")
.expect("ruma can calculate reference hashes")
))
.expect("ruma's reference hashes are valid event ids");
@ -852,7 +844,7 @@ impl Service {
TimelineEventType::RoomEncryption => {
warn!("Encryption is not allowed in the admins room");
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Encryption is not allowed in the admins room.",
));
}
@ -867,7 +859,7 @@ impl Service {
.filter(|v| v.starts_with('@'))
.unwrap_or(sender.as_str());
let server_name = services().globals.server_name();
let server_user = services().globals.server_user().as_str();
let server_user = format!("@conduit:{server_name}");
let content = serde_json::from_str::<ExtractMembership>(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid content in pdu."))?;
@ -875,7 +867,7 @@ impl Service {
if target == server_user {
warn!("Conduit user cannot leave from admins room");
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Conduit user cannot leave from admins room.",
));
}
@ -891,7 +883,7 @@ impl Service {
if count < 2 {
warn!("Last admin cannot leave from admins room");
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Last admin cannot leave from admins room.",
));
}
@ -901,7 +893,7 @@ impl Service {
if target == server_user {
warn!("Conduit user cannot be banned in admins room");
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Conduit user cannot be banned in admins room.",
));
}
@ -917,7 +909,7 @@ impl Service {
if count < 2 {
warn!("Last admin cannot be banned in admins room");
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Last admin cannot be banned in admins room.",
));
}
@ -949,7 +941,7 @@ impl Service {
false,
)? {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"User cannot redact this event.",
));
}
@ -970,7 +962,7 @@ impl Service {
false,
)? {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"User cannot redact this event.",
));
}
@ -1110,33 +1102,14 @@ impl Service {
/// Replace a PDU with the redacted form.
#[tracing::instrument(skip(self, reason))]
pub fn redact_pdu(
&self,
event_id: &EventId,
reason: &PduEvent,
shortroomid: u64,
) -> Result<()> {
pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> {
// TODO: Don't reserialize, keep original json
if let Some(pdu_id) = self.get_pdu_id(event_id)? {
let mut pdu = self
.get_pdu_from_id(&pdu_id)?
.ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?;
#[derive(Deserialize)]
struct ExtractBody {
body: String,
}
if let Ok(content) = serde_json::from_str::<ExtractBody>(pdu.content.get()) {
services()
.rooms
.search
.deindex_pdu(shortroomid, &pdu_id, &content.body)?;
}
let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?;
pdu.redact(room_version_id, reason)?;
self.replace_pdu(
&pdu_id,
&utils::to_canonical_object(&pdu).expect("PDU is an object"),
@ -1217,7 +1190,7 @@ impl Service {
&self,
origin: &ServerName,
pdu: Box<RawJsonValue>,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<()> {
let (event_id, value, room_id) = server_server::parse_incoming_pdu(&pdu)?;
@ -1228,7 +1201,7 @@ impl Service {
.roomid_mutex_federation
.write()
.await
.entry(room_id.to_owned())
.entry(room_id.clone())
.or_default(),
);
let mutex_lock = mutex.lock().await;

View file

@ -160,7 +160,9 @@ impl Service {
// Find events that have been added since starting the last request
let new_events = self.db.queued_requests(&outgoing_kind).filter_map(|r| r.ok()).take(30).collect::<Vec<_>>();
if !new_events.is_empty() {
if new_events.is_empty() {
current_transaction_status.remove(&outgoing_kind);
} else {
// Insert pdus we found
self.db.mark_as_active(&new_events)?;
@ -170,8 +172,6 @@ impl Service {
new_events.into_iter().map(|(event, _)| event).collect(),
)
);
} else {
current_transaction_status.remove(&outgoing_kind);
}
}
Err((outgoing_kind, _)) => {
@ -305,41 +305,38 @@ impl Service {
let event: AnySyncEphemeralRoomEvent =
serde_json::from_str(read_receipt.json().get())
.map_err(|_| Error::bad_database("Invalid edu event in read_receipts."))?;
let federation_event = match event {
AnySyncEphemeralRoomEvent::Receipt(r) => {
let mut read = BTreeMap::new();
let federation_event = if let AnySyncEphemeralRoomEvent::Receipt(r) = event {
let mut read = BTreeMap::new();
let (event_id, mut receipt) = r
.content
.0
.into_iter()
.next()
.expect("we only use one event per read receipt");
let receipt = receipt
.remove(&ReceiptType::Read)
.expect("our read receipts always set this")
.remove(&user_id)
.expect("our read receipts always have the user here");
let (event_id, mut receipt) = r
.content
.0
.into_iter()
.next()
.expect("we only use one event per read receipt");
let receipt = receipt
.remove(&ReceiptType::Read)
.expect("our read receipts always set this")
.remove(&user_id)
.expect("our read receipts always have the user here");
read.insert(
user_id,
ReceiptData {
data: receipt.clone(),
event_ids: vec![event_id.clone()],
},
);
read.insert(
user_id,
ReceiptData {
data: receipt.clone(),
event_ids: vec![event_id.clone()],
},
);
let receipt_map = ReceiptMap { read };
let receipt_map = ReceiptMap { read };
let mut receipts = BTreeMap::new();
receipts.insert(room_id.clone(), receipt_map);
let mut receipts = BTreeMap::new();
receipts.insert(room_id.clone(), receipt_map);
Edu::Receipt(ReceiptContent { receipts })
}
_ => {
Error::bad_database("Invalid event type in read_receipts");
continue;
}
Edu::Receipt(ReceiptContent { receipts })
} else {
Error::bad_database("Invalid event type in read_receipts");
continue;
};
events.push(serde_json::to_vec(&federation_event).expect("json can be serialized"));
@ -404,7 +401,7 @@ impl Service {
)?;
for ((outgoing_kind, event), key) in requests.into_iter().zip(keys) {
self.sender
.send((outgoing_kind.to_owned(), event, key))
.send((outgoing_kind.clone(), event, key))
.unwrap();
}
@ -474,7 +471,7 @@ impl Service {
),
)
})?
.to_room_event())
.to_room_event());
}
SendingEventType::Edu(_) => {
// Appservices don't need EDUs (?)
@ -559,13 +556,12 @@ impl Service {
}
}
let pusher = match services()
let Some(pusher) = services()
.pusher
.get_pusher(userid, pushkey)
.map_err(|e| (OutgoingKind::Push(userid.clone(), pushkey.clone()), e))?
{
Some(pusher) => pusher,
None => continue,
else {
continue;
};
let rules_for_user = services()
@ -577,8 +573,10 @@ impl Service {
)
.unwrap_or_default()
.and_then(|event| serde_json::from_str::<PushRulesEvent>(event.get()).ok())
.map(|ev: PushRulesEvent| ev.content.global)
.unwrap_or_else(|| push::Ruleset::server_default(userid));
.map_or_else(
|| push::Ruleset::server_default(userid),
|ev: PushRulesEvent| ev.content.global,
);
let unread: UInt = services()
.rooms

View file

@ -47,10 +47,10 @@ impl Service {
auth: &AuthData,
uiaainfo: &UiaaInfo,
) -> Result<(bool, UiaaInfo)> {
let mut uiaainfo = auth
.session()
.map(|session| self.db.get_uiaa_session(user_id, device_id, session))
.unwrap_or_else(|| Ok(uiaainfo.clone()))?;
let mut uiaainfo = auth.session().map_or_else(
|| Ok(uiaainfo.clone()),
|session| self.db.get_uiaa_session(user_id, device_id, session),
)?;
if uiaainfo.session.is_none() {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
@ -63,14 +63,11 @@ impl Service {
password,
..
}) => {
let username = match identifier {
UserIdentifier::UserIdOrLocalpart(username) => username,
_ => {
return Err(Error::BadRequest(
ErrorKind::Unrecognized,
"Identifier type not recognized.",
))
}
let UserIdentifier::UserIdOrLocalpart(username) = identifier else {
return Err(Error::BadRequest(
ErrorKind::Unrecognized,
"Identifier type not recognized.",
));
};
let user_id = UserId::parse_with_server_name(
@ -86,7 +83,7 @@ impl Service {
if !hash_matches {
uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody {
kind: ErrorKind::forbidden(),
kind: ErrorKind::Forbidden,
message: "Invalid username or password.".to_owned(),
});
return Ok((false, uiaainfo));
@ -101,7 +98,7 @@ impl Service {
uiaainfo.completed.push(AuthType::RegistrationToken);
} else {
uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody {
kind: ErrorKind::forbidden(),
kind: ErrorKind::Forbidden,
message: "Invalid registration token.".to_owned(),
});
return Ok((false, uiaainfo));

View file

@ -42,16 +42,16 @@ pub trait Data: Send + Sync {
/// Sets a new displayname or removes it if displayname is None. You still need to notify all rooms of this change.
fn set_displayname(&self, user_id: &UserId, displayname: Option<String>) -> Result<()>;
/// Get the avatar_url of a user.
/// Get the `avatar_url` of a user.
fn avatar_url(&self, user_id: &UserId) -> Result<Option<OwnedMxcUri>>;
/// Sets a new avatar_url or removes it if avatar_url is None.
/// Sets a new `avatar_url` or removes it if `avatar_url` is None.
fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option<OwnedMxcUri>) -> Result<()>;
/// Get the blurhash of a user.
fn blurhash(&self, user_id: &UserId) -> Result<Option<String>>;
/// Sets a new blurhash or removes it if blurhash is None.
/// Sets a new `blurhash` or removes it if `blurhash` is None.
fn set_blurhash(&self, user_id: &UserId, blurhash: Option<String>) -> Result<()>;
/// Adds a new device to a user.
@ -211,10 +211,4 @@ pub trait Data: Send + Sync {
fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result<String>;
fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result<Option<FilterDefinition>>;
// Creates an OpenID token, which can be used to prove that a user has access to an account (primarily for integrations)
fn create_openid_token(&self, user_id: &UserId) -> Result<(String, u64)>;
/// Find out which user an OpenID access token belongs to.
fn find_from_openid_token(&self, token: &str) -> Result<Option<OwnedUserId>>;
}

View file

@ -9,6 +9,7 @@ pub use data::Data;
use ruma::{
api::client::{
device::Device,
error::ErrorKind,
filter::FilterDefinition,
sync::sync_events::{
self,
@ -19,7 +20,7 @@ use ruma::{
events::AnyToDeviceEvent,
serde::Raw,
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri,
OwnedRoomId, OwnedUserId, UInt, UserId,
OwnedRoomId, OwnedUserId, RoomAliasId, UInt, UserId,
};
use crate::{services, Error, Result};
@ -261,14 +262,19 @@ impl Service {
/// Check if a user is an admin
pub fn is_admin(&self, user_id: &UserId) -> Result<bool> {
if let Some(admin_room_id) = services().admin.get_admin_room()? {
services()
.rooms
.state_cache
.is_joined(user_id, &admin_room_id)
} else {
Ok(false)
}
let admin_room_alias_id =
RoomAliasId::parse(format!("#admins:{}", services().globals.server_name()))
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
let admin_room_id = services()
.rooms
.alias
.resolve_local_alias(&admin_room_alias_id)?
.unwrap();
services()
.rooms
.state_cache
.is_joined(user_id, &admin_room_id)
}
/// Create a new user account on this homeserver.
@ -319,12 +325,12 @@ impl Service {
self.db.set_displayname(user_id, displayname)
}
/// Get the avatar_url of a user.
/// Get the `avatar_url` of a user.
pub fn avatar_url(&self, user_id: &UserId) -> Result<Option<OwnedMxcUri>> {
self.db.avatar_url(user_id)
}
/// Sets a new avatar_url or removes it if avatar_url is None.
/// Sets a new `avatar_url` or removes it if `avatar_url` is None.
pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option<OwnedMxcUri>) -> Result<()> {
self.db.set_avatar_url(user_id, avatar_url)
}
@ -334,7 +340,7 @@ impl Service {
self.db.blurhash(user_id)
}
/// Sets a new blurhash or removes it if blurhash is None.
/// Sets a new `blurhash` or removes it if `blurhash` is None.
pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option<String>) -> Result<()> {
self.db.set_blurhash(user_id, blurhash)
}
@ -592,16 +598,6 @@ impl Service {
) -> Result<Option<FilterDefinition>> {
self.db.get_filter(user_id, filter_id)
}
// Creates an OpenID token, which can be used to prove that a user has access to an account (primarily for integrations)
pub fn create_openid_token(&self, user_id: &UserId) -> Result<(String, u64)> {
self.db.create_openid_token(user_id)
}
/// Find out which user an OpenID access token belongs to.
pub fn find_from_openid_token(&self, token: &str) -> Result<Option<OwnedUserId>> {
self.db.find_from_openid_token(token)
}
}
/// Ensure that a user only sees signatures from themselves and the target user

View file

@ -71,7 +71,7 @@ pub enum Error {
#[error("{0}")]
BadConfig(&'static str),
#[error("{0}")]
/// Don't create this directly. Use Error::bad_database instead.
/// Don't create this directly. Use `Error::bad_database` instead.
BadDatabase(&'static str),
#[error("uiaa")]
Uiaa(UiaaInfo),
@ -107,6 +107,9 @@ impl Error {
impl Error {
pub fn to_response(&self) -> RumaResponse<UiaaResponse> {
#[allow(clippy::enum_glob_use)]
use ErrorKind::*;
if let Self::Uiaa(uiaainfo) = self {
return RumaResponse(UiaaResponse::AuthResponse(uiaainfo.clone()));
}
@ -122,20 +125,19 @@ impl Error {
let message = format!("{self}");
use ErrorKind::*;
let (kind, status_code) = match self {
Self::BadRequest(kind, _) => (
kind.clone(),
match kind {
WrongRoomKeysVersion { .. }
| Forbidden { .. }
| GuestAccessForbidden
| ThreepidAuthFailed
| ThreepidDenied => StatusCode::FORBIDDEN,
Unauthorized | UnknownToken { .. } | MissingToken => StatusCode::UNAUTHORIZED,
NotFound | Unrecognized => StatusCode::NOT_FOUND,
LimitExceeded { .. } => StatusCode::TOO_MANY_REQUESTS,
UserDeactivated => StatusCode::FORBIDDEN,
UserDeactivated
| WrongRoomKeysVersion { .. }
| Forbidden
| GuestAccessForbidden
| ThreepidAuthFailed
| ThreepidDenied => StatusCode::FORBIDDEN,
TooLarge => StatusCode::PAYLOAD_TOO_LARGE,
_ => StatusCode::BAD_REQUEST,
},
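The interleaved match arms above map `ErrorKind` variants onto HTTP status codes. Disentangled, the grouping both sides agree on is roughly the sketch below; a reduced `ErrorKind` enum and bare `u16` codes stand in for ruma's error kinds and `http::StatusCode`, and only variants visible in this hunk appear:

// Reduced stand-ins covering only the variants this hunk touches.
enum ErrorKind {
    Forbidden,
    GuestAccessForbidden,
    ThreepidAuthFailed,
    ThreepidDenied,
    UserDeactivated,
    WrongRoomKeysVersion,
    Unauthorized,
    UnknownToken,
    MissingToken,
    NotFound,
    Unrecognized,
    LimitExceeded,
    TooLarge,
    Other,
}

fn status_code(kind: &ErrorKind) -> u16 {
    use ErrorKind::*;
    match kind {
        Forbidden | GuestAccessForbidden | ThreepidAuthFailed | ThreepidDenied
        | UserDeactivated | WrongRoomKeysVersion => 403,
        Unauthorized | UnknownToken | MissingToken => 401,
        NotFound | Unrecognized => 404,
        LimitExceeded => 429,
        TooLarge => 413,
        Other => 400,
    }
}

fn main() {
    assert_eq!(status_code(&ErrorKind::TooLarge), 413);
}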

Some files were not shown because too many files have changed in this diff.