Compare commits

..

1 commit

Author: Matthias Ahouansou
SHA1: e571acad92
Message: feat(federation): implement /make_leave and /send_leave
Date: 2024-06-12 11:03:39 +01:00
46 changed files with 999 additions and 2003 deletions


@ -103,11 +103,6 @@ artifacts:
- ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl - ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
- cp result/bin/conduit aarch64-unknown-linux-musl - cp result/bin/conduit aarch64-unknown-linux-musl
- mkdir -p target/aarch64-unknown-linux-musl/release
- cp result/bin/conduit target/aarch64-unknown-linux-musl/release
- direnv exec . cargo deb --no-strip --no-build --target aarch64-unknown-linux-musl
- mv target/aarch64-unknown-linux-musl/debian/*.deb aarch64-unknown-linux-musl.deb
- ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl - ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl
- cp result oci-image-arm64v8.tar.gz - cp result oci-image-arm64v8.tar.gz
@ -119,7 +114,6 @@ artifacts:
- x86_64-unknown-linux-musl - x86_64-unknown-linux-musl
- aarch64-unknown-linux-musl - aarch64-unknown-linux-musl
- x86_64-unknown-linux-musl.deb - x86_64-unknown-linux-musl.deb
- aarch64-unknown-linux-musl.deb
- oci-image-amd64.tar.gz - oci-image-amd64.tar.gz
- oci-image-arm64v8.tar.gz - oci-image-arm64v8.tar.gz
- public - public

Cargo.lock (generated)

@ -487,7 +487,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b"
[[package]] [[package]]
name = "conduit" name = "conduit"
version = "0.10.0-alpha" version = "0.8.0-alpha"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"axum 0.7.5", "axum 0.7.5",
@ -1158,15 +1158,6 @@ dependencies = [
"itoa", "itoa",
] ]
[[package]]
name = "http-auth"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "643c9bbf6a4ea8a656d6b4cd53d34f79e3f841ad5203c1a55fb7d761923bc255"
dependencies = [
"memchr",
]
[[package]] [[package]]
name = "http-body" name = "http-body"
version = "0.4.6" version = "0.4.6"
@ -1203,9 +1194,9 @@ dependencies = [
[[package]] [[package]]
name = "httparse" name = "httparse"
version = "1.9.4" version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904"
[[package]] [[package]]
name = "httpdate" name = "httpdate"
@ -2232,7 +2223,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma" name = "ruma"
version = "0.10.1" version = "0.10.1"
source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e"
dependencies = [ dependencies = [
"assign", "assign",
"js_int", "js_int",
@ -2253,7 +2244,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-appservice-api" name = "ruma-appservice-api"
version = "0.10.0" version = "0.10.0"
source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-common", "ruma-common",
@ -2265,7 +2256,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-client-api" name = "ruma-client-api"
version = "0.18.0" version = "0.18.0"
source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e"
dependencies = [ dependencies = [
"as_variant", "as_variant",
"assign", "assign",
@ -2288,7 +2279,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-common" name = "ruma-common"
version = "0.13.0" version = "0.13.0"
source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e"
dependencies = [ dependencies = [
"as_variant", "as_variant",
"base64 0.22.1", "base64 0.22.1",
@ -2318,7 +2309,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-events" name = "ruma-events"
version = "0.28.1" version = "0.28.1"
source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e"
dependencies = [ dependencies = [
"as_variant", "as_variant",
"indexmap 2.2.6", "indexmap 2.2.6",
@ -2334,22 +2325,15 @@ dependencies = [
"thiserror", "thiserror",
"tracing", "tracing",
"url", "url",
"web-time",
"wildmatch", "wildmatch",
] ]
[[package]] [[package]]
name = "ruma-federation-api" name = "ruma-federation-api"
version = "0.9.0" version = "0.9.0"
source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e"
dependencies = [ dependencies = [
"bytes",
"http 1.1.0",
"httparse",
"js_int", "js_int",
"memchr",
"mime",
"rand",
"ruma-common", "ruma-common",
"ruma-events", "ruma-events",
"serde", "serde",
@ -2359,7 +2343,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identifiers-validation" name = "ruma-identifiers-validation"
version = "0.9.5" version = "0.9.5"
source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e"
dependencies = [ dependencies = [
"js_int", "js_int",
"thiserror", "thiserror",
@ -2368,7 +2352,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identity-service-api" name = "ruma-identity-service-api"
version = "0.9.0" version = "0.9.0"
source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-common", "ruma-common",
@ -2378,9 +2362,8 @@ dependencies = [
[[package]] [[package]]
name = "ruma-macros" name = "ruma-macros"
version = "0.13.0" version = "0.13.0"
source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e"
dependencies = [ dependencies = [
"cfg-if",
"once_cell", "once_cell",
"proc-macro-crate", "proc-macro-crate",
"proc-macro2", "proc-macro2",
@ -2394,7 +2377,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-push-gateway-api" name = "ruma-push-gateway-api"
version = "0.9.0" version = "0.9.0"
source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-common", "ruma-common",
@ -2406,20 +2389,18 @@ dependencies = [
[[package]] [[package]]
name = "ruma-server-util" name = "ruma-server-util"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e"
dependencies = [ dependencies = [
"headers", "headers",
"http 1.1.0",
"http-auth",
"ruma-common", "ruma-common",
"thiserror",
"tracing", "tracing",
"yap",
] ]
[[package]] [[package]]
name = "ruma-signatures" name = "ruma-signatures"
version = "0.15.0" version = "0.15.0"
source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e"
dependencies = [ dependencies = [
"base64 0.22.1", "base64 0.22.1",
"ed25519-dalek", "ed25519-dalek",
@ -2435,7 +2416,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-state-res" name = "ruma-state-res"
version = "0.11.0" version = "0.11.0"
source = "git+https://github.com/ruma/ruma#c06af4385e0e30c48a8e9ca3d488da32102d0db9" source = "git+https://github.com/ruma/ruma#ef40b184b7410a93e933b4ad719a72aea1bdd20e"
dependencies = [ dependencies = [
"itertools", "itertools",
"js_int", "js_int",
@ -3776,6 +3757,12 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049"
[[package]]
name = "yap"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfe269e7b803a5e8e20cbd97860e136529cd83bf2c9c6d37b142467e7e1f051f"
[[package]] [[package]]
name = "zerocopy" name = "zerocopy"
version = "0.7.34" version = "0.7.34"


@ -16,10 +16,10 @@ license = "Apache-2.0"
name = "conduit" name = "conduit"
readme = "README.md" readme = "README.md"
repository = "https://gitlab.com/famedly/conduit" repository = "https://gitlab.com/famedly/conduit"
version = "0.10.0-alpha" version = "0.8.0-alpha"
# See also `rust-toolchain.toml` # See also `rust-toolchain.toml`
rust-version = "1.79.0" rust-version = "1.78.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html


@ -1,4 +1,4 @@
FROM rust:1.79.0 FROM rust:1.78.0
WORKDIR /workdir WORKDIR /workdir


@ -6,8 +6,6 @@
> **Note:** If you update the configuration file, you must restart Conduit for the changes to take effect > **Note:** If you update the configuration file, you must restart Conduit for the changes to take effect
> **Note:** You can also configure Conduit by using `CONDUIT_{field_name}` environment variables. To set values inside a table, use `CONDUIT_{table_name}__{field_name}`. Example: `CONDUIT_SERVER_NAME="example.org"`
Conduit's configuration file is divided into the following sections: Conduit's configuration file is divided into the following sections:
- [Global](#global) - [Global](#global)
@ -58,8 +56,7 @@ The `global` section contains the following fields:
| `turn_secret` | `string` | The TURN secret | `""` | | `turn_secret` | `string` | The TURN secret | `""` |
| `turn_ttl` | `integer` | The TURN TTL in seconds | `86400` | | `turn_ttl` | `integer` | The TURN TTL in seconds | `86400` |
| `emergency_password` | `string` | Set a password to login as the `conduit` user in case of emergency | N/A | | `emergency_password` | `string` | Set a password to login as the `conduit` user in case of emergency | N/A |
| `well_known_client` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) | | `well_known` | `table` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |
| `well_known_server` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |
### TLS ### TLS


@ -16,18 +16,18 @@ are connected to the server running Conduit using something like a VPN.
> **Note**: this will automatically allow you to use [sliding sync][0] without any extra configuration > **Note**: this will automatically allow you to use [sliding sync][0] without any extra configuration
To configure it, use the following options: To configure it, use the following options in the `global.well_known` table:
| Field | Type | Description | Default | | Field | Type | Description | Default |
| --- | --- | --- | --- | | --- | --- | --- | --- |
| `well_known_client` | `String` | The URL that clients should use to connect to Conduit | `https://<server_name>` | | `client` | `String` | The URL that clients should use to connect to Conduit | `https://<server_name>` |
| `well_known_server` | `String` | The hostname and port servers should use to connect to Conduit | `<server_name>:443` | | `server` | `String` | The hostname and port servers should use to connect to Conduit | `<server_name>:443` |
### Example ### Example
```toml ```toml
[global] [global.well_known]
well_known_client = "https://matrix.example.org" client = "https://matrix.example.org"
well_known_server = "matrix.example.org:443" server = "matrix.example.org:443"
``` ```
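
For reference, a minimal sketch of what the two delegation endpoints are expected to serve for the example configuration above, following the Matrix `.well-known` discovery format. This is only an illustration written with `serde_json` (an assumed dependency); it is not code from this repository.

```rust
use serde_json::json;

fn main() {
    // GET https://example.org/.well-known/matrix/client
    // -> points clients at the `client` URL configured above
    let client = json!({
        "m.homeserver": { "base_url": "https://matrix.example.org" }
    });

    // GET https://example.org/.well-known/matrix/server
    // -> points other homeservers at the `server` host:port configured above
    let server = json!({
        "m.server": "matrix.example.org:443"
    });

    println!("{client}\n{server}");
}
```
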
## Manual ## Manual


@ -64,7 +64,6 @@ docker run -d -p 8448:6167 \
-e CONDUIT_MAX_REQUEST_SIZE="20000000" \ -e CONDUIT_MAX_REQUEST_SIZE="20000000" \
-e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \ -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
-e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \ -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
-e CONDUIT_PORT="6167" \
--name conduit <link> --name conduit <link>
``` ```


@ -17,7 +17,6 @@ You may simply download the binary that fits your machine. Run `uname -m` to see
| Target | Type | Download | | Target | Type | Download |
|-|-|-| |-|-|-|
| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl.deb?job=artifacts) | | `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl.deb?job=artifacts) |
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl?job=artifacts) | | `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl?job=artifacts) | | `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl?job=artifacts) |
| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/oci-image-amd64.tar.gz?job=artifacts) | | `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/oci-image-amd64.tar.gz?job=artifacts) |
@ -31,7 +30,6 @@ If you use a system with an older glibc version (e.g. RHEL8), you might need to
| Target | Type | Download | | Target | Type | Download |
|-|-|-| |-|-|-|
| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl.deb?job=artifacts) | | `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl.deb?job=artifacts) |
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts) | | `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=artifacts) | | `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=artifacts) |
| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=artifacts) | | `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=artifacts) |


@ -2,7 +2,7 @@
## General instructions ## General instructions
* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/element-hq/synapse/blob/develop/docs/turn-howto.md). * It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md).
## Edit/Add a few settings to your existing conduit.toml ## Edit/Add a few settings to your existing conduit.toml


@ -59,7 +59,7 @@
file = ./rust-toolchain.toml; file = ./rust-toolchain.toml;
# See also `rust-toolchain.toml` # See also `rust-toolchain.toml`
sha256 = "sha256-Ngiz76YP4HTY75GGdH2P+APE/DEIx2R/Dn+BwwOyzZU="; sha256 = "sha256-opUgs6ckUQCyDxcB9Wy51pqhd0MPGHUVbwRKKPGiwZU=";
}; };
}); });
in in


@ -23,7 +23,7 @@ mkShell {
}; };
# Development tools # Development tools
nativeBuildInputs = [ nativeBuildInputs = default.nativeBuildInputs ++ [
# Always use nightly rustfmt because most of its options are unstable # Always use nightly rustfmt because most of its options are unstable
# #
# This needs to come before `toolchain` in this list, otherwise # This needs to come before `toolchain` in this list, otherwise
@ -57,5 +57,5 @@ mkShell {
# Useful for editing the book locally # Useful for editing the book locally
mdbook mdbook
] ++ default.nativeBuildInputs ; ];
} }


@ -2,6 +2,7 @@
# #
# Other files that need upkeep when this changes: # Other files that need upkeep when this changes:
# #
# * `.gitlab-ci.yml`
# * `Cargo.toml` # * `Cargo.toml`
# * `flake.nix` # * `flake.nix`
# #
@ -9,7 +10,7 @@
# If you're having trouble making the relevant changes, bug a maintainer. # If you're having trouble making the relevant changes, bug a maintainer.
[toolchain] [toolchain]
channel = "1.79.0" channel = "1.78.0"
components = [ components = [
# For rust-analyzer # For rust-analyzer
"rust-src", "rust-src",


@ -315,11 +315,7 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
pub async fn change_password_route( pub async fn change_password_route(
body: Ruma<change_password::v3::Request>, body: Ruma<change_password::v3::Request>,
) -> Result<change_password::v3::Response> { ) -> Result<change_password::v3::Response> {
let sender_user = body let sender_user = body.sender_user.as_ref().expect("user is authenticated");
.sender_user
.as_ref()
// In the future password changes could be performed with UIA with 3PIDs, but we don't support that currently
.ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
let sender_device = body.sender_device.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let mut uiaainfo = UiaaInfo { let mut uiaainfo = UiaaInfo {
@ -406,11 +402,7 @@ pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3:
pub async fn deactivate_route( pub async fn deactivate_route(
body: Ruma<deactivate::v3::Request>, body: Ruma<deactivate::v3::Request>,
) -> Result<deactivate::v3::Response> { ) -> Result<deactivate::v3::Response> {
let sender_user = body let sender_user = body.sender_user.as_ref().expect("user is authenticated");
.sender_user
.as_ref()
// In the future password changes could be performed with UIA with SSO, but we don't support that currently
.ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
let sender_device = body.sender_device.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let mut uiaainfo = UiaaInfo { let mut uiaainfo = UiaaInfo {


@ -18,8 +18,6 @@ use ruma::{
pub async fn create_alias_route( pub async fn create_alias_route(
body: Ruma<create_alias::v3::Request>, body: Ruma<create_alias::v3::Request>,
) -> Result<create_alias::v3::Response> { ) -> Result<create_alias::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if body.room_alias.server_name() != services().globals.server_name() { if body.room_alias.server_name() != services().globals.server_name() {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::InvalidParam, ErrorKind::InvalidParam,
@ -57,7 +55,7 @@ pub async fn create_alias_route(
services() services()
.rooms .rooms
.alias .alias
.set_alias(&body.room_alias, &body.room_id, sender_user)?; .set_alias(&body.room_alias, &body.room_id)?;
Ok(create_alias::v3::Response::new()) Ok(create_alias::v3::Response::new())
} }
@ -66,12 +64,11 @@ pub async fn create_alias_route(
/// ///
/// Deletes a room alias from this server. /// Deletes a room alias from this server.
/// ///
/// - TODO: additional access control checks
/// - TODO: Update canonical alias event /// - TODO: Update canonical alias event
pub async fn delete_alias_route( pub async fn delete_alias_route(
body: Ruma<delete_alias::v3::Request>, body: Ruma<delete_alias::v3::Request>,
) -> Result<delete_alias::v3::Response> { ) -> Result<delete_alias::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if body.room_alias.server_name() != services().globals.server_name() { if body.room_alias.server_name() != services().globals.server_name() {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::InvalidParam, ErrorKind::InvalidParam,
@ -97,10 +94,7 @@ pub async fn delete_alias_route(
)); ));
} }
services() services().rooms.alias.remove_alias(&body.room_alias)?;
.rooms
.alias
.remove_alias(&body.room_alias, sender_user)?;
// TODO: update alt_aliases? // TODO: update alt_aliases?


@ -1,24 +1,12 @@
// Unauthenticated media is deprecated
#![allow(deprecated)]
use std::time::Duration; use std::time::Duration;
use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma}; use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma};
use http::header::{CONTENT_DISPOSITION, CONTENT_TYPE}; use ruma::api::client::{
use ruma::{ error::ErrorKind,
api::{ media::{
client::{ create_content, get_content, get_content_as_filename, get_content_thumbnail,
authenticated_media::{ get_media_config,
get_content, get_content_as_filename, get_content_thumbnail, get_media_config,
},
error::ErrorKind,
media::{self, create_content},
},
federation::authenticated_media::{self as federation_media, FileOrLocation},
}, },
http_headers::{ContentDisposition, ContentDispositionType},
media::Method,
ServerName, UInt,
}; };
const MXC_LENGTH: usize = 32; const MXC_LENGTH: usize = 32;
@ -27,20 +15,9 @@ const MXC_LENGTH: usize = 32;
/// ///
/// Returns max upload size. /// Returns max upload size.
pub async fn get_media_config_route( pub async fn get_media_config_route(
_body: Ruma<media::get_media_config::v3::Request>, _body: Ruma<get_media_config::v3::Request>,
) -> Result<media::get_media_config::v3::Response> { ) -> Result<get_media_config::v3::Response> {
Ok(media::get_media_config::v3::Response { Ok(get_media_config::v3::Response {
upload_size: services().globals.max_request_size().into(),
})
}
/// # `GET /_matrix/client/v1/media/config`
///
/// Returns max upload size.
pub async fn get_media_config_auth_route(
_body: Ruma<get_media_config::v1::Request>,
) -> Result<get_media_config::v1::Response> {
Ok(get_media_config::v1::Response {
upload_size: services().globals.max_request_size().into(), upload_size: services().globals.max_request_size().into(),
}) })
} }
@ -64,10 +41,10 @@ pub async fn create_content_route(
.media .media
.create( .create(
mxc.clone(), mxc.clone(),
Some( body.filename
ContentDisposition::new(ContentDispositionType::Inline) .as_ref()
.with_filename(body.filename.clone()), .map(|filename| "inline; filename=".to_owned() + filename)
), .as_deref(),
body.content_type.as_deref(), body.content_type.as_deref(),
&body.file, &body.file,
) )
@ -81,67 +58,28 @@ pub async fn create_content_route(
pub async fn get_remote_content( pub async fn get_remote_content(
mxc: &str, mxc: &str,
server_name: &ServerName, server_name: &ruma::ServerName,
media_id: String, media_id: String,
) -> Result<get_content::v1::Response, Error> { ) -> Result<get_content::v3::Response, Error> {
let content_response = match services() let content_response = services()
.sending .sending
.send_federation_request( .send_federation_request(
server_name, server_name,
federation_media::get_content::v1::Request { get_content::v3::Request {
media_id: media_id.clone(), allow_remote: false,
server_name: server_name.to_owned(),
media_id,
timeout_ms: Duration::from_secs(20), timeout_ms: Duration::from_secs(20),
allow_redirect: false,
}, },
) )
.await .await?;
{
Ok(federation_media::get_content::v1::Response {
metadata: _,
content: FileOrLocation::File(content),
}) => get_content::v1::Response {
file: content.file,
content_type: content.content_type,
content_disposition: content.content_disposition,
},
Ok(federation_media::get_content::v1::Response {
metadata: _,
content: FileOrLocation::Location(url),
}) => get_location_content(url).await?,
Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => {
let media::get_content::v3::Response {
file,
content_type,
content_disposition,
..
} = services()
.sending
.send_federation_request(
server_name,
media::get_content::v3::Request {
server_name: server_name.to_owned(),
media_id,
timeout_ms: Duration::from_secs(20),
allow_remote: false,
allow_redirect: true,
},
)
.await?;
get_content::v1::Response {
file,
content_type,
content_disposition,
}
}
Err(e) => return Err(e),
};
services() services()
.media .media
.create( .create(
mxc.to_owned(), mxc.to_owned(),
content_response.content_disposition.clone(), content_response.content_disposition.as_deref(),
content_response.content_type.as_deref(), content_response.content_type.as_deref(),
&content_response.file, &content_response.file,
) )
@ -156,57 +94,31 @@ pub async fn get_remote_content(
/// ///
/// - Only allows federation if `allow_remote` is true /// - Only allows federation if `allow_remote` is true
pub async fn get_content_route( pub async fn get_content_route(
body: Ruma<media::get_content::v3::Request>, body: Ruma<get_content::v3::Request>,
) -> Result<media::get_content::v3::Response> { ) -> Result<get_content::v3::Response> {
let get_content::v1::Response { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
file,
content_disposition,
content_type,
} = get_content(&body.server_name, body.media_id.clone(), body.allow_remote).await?;
Ok(media::get_content::v3::Response { if let Some(FileMeta {
file,
content_type,
content_disposition,
cross_origin_resource_policy: Some("cross-origin".to_owned()),
})
}
/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}`
///
/// Load media from our server or over federation.
pub async fn get_content_auth_route(
body: Ruma<get_content::v1::Request>,
) -> Result<get_content::v1::Response> {
get_content(&body.server_name, body.media_id.clone(), true).await
}
async fn get_content(
server_name: &ServerName,
media_id: String,
allow_remote: bool,
) -> Result<get_content::v1::Response, Error> {
let mxc = format!("mxc://{}/{}", server_name, media_id);
if let Ok(Some(FileMeta {
content_disposition, content_disposition,
content_type, content_type,
file, file,
})) = services().media.get(mxc.clone()).await }) = services().media.get(mxc.clone()).await?
{ {
Ok(get_content::v1::Response { Ok(get_content::v3::Response {
file, file,
content_type, content_type,
content_disposition: Some(content_disposition), content_disposition,
cross_origin_resource_policy: Some("cross-origin".to_owned()),
}) })
} else if server_name != services().globals.server_name() && allow_remote { } else if &*body.server_name != services().globals.server_name() && body.allow_remote {
let remote_content_response = let remote_content_response =
get_remote_content(&mxc, server_name, media_id.clone()).await?; get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
Ok(get_content::v1::Response { Ok(get_content::v3::Response {
content_disposition: remote_content_response.content_disposition, content_disposition: remote_content_response.content_disposition,
content_type: remote_content_response.content_type, content_type: remote_content_response.content_type,
file: remote_content_response.file, file: remote_content_response.file,
cross_origin_resource_policy: Some("cross-origin".to_owned()),
}) })
} else { } else {
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
@ -219,74 +131,29 @@ async fn get_content(
/// ///
/// - Only allows federation if `allow_remote` is true /// - Only allows federation if `allow_remote` is true
pub async fn get_content_as_filename_route( pub async fn get_content_as_filename_route(
body: Ruma<media::get_content_as_filename::v3::Request>, body: Ruma<get_content_as_filename::v3::Request>,
) -> Result<media::get_content_as_filename::v3::Response> { ) -> Result<get_content_as_filename::v3::Response> {
let get_content_as_filename::v1::Response { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
file,
content_type,
content_disposition,
} = get_content_as_filename(
&body.server_name,
body.media_id.clone(),
body.filename.clone(),
body.allow_remote,
)
.await?;
Ok(media::get_content_as_filename::v3::Response { if let Some(FileMeta {
file,
content_type,
content_disposition,
cross_origin_resource_policy: Some("cross-origin".to_owned()),
})
}
/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}/{fileName}`
///
/// Load media from our server or over federation, permitting desired filename.
pub async fn get_content_as_filename_auth_route(
body: Ruma<get_content_as_filename::v1::Request>,
) -> Result<get_content_as_filename::v1::Response, Error> {
get_content_as_filename(
&body.server_name,
body.media_id.clone(),
body.filename.clone(),
true,
)
.await
}
async fn get_content_as_filename(
server_name: &ServerName,
media_id: String,
filename: String,
allow_remote: bool,
) -> Result<get_content_as_filename::v1::Response, Error> {
let mxc = format!("mxc://{}/{}", server_name, media_id);
if let Ok(Some(FileMeta {
file, content_type, .. file, content_type, ..
})) = services().media.get(mxc.clone()).await }) = services().media.get(mxc.clone()).await?
{ {
Ok(get_content_as_filename::v1::Response { Ok(get_content_as_filename::v3::Response {
file, file,
content_type, content_type,
content_disposition: Some( content_disposition: Some(format!("inline; filename={}", body.filename)),
ContentDisposition::new(ContentDispositionType::Inline) cross_origin_resource_policy: Some("cross-origin".to_owned()),
.with_filename(Some(filename.clone())),
),
}) })
} else if server_name != services().globals.server_name() && allow_remote { } else if &*body.server_name != services().globals.server_name() && body.allow_remote {
let remote_content_response = let remote_content_response =
get_remote_content(&mxc, server_name, media_id.clone()).await?; get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
Ok(get_content_as_filename::v1::Response { Ok(get_content_as_filename::v3::Response {
content_disposition: Some( content_disposition: Some(format!("inline: filename={}", body.filename)),
ContentDisposition::new(ContentDispositionType::Inline)
.with_filename(Some(filename.clone())),
),
content_type: remote_content_response.content_type, content_type: remote_content_response.content_type,
file: remote_content_response.file, file: remote_content_response.file,
cross_origin_resource_policy: Some("cross-origin".to_owned()),
}) })
} else { } else {
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
@ -299,169 +166,62 @@ async fn get_content_as_filename(
/// ///
/// - Only allows federation if `allow_remote` is true /// - Only allows federation if `allow_remote` is true
pub async fn get_content_thumbnail_route( pub async fn get_content_thumbnail_route(
body: Ruma<media::get_content_thumbnail::v3::Request>, body: Ruma<get_content_thumbnail::v3::Request>,
) -> Result<media::get_content_thumbnail::v3::Response> { ) -> Result<get_content_thumbnail::v3::Response> {
let get_content_thumbnail::v1::Response { file, content_type } = get_content_thumbnail( let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
&body.server_name,
body.media_id.clone(),
body.height,
body.width,
body.method.clone(),
body.animated,
body.allow_remote,
)
.await?;
Ok(media::get_content_thumbnail::v3::Response { if let Some(FileMeta {
file,
content_type,
cross_origin_resource_policy: Some("cross-origin".to_owned()),
})
}
/// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}`
///
/// Load media thumbnail from our server or over federation.
pub async fn get_content_thumbnail_auth_route(
body: Ruma<get_content_thumbnail::v1::Request>,
) -> Result<get_content_thumbnail::v1::Response> {
get_content_thumbnail(
&body.server_name,
body.media_id.clone(),
body.height,
body.width,
body.method.clone(),
body.animated,
true,
)
.await
}
async fn get_content_thumbnail(
server_name: &ServerName,
media_id: String,
height: UInt,
width: UInt,
method: Option<Method>,
animated: Option<bool>,
allow_remote: bool,
) -> Result<get_content_thumbnail::v1::Response, Error> {
let mxc = format!("mxc://{}/{}", server_name, media_id);
if let Ok(Some(FileMeta {
file, content_type, .. file, content_type, ..
})) = services() }) = services()
.media .media
.get_thumbnail( .get_thumbnail(
mxc.clone(), mxc.clone(),
width body.width
.try_into() .try_into()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
height body.height
.try_into() .try_into()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Height is invalid."))?, .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
) )
.await .await?
{ {
Ok(get_content_thumbnail::v1::Response { file, content_type }) Ok(get_content_thumbnail::v3::Response {
} else if server_name != services().globals.server_name() && allow_remote { file,
let thumbnail_response = match services() content_type,
cross_origin_resource_policy: Some("cross-origin".to_owned()),
})
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
let get_thumbnail_response = services()
.sending .sending
.send_federation_request( .send_federation_request(
server_name, &body.server_name,
federation_media::get_content_thumbnail::v1::Request { get_content_thumbnail::v3::Request {
height, allow_remote: false,
width, height: body.height,
method: method.clone(), width: body.width,
media_id: media_id.clone(), method: body.method.clone(),
server_name: body.server_name.clone(),
media_id: body.media_id.clone(),
timeout_ms: Duration::from_secs(20), timeout_ms: Duration::from_secs(20),
animated, allow_redirect: false,
}, },
) )
.await .await?;
{
Ok(federation_media::get_content_thumbnail::v1::Response {
metadata: _,
content: FileOrLocation::File(content),
}) => get_content_thumbnail::v1::Response {
file: content.file,
content_type: content.content_type,
},
Ok(federation_media::get_content_thumbnail::v1::Response {
metadata: _,
content: FileOrLocation::Location(url),
}) => {
let get_content::v1::Response {
file, content_type, ..
} = get_location_content(url).await?;
get_content_thumbnail::v1::Response { file, content_type }
}
Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => {
let media::get_content_thumbnail::v3::Response {
file, content_type, ..
} = services()
.sending
.send_federation_request(
server_name,
media::get_content_thumbnail::v3::Request {
height,
width,
method: method.clone(),
server_name: server_name.to_owned(),
media_id: media_id.clone(),
timeout_ms: Duration::from_secs(20),
allow_redirect: false,
animated,
allow_remote: false,
},
)
.await?;
get_content_thumbnail::v1::Response { file, content_type }
}
Err(e) => return Err(e),
};
services() services()
.media .media
.upload_thumbnail( .upload_thumbnail(
mxc, mxc,
thumbnail_response.content_type.as_deref(), None,
width.try_into().expect("all UInts are valid u32s"), get_thumbnail_response.content_type.as_deref(),
height.try_into().expect("all UInts are valid u32s"), body.width.try_into().expect("all UInts are valid u32s"),
&thumbnail_response.file, body.height.try_into().expect("all UInts are valid u32s"),
&get_thumbnail_response.file,
) )
.await?; .await?;
Ok(thumbnail_response) Ok(get_thumbnail_response)
} else { } else {
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
} }
} }
async fn get_location_content(url: String) -> Result<get_content::v1::Response, Error> {
let client = services().globals.default_client();
let response = client.get(url).send().await?;
let headers = response.headers();
let content_type = headers
.get(CONTENT_TYPE)
.and_then(|header| header.to_str().ok())
.map(ToOwned::to_owned);
let content_disposition = headers
.get(CONTENT_DISPOSITION)
.map(|header| header.as_bytes())
.map(TryFrom::try_from)
.and_then(Result::ok);
let file = response.bytes().await?.to_vec();
Ok(get_content::v1::Response {
file,
content_type,
content_disposition,
})
}


@ -18,8 +18,9 @@ use ruma::{
}, },
StateEventType, TimelineEventType, StateEventType, TimelineEventType,
}, },
state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, serde::Base64,
OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId, state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId,
}; };
use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
use std::{ use std::{
@ -31,10 +32,7 @@ use tokio::sync::RwLock;
use tracing::{debug, error, info, warn}; use tracing::{debug, error, info, warn};
use crate::{ use crate::{
service::{ service::pdu::{gen_event_id_canonical_json, PduBuilder},
globals::SigningKeys,
pdu::{gen_event_id_canonical_json, PduBuilder},
},
services, utils, Error, PduEvent, Result, Ruma, services, utils, Error, PduEvent, Result, Ruma,
}; };
@ -97,7 +95,7 @@ pub async fn join_room_by_id_or_alias_route(
let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) {
Ok(room_id) => { Ok(room_id) => {
let mut servers = body.via.clone(); let mut servers = body.server_name.clone();
servers.extend( servers.extend(
services() services()
.rooms .rooms
@ -241,7 +239,6 @@ pub async fn kick_user_route(
unsigned: None, unsigned: None,
state_key: Some(body.user_id.to_string()), state_key: Some(body.user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&body.room_id, &body.room_id,
@ -314,7 +311,6 @@ pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_use
unsigned: None, unsigned: None,
state_key: Some(body.user_id.to_string()), state_key: Some(body.user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&body.room_id, &body.room_id,
@ -388,7 +384,6 @@ pub async fn unban_user_route(
unsigned: None, unsigned: None,
state_key: Some(body.user_id.to_string()), state_key: Some(body.user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&body.room_id, &body.room_id,
@ -627,7 +622,7 @@ async fn join_room_by_id_helper(
let event_id = format!( let event_id = format!(
"${}", "${}",
ruma::signatures::reference_hash(&join_event_stub, &room_version_id) ruma::signatures::reference_hash(&join_event_stub, &room_version_id)
.expect("Event format validated when event was hashed") .expect("ruma can calculate reference hashes")
); );
let event_id = <&EventId>::try_from(event_id.as_str()) let event_id = <&EventId>::try_from(event_id.as_str())
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
@ -941,7 +936,6 @@ async fn join_room_by_id_helper(
unsigned: None, unsigned: None,
state_key: Some(sender_user.to_string()), state_key: Some(sender_user.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
room_id, room_id,
@ -1136,7 +1130,7 @@ async fn make_join_request(
async fn validate_and_add_event_id( async fn validate_and_add_event_id(
pdu: &RawJsonValue, pdu: &RawJsonValue,
room_version: &RoomVersionId, room_version: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<(OwnedEventId, CanonicalJsonObject)> { ) -> Result<(OwnedEventId, CanonicalJsonObject)> {
let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);
@ -1145,7 +1139,7 @@ async fn validate_and_add_event_id(
let event_id = EventId::parse(format!( let event_id = EventId::parse(format!(
"${}", "${}",
ruma::signatures::reference_hash(&value, room_version) ruma::signatures::reference_hash(&value, room_version)
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))? .expect("ruma can calculate reference hashes")
)) ))
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
@ -1183,35 +1177,8 @@ async fn validate_and_add_event_id(
} }
} }
let origin_server_ts = value.get("origin_server_ts").ok_or_else(|| { if let Err(e) = ruma::signatures::verify_event(&*pub_key_map.read().await, &value, room_version)
error!("Invalid PDU, no origin_server_ts field"); {
Error::BadRequest(
ErrorKind::MissingParam,
"Invalid PDU, no origin_server_ts field",
)
})?;
let origin_server_ts: MilliSecondsSinceUnixEpoch = {
let ts = origin_server_ts.as_integer().ok_or_else(|| {
Error::BadRequest(
ErrorKind::InvalidParam,
"origin_server_ts must be an integer",
)
})?;
MilliSecondsSinceUnixEpoch(i64::from(ts).try_into().map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Time must be after the unix epoch")
})?)
};
let unfiltered_keys = (*pub_key_map.read().await).clone();
let keys =
services()
.globals
.filter_keys_server_map(unfiltered_keys, origin_server_ts, room_version);
if let Err(e) = ruma::signatures::verify_event(&keys, &value, room_version) {
warn!("Event {} failed verification {:?} {}", event_id, pdu, e); warn!("Event {} failed verification {:?} {}", event_id, pdu, e);
back_off(event_id).await; back_off(event_id).await;
return Err(Error::BadServerResponse("Event failed verification.")); return Err(Error::BadServerResponse("Event failed verification."));
@ -1264,7 +1231,6 @@ pub(crate) async fn invite_helper<'a>(
unsigned: None, unsigned: None,
state_key: Some(user_id.to_string()), state_key: Some(user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
room_id, room_id,
@ -1384,7 +1350,6 @@ pub(crate) async fn invite_helper<'a>(
unsigned: None, unsigned: None,
state_key: Some(user_id.to_string()), state_key: Some(user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
room_id, room_id,
@ -1512,7 +1477,6 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
unsigned: None, unsigned: None,
state_key: Some(user_id.to_string()), state_key: Some(user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
user_id, user_id,
room_id, room_id,
@ -1614,7 +1578,7 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> {
let event_id = EventId::parse(format!( let event_id = EventId::parse(format!(
"${}", "${}",
ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) ruma::signatures::reference_hash(&leave_event_stub, &room_version_id)
.expect("Event format validated when event was hashed") .expect("ruma can calculate reference hashes")
)) ))
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");


@ -84,11 +84,6 @@ pub async fn send_message_event_route(
unsigned: Some(unsigned), unsigned: Some(unsigned),
state_key: None, state_key: None,
redacts: None, redacts: None,
timestamp: if body.appservice_info.is_some() {
body.timestamp
} else {
None
},
}, },
sender_user, sender_user,
&body.room_id, &body.room_id,


@ -65,7 +65,6 @@ pub async fn set_displayname_route(
unsigned: None, unsigned: None,
state_key: Some(sender_user.to_string()), state_key: Some(sender_user.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
room_id, room_id,
)) ))
@ -201,7 +200,6 @@ pub async fn set_avatar_url_route(
unsigned: None, unsigned: None,
state_key: Some(sender_user.to_string()), state_key: Some(sender_user.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
room_id, room_id,
)) ))


@ -44,7 +44,6 @@ pub async fn redact_event_route(
unsigned: None, unsigned: None,
state_key: None, state_key: None,
redacts: Some(body.event_id.into()), redacts: Some(body.event_id.into()),
timestamp: None,
}, },
sender_user, sender_user,
&body.room_id, &body.room_id,


@ -230,7 +230,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -259,7 +258,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some(sender_user.to_string()), state_key: Some(sender_user.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -313,7 +311,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -337,7 +334,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -364,7 +360,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -386,7 +381,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -409,7 +403,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -454,7 +447,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -477,7 +469,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -494,10 +485,7 @@ pub async fn create_room_route(
// Homeserver specific stuff // Homeserver specific stuff
if let Some(alias) = alias { if let Some(alias) = alias {
services() services().rooms.alias.set_alias(&alias, &room_id)?;
.rooms
.alias
.set_alias(&alias, &room_id, sender_user)?;
} }
if body.visibility == room::Visibility::Public { if body.visibility == room::Visibility::Public {
@ -638,7 +626,6 @@ pub async fn upgrade_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&body.room_id, &body.room_id,
@ -740,7 +727,6 @@ pub async fn upgrade_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&replacement_room, &replacement_room,
@ -769,7 +755,6 @@ pub async fn upgrade_room_route(
unsigned: None, unsigned: None,
state_key: Some(sender_user.to_string()), state_key: Some(sender_user.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&replacement_room, &replacement_room,
@ -812,7 +797,6 @@ pub async fn upgrade_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&replacement_room, &replacement_room,
@ -831,7 +815,7 @@ pub async fn upgrade_room_route(
services() services()
.rooms .rooms
.alias .alias
.set_alias(&alias, &replacement_room, sender_user)?; .set_alias(&alias, &replacement_room)?;
} }
// Get the old room power levels // Get the old room power levels
@ -863,7 +847,6 @@ pub async fn upgrade_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&body.room_id, &body.room_id,


@ -89,12 +89,11 @@ pub async fn search_events_route(
.get_pdu_from_id(result) .get_pdu_from_id(result)
.ok()? .ok()?
.filter(|pdu| { .filter(|pdu| {
!pdu.is_redacted() services()
&& services() .rooms
.rooms .state_accessor
.state_accessor .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id)
.user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id) .unwrap_or(false)
.unwrap_or(false)
}) })
.map(|pdu| pdu.to_room_event()) .map(|pdu| pdu.to_room_event())
}) })


@ -10,7 +10,7 @@ use ruma::{
room::canonical_alias::RoomCanonicalAliasEventContent, AnyStateEventContent, StateEventType, room::canonical_alias::RoomCanonicalAliasEventContent, AnyStateEventContent, StateEventType,
}, },
serde::Raw, serde::Raw,
EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId, EventId, RoomId, UserId,
}; };
use tracing::log::warn; use tracing::log::warn;
@ -32,11 +32,6 @@ pub async fn send_state_event_for_key_route(
&body.event_type, &body.event_type,
&body.body.body, // Yes, I hate it too &body.body.body, // Yes, I hate it too
body.state_key.to_owned(), body.state_key.to_owned(),
if body.appservice_info.is_some() {
body.timestamp
} else {
None
},
) )
.await?; .await?;
@ -70,11 +65,6 @@ pub async fn send_state_event_for_empty_key_route(
&body.event_type.to_string().into(), &body.event_type.to_string().into(),
&body.body.body, &body.body.body,
body.state_key.to_owned(), body.state_key.to_owned(),
if body.appservice_info.is_some() {
body.timestamp
} else {
None
},
) )
.await?; .await?;
@ -200,7 +190,6 @@ async fn send_state_event_for_key_helper(
event_type: &StateEventType, event_type: &StateEventType,
json: &Raw<AnyStateEventContent>, json: &Raw<AnyStateEventContent>,
state_key: String, state_key: String,
timestamp: Option<MilliSecondsSinceUnixEpoch>,
) -> Result<Arc<EventId>> { ) -> Result<Arc<EventId>> {
let sender_user = sender; let sender_user = sender;
@ -254,7 +243,6 @@ async fn send_state_event_for_key_helper(
unsigned: None, unsigned: None,
state_key: Some(state_key), state_key: Some(state_key),
redacts: None, redacts: None,
timestamp,
}, },
sender_user, sender_user,
room_id, room_id,


@ -27,10 +27,7 @@ pub async fn get_supported_versions_route(
"v1.4".to_owned(), "v1.4".to_owned(),
"v1.5".to_owned(), "v1.5".to_owned(),
], ],
unstable_features: BTreeMap::from_iter([ unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
("org.matrix.e2e_cross_signing".to_owned(), true),
("org.matrix.msc3916.stable".to_owned(), true),
]),
}; };
Ok(resp) Ok(resp)


@ -7,17 +7,14 @@ use axum::{
response::{IntoResponse, Response}, response::{IntoResponse, Response},
RequestExt, RequestPartsExt, RequestExt, RequestPartsExt,
}; };
use axum_extra::{ use axum_extra::headers::authorization::Bearer;
headers::{authorization::Bearer, Authorization}, use axum_extra::{headers::Authorization, typed_header::TypedHeaderRejectionReason, TypedHeader};
typed_header::TypedHeaderRejectionReason,
TypedHeader,
};
use bytes::{BufMut, BytesMut}; use bytes::{BufMut, BytesMut};
use http::{Request, StatusCode}; use http::{Request, StatusCode};
use ruma::{ use ruma::{
api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
server_util::authorization::XMatrix, server_util::authorization::XMatrix,
CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedUserId, UserId, CanonicalJsonValue, OwnedDeviceId, OwnedUserId, UserId,
}; };
use serde::Deserialize; use serde::Deserialize;
use tracing::{debug, error, warn}; use tracing::{debug, error, warn};
@ -189,7 +186,7 @@ where
let origin_signatures = BTreeMap::from_iter([( let origin_signatures = BTreeMap::from_iter([(
x_matrix.key.clone(), x_matrix.key.clone(),
CanonicalJsonValue::String(x_matrix.sig.to_string()), CanonicalJsonValue::String(x_matrix.sig),
)]); )]);
let signatures = BTreeMap::from_iter([( let signatures = BTreeMap::from_iter([(
@ -234,7 +231,7 @@ where
let keys_result = services() let keys_result = services()
.rooms .rooms
.event_handler .event_handler
.fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_string()], false) .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_string()])
.await; .await;
let keys = match keys_result { let keys = match keys_result {
@ -248,19 +245,8 @@ where
} }
}; };
// Only verify_keys that are currently valid should be used for validating requests let pub_key_map =
// as per MSC4029 BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]);
let pub_key_map = BTreeMap::from_iter([(
x_matrix.origin.as_str().to_owned(),
if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() {
keys.verify_keys
.into_iter()
.map(|(id, key)| (id, key.key))
.collect()
} else {
BTreeMap::new()
},
)]);
match ruma::signatures::verify_json(&pub_key_map, &request_map) { match ruma::signatures::verify_json(&pub_key_map, &request_map) {
Ok(()) => (None, None, Some(x_matrix.origin), None), Ok(()) => (None, None, Some(x_matrix.origin), None),
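
The `verify_json` call above checks the signature from the `Authorization: X-Matrix` header against a canonical JSON form of the request. A rough, self-contained sketch of that object's shape is below; every value in it is a made-up placeholder, and the real map is assembled from the incoming request as shown in the hunk.

```rust
use serde_json::json;

fn main() {
    // Approximate shape of the canonical JSON that a federation request signature
    // covers: method, uri, origin, destination, body (if any), and the signature
    // taken from the X-Matrix header. All values below are dummies.
    let request_map = json!({
        "method": "PUT",
        "uri": "/_matrix/federation/v2/send_leave/!room:remote.example/$eventid",
        "origin": "sender.example",
        "destination": "remote.example",
        "content": { "type": "m.room.member" },
        "signatures": {
            "sender.example": {
                "ed25519:key1": "<base64 signature from the X-Matrix header>"
            }
        }
    });
    println!("{request_map:#}");
}
```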

File diff suppressed because it is too large


@ -59,7 +59,7 @@ pub struct Config {
pub allow_unstable_room_versions: bool, pub allow_unstable_room_versions: bool,
#[serde(default = "default_default_room_version")] #[serde(default = "default_default_room_version")]
pub default_room_version: RoomVersionId, pub default_room_version: RoomVersionId,
#[serde(default, flatten)] #[serde(default)]
pub well_known: WellKnownConfig, pub well_known: WellKnownConfig,
#[serde(default = "false_fn")] #[serde(default = "false_fn")]
pub allow_jaeger: bool, pub allow_jaeger: bool,
@ -97,9 +97,7 @@ pub struct TlsConfig {
#[derive(Clone, Debug, Deserialize, Default)] #[derive(Clone, Debug, Deserialize, Default)]
pub struct WellKnownConfig { pub struct WellKnownConfig {
#[serde(rename = "well_known_client")]
pub client: Option<Url>, pub client: Option<Url>,
#[serde(rename = "well_known_server")]
pub server: Option<OwnedServerName>, pub server: Option<OwnedServerName>,
} }
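
To make the `well_known` configuration change visible across several of the hunks above concrete, here is a small standalone sketch contrasting the nested `[global.well_known]` table read on this branch with the flattened `well_known_client`/`well_known_server` keys on the other side of the diff. The struct names, the simplified `String` field types, and the `toml`/`serde` dependencies are assumptions for illustration only; this is not Conduit's actual config module.

```rust
use serde::Deserialize;

#[derive(Debug, Default, Deserialize)]
struct WellKnownConfig {
    client: Option<String>,
    server: Option<String>,
}

// Nested form used on this branch: `#[serde(default)] pub well_known: WellKnownConfig`
// reads a `[global.well_known]` table with `client`/`server` keys.
#[derive(Debug, Deserialize)]
struct NestedGlobal {
    #[serde(default)]
    well_known: WellKnownConfig,
}

// Flattened form on the other side of the diff: `#[serde(default, flatten)]` plus
// per-field renames keeps the top-level `well_known_client`/`well_known_server` keys.
#[derive(Debug, Default, Deserialize)]
struct FlatWellKnown {
    #[serde(rename = "well_known_client")]
    client: Option<String>,
    #[serde(rename = "well_known_server")]
    server: Option<String>,
}

#[derive(Debug, Deserialize)]
struct FlatGlobal {
    #[serde(default, flatten)]
    well_known: FlatWellKnown,
}

fn main() {
    // The `[global]` level is omitted here; these strings stand in for the body of
    // the `[global]` section of conduit.toml.
    let nested: NestedGlobal =
        toml::from_str("[well_known]\nclient = \"https://matrix.example.org\"").unwrap();
    let flat: FlatGlobal =
        toml::from_str("well_known_client = \"https://matrix.example.org\"").unwrap();

    assert_eq!(nested.well_known.client, flat.well_known.client);
    println!("{:?}", nested.well_known.client);
}
```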


@ -1,19 +1,15 @@
use std::collections::HashMap; use std::collections::{BTreeMap, HashMap};
use async_trait::async_trait; use async_trait::async_trait;
use futures_util::{stream::FuturesUnordered, StreamExt}; use futures_util::{stream::FuturesUnordered, StreamExt};
use lru_cache::LruCache; use lru_cache::LruCache;
use ruma::{ use ruma::{
api::federation::discovery::{OldVerifyKey, ServerSigningKeys}, api::federation::discovery::{ServerSigningKeys, VerifyKey},
signatures::Ed25519KeyPair, signatures::Ed25519KeyPair,
DeviceId, ServerName, UserId, DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId,
}; };
use crate::{ use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
database::KeyValueDatabase,
service::{self, globals::SigningKeys},
services, utils, Error, Result,
};
pub const COUNTER: &[u8] = b"c"; pub const COUNTER: &[u8] = b"c";
pub const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u"; pub const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u";
@ -241,97 +237,64 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
self.global.remove(b"keypair") self.global.remove(b"keypair")
} }
fn add_signing_key_from_trusted_server( fn add_signing_key(
&self, &self,
origin: &ServerName, origin: &ServerName,
new_keys: ServerSigningKeys, new_keys: ServerSigningKeys,
) -> Result<SigningKeys> { ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
let prev_keys = self.server_signingkeys.get(origin.as_bytes())?; // Not atomic, but this is not critical
let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;
Ok( let mut keys = signingkeys
if let Some(mut prev_keys) = .and_then(|keys| serde_json::from_slice(&keys).ok())
prev_keys.and_then(|keys| serde_json::from_slice::<ServerSigningKeys>(&keys).ok()) .unwrap_or_else(|| {
{ // Just insert "now", it doesn't matter
let ServerSigningKeys { ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now())
verify_keys, });
old_verify_keys,
..
} = new_keys;
prev_keys.verify_keys.extend(verify_keys); let ServerSigningKeys {
prev_keys.old_verify_keys.extend(old_verify_keys); verify_keys,
prev_keys.valid_until_ts = new_keys.valid_until_ts; old_verify_keys,
..
} = new_keys;
self.server_signingkeys.insert( keys.verify_keys.extend(verify_keys);
origin.as_bytes(), keys.old_verify_keys.extend(old_verify_keys);
&serde_json::to_vec(&prev_keys).expect("serversigningkeys can be serialized"),
)?;
prev_keys.into() self.server_signingkeys.insert(
} else { origin.as_bytes(),
self.server_signingkeys.insert( &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"),
origin.as_bytes(), )?;
&serde_json::to_vec(&new_keys).expect("serversigningkeys can be serialized"),
)?;
new_keys.into() let mut tree = keys.verify_keys;
}, tree.extend(
) keys.old_verify_keys
} .into_iter()
.map(|old| (old.0, VerifyKey::new(old.1.key))),
);
fn add_signing_key_from_origin( Ok(tree)
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
) -> Result<SigningKeys> {
let prev_keys = self.server_signingkeys.get(origin.as_bytes())?;
Ok(
if let Some(mut prev_keys) =
prev_keys.and_then(|keys| serde_json::from_slice::<ServerSigningKeys>(&keys).ok())
{
let ServerSigningKeys {
verify_keys,
old_verify_keys,
..
} = new_keys;
// Moving `verify_keys` no longer present to `old_verify_keys`
for (key_id, key) in prev_keys.verify_keys {
if !verify_keys.contains_key(&key_id) {
prev_keys
.old_verify_keys
.insert(key_id, OldVerifyKey::new(prev_keys.valid_until_ts, key.key));
}
}
prev_keys.verify_keys = verify_keys;
prev_keys.old_verify_keys.extend(old_verify_keys);
prev_keys.valid_until_ts = new_keys.valid_until_ts;
self.server_signingkeys.insert(
origin.as_bytes(),
&serde_json::to_vec(&prev_keys).expect("serversigningkeys can be serialized"),
)?;
prev_keys.into()
} else {
self.server_signingkeys.insert(
origin.as_bytes(),
&serde_json::to_vec(&new_keys).expect("serversigningkeys can be serialized"),
)?;
new_keys.into()
},
)
} }
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>> { fn signing_keys_for(
&self,
origin: &ServerName,
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
let signingkeys = self let signingkeys = self
.server_signingkeys .server_signingkeys
.get(origin.as_bytes())? .get(origin.as_bytes())?
.and_then(|bytes| serde_json::from_slice::<SigningKeys>(&bytes).ok()); .and_then(|bytes| serde_json::from_slice(&bytes).ok())
.map(|keys: ServerSigningKeys| {
let mut tree = keys.verify_keys;
tree.extend(
keys.old_verify_keys
.into_iter()
.map(|old| (old.0, VerifyKey::new(old.1.key))),
);
tree
})
.unwrap_or_else(BTreeMap::new);
Ok(signingkeys) Ok(signingkeys)
} }
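
The add_signing_key_from_origin path in the left column retires previously active keys that the origin no longer advertises by moving them into old_verify_keys, stamped with the old valid_until_ts. A std-only sketch of that merge, with simplified stand-in types rather than ruma's ServerSigningKeys:

use std::collections::BTreeMap;

#[derive(Debug)]
struct StoredKeys {
    verify_keys: BTreeMap<String, String>, // key id -> base64-encoded public key
    old_verify_keys: BTreeMap<String, (u64, String)>, // key id -> (expired_ts, key)
    valid_until_ts: u64,
}

fn merge_from_origin(
    mut prev: StoredKeys,
    new_verify_keys: BTreeMap<String, String>,
    new_valid_until_ts: u64,
) -> StoredKeys {
    // Retire every previously active key the origin no longer advertises.
    for (key_id, key) in std::mem::take(&mut prev.verify_keys) {
        if !new_verify_keys.contains_key(&key_id) {
            prev.old_verify_keys
                .insert(key_id, (prev.valid_until_ts, key));
        }
    }
    prev.verify_keys = new_verify_keys;
    prev.valid_until_ts = new_valid_until_ts;
    prev
}

fn main() {
    let prev = StoredKeys {
        verify_keys: BTreeMap::from_iter([
            ("ed25519:old".to_owned(), "AAAA".to_owned()),
            ("ed25519:current".to_owned(), "BBBB".to_owned()),
        ]),
        old_verify_keys: BTreeMap::new(),
        valid_until_ts: 1_700_000_000_000,
    };
    let fresh = BTreeMap::from_iter([("ed25519:current".to_owned(), "BBBB".to_owned())]);
    let merged = merge_from_origin(prev, fresh, 1_700_600_000_000);
    assert!(merged.old_verify_keys.contains_key("ed25519:old"));
    println!("{merged:?}");
}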

View file

@ -1,4 +1,4 @@
use ruma::{api::client::error::ErrorKind, http_headers::ContentDisposition}; use ruma::api::client::error::ErrorKind;
use crate::{database::KeyValueDatabase, service, utils, Error, Result}; use crate::{database::KeyValueDatabase, service, utils, Error, Result};
@ -8,7 +8,7 @@ impl service::media::Data for KeyValueDatabase {
mxc: String, mxc: String,
width: u32, width: u32,
height: u32, height: u32,
content_disposition: &ContentDisposition, content_disposition: Option<&str>,
content_type: Option<&str>, content_type: Option<&str>,
) -> Result<Vec<u8>> { ) -> Result<Vec<u8>> {
let mut key = mxc.as_bytes().to_vec(); let mut key = mxc.as_bytes().to_vec();
@ -16,7 +16,12 @@ impl service::media::Data for KeyValueDatabase {
key.extend_from_slice(&width.to_be_bytes()); key.extend_from_slice(&width.to_be_bytes());
key.extend_from_slice(&height.to_be_bytes()); key.extend_from_slice(&height.to_be_bytes());
key.push(0xff); key.push(0xff);
key.extend_from_slice(content_disposition.to_string().as_bytes()); key.extend_from_slice(
content_disposition
.as_ref()
.map(|f| f.as_bytes())
.unwrap_or_default(),
);
key.push(0xff); key.push(0xff);
key.extend_from_slice( key.extend_from_slice(
content_type content_type
@ -35,7 +40,7 @@ impl service::media::Data for KeyValueDatabase {
mxc: String, mxc: String,
width: u32, width: u32,
height: u32, height: u32,
) -> Result<(ContentDisposition, Option<String>, Vec<u8>)> { ) -> Result<(Option<String>, Option<String>, Vec<u8>)> {
let mut prefix = mxc.as_bytes().to_vec(); let mut prefix = mxc.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
prefix.extend_from_slice(&width.to_be_bytes()); prefix.extend_from_slice(&width.to_be_bytes());
@ -63,9 +68,15 @@ impl service::media::Data for KeyValueDatabase {
.next() .next()
.ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;
let content_disposition = content_disposition_bytes.try_into().unwrap_or_else(|_| { let content_disposition = if content_disposition_bytes.is_empty() {
ContentDisposition::new(ruma::http_headers::ContentDispositionType::Inline) None
}); } else {
Some(
utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
Error::bad_database("Content Disposition in mediaid_file is invalid unicode.")
})?,
)
};
Ok((content_disposition, content_type, key)) Ok((content_disposition, content_type, key))
} }
} }
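
Both columns store media metadata inside the database key itself, as 0xff-separated segments (mxc, dimensions, content disposition, content type) that the lookup code later recovers by splitting from the end. A std-only sketch of that layout; the helper names and sample values are illustrative, not Conduit's API:

fn make_media_key(
    mxc: &str,
    width: u32,
    height: u32,
    content_disposition: &str,
    content_type: Option<&str>,
) -> Vec<u8> {
    let mut key = mxc.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(&width.to_be_bytes());
    key.extend_from_slice(&height.to_be_bytes());
    key.push(0xff);
    key.extend_from_slice(content_disposition.as_bytes());
    key.push(0xff);
    key.extend_from_slice(content_type.map(|t| t.as_bytes()).unwrap_or_default());
    key
}

// Recover the trailing fields the same way the lookup code does: split from the
// end on 0xff, so the content type comes first and the disposition second.
fn parse_trailing_fields(key: &[u8]) -> Option<(String, String)> {
    let mut parts = key.rsplit(|&b| b == 0xff);
    let content_type = String::from_utf8(parts.next()?.to_vec()).ok()?;
    let content_disposition = String::from_utf8(parts.next()?.to_vec()).ok()?;
    Some((content_disposition, content_type))
}

fn main() {
    let key = make_media_key(
        "mxc://example.org/abc123",
        0,
        0,
        "inline; filename=\"cat.png\"",
        Some("image/png"),
    );
    println!("{:?}", parse_trailing_fields(&key));
}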

View file

@ -1,15 +1,9 @@
use ruma::{ use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId,
UserId,
};
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
impl service::rooms::alias::Data for KeyValueDatabase { impl service::rooms::alias::Data for KeyValueDatabase {
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> { fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> {
// Comes first as we don't want a stuck alias
self.alias_userid
.insert(alias.alias().as_bytes(), user_id.as_bytes())?;
self.alias_roomid self.alias_roomid
.insert(alias.alias().as_bytes(), room_id.as_bytes())?; .insert(alias.alias().as_bytes(), room_id.as_bytes())?;
let mut aliasid = room_id.as_bytes().to_vec(); let mut aliasid = room_id.as_bytes().to_vec();
@ -28,13 +22,13 @@ impl service::rooms::alias::Data for KeyValueDatabase {
self.aliasid_alias.remove(&key)?; self.aliasid_alias.remove(&key)?;
} }
self.alias_roomid.remove(alias.alias().as_bytes())?; self.alias_roomid.remove(alias.alias().as_bytes())?;
self.alias_userid.remove(alias.alias().as_bytes())
} else { } else {
Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::NotFound, ErrorKind::NotFound,
"Alias does not exist.", "Alias does not exist.",
)) ));
} }
Ok(())
} }
fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedRoomId>> { fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedRoomId>> {
@ -63,16 +57,4 @@ impl service::rooms::alias::Data for KeyValueDatabase {
.map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias."))
})) }))
} }
fn who_created_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedUserId>> {
self.alias_userid
.get(alias.alias().as_bytes())?
.map(|bytes| {
UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("User ID in alias_userid is invalid unicode.")
})?)
.map_err(|_| Error::bad_database("User ID in alias_roomid is invalid."))
})
.transpose()
}
} }

View file

@ -2,46 +2,24 @@ use ruma::RoomId;
use crate::{database::KeyValueDatabase, service, services, utils, Result}; use crate::{database::KeyValueDatabase, service, services, utils, Result};
/// Splits a string into tokens used as keys in the search inverted index
///
/// This may be used to tokenize both message bodies (for indexing) or search
/// queries (for querying).
fn tokenize(body: &str) -> impl Iterator<Item = String> + '_ {
body.split_terminator(|c: char| !c.is_alphanumeric())
.filter(|s| !s.is_empty())
.filter(|word| word.len() <= 50)
.map(str::to_lowercase)
}
impl service::rooms::search::Data for KeyValueDatabase { impl service::rooms::search::Data for KeyValueDatabase {
fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
let mut batch = tokenize(message_body).map(|word| { let mut batch = message_body
let mut key = shortroomid.to_be_bytes().to_vec(); .split_terminator(|c: char| !c.is_alphanumeric())
key.extend_from_slice(word.as_bytes()); .filter(|s| !s.is_empty())
key.push(0xff); .filter(|word| word.len() <= 50)
key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here .map(str::to_lowercase)
(key, Vec::new()) .map(|word| {
}); let mut key = shortroomid.to_be_bytes().to_vec();
key.extend_from_slice(word.as_bytes());
key.push(0xff);
key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here
(key, Vec::new())
});
self.tokenids.insert_batch(&mut batch) self.tokenids.insert_batch(&mut batch)
} }
fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
let batch = tokenize(message_body).map(|word| {
let mut key = shortroomid.to_be_bytes().to_vec();
key.extend_from_slice(word.as_bytes());
key.push(0xFF);
key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here
key
});
for token in batch {
self.tokenids.remove(&token)?;
}
Ok(())
}
fn search_pdus<'a>( fn search_pdus<'a>(
&'a self, &'a self,
room_id: &RoomId, room_id: &RoomId,
@ -55,7 +33,11 @@ impl service::rooms::search::Data for KeyValueDatabase {
.to_be_bytes() .to_be_bytes()
.to_vec(); .to_vec();
let words: Vec<_> = tokenize(search_string).collect(); let words: Vec<_> = search_string
.split_terminator(|c: char| !c.is_alphanumeric())
.filter(|s| !s.is_empty())
.map(str::to_lowercase)
.collect();
let iterators = words.clone().into_iter().map(move |word| { let iterators = words.clone().into_iter().map(move |word| {
let mut prefix2 = prefix.clone(); let mut prefix2 = prefix.clone();
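
The tokenize helper factored out in the left column is what keeps indexing and querying consistent: split on non-alphanumeric characters, drop empty and over-long words, lowercase the rest. Reproduced here std-only so it can be run against a sample body:

fn tokenize(body: &str) -> impl Iterator<Item = String> + '_ {
    body.split_terminator(|c: char| !c.is_alphanumeric())
        .filter(|s| !s.is_empty())
        .filter(|word| word.len() <= 50)
        .map(str::to_lowercase)
}

fn main() {
    let body = "Hello, Matrix world! Hello again.";
    let tokens: Vec<String> = tokenize(body).collect();
    // Prints ["hello", "matrix", "world", "hello", "again"]
    println!("{tokens:?}");
}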

View file

@ -6,7 +6,6 @@ use crate::{
SERVICES, SERVICES,
}; };
use abstraction::{KeyValueDatabaseEngine, KvTree}; use abstraction::{KeyValueDatabaseEngine, KvTree};
use base64::{engine::general_purpose, Engine};
use directories::ProjectDirs; use directories::ProjectDirs;
use lru_cache::LruCache; use lru_cache::LruCache;
@ -102,8 +101,6 @@ pub struct KeyValueDatabase {
pub(super) userroomid_leftstate: Arc<dyn KvTree>, pub(super) userroomid_leftstate: Arc<dyn KvTree>,
pub(super) roomuserid_leftcount: Arc<dyn KvTree>, pub(super) roomuserid_leftcount: Arc<dyn KvTree>,
pub(super) alias_userid: Arc<dyn KvTree>, // User who created the alias
pub(super) disabledroomids: Arc<dyn KvTree>, // Rooms where incoming federation handling is disabled pub(super) disabledroomids: Arc<dyn KvTree>, // Rooms where incoming federation handling is disabled
pub(super) lazyloadedids: Arc<dyn KvTree>, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId pub(super) lazyloadedids: Arc<dyn KvTree>, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId
@ -330,8 +327,6 @@ impl KeyValueDatabase {
userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, userroomid_leftstate: builder.open_tree("userroomid_leftstate")?,
roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?,
alias_userid: builder.open_tree("alias_userid")?,
disabledroomids: builder.open_tree("disabledroomids")?, disabledroomids: builder.open_tree("disabledroomids")?,
lazyloadedids: builder.open_tree("lazyloadedids")?, lazyloadedids: builder.open_tree("lazyloadedids")?,
@ -425,7 +420,7 @@ impl KeyValueDatabase {
} }
// If the database has any data, perform data migrations before starting // If the database has any data, perform data migrations before starting
let latest_database_version = 16; let latest_database_version = 13;
if services().users.count()? > 0 { if services().users.count()? > 0 {
// MIGRATIONS // MIGRATIONS
@ -942,86 +937,6 @@ impl KeyValueDatabase {
warn!("Migration: 12 -> 13 finished"); warn!("Migration: 12 -> 13 finished");
} }
if services().globals.database_version()? < 16 {
// Reconstruct all media using the filesystem
db.mediaid_file.clear().unwrap();
for file in fs::read_dir(services().globals.get_media_folder()).unwrap() {
let file = file.unwrap();
let mediaid = general_purpose::URL_SAFE_NO_PAD
.decode(file.file_name().into_string().unwrap())
.unwrap();
let mut parts = mediaid.rsplit(|&b| b == 0xff);
let mut removed_bytes = 0;
let content_type_bytes = parts.next().unwrap();
removed_bytes += content_type_bytes.len() + 1;
let content_disposition_bytes = parts
.next()
.ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;
removed_bytes += content_disposition_bytes.len();
let mut content_disposition =
utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
Error::bad_database("Content Disposition in mediaid_file is invalid.")
})?;
if content_disposition.contains("filename=")
&& !content_disposition.contains("filename=\"")
{
println!("{}", &content_disposition);
content_disposition =
content_disposition.replacen("filename=", "filename=\"", 1);
content_disposition.push('"');
println!("{}", &content_disposition);
let mut new_key = mediaid[..(mediaid.len() - removed_bytes)].to_vec();
assert!(*new_key.last().unwrap() == 0xff);
let mut shorter_key = new_key.clone();
shorter_key.extend(
ruma::http_headers::ContentDisposition::new(
ruma::http_headers::ContentDispositionType::Inline,
)
.to_string()
.as_bytes(),
);
shorter_key.push(0xff);
shorter_key.extend_from_slice(content_type_bytes);
new_key.extend_from_slice(content_disposition.to_string().as_bytes());
new_key.push(0xff);
new_key.extend_from_slice(content_type_bytes);
// Some file names are too long. Ignore those.
match fs::rename(
services().globals.get_media_file(&mediaid),
services().globals.get_media_file(&new_key),
) {
Ok(_) => {
db.mediaid_file.insert(&new_key, &[])?;
}
Err(_) => {
fs::rename(
services().globals.get_media_file(&mediaid),
services().globals.get_media_file(&shorter_key),
)
.unwrap();
db.mediaid_file.insert(&shorter_key, &[])?;
}
}
} else {
db.mediaid_file.insert(&mediaid, &[])?;
}
}
services().globals.bump_database_version(16)?;
warn!("Migration: 13 -> 16 finished");
}
assert_eq!( assert_eq!(
services().globals.database_version().unwrap(), services().globals.database_version().unwrap(),
latest_database_version latest_database_version
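
Part of the 13 -> 16 migration above rewrites stored Content-Disposition values whose filename parameter is unquoted. A std-only sketch of just that string fix; like the migration, it assumes filename is the final parameter and simply appends the closing quote:

fn quote_filename(content_disposition: &str) -> String {
    if content_disposition.contains("filename=") && !content_disposition.contains("filename=\"") {
        let mut fixed = content_disposition.replacen("filename=", "filename=\"", 1);
        fixed.push('"');
        fixed
    } else {
        content_disposition.to_owned()
    }
}

fn main() {
    assert_eq!(
        quote_filename("inline; filename=cat.png"),
        "inline; filename=\"cat.png\""
    );
    // Already-quoted values are left untouched.
    assert_eq!(
        quote_filename("inline; filename=\"cat.png\""),
        "inline; filename=\"cat.png\""
    );
    println!("ok");
}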

View file

@ -57,7 +57,7 @@ async fn main() {
)) ))
.nested(), .nested(),
) )
.merge(Env::prefixed("CONDUIT_").global().split("__")); .merge(Env::prefixed("CONDUIT_").global());
let config = match raw_config.extract::<Config>() { let config = match raw_config.extract::<Config>() {
Ok(s) => s, Ok(s) => s,
@ -379,14 +379,10 @@ fn routes(config: &Config) -> Router {
.ruma_route(client_server::turn_server_route) .ruma_route(client_server::turn_server_route)
.ruma_route(client_server::send_event_to_device_route) .ruma_route(client_server::send_event_to_device_route)
.ruma_route(client_server::get_media_config_route) .ruma_route(client_server::get_media_config_route)
.ruma_route(client_server::get_media_config_auth_route)
.ruma_route(client_server::create_content_route) .ruma_route(client_server::create_content_route)
.ruma_route(client_server::get_content_route) .ruma_route(client_server::get_content_route)
.ruma_route(client_server::get_content_auth_route)
.ruma_route(client_server::get_content_as_filename_route) .ruma_route(client_server::get_content_as_filename_route)
.ruma_route(client_server::get_content_as_filename_auth_route)
.ruma_route(client_server::get_content_thumbnail_route) .ruma_route(client_server::get_content_thumbnail_route)
.ruma_route(client_server::get_content_thumbnail_auth_route)
.ruma_route(client_server::get_devices_route) .ruma_route(client_server::get_devices_route)
.ruma_route(client_server::get_device_route) .ruma_route(client_server::get_device_route)
.ruma_route(client_server::update_device_route) .ruma_route(client_server::update_device_route)
@ -439,13 +435,14 @@ fn routes(config: &Config) -> Router {
.ruma_route(server_server::get_event_authorization_route) .ruma_route(server_server::get_event_authorization_route)
.ruma_route(server_server::get_room_state_route) .ruma_route(server_server::get_room_state_route)
.ruma_route(server_server::get_room_state_ids_route) .ruma_route(server_server::get_room_state_ids_route)
.ruma_route(server_server::create_leave_event_template_route)
.ruma_route(server_server::create_leave_event_v1_route)
.ruma_route(server_server::create_leave_event_v2_route)
.ruma_route(server_server::create_join_event_template_route) .ruma_route(server_server::create_join_event_template_route)
.ruma_route(server_server::create_join_event_v1_route) .ruma_route(server_server::create_join_event_v1_route)
.ruma_route(server_server::create_join_event_v2_route) .ruma_route(server_server::create_join_event_v2_route)
.ruma_route(server_server::create_invite_route) .ruma_route(server_server::create_invite_route)
.ruma_route(server_server::get_devices_route) .ruma_route(server_server::get_devices_route)
.ruma_route(server_server::get_content_route)
.ruma_route(server_server::get_content_thumbnail_route)
.ruma_route(server_server::get_room_information_route) .ruma_route(server_server::get_room_information_route)
.ruma_route(server_server::get_profile_information_route) .ruma_route(server_server::get_profile_information_route)
.ruma_route(server_server::get_keys_route) .ruma_route(server_server::get_keys_route)
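
The `.split("__")` added to the Env provider near the top of this file's diff lets a double underscore in an environment variable select a nested config key. A simplified std-only illustration of the mapping; this is a stand-in for what figment's Env provider does, not its actual implementation:

fn env_to_config_path(var: &str) -> Option<Vec<String>> {
    let rest = var.strip_prefix("CONDUIT_")?;
    Some(
        rest.split("__")
            .map(|segment| segment.to_lowercase())
            .collect(),
    )
}

fn main() {
    assert_eq!(
        env_to_config_path("CONDUIT_WELL_KNOWN__CLIENT"),
        Some(vec!["well_known".to_owned(), "client".to_owned()])
    );
    assert_eq!(
        env_to_config_path("CONDUIT_SERVER_NAME"),
        Some(vec!["server_name".to_owned()])
    );
    println!("ok");
}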

View file

@ -1,4 +1,9 @@
use std::{collections::BTreeMap, convert::TryFrom, sync::Arc, time::Instant}; use std::{
collections::BTreeMap,
convert::{TryFrom, TryInto},
sync::Arc,
time::Instant,
};
use clap::Parser; use clap::Parser;
use regex::Regex; use regex::Regex;
@ -19,8 +24,7 @@ use ruma::{
}, },
TimelineEventType, TimelineEventType,
}, },
EventId, MilliSecondsSinceUnixEpoch, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, EventId, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
RoomVersionId, ServerName, UserId,
}; };
use serde_json::value::to_raw_value; use serde_json::value::to_raw_value;
use tokio::sync::{mpsc, Mutex, RwLock}; use tokio::sync::{mpsc, Mutex, RwLock};
@ -246,7 +250,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: None, state_key: None,
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, conduit_user,
&conduit_room, &conduit_room,
@ -860,46 +863,15 @@ impl Service {
services() services()
.rooms .rooms
.event_handler .event_handler
// Generally we shouldn't be checking against expired keys unless required, so in the admin
// room it might be best to not allow expired keys
.fetch_required_signing_keys(&value, &pub_key_map) .fetch_required_signing_keys(&value, &pub_key_map)
.await?; .await?;
let mut expired_key_map = BTreeMap::new(); let pub_key_map = pub_key_map.read().await;
let mut valid_key_map = BTreeMap::new(); match ruma::signatures::verify_json(&pub_key_map, &value) {
Ok(_) => RoomMessageEventContent::text_plain("Signature correct"),
for (server, keys) in pub_key_map.into_inner().into_iter() { Err(e) => RoomMessageEventContent::text_plain(format!(
if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() {
valid_key_map.insert(
server,
keys.verify_keys
.into_iter()
.map(|(id, key)| (id, key.key))
.collect(),
);
} else {
expired_key_map.insert(
server,
keys.verify_keys
.into_iter()
.map(|(id, key)| (id, key.key))
.collect(),
);
}
}
if ruma::signatures::verify_json(&valid_key_map, &value).is_ok() {
RoomMessageEventContent::text_plain("Signature correct")
} else if let Err(e) =
ruma::signatures::verify_json(&expired_key_map, &value)
{
RoomMessageEventContent::text_plain(format!(
"Signature verification failed: {e}" "Signature verification failed: {e}"
)) )),
} else {
RoomMessageEventContent::text_plain(
"Signature correct (with expired keys)",
)
} }
} }
Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")), Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")),
@ -953,15 +925,7 @@ impl Service {
{ {
RoomMessageEventContent::text_plain("No such alias exists") RoomMessageEventContent::text_plain("No such alias exists")
} else { } else {
// We execute this as the server user for two reasons services().rooms.alias.remove_alias(&alias)?;
// 1. If the user can execute commands in the admin room, they can always remove the alias.
// 2. In the future, we are likely going to be able to allow users to execute commands via
// other methods, such as IPC, which would lead to us not knowing their user id
services()
.rooms
.alias
.remove_alias(&alias, services().globals.server_user())?;
RoomMessageEventContent::text_plain("Alias removed sucessfully") RoomMessageEventContent::text_plain("Alias removed sucessfully")
} }
} }
@ -1106,7 +1070,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, conduit_user,
&room_id, &room_id,
@ -1135,7 +1098,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some(conduit_user.to_string()), state_key: Some(conduit_user.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, conduit_user,
&room_id, &room_id,
@ -1161,7 +1123,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, conduit_user,
&room_id, &room_id,
@ -1181,7 +1142,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, conduit_user,
&room_id, &room_id,
@ -1203,7 +1163,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, conduit_user,
&room_id, &room_id,
@ -1225,7 +1184,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, conduit_user,
&room_id, &room_id,
@ -1246,7 +1204,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, conduit_user,
&room_id, &room_id,
@ -1267,7 +1224,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, conduit_user,
&room_id, &room_id,
@ -1276,7 +1232,9 @@ impl Service {
.await?; .await?;
// 6. Room alias // 6. Room alias
let alias: OwnedRoomAliasId = services().globals.admin_alias().to_owned(); let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");
services() services()
.rooms .rooms
@ -1292,7 +1250,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, conduit_user,
&room_id, &room_id,
@ -1300,10 +1257,7 @@ impl Service {
) )
.await?; .await?;
services() services().rooms.alias.set_alias(&alias, &room_id)?;
.rooms
.alias
.set_alias(&alias, &room_id, conduit_user)?;
Ok(()) Ok(())
} }
@ -1312,10 +1266,15 @@ impl Service {
/// ///
/// Errors are propagated from the database, and will have None if there is no admin room /// Errors are propagated from the database, and will have None if there is no admin room
pub(crate) fn get_admin_room(&self) -> Result<Option<OwnedRoomId>> { pub(crate) fn get_admin_room(&self) -> Result<Option<OwnedRoomId>> {
let admin_room_alias: Box<RoomAliasId> =
format!("#admins:{}", services().globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");
services() services()
.rooms .rooms
.alias .alias
.resolve_local_alias(services().globals.admin_alias()) .resolve_local_alias(&admin_room_alias)
} }
/// Invite the user to the conduit admin room. /// Invite the user to the conduit admin room.
@ -1362,7 +1321,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some(user_id.to_string()), state_key: Some(user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, conduit_user,
&room_id, &room_id,
@ -1389,7 +1347,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some(user_id.to_string()), state_key: Some(user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
user_id, user_id,
&room_id, &room_id,
@ -1416,7 +1373,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, conduit_user,
&room_id, &room_id,
@ -1436,7 +1392,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: None, state_key: None,
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, conduit_user,
&room_id, &room_id,
@ -1445,15 +1400,6 @@ impl Service {
} }
Ok(()) Ok(())
} }
/// Checks whether a given user is an admin of this server
pub fn user_is_admin(&self, user_id: &UserId) -> Result<bool> {
let Some(admin_room) = self.get_admin_room()? else {
return Ok(false);
};
services().rooms.state_cache.is_joined(user_id, &admin_room)
}
} }
#[cfg(test)] #[cfg(test)]

View file

@ -1,71 +1,13 @@
use std::{ use std::collections::BTreeMap;
collections::BTreeMap,
time::{Duration, SystemTime},
};
use crate::{services, Result};
use async_trait::async_trait; use async_trait::async_trait;
use ruma::{ use ruma::{
api::federation::discovery::{OldVerifyKey, ServerSigningKeys, VerifyKey}, api::federation::discovery::{ServerSigningKeys, VerifyKey},
serde::Base64,
signatures::Ed25519KeyPair, signatures::Ed25519KeyPair,
DeviceId, MilliSecondsSinceUnixEpoch, ServerName, UserId, DeviceId, OwnedServerSigningKeyId, ServerName, UserId,
}; };
use serde::Deserialize;
/// Similar to ServerSigningKeys, but drops a few unnecessary fields we don't require post-validation use crate::Result;
#[derive(Deserialize, Debug, Clone)]
pub struct SigningKeys {
pub verify_keys: BTreeMap<String, VerifyKey>,
pub old_verify_keys: BTreeMap<String, OldVerifyKey>,
pub valid_until_ts: MilliSecondsSinceUnixEpoch,
}
impl SigningKeys {
/// Creates the SigningKeys struct, using the keys of the current server
pub fn load_own_keys() -> Self {
let mut keys = Self {
verify_keys: BTreeMap::new(),
old_verify_keys: BTreeMap::new(),
valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(7 * 86400),
)
.expect("Should be valid until year 500,000,000"),
};
keys.verify_keys.insert(
format!("ed25519:{}", services().globals.keypair().version()),
VerifyKey {
key: Base64::new(services().globals.keypair.public_key().to_vec()),
},
);
keys
}
}
impl From<ServerSigningKeys> for SigningKeys {
fn from(value: ServerSigningKeys) -> Self {
let ServerSigningKeys {
verify_keys,
old_verify_keys,
valid_until_ts,
..
} = value;
Self {
verify_keys: verify_keys
.into_iter()
.map(|(id, key)| (id.to_string(), key))
.collect(),
old_verify_keys: old_verify_keys
.into_iter()
.map(|(id, key)| (id.to_string(), key))
.collect(),
valid_until_ts,
}
}
}
#[async_trait] #[async_trait]
pub trait Data: Send + Sync { pub trait Data: Send + Sync {
@ -79,23 +21,17 @@ pub trait Data: Send + Sync {
fn clear_caches(&self, amount: u32); fn clear_caches(&self, amount: u32);
fn load_keypair(&self) -> Result<Ed25519KeyPair>; fn load_keypair(&self) -> Result<Ed25519KeyPair>;
fn remove_keypair(&self) -> Result<()>; fn remove_keypair(&self) -> Result<()>;
/// Only extends the cached keys, not moving any verify_keys to old_verify_keys, as if we suddenly fn add_signing_key(
/// receive requests from the origin server, we want to be able to accept requests from them
fn add_signing_key_from_trusted_server(
&self, &self,
origin: &ServerName, origin: &ServerName,
new_keys: ServerSigningKeys, new_keys: ServerSigningKeys,
) -> Result<SigningKeys>; ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>>;
/// Extends cached keys, as well as moving verify_keys that are not present in these new keys to
/// old_verify_keys, so that potentially compromised keys cannot be used to make requests
fn add_signing_key_from_origin(
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
) -> Result<SigningKeys>;
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>>; fn signing_keys_for(
&self,
origin: &ServerName,
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>>;
fn database_version(&self) -> Result<u64>; fn database_version(&self) -> Result<u64>;
fn bump_database_version(&self, new_version: u64) -> Result<()>; fn bump_database_version(&self, new_version: u64) -> Result<()>;
} }

View file

@ -1,11 +1,11 @@
mod data; mod data;
pub use data::{Data, SigningKeys}; pub use data::Data;
use ruma::{ use ruma::{
serde::Base64, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedEventId, OwnedRoomAliasId, serde::Base64, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName,
OwnedRoomId, OwnedServerName, OwnedUserId, RoomAliasId, OwnedServerSigningKeyId, OwnedUserId,
}; };
use crate::api::server_server::DestinationResponse; use crate::api::server_server::FedDest;
use crate::{services, Config, Error, Result}; use crate::{services, Config, Error, Result};
use futures_util::FutureExt; use futures_util::FutureExt;
@ -13,9 +13,13 @@ use hickory_resolver::TokioAsyncResolver;
use hyper_util::client::legacy::connect::dns::{GaiResolver, Name as HyperName}; use hyper_util::client::legacy::connect::dns::{GaiResolver, Name as HyperName};
use reqwest::dns::{Addrs, Name, Resolve, Resolving}; use reqwest::dns::{Addrs, Name, Resolve, Resolving};
use ruma::{ use ruma::{
api::{client::sync::sync_events, federation::discovery::ServerSigningKeys}, api::{
client::sync::sync_events,
federation::discovery::{ServerSigningKeys, VerifyKey},
},
DeviceId, RoomVersionId, ServerName, UserId, DeviceId, RoomVersionId, ServerName, UserId,
}; };
use std::str::FromStr;
use std::{ use std::{
collections::{BTreeMap, HashMap}, collections::{BTreeMap, HashMap},
error::Error as StdError, error::Error as StdError,
@ -24,7 +28,6 @@ use std::{
iter, iter,
net::{IpAddr, SocketAddr}, net::{IpAddr, SocketAddr},
path::PathBuf, path::PathBuf,
str::FromStr,
sync::{ sync::{
atomic::{self, AtomicBool}, atomic::{self, AtomicBool},
Arc, RwLock as StdRwLock, Arc, RwLock as StdRwLock,
@ -37,7 +40,7 @@ use tracing::{error, info};
use base64::{engine::general_purpose, Engine as _}; use base64::{engine::general_purpose, Engine as _};
type WellKnownMap = HashMap<OwnedServerName, DestinationResponse>; type WellKnownMap = HashMap<OwnedServerName, (FedDest, String)>;
type TlsNameMap = HashMap<String, (Vec<IpAddr>, u16)>; type TlsNameMap = HashMap<String, (Vec<IpAddr>, u16)>;
type RateLimitState = (Instant, u32); // Time of the last failed try, number of failed tries type RateLimitState = (Instant, u32); // Time of the last failed try, number of failed tries
type SyncHandle = ( type SyncHandle = (
@ -69,7 +72,6 @@ pub struct Service {
pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>, // this lock will be held longer pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>, // this lock will be held longer
pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>, pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
server_user: OwnedUserId, server_user: OwnedUserId,
admin_alias: OwnedRoomAliasId,
pub stateres_mutex: Arc<Mutex<()>>, pub stateres_mutex: Arc<Mutex<()>>,
pub rotate: RotationHandler, pub rotate: RotationHandler,
@ -192,8 +194,6 @@ impl Service {
let mut s = Self { let mut s = Self {
allow_registration: RwLock::new(config.allow_registration), allow_registration: RwLock::new(config.allow_registration),
admin_alias: RoomAliasId::parse(format!("#admins:{}", &config.server_name))
.expect("#admins:server_name is a valid alias name"),
server_user: UserId::parse(format!("@conduit:{}", &config.server_name)) server_user: UserId::parse(format!("@conduit:{}", &config.server_name))
.expect("@conduit:server_name is valid"), .expect("@conduit:server_name is valid"),
db, db,
@ -293,10 +293,6 @@ impl Service {
self.server_user.as_ref() self.server_user.as_ref()
} }
pub fn admin_alias(&self) -> &RoomAliasId {
self.admin_alias.as_ref()
}
pub fn max_request_size(&self) -> u32 { pub fn max_request_size(&self) -> u32 {
self.config.max_request_size self.config.max_request_size
} }
@ -389,89 +385,36 @@ impl Service {
room_versions room_versions
} }
/// TODO: the key valid until timestamp is only honored in room version > 4
/// Remove the outdated keys and insert the new ones.
///
/// This doesn't actually check that the keys provided are newer than the old set. /// This doesn't actually check that the keys provided are newer than the old set.
pub fn add_signing_key_from_trusted_server( pub fn add_signing_key(
&self, &self,
origin: &ServerName, origin: &ServerName,
new_keys: ServerSigningKeys, new_keys: ServerSigningKeys,
) -> Result<SigningKeys> { ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
self.db self.db.add_signing_key(origin, new_keys)
.add_signing_key_from_trusted_server(origin, new_keys)
} }
/// Same as from_trusted_server, except it will move active keys not present in `new_keys` to old_verify_keys /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
pub fn add_signing_key_from_origin( pub fn signing_keys_for(
&self, &self,
origin: &ServerName, origin: &ServerName,
new_keys: ServerSigningKeys, ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
) -> Result<SigningKeys> { let mut keys = self.db.signing_keys_for(origin)?;
self.db.add_signing_key_from_origin(origin, new_keys) if origin == self.server_name() {
} keys.insert(
format!("ed25519:{}", services().globals.keypair().version())
/// This returns Ok(None) when there are no keys found for the server. .try_into()
pub fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>> { .expect("found invalid server signing keys in DB"),
Ok(self.db.signing_keys_for(origin)?.or_else(|| { VerifyKey {
if origin == self.server_name() { key: Base64::new(self.keypair.public_key().to_vec()),
Some(SigningKeys::load_own_keys()) },
} else { );
None
}
}))
}
/// Filters the key map of multiple servers down to keys that should be accepted given the expiry time,
/// room version, and timestamp of the parameters
pub fn filter_keys_server_map(
&self,
keys: BTreeMap<String, SigningKeys>,
timestamp: MilliSecondsSinceUnixEpoch,
room_version_id: &RoomVersionId,
) -> BTreeMap<String, BTreeMap<String, Base64>> {
keys.into_iter()
.filter_map(|(server, keys)| {
self.filter_keys_single_server(keys, timestamp, room_version_id)
.map(|keys| (server, keys))
})
.collect()
}
/// Filters the keys of a single server down to keys that should be accepted given the expiry time,
/// room version, and timestamp of the parameters
pub fn filter_keys_single_server(
&self,
keys: SigningKeys,
timestamp: MilliSecondsSinceUnixEpoch,
room_version_id: &RoomVersionId,
) -> Option<BTreeMap<String, Base64>> {
if keys.valid_until_ts > timestamp
// valid_until_ts MUST be ignored in room versions 1, 2, 3, and 4.
// https://spec.matrix.org/v1.10/server-server-api/#get_matrixkeyv2server
|| matches!(room_version_id, RoomVersionId::V1
| RoomVersionId::V2
| RoomVersionId::V4
| RoomVersionId::V3)
{
// Given that either the room version allows stale keys, or the valid_until_ts is
// in the future, all verify_keys are valid
let mut map: BTreeMap<_, _> = keys
.verify_keys
.into_iter()
.map(|(id, key)| (id, key.key))
.collect();
map.extend(keys.old_verify_keys.into_iter().filter_map(|(id, key)| {
// Even on old room versions, we don't allow old keys if they are expired
if key.expired_ts > timestamp {
Some((id, key.key))
} else {
None
}
}));
Some(map)
} else {
None
} }
Ok(keys)
} }
pub fn database_version(&self) -> Result<u64> { pub fn database_version(&self) -> Result<u64> {
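
filter_keys_single_server in the left column encodes the spec rule cited above: valid_until_ts is honored except in room versions 1-4, and old_verify_keys remain usable only until their expired_ts. A std-only sketch with simplified stand-in types (u64 millisecond timestamps, string room versions), not ruma's:

use std::collections::BTreeMap;

struct Keys {
    verify_keys: BTreeMap<String, String>,            // key id -> base64 key
    old_verify_keys: BTreeMap<String, (u64, String)>, // key id -> (expired_ts, key)
    valid_until_ts: u64,
}

fn filter_keys_single_server(
    keys: Keys,
    event_ts: u64,
    room_version: &str,
) -> Option<BTreeMap<String, String>> {
    // valid_until_ts MUST be ignored in room versions 1, 2, 3, and 4.
    let ignore_valid_until = matches!(room_version, "1" | "2" | "3" | "4");
    if keys.valid_until_ts > event_ts || ignore_valid_until {
        let mut map = keys.verify_keys;
        map.extend(
            keys.old_verify_keys
                .into_iter()
                .filter(|(_, (expired_ts, _))| *expired_ts > event_ts)
                .map(|(id, (_, key))| (id, key)),
        );
        Some(map)
    } else {
        None
    }
}

fn main() {
    let keys = Keys {
        verify_keys: BTreeMap::from_iter([("ed25519:a".to_owned(), "AAAA".to_owned())]),
        old_verify_keys: BTreeMap::from_iter([(
            "ed25519:z".to_owned(),
            (10, "ZZZZ".to_owned()),
        )]),
        valid_until_ts: 100,
    };
    // Event at ts 50: the current key is accepted, the old key expired at ts 10.
    let accepted = filter_keys_single_server(keys, 50, "10").unwrap();
    assert!(accepted.contains_key("ed25519:a") && !accepted.contains_key("ed25519:z"));
    println!("{} key(s) accepted", accepted.len());
}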

View file

@ -1,5 +1,3 @@
use ruma::http_headers::ContentDisposition;
use crate::Result; use crate::Result;
pub trait Data: Send + Sync { pub trait Data: Send + Sync {
@ -8,7 +6,7 @@ pub trait Data: Send + Sync {
mxc: String, mxc: String,
width: u32, width: u32,
height: u32, height: u32,
content_disposition: &ContentDisposition, content_disposition: Option<&str>,
content_type: Option<&str>, content_type: Option<&str>,
) -> Result<Vec<u8>>; ) -> Result<Vec<u8>>;
@ -18,5 +16,5 @@ pub trait Data: Send + Sync {
mxc: String, mxc: String,
width: u32, width: u32,
height: u32, height: u32,
) -> Result<(ContentDisposition, Option<String>, Vec<u8>)>; ) -> Result<(Option<String>, Option<String>, Vec<u8>)>;
} }

View file

@ -2,7 +2,6 @@ mod data;
use std::io::Cursor; use std::io::Cursor;
pub use data::Data; pub use data::Data;
use ruma::http_headers::{ContentDisposition, ContentDispositionType};
use crate::{services, Result}; use crate::{services, Result};
use image::imageops::FilterType; use image::imageops::FilterType;
@ -13,7 +12,7 @@ use tokio::{
}; };
pub struct FileMeta { pub struct FileMeta {
pub content_disposition: ContentDisposition, pub content_disposition: Option<String>,
pub content_type: Option<String>, pub content_type: Option<String>,
pub file: Vec<u8>, pub file: Vec<u8>,
} }
@ -27,17 +26,14 @@ impl Service {
pub async fn create( pub async fn create(
&self, &self,
mxc: String, mxc: String,
content_disposition: Option<ContentDisposition>, content_disposition: Option<&str>,
content_type: Option<&str>, content_type: Option<&str>,
file: &[u8], file: &[u8],
) -> Result<()> { ) -> Result<()> {
let content_disposition =
content_disposition.unwrap_or(ContentDisposition::new(ContentDispositionType::Inline));
// Width, Height = 0 if it's not a thumbnail // Width, Height = 0 if it's not a thumbnail
let key = self let key = self
.db .db
.create_file_metadata(mxc, 0, 0, &content_disposition, content_type)?; .create_file_metadata(mxc, 0, 0, content_disposition, content_type)?;
let path = services().globals.get_media_file(&key); let path = services().globals.get_media_file(&key);
let mut f = File::create(path).await?; let mut f = File::create(path).await?;
@ -50,18 +46,15 @@ impl Service {
pub async fn upload_thumbnail( pub async fn upload_thumbnail(
&self, &self,
mxc: String, mxc: String,
content_disposition: Option<&str>,
content_type: Option<&str>, content_type: Option<&str>,
width: u32, width: u32,
height: u32, height: u32,
file: &[u8], file: &[u8],
) -> Result<()> { ) -> Result<()> {
let key = self.db.create_file_metadata( let key =
mxc, self.db
width, .create_file_metadata(mxc, width, height, content_disposition, content_type)?;
height,
&ContentDisposition::new(ContentDispositionType::Inline),
content_type,
)?;
let path = services().globals.get_media_file(&key); let path = services().globals.get_media_file(&key);
let mut f = File::create(path).await?; let mut f = File::create(path).await?;
@ -173,20 +166,22 @@ impl Service {
/ u64::from(original_height) / u64::from(original_height)
}; };
if use_width { if use_width {
if intermediate <= u64::from(u32::MAX) { if intermediate <= u64::from(::std::u32::MAX) {
(width, intermediate as u32) (width, intermediate as u32)
} else { } else {
( (
(u64::from(width) * u64::from(u32::MAX) / intermediate) as u32, (u64::from(width) * u64::from(::std::u32::MAX) / intermediate)
u32::MAX, as u32,
::std::u32::MAX,
) )
} }
} else if intermediate <= u64::from(u32::MAX) { } else if intermediate <= u64::from(::std::u32::MAX) {
(intermediate as u32, height) (intermediate as u32, height)
} else { } else {
( (
u32::MAX, ::std::u32::MAX,
(u64::from(height) * u64::from(u32::MAX) / intermediate) as u32, (u64::from(height) * u64::from(::std::u32::MAX) / intermediate)
as u32,
) )
} }
}; };
@ -205,7 +200,7 @@ impl Service {
mxc, mxc,
width, width,
height, height,
&content_disposition, content_disposition.as_deref(),
content_type.as_deref(), content_type.as_deref(),
)?; )?;
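
The thumbnail code above scales one side in u64 and clamps it so the result still fits in u32; the left column merely writes u32::MAX instead of ::std::u32::MAX. A std-only sketch of that arithmetic; the surrounding use_width / intermediate setup is reconstructed from context, so treat it as an illustration rather than the exact implementation:

fn fit_dimensions(
    original_width: u32,
    original_height: u32,
    width: u32,
    height: u32,
) -> (u32, u32) {
    // Scale by width if doing so keeps the height within the requested box.
    let use_width = u64::from(width) * u64::from(original_height)
        <= u64::from(height) * u64::from(original_width);
    let intermediate = if use_width {
        u64::from(original_height) * u64::from(width) / u64::from(original_width)
    } else {
        u64::from(original_width) * u64::from(height) / u64::from(original_height)
    };
    if use_width {
        if intermediate <= u64::from(u32::MAX) {
            (width, intermediate as u32)
        } else {
            (
                (u64::from(width) * u64::from(u32::MAX) / intermediate) as u32,
                u32::MAX,
            )
        }
    } else if intermediate <= u64::from(u32::MAX) {
        (intermediate as u32, height)
    } else {
        (
            u32::MAX,
            (u64::from(height) * u64::from(u32::MAX) / intermediate) as u32,
        )
    }
}

fn main() {
    // A 4000x3000 original requested at 800x800 keeps its 4:3 ratio.
    assert_eq!(fit_dimensions(4000, 3000, 800, 800), (800, 600));
    println!("ok");
}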

View file

@ -1,6 +1,5 @@
use crate::Error; use crate::Error;
use ruma::{ use ruma::{
api::client::error::ErrorKind,
canonical_json::redact_content_in_place, canonical_json::redact_content_in_place,
events::{ events::{
room::{member::RoomMemberEventContent, redaction::RoomRedactionEventContent}, room::{member::RoomMemberEventContent, redaction::RoomRedactionEventContent},
@ -73,23 +72,6 @@ impl PduEvent {
Ok(()) Ok(())
} }
pub fn is_redacted(&self) -> bool {
#[derive(Deserialize)]
struct ExtractRedactedBecause {
redacted_because: Option<serde::de::IgnoredAny>,
}
let Some(unsigned) = &self.unsigned else {
return false;
};
let Ok(unsigned) = ExtractRedactedBecause::deserialize(&**unsigned) else {
return false;
};
unsigned.redacted_because.is_some()
}
pub fn remove_transaction_id(&mut self) -> crate::Result<()> { pub fn remove_transaction_id(&mut self) -> crate::Result<()> {
if let Some(unsigned) = &self.unsigned { if let Some(unsigned) = &self.unsigned {
let mut unsigned: BTreeMap<String, Box<RawJsonValue>> = let mut unsigned: BTreeMap<String, Box<RawJsonValue>> =
@ -444,7 +426,7 @@ pub(crate) fn gen_event_id_canonical_json(
"${}", "${}",
// Anything higher than version3 behaves the same // Anything higher than version3 behaves the same
ruma::signatures::reference_hash(&value, room_version_id) ruma::signatures::reference_hash(&value, room_version_id)
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))? .expect("ruma can calculate reference hashes")
) )
.try_into() .try_into()
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
@ -461,8 +443,4 @@ pub struct PduBuilder {
pub unsigned: Option<BTreeMap<String, serde_json::Value>>, pub unsigned: Option<BTreeMap<String, serde_json::Value>>,
pub state_key: Option<String>, pub state_key: Option<String>,
pub redacts: Option<Arc<EventId>>, pub redacts: Option<Arc<EventId>>,
/// For timestamped messaging, should only be used for appservices
///
/// Will be set to current time if None
pub timestamp: Option<MilliSecondsSinceUnixEpoch>,
} }
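
is_redacted in the left column peeks into `unsigned` just far enough to see whether redacted_because exists, discarding its contents with serde::de::IgnoredAny. A small sketch of that trick, assuming serde and serde_json (with its raw_value feature) as dependencies; the JSON is illustrative, not a real PDU:

use serde::Deserialize;

#[derive(Deserialize)]
struct ExtractRedactedBecause {
    redacted_because: Option<serde::de::IgnoredAny>,
}

fn is_redacted(unsigned: &serde_json::value::RawValue) -> bool {
    // Deserializing only this struct skips everything else in `unsigned`.
    ExtractRedactedBecause::deserialize(unsigned)
        .map(|u| u.redacted_because.is_some())
        .unwrap_or(false)
}

fn main() -> Result<(), serde_json::Error> {
    let redacted = serde_json::value::RawValue::from_string(
        r#"{"redacted_because": {"type": "m.room.redaction"}}"#.to_owned(),
    )?;
    let plain = serde_json::value::RawValue::from_string(r#"{"age": 1234}"#.to_owned())?;
    assert!(is_redacted(&redacted));
    assert!(!is_redacted(&plain));
    println!("ok");
    Ok(())
}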

View file

@ -1,12 +1,9 @@
use crate::Result; use crate::Result;
use ruma::{OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, UserId}; use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
pub trait Data: Send + Sync { pub trait Data: Send + Sync {
/// Creates or updates the alias to the given room id. /// Creates or updates the alias to the given room id.
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()>; fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()>;
/// Finds the user who assigned the given alias to a room
fn who_created_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedUserId>>;
/// Forgets about an alias. Returns an error if the alias did not exist. /// Forgets about an alias. Returns an error if the alias did not exist.
fn remove_alias(&self, alias: &RoomAliasId) -> Result<()>; fn remove_alias(&self, alias: &RoomAliasId) -> Result<()>;

View file

@ -1,17 +1,9 @@
mod data; mod data;
pub use data::Data; pub use data::Data;
use tracing::error;
use crate::{services, Error, Result}; use crate::Result;
use ruma::{ use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
api::client::error::ErrorKind,
events::{
room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
StateEventType,
},
OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, UserId,
};
pub struct Service { pub struct Service {
pub db: &'static dyn Data, pub db: &'static dyn Data,
@ -19,71 +11,13 @@ pub struct Service {
impl Service { impl Service {
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> { pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> {
if alias == services().globals.admin_alias() && user_id != services().globals.server_user() self.db.set_alias(alias, room_id)
{
Err(Error::BadRequest(
ErrorKind::forbidden(),
"Only the server user can set this alias",
))
} else {
self.db.set_alias(alias, room_id, user_id)
}
} }
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
fn user_can_remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result<bool> { pub fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> {
let Some(room_id) = self.resolve_local_alias(alias)? else { self.db.remove_alias(alias)
return Err(Error::BadRequest(ErrorKind::NotFound, "Alias not found."));
};
// The creator of an alias can remove it
if self
.db
.who_created_alias(alias)?
.map(|user| user == user_id)
.unwrap_or_default()
// Server admins can remove any local alias
|| services().admin.user_is_admin(user_id)?
// Always allow the Conduit user to remove the alias, since there may not be an admin room
|| services().globals.server_user() == user_id
{
Ok(true)
// Checking whether the user is able to change canonical aliases of the room
} else if let Some(event) = services().rooms.state_accessor.room_state_get(
&room_id,
&StateEventType::RoomPowerLevels,
"",
)? {
serde_json::from_str(event.content.get())
.map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels"))
.map(|content: RoomPowerLevelsEventContent| {
RoomPowerLevels::from(content)
.user_can_send_state(user_id, StateEventType::RoomCanonicalAlias)
})
// If there is no power levels event, only the room creator can change canonical aliases
} else if let Some(event) = services().rooms.state_accessor.room_state_get(
&room_id,
&StateEventType::RoomCreate,
"",
)? {
Ok(event.sender == user_id)
} else {
error!("Room {} has no m.room.create event (VERY BAD)!", room_id);
Err(Error::bad_database("Room has no m.room.create event"))
}
}
#[tracing::instrument(skip(self))]
pub fn remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result<()> {
if self.user_can_remove_alias(alias, user_id)? {
self.db.remove_alias(alias)
} else {
Err(Error::BadRequest(
ErrorKind::forbidden(),
"User is not permitted to remove this alias.",
))
}
} }
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]

View file

@ -9,7 +9,6 @@ use std::{
}; };
use futures_util::{stream::FuturesUnordered, Future, StreamExt}; use futures_util::{stream::FuturesUnordered, Future, StreamExt};
use globals::SigningKeys;
use ruma::{ use ruma::{
api::{ api::{
client::error::ErrorKind, client::error::ErrorKind,
@ -31,6 +30,7 @@ use ruma::{
StateEventType, TimelineEventType, StateEventType, TimelineEventType,
}, },
int, int,
serde::Base64,
state_res::{self, RoomVersion, StateMap}, state_res::{self, RoomVersion, StateMap},
uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch,
OwnedServerName, OwnedServerSigningKeyId, RoomId, RoomVersionId, ServerName, OwnedServerName, OwnedServerSigningKeyId, RoomId, RoomVersionId, ServerName,
@ -78,7 +78,7 @@ impl Service {
room_id: &'a RoomId, room_id: &'a RoomId,
value: BTreeMap<String, CanonicalJsonValue>, value: BTreeMap<String, CanonicalJsonValue>,
is_timeline_event: bool, is_timeline_event: bool,
pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<Option<Vec<u8>>> { ) -> Result<Option<Vec<u8>>> {
// 0. Check the server is in the room // 0. Check the server is in the room
if !services().rooms.metadata.exists(room_id)? { if !services().rooms.metadata.exists(room_id)? {
@ -304,12 +304,19 @@ impl Service {
room_id: &'a RoomId, room_id: &'a RoomId,
mut value: BTreeMap<String, CanonicalJsonValue>, mut value: BTreeMap<String, CanonicalJsonValue>,
auth_events_known: bool, auth_events_known: bool,
pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> AsyncRecursiveType<'a, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>> { ) -> AsyncRecursiveType<'a, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>> {
Box::pin(async move { Box::pin(async move {
// 1.1. Remove unsigned field // 1.1. Remove unsigned field
value.remove("unsigned"); value.remove("unsigned");
// TODO: For RoomVersion6 we must check that Raw<..> is canonical; do we do that anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json
// We go through all the signatures we see on the value and fetch the corresponding signing
// keys
self.fetch_required_signing_keys(&value, pub_key_map)
.await?;
// 2. Check signatures, otherwise drop // 2. Check signatures, otherwise drop
// 3. check content hash, redact if doesn't match // 3. check content hash, redact if doesn't match
let create_event_content: RoomCreateEventContent = let create_event_content: RoomCreateEventContent =
@ -322,80 +329,41 @@ impl Service {
let room_version = let room_version =
RoomVersion::new(room_version_id).expect("room version is supported"); RoomVersion::new(room_version_id).expect("room version is supported");
// TODO: For RoomVersion6 we must check that Raw<..> is canonical; do we do that anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json
// We go through all the signatures we see on the value and fetch the corresponding signing
// keys
self.fetch_required_signing_keys(&value, pub_key_map)
.await?;
let origin_server_ts = value.get("origin_server_ts").ok_or_else(|| {
error!("Invalid PDU, no origin_server_ts field");
Error::BadRequest(
ErrorKind::MissingParam,
"Invalid PDU, no origin_server_ts field",
)
})?;
let origin_server_ts: MilliSecondsSinceUnixEpoch = {
let ts = origin_server_ts.as_integer().ok_or_else(|| {
Error::BadRequest(
ErrorKind::InvalidParam,
"origin_server_ts must be an integer",
)
})?;
MilliSecondsSinceUnixEpoch(i64::from(ts).try_into().map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Time must be after the unix epoch")
})?)
};
let guard = pub_key_map.read().await; let guard = pub_key_map.read().await;
let mut val = match ruma::signatures::verify_event(&guard, &value, room_version_id) {
let pkey_map = (*guard).clone(); Err(e) => {
// Drop
// Removing all the expired keys, unless the room version allows stale keys warn!("Dropping bad event {}: {}", event_id, e,);
let filtered_keys = services().globals.filter_keys_server_map( return Err(Error::BadRequest(
pkey_map, ErrorKind::InvalidParam,
origin_server_ts, "Signature verification failed",
room_version_id, ));
); }
Ok(ruma::signatures::Verified::Signatures) => {
let mut val = // Redact
match ruma::signatures::verify_event(&filtered_keys, &value, room_version_id) { warn!("Calculated hash does not match: {}", event_id);
Err(e) => { let obj = match ruma::canonical_json::redact(value, room_version_id, None) {
// Drop Ok(obj) => obj,
warn!("Dropping bad event {}: {}", event_id, e,); Err(_) => {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Signature verification failed",
));
}
Ok(ruma::signatures::Verified::Signatures) => {
// Redact
warn!("Calculated hash does not match: {}", event_id);
let obj = match ruma::canonical_json::redact(value, room_version_id, None) {
Ok(obj) => obj,
Err(_) => {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Redaction failed",
))
}
};
// Skip the PDU if it is redacted and we already have it as an outlier event
if services().rooms.timeline.get_pdu_json(event_id)?.is_some() {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::InvalidParam, ErrorKind::InvalidParam,
"Event was redacted and we already knew about it", "Redaction failed",
)); ))
} }
};
obj // Skip the PDU if it is redacted and we already have it as an outlier event
if services().rooms.timeline.get_pdu_json(event_id)?.is_some() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Event was redacted and we already knew about it",
));
} }
Ok(ruma::signatures::Verified::All) => value,
}; obj
}
Ok(ruma::signatures::Verified::All) => value,
};
drop(guard); drop(guard);
@ -519,7 +487,7 @@ impl Service {
create_event: &PduEvent, create_event: &PduEvent,
origin: &ServerName, origin: &ServerName,
room_id: &RoomId, room_id: &RoomId,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<Option<Vec<u8>>> { ) -> Result<Option<Vec<u8>>> {
// Skip the PDU if we already have it as a timeline event // Skip the PDU if we already have it as a timeline event
if let Ok(Some(pduid)) = services().rooms.timeline.get_pdu_id(&incoming_pdu.event_id) { if let Ok(Some(pduid)) = services().rooms.timeline.get_pdu_id(&incoming_pdu.event_id) {
@ -1129,7 +1097,7 @@ impl Service {
create_event: &'a PduEvent, create_event: &'a PduEvent,
room_id: &'a RoomId, room_id: &'a RoomId,
room_version_id: &'a RoomVersionId, room_version_id: &'a RoomVersionId,
pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>> ) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>>
{ {
Box::pin(async move { Box::pin(async move {
@ -1312,7 +1280,7 @@ impl Service {
create_event: &PduEvent, create_event: &PduEvent,
room_id: &RoomId, room_id: &RoomId,
room_version_id: &RoomVersionId, room_version_id: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
initial_set: Vec<Arc<EventId>>, initial_set: Vec<Arc<EventId>>,
) -> Result<( ) -> Result<(
Vec<Arc<EventId>>, Vec<Arc<EventId>>,
@ -1410,7 +1378,7 @@ impl Service {
pub(crate) async fn fetch_required_signing_keys( pub(crate) async fn fetch_required_signing_keys(
&self, &self,
event: &BTreeMap<String, CanonicalJsonValue>, event: &BTreeMap<String, CanonicalJsonValue>,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<()> { ) -> Result<()> {
let signatures = event let signatures = event
.get("signatures") .get("signatures")
@ -1439,7 +1407,6 @@ impl Service {
) )
})?, })?,
signature_ids, signature_ids,
true,
) )
.await; .await;
@ -1467,7 +1434,7 @@ impl Service {
pdu: &RawJsonValue, pdu: &RawJsonValue,
servers: &mut BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>, servers: &mut BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>,
room_version: &RoomVersionId, room_version: &RoomVersionId,
pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, SigningKeys>>, pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<()> { ) -> Result<()> {
let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);
@ -1477,7 +1444,7 @@ impl Service {
let event_id = format!( let event_id = format!(
"${}", "${}",
ruma::signatures::reference_hash(&value, room_version) ruma::signatures::reference_hash(&value, room_version)
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))? .expect("ruma can calculate reference hashes")
); );
let event_id = <&EventId>::try_from(event_id.as_str()) let event_id = <&EventId>::try_from(event_id.as_str())
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
@ -1518,18 +1485,8 @@ impl Service {
        let signature_ids = signature_object.keys().cloned().collect::<Vec<_>>();

-       let contains_all_ids = |keys: &SigningKeys| {
-           signature_ids.iter().all(|id| {
-               keys.verify_keys
-                   .keys()
-                   .map(ToString::to_string)
-                   .any(|key_id| id == &key_id)
-                   || keys
-                       .old_verify_keys
-                       .keys()
-                       .map(ToString::to_string)
-                       .any(|key_id| id == &key_id)
-           })
-       };
+       let contains_all_ids = |keys: &BTreeMap<String, Base64>| {
+           signature_ids.iter().all(|id| keys.contains_key(id))
+       };
let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| {
@ -1542,14 +1499,19 @@ impl Service {
trace!("Loading signing keys for {}", origin); trace!("Loading signing keys for {}", origin);
if let Some(result) = services().globals.signing_keys_for(origin)? { let result: BTreeMap<_, _> = services()
if !contains_all_ids(&result) { .globals
trace!("Signing key not loaded for {}", origin); .signing_keys_for(origin)?
servers.insert(origin.to_owned(), BTreeMap::new()); .into_iter()
} .map(|(k, v)| (k.to_string(), v.key))
.collect();
pub_key_map.insert(origin.to_string(), result); if !contains_all_ids(&result) {
trace!("Signing key not loaded for {}", origin);
servers.insert(origin.to_owned(), BTreeMap::new());
} }
pub_key_map.insert(origin.to_string(), result);
} }
Ok(()) Ok(())
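
The recurring type change across these hunks is the shape of `pub_key_map`: one side keys it by server name to a structured `SigningKeys` value (current keys, old keys, and an expiry), the other to a flat `BTreeMap` of key ID to `Base64` key. Below is a minimal sketch of the structured variant and its `contains_all_ids` check, using plain `String`s in place of ruma's `Base64` and timestamp types.

use std::collections::BTreeMap;

/// Simplified stand-in for the `SigningKeys` value used on one side of this hunk:
/// current and old keys are kept separate, alongside an expiry timestamp.
struct SigningKeys {
    verify_keys: BTreeMap<String, String>,     // key ID -> base64 public key
    old_verify_keys: BTreeMap<String, String>, // expired key ID -> base64 public key
    valid_until_ts: u64,                       // milliseconds since the UNIX epoch
}

/// The flat-map form can answer this with a plain `contains_key`; the structured form
/// has to consult both the current and the old key sets.
fn contains_all_ids(keys: &SigningKeys, signature_ids: &[String]) -> bool {
    signature_ids
        .iter()
        .all(|id| keys.verify_keys.contains_key(id) || keys.old_verify_keys.contains_key(id))
}

fn main() {
    let keys = SigningKeys {
        verify_keys: BTreeMap::from([("ed25519:a_AAAA".to_owned(), "b64key".to_owned())]),
        old_verify_keys: BTreeMap::from([("ed25519:old".to_owned(), "b64old".to_owned())]),
        valid_until_ts: 1_700_000_000_000,
    };

    let wanted = vec!["ed25519:a_AAAA".to_owned(), "ed25519:old".to_owned()];
    assert!(contains_all_ids(&keys, &wanted));
    assert!(keys.valid_until_ts > 0);
}
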
@ -1559,7 +1521,7 @@ impl Service {
&self, &self,
event: &create_join_event::v2::Response, event: &create_join_event::v2::Response,
room_version: &RoomVersionId, room_version: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<()> { ) -> Result<()> {
let mut servers: BTreeMap< let mut servers: BTreeMap<
OwnedServerName, OwnedServerName,
@ -1622,7 +1584,10 @@ impl Service {
                    let result = services()
                        .globals
-                       .add_signing_key_from_trusted_server(&k.server_name, k.clone())?;
+                       .add_signing_key(&k.server_name, k.clone())?
+                       .into_iter()
+                       .map(|(k, v)| (k.to_string(), v.key))
+                       .collect::<BTreeMap<_, _>>();

                    pkm.insert(k.server_name.to_string(), result);
                }
@ -1653,9 +1618,12 @@ impl Service {
if let (Ok(get_keys_response), origin) = result { if let (Ok(get_keys_response), origin) = result {
info!("Result is from {origin}"); info!("Result is from {origin}");
if let Ok(key) = get_keys_response.server_key.deserialize() { if let Ok(key) = get_keys_response.server_key.deserialize() {
-                   let result = services()
+                   let result: BTreeMap<_, _> = services()
                        .globals
-                       .add_signing_key_from_origin(&origin, key)?;
+                       .add_signing_key(&origin, key)?
+                       .into_iter()
+                       .map(|(k, v)| (k.to_string(), v.key))
+                       .collect();
pub_key_map.write().await.insert(origin.to_string(), result); pub_key_map.write().await.insert(origin.to_string(), result);
} }
} }
@ -1687,6 +1655,11 @@ impl Service {
} }
}; };
if acl_event_content.allow.is_empty() {
// Ignore broken acl events
return Ok(());
}
if acl_event_content.is_allowed(server_name) { if acl_event_content.is_allowed(server_name) {
Ok(()) Ok(())
} else { } else {
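
The guard shown on one side of the hunk above ignores a server-ACL event whose `allow` list is empty, since enforcing it would deny every server, including the room's own. A standalone sketch of that rule with illustrative types (full glob matching is out of scope here, and these are not Conduit's actual types):

/// Minimal stand-in for an m.room.server_acl content object.
struct RoomAclContent {
    allow: Vec<String>, // patterns of allowed server names
    deny: Vec<String>,  // patterns of denied server names
}

fn acl_permits(acl: &RoomAclContent, server_name: &str) -> bool {
    if acl.allow.is_empty() {
        // Ignore broken ACL events rather than locking everyone out of the room.
        return true;
    }
    // Simplified matching: exact names plus the "*" wildcard only.
    let matches = |patterns: &[String]| patterns.iter().any(|p| p == "*" || p == server_name);
    !matches(&acl.deny) && matches(&acl.allow)
}

fn main() {
    let broken = RoomAclContent { allow: vec![], deny: vec!["evil.example".into()] };
    assert!(acl_permits(&broken, "evil.example")); // broken ACL is ignored

    let acl = RoomAclContent {
        allow: vec!["*".into()],
        deny: vec!["evil.example".into()],
    };
    assert!(acl_permits(&acl, "matrix.org"));
    assert!(!acl_permits(&acl, "evil.example"));
}
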
@ -1708,23 +1681,9 @@ impl Service {
        &self,
        origin: &ServerName,
        signature_ids: Vec<String>,
-       // Whether to ask for keys from trusted servers. Should be false when getting
-       // keys for validating requests, as per MSC4029
-       query_via_trusted_servers: bool,
-   ) -> Result<SigningKeys> {
-       let contains_all_ids = |keys: &SigningKeys| {
-           signature_ids.iter().all(|id| {
-               keys.verify_keys
-                   .keys()
-                   .map(ToString::to_string)
-                   .any(|key_id| id == &key_id)
-                   || keys
-                       .old_verify_keys
-                       .keys()
-                       .map(ToString::to_string)
-                       .any(|key_id| id == &key_id)
-           })
-       };
+   ) -> Result<BTreeMap<String, Base64>> {
+       let contains_all_ids =
+           |keys: &BTreeMap<String, Base64>| signature_ids.iter().all(|id| keys.contains_key(id));

        let permit = services()
            .globals
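
One side of this hunk threads a `query_via_trusted_servers` flag through `fetch_signing_keys`, citing MSC4029: when keys are being fetched to validate an incoming request, the trusted-server (notary) fallback is skipped. A rough sketch of that lookup order follows; the key sources are stubs invented for illustration, not Conduit's actual cache, sending, or globals APIs.

use std::collections::BTreeMap;

// Illustrative key sources; in the real code these are the local cache, a direct
// request to the origin, and requests to configured trusted servers.
fn keys_from_cache(_origin: &str) -> BTreeMap<String, String> {
    BTreeMap::new()
}
fn keys_from_origin(origin: &str) -> BTreeMap<String, String> {
    BTreeMap::from([(format!("ed25519:{origin}"), "b64key".to_owned())])
}
fn keys_from_notary(origin: &str) -> BTreeMap<String, String> {
    BTreeMap::from([(format!("ed25519:{origin}"), "b64key".to_owned())])
}

/// Sketch of the lookup order implied by the flag: cache, then the origin itself, and only
/// optionally the trusted servers. Callers validating an incoming request would pass
/// `query_via_trusted_servers = false`.
fn fetch_signing_keys(
    origin: &str,
    wanted: &[String],
    query_via_trusted_servers: bool,
) -> Option<BTreeMap<String, String>> {
    let has_all = |keys: &BTreeMap<String, String>| wanted.iter().all(|id| keys.contains_key(id));

    let cached = keys_from_cache(origin);
    if has_all(&cached) {
        return Some(cached);
    }

    let direct = keys_from_origin(origin);
    if has_all(&direct) {
        return Some(direct);
    }

    if query_via_trusted_servers {
        let via_notary = keys_from_notary(origin);
        if has_all(&via_notary) {
            return Some(via_notary);
        }
    }

    None
}

fn main() {
    let wanted = vec!["ed25519:example.org".to_owned()];
    assert!(fetch_signing_keys("example.org", &wanted, false).is_some());
}
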
@ -1785,172 +1744,94 @@ impl Service {
trace!("Loading signing keys for {}", origin); trace!("Loading signing keys for {}", origin);
let result = services().globals.signing_keys_for(origin)?; let mut result: BTreeMap<_, _> = services()
.globals
.signing_keys_for(origin)?
.into_iter()
.map(|(k, v)| (k.to_string(), v.key))
.collect();
let mut expires_soon_or_has_expired = false; if contains_all_ids(&result) {
return Ok(result);
if let Some(result) = result.clone() {
let ts_threshold = MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(30 * 60),
)
.expect("Should be valid until year 500,000,000");
debug!(
"The treshhold is {:?}, found time is {:?} for server {}",
ts_threshold, result.valid_until_ts, origin
);
if contains_all_ids(&result) {
// We want to ensure that the keys remain valid by the time the other functions that handle signatures reach them
if result.valid_until_ts > ts_threshold {
debug!(
"Keys for {} are deemed as valid, as they expire at {:?}",
&origin, &result.valid_until_ts
);
return Ok(result);
}
expires_soon_or_has_expired = true;
}
} }
let mut keys = result.unwrap_or_else(|| SigningKeys {
verify_keys: BTreeMap::new(),
old_verify_keys: BTreeMap::new(),
valid_until_ts: MilliSecondsSinceUnixEpoch::now(),
});
// We want to set this to the max, and then lower it whenever we see older keys
keys.valid_until_ts = MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(7 * 86400),
)
.expect("Should be valid until year 500,000,000");
debug!("Fetching signing keys for {} over federation", origin); debug!("Fetching signing keys for {} over federation", origin);
if let Some(mut server_key) = services() if let Some(server_key) = services()
.sending .sending
.send_federation_request(origin, get_server_keys::v2::Request::new()) .send_federation_request(origin, get_server_keys::v2::Request::new())
.await .await
.ok() .ok()
.and_then(|resp| resp.server_key.deserialize().ok()) .and_then(|resp| resp.server_key.deserialize().ok())
{ {
// Keys should only be valid for a maximum of seven days
server_key.valid_until_ts = server_key.valid_until_ts.min(
MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(7 * 86400),
)
.expect("Should be valid until year 500,000,000"),
);
services() services()
.globals .globals
.add_signing_key_from_origin(origin, server_key.clone())?; .add_signing_key(origin, server_key.clone())?;
if keys.valid_until_ts > server_key.valid_until_ts { result.extend(
keys.valid_until_ts = server_key.valid_until_ts;
}
keys.verify_keys.extend(
server_key server_key
.verify_keys .verify_keys
.into_iter() .into_iter()
.map(|(id, key)| (id.to_string(), key)), .map(|(k, v)| (k.to_string(), v.key)),
); );
keys.old_verify_keys.extend( result.extend(
server_key server_key
.old_verify_keys .old_verify_keys
.into_iter() .into_iter()
.map(|(id, key)| (id.to_string(), key)), .map(|(k, v)| (k.to_string(), v.key)),
); );
if contains_all_ids(&keys) { if contains_all_ids(&result) {
return Ok(keys); return Ok(result);
} }
} }
if query_via_trusted_servers { for server in services().globals.trusted_servers() {
for server in services().globals.trusted_servers() { debug!("Asking {} for {}'s signing key", server, origin);
debug!("Asking {} for {}'s signing key", server, origin); if let Some(server_keys) = services()
if let Some(server_keys) = services() .sending
.sending .send_federation_request(
.send_federation_request( server,
server, get_remote_server_keys::v2::Request::new(
get_remote_server_keys::v2::Request::new( origin.to_owned(),
origin.to_owned(), MilliSecondsSinceUnixEpoch::from_system_time(
MilliSecondsSinceUnixEpoch::from_system_time( SystemTime::now()
SystemTime::now() .checked_add(Duration::from_secs(3600))
.checked_add(Duration::from_secs(3600)) .expect("SystemTime to large"),
.expect("SystemTime to large"), )
) .expect("time is valid"),
.expect("time is valid"), ),
), )
) .await
.await .ok()
.ok() .map(|resp| {
.map(|resp| { resp.server_keys
resp.server_keys .into_iter()
.filter_map(|e| e.deserialize().ok())
.collect::<Vec<_>>()
})
{
trace!("Got signing keys: {:?}", server_keys);
for k in server_keys {
services().globals.add_signing_key(origin, k.clone())?;
result.extend(
k.verify_keys
.into_iter() .into_iter()
.filter_map(|e| e.deserialize().ok()) .map(|(k, v)| (k.to_string(), v.key)),
.collect::<Vec<_>>() );
}) result.extend(
{ k.old_verify_keys
trace!("Got signing keys: {:?}", server_keys); .into_iter()
for mut k in server_keys { .map(|(k, v)| (k.to_string(), v.key)),
if k.valid_until_ts );
// Half an hour should give plenty of time for the server to respond with keys that are still }
// valid, given we requested keys which are valid at least an hour from now
< MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(30 * 60),
)
.expect("Should be valid until year 500,000,000")
{
// Keys should only be valid for a maximum of seven days
k.valid_until_ts = k.valid_until_ts.min(
MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(7 * 86400),
)
.expect("Should be valid until year 500,000,000"),
);
if keys.valid_until_ts > k.valid_until_ts { if contains_all_ids(&result) {
keys.valid_until_ts = k.valid_until_ts; return Ok(result);
}
services()
.globals
.add_signing_key_from_trusted_server(origin, k.clone())?;
keys.verify_keys.extend(
k.verify_keys
.into_iter()
.map(|(id, key)| (id.to_string(), key)),
);
keys.old_verify_keys.extend(
k.old_verify_keys
.into_iter()
.map(|(id, key)| (id.to_string(), key)),
);
} else {
warn!(
"Server {} gave us keys older than we requested, valid until: {:?}",
origin, k.valid_until_ts
);
}
if contains_all_ids(&keys) {
return Ok(keys);
}
}
} }
} }
} }
// We should return these keys if fresher keys were not found
if expires_soon_or_has_expired {
info!("Returning stale keys for {}", origin);
return Ok(keys);
}
drop(permit); drop(permit);
back_off(signature_ids).await; back_off(signature_ids).await;
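
One side of the hunk above is mostly timestamp bookkeeping: cached keys are only reused if they stay valid for at least another 30 minutes, and whatever `valid_until_ts` a remote server advertises is clamped to at most seven days ahead. A small sketch of that arithmetic, with plain millisecond integers standing in for `MilliSecondsSinceUnixEpoch`:

use std::time::{SystemTime, UNIX_EPOCH};

/// Millisecond timestamp "now", mirroring MilliSecondsSinceUnixEpoch only in spirit.
fn now_ms() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is after the UNIX epoch")
        .as_millis() as u64
}

const THIRTY_MINUTES: u64 = 30 * 60 * 1000;
const SEVEN_DAYS: u64 = 7 * 86_400 * 1000;

/// A cached key set is only reused if it stays valid past a 30-minute safety margin,
/// so it cannot expire while later signature checks are still running.
fn cached_keys_still_usable(valid_until_ts: u64) -> bool {
    valid_until_ts > now_ms() + THIRTY_MINUTES
}

/// The advertised expiry is clamped to at most seven days from now, so a misbehaving
/// server cannot pin an effectively immortal key in the cache.
fn clamp_valid_until(advertised_ts: u64) -> u64 {
    advertised_ts.min(now_ms() + SEVEN_DAYS)
}

fn main() {
    let soon = now_ms() + 5 * 60 * 1000;
    assert!(!cached_keys_still_usable(soon));

    let far = now_ms() + 365 * 86_400 * 1000;
    assert!(clamp_valid_until(far) <= now_ms() + SEVEN_DAYS);
}
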

View file

@ -4,8 +4,6 @@ use ruma::RoomId;
pub trait Data: Send + Sync { pub trait Data: Send + Sync {
fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>; fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;
fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
fn search_pdus<'a>( fn search_pdus<'a>(
&'a self, &'a self,

View file

@ -15,16 +15,6 @@ impl Service {
self.db.index_pdu(shortroomid, pdu_id, message_body) self.db.index_pdu(shortroomid, pdu_id, message_body)
} }
#[tracing::instrument(skip(self))]
pub fn deindex_pdu<'a>(
&self,
shortroomid: u64,
pdu_id: &[u8],
message_body: &str,
) -> Result<()> {
self.db.deindex_pdu(shortroomid, pdu_id, message_body)
}
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn search_pdus<'a>( pub fn search_pdus<'a>(
&'a self, &'a self,
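
On one side the search service pairs `index_pdu` with a `deindex_pdu`. The toy inverted index below (in-memory only, not the key-prefix scheme Conduit's database layer actually uses) shows why the inverse operation matters: without it, tokens from a redacted message keep matching searches.

use std::collections::{BTreeMap, BTreeSet};

#[derive(Default)]
struct SearchIndex {
    // token -> set of PDU ids that contain it
    tokens: BTreeMap<String, BTreeSet<Vec<u8>>>,
}

impl SearchIndex {
    fn index_pdu(&mut self, pdu_id: &[u8], message_body: &str) {
        for word in message_body.split_whitespace() {
            self.tokens
                .entry(word.to_lowercase())
                .or_default()
                .insert(pdu_id.to_vec());
        }
    }

    fn deindex_pdu(&mut self, pdu_id: &[u8], message_body: &str) {
        for word in message_body.split_whitespace() {
            let key = word.to_lowercase();
            // Remove the PDU from the token's posting set, dropping empty sets entirely.
            let now_empty = match self.tokens.get_mut(&key) {
                Some(ids) => {
                    ids.remove(pdu_id);
                    ids.is_empty()
                }
                None => false,
            };
            if now_empty {
                self.tokens.remove(&key);
            }
        }
    }

    fn search(&self, word: &str) -> usize {
        self.tokens.get(&word.to_lowercase()).map_or(0, |ids| ids.len())
    }
}

fn main() {
    let mut index = SearchIndex::default();
    index.index_pdu(b"pdu-1", "hello world");
    assert_eq!(index.search("hello"), 1);

    // Redacting the event should also drop it from the search index.
    index.deindex_pdu(b"pdu-1", "hello world");
    assert_eq!(index.search("hello"), 0);
}
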

View file

@ -321,7 +321,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some(target_user.into()), state_key: Some(target_user.into()),
redacts: None, redacts: None,
timestamp: None,
}; };
Ok(services() Ok(services()

View file

@ -21,9 +21,10 @@ use ruma::{
        GlobalAccountDataEventType, StateEventType, TimelineEventType,
    },
    push::{Action, Ruleset, Tweak},
+   serde::Base64,
    state_res::{self, Event, RoomVersion},
-   uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch,
-   OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, RoomVersionId, ServerName, UserId,
+   uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
+   OwnedServerName, RoomId, RoomVersionId, ServerName, UserId,
};
use serde::Deserialize; use serde::Deserialize;
use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
@ -32,10 +33,7 @@ use tracing::{error, info, warn};
use crate::{
    api::server_server,
-   service::{
-       globals::SigningKeys,
-       pdu::{EventHash, PduBuilder},
-   },
+   service::pdu::{EventHash, PduBuilder},
    services, utils, Error, PduEvent, Result,
};
@ -401,7 +399,7 @@ impl Service {
&pdu.room_id, &pdu.room_id,
false, false,
)? { )? {
self.redact_pdu(redact_id, pdu, shortroomid)?; self.redact_pdu(redact_id, pdu)?;
} }
} }
} }
@ -418,7 +416,7 @@ impl Service {
&pdu.room_id, &pdu.room_id,
false, false,
)? { )? {
self.redact_pdu(redact_id, pdu, shortroomid)?; self.redact_pdu(redact_id, pdu)?;
} }
} }
} }
@ -498,14 +496,7 @@ impl Service {
            && services().globals.emergency_password().is_none();

        if let Some(admin_room) = services().admin.get_admin_room()? {
-           if to_conduit
-               && !from_conduit
-               && admin_room == pdu.room_id
-               && services()
-                   .rooms
-                   .state_cache
-                   .is_joined(server_user, &admin_room)?
-           {
+           if to_conduit && !from_conduit && admin_room == pdu.room_id {
                services().admin.process_message(body);
            }
        }
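
The longer of the two conditions above also requires the server user to still be joined to the admin room before a message is handed to the admin bot. A compact sketch of that gate, with plain booleans standing in for the individual lookups:

/// Illustrative gate, not Conduit's API: a message is only processed as an admin command
/// if it is addressed to the server user, not sent by it, posted in the admin room, and
/// (on the stricter variant) the server user is still joined there.
fn should_process_admin_command(
    to_conduit: bool,
    from_conduit: bool,
    in_admin_room: bool,
    server_user_joined: bool,
) -> bool {
    to_conduit && !from_conduit && in_admin_room && server_user_joined
}

fn main() {
    // Once the server user has left the admin room, commands are ignored instead of
    // being processed in a room the bot can no longer reply to.
    assert!(!should_process_admin_command(true, false, true, false));
    assert!(should_process_admin_command(true, false, true, true));
}
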
@ -665,7 +656,6 @@ impl Service {
unsigned, unsigned,
state_key, state_key,
redacts, redacts,
timestamp,
} = pdu_builder; } = pdu_builder;
let prev_events: Vec<_> = services() let prev_events: Vec<_> = services()
@ -735,9 +725,9 @@ impl Service {
event_id: ruma::event_id!("$thiswillbefilledinlater").into(), event_id: ruma::event_id!("$thiswillbefilledinlater").into(),
room_id: room_id.to_owned(), room_id: room_id.to_owned(),
sender: sender.to_owned(), sender: sender.to_owned(),
-           origin_server_ts: timestamp
-               .map(|ts| ts.get())
-               .unwrap_or_else(|| MilliSecondsSinceUnixEpoch::now().get()),
+           origin_server_ts: utils::millis_since_unix_epoch()
+               .try_into()
+               .expect("time is valid"),
kind: event_type, kind: event_type,
content, content,
state_key, state_key,
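
This hunk is about where `origin_server_ts` comes from: one side always stamps the PDU with the current time, the other prefers a caller-supplied timestamp (useful, for example, when importing or bridging historical messages) and only falls back to "now". A sketch with `u64` milliseconds in place of ruma's timestamp type:

use std::time::{SystemTime, UNIX_EPOCH};

/// Prefer an explicit timestamp override; otherwise use the current wall-clock time.
fn origin_server_ts(timestamp_override: Option<u64>) -> u64 {
    timestamp_override.unwrap_or_else(|| {
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("system clock is after the UNIX epoch")
            .as_millis() as u64
    })
}

fn main() {
    assert_eq!(origin_server_ts(Some(1_600_000_000_000)), 1_600_000_000_000);
    assert!(origin_server_ts(None) > 1_600_000_000_000);
}
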
@ -815,7 +805,7 @@ impl Service {
pdu.event_id = EventId::parse_arc(format!( pdu.event_id = EventId::parse_arc(format!(
"${}", "${}",
ruma::signatures::reference_hash(&pdu_json, &room_version_id) ruma::signatures::reference_hash(&pdu_json, &room_version_id)
.expect("Event format validated when event was hashed") .expect("ruma can calculate reference hashes")
)) ))
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
@ -1110,33 +1100,14 @@ impl Service {
/// Replace a PDU with the redacted form. /// Replace a PDU with the redacted form.
#[tracing::instrument(skip(self, reason))] #[tracing::instrument(skip(self, reason))]
-   pub fn redact_pdu(
-       &self,
-       event_id: &EventId,
-       reason: &PduEvent,
-       shortroomid: u64,
-   ) -> Result<()> {
+   pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> {
        // TODO: Don't reserialize, keep original json
        if let Some(pdu_id) = self.get_pdu_id(event_id)? {
            let mut pdu = self
                .get_pdu_from_id(&pdu_id)?
                .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?;
-
-           #[derive(Deserialize)]
-           struct ExtractBody {
-               body: String,
-           }
-
-           if let Ok(content) = serde_json::from_str::<ExtractBody>(pdu.content.get()) {
-               services()
-                   .rooms
-                   .search
-                   .deindex_pdu(shortroomid, &pdu_id, &content.body)?;
-           }
let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?; let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?;
pdu.redact(room_version_id, reason)?; pdu.redact(room_version_id, reason)?;
self.replace_pdu( self.replace_pdu(
&pdu_id, &pdu_id,
&utils::to_canonical_object(&pdu).expect("PDU is an object"), &utils::to_canonical_object(&pdu).expect("PDU is an object"),
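
On the side that deindexes, the plaintext `body` has to be pulled out of the PDU content before redaction wipes it. A minimal serde-based sketch of that ordering; `ExtractBody` mirrors the helper in the hunk, while `deindex` is a stub here rather than Conduit's search API.

use serde::Deserialize;
use serde_json::json;

#[derive(Deserialize)]
struct ExtractBody {
    body: String,
}

fn deindex(pdu_id: &[u8], body: &str) {
    println!("deindexing {:?} for pdu {:?}", body, pdu_id);
}

fn redact_pdu(pdu_id: &[u8], content_json: &str) {
    // The body must be read *before* redaction empties the content.
    if let Ok(content) = serde_json::from_str::<ExtractBody>(content_json) {
        deindex(pdu_id, &content.body);
    }
    // ... replace the stored PDU with its redacted form here ...
}

fn main() {
    let content = json!({ "msgtype": "m.text", "body": "hello world" }).to_string();
    redact_pdu(b"pdu-1", &content);
}
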
@ -1217,7 +1188,7 @@ impl Service {
&self, &self,
origin: &ServerName, origin: &ServerName,
pdu: Box<RawJsonValue>, pdu: Box<RawJsonValue>,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<()> { ) -> Result<()> {
let (event_id, value, room_id) = server_server::parse_incoming_pdu(&pdu)?; let (event_id, value, room_id) = server_server::parse_incoming_pdu(&pdu)?;

View file

@ -9,6 +9,7 @@ pub use data::Data;
use ruma::{ use ruma::{
api::client::{ api::client::{
device::Device, device::Device,
error::ErrorKind,
filter::FilterDefinition, filter::FilterDefinition,
sync::sync_events::{ sync::sync_events::{
self, self,
@ -19,7 +20,7 @@ use ruma::{
events::AnyToDeviceEvent, events::AnyToDeviceEvent,
serde::Raw, serde::Raw,
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri,
OwnedRoomId, OwnedUserId, UInt, UserId, OwnedRoomId, OwnedUserId, RoomAliasId, UInt, UserId,
}; };
use crate::{services, Error, Result}; use crate::{services, Error, Result};
@ -261,14 +262,19 @@ impl Service {
/// Check if a user is an admin /// Check if a user is an admin
pub fn is_admin(&self, user_id: &UserId) -> Result<bool> { pub fn is_admin(&self, user_id: &UserId) -> Result<bool> {
-       if let Some(admin_room_id) = services().admin.get_admin_room()? {
-           services()
-               .rooms
-               .state_cache
-               .is_joined(user_id, &admin_room_id)
-       } else {
-           Ok(false)
-       }
+       let admin_room_alias_id =
+           RoomAliasId::parse(format!("#admins:{}", services().globals.server_name()))
+               .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
+       let admin_room_id = services()
+           .rooms
+           .alias
+           .resolve_local_alias(&admin_room_alias_id)?
+           .unwrap();
+
+       services()
+           .rooms
+           .state_cache
+           .is_joined(user_id, &admin_room_id)
    }
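
The two `is_admin` variants above differ in how the admin room is located: resolving the `#admins:server` alias on every call and unwrapping assumes the room always exists, while an `Option`-returning lookup lets the check simply answer `false` when there is no admin room. A standalone sketch with illustrative types, not Conduit's services:

use std::collections::BTreeMap;

struct Server {
    admin_room: Option<String>,            // room ID of the admin room, if any
    joined: BTreeMap<String, Vec<String>>, // room ID -> joined user IDs
}

impl Server {
    /// A user is an admin if an admin room exists and they are joined to it;
    /// with no admin room, the answer is `false` rather than a panic.
    fn is_admin(&self, user_id: &str) -> bool {
        match &self.admin_room {
            Some(room_id) => self
                .joined
                .get(room_id)
                .map_or(false, |members| members.iter().any(|m| m == user_id)),
            None => false,
        }
    }
}

fn main() {
    let server = Server {
        admin_room: Some("!admins:example.org".to_owned()),
        joined: BTreeMap::from([(
            "!admins:example.org".to_owned(),
            vec!["@alice:example.org".to_owned()],
        )]),
    };
    assert!(server.is_admin("@alice:example.org"));
    assert!(!server.is_admin("@bob:example.org"));

    let no_admin_room = Server { admin_room: None, joined: BTreeMap::new() };
    assert!(!no_admin_room.is_admin("@alice:example.org"));
}
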
/// Create a new user account on this homeserver. /// Create a new user account on this homeserver.