Compare commits


1 commit

Author: Samuel Meenzen
SHA1: 88b24310b5
Message: chore: format nix using alejandra
Date: 2024-05-09 15:20:48 +02:00
69 changed files with 2151 additions and 3739 deletions


@@ -103,11 +103,6 @@ artifacts:
 - ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
 - cp result/bin/conduit aarch64-unknown-linux-musl
-- mkdir -p target/aarch64-unknown-linux-musl/release
-- cp result/bin/conduit target/aarch64-unknown-linux-musl/release
-- direnv exec . cargo deb --no-strip --no-build --target aarch64-unknown-linux-musl
-- mv target/aarch64-unknown-linux-musl/debian/*.deb aarch64-unknown-linux-musl.deb
 - ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl
 - cp result oci-image-arm64v8.tar.gz
@@ -119,7 +114,6 @@ artifacts:
 - x86_64-unknown-linux-musl
 - aarch64-unknown-linux-musl
 - x86_64-unknown-linux-musl.deb
-- aarch64-unknown-linux-musl.deb
 - oci-image-amd64.tar.gz
 - oci-image-arm64v8.tar.gz
 - public

Cargo.lock (generated): file diff suppressed because it is too large.


@@ -16,10 +16,10 @@ license = "Apache-2.0"
 name = "conduit"
 readme = "README.md"
 repository = "https://gitlab.com/famedly/conduit"
-version = "0.10.0-alpha"
+version = "0.8.0-alpha"
 # See also `rust-toolchain.toml`
-rust-version = "1.79.0"
+rust-version = "1.78.0"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -28,24 +28,42 @@ workspace = true
 [dependencies]
 # Web framework
-axum = { version = "0.7", default-features = false, features = [
+axum = { version = "0.6.18", default-features = false, features = [
     "form",
+    "headers",
     "http1",
     "http2",
     "json",
     "matched-path",
 ], optional = true }
-axum-extra = { version = "0.9", features = ["typed-header"] }
-axum-server = { version = "0.6", features = ["tls-rustls"] }
+axum-server = { version = "0.5.1", features = ["tls-rustls"] }
 tower = { version = "0.4.13", features = ["util"] }
-tower-http = { version = "0.5", features = [
+tower-http = { version = "0.4.1", features = [
     "add-extension",
     "cors",
     "sensitive-headers",
     "trace",
     "util",
 ] }
-tower-service = "0.3"
+# Used for matrix spec type definitions and helpers
+#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
+ruma = { git = "https://github.com/ruma/ruma", rev = "5495b85aa311c2805302edb0a7de40399e22b397", features = [
+    "appservice-api-c",
+    "client-api",
+    "compat",
+    "federation-api",
+    "push-gateway-api-c",
+    "rand",
+    "ring-compat",
+    "state-res",
+    "unstable-exhaustive-types",
+    "unstable-msc2448",
+    "unstable-msc3575",
+    "unstable-unspecified",
+] }
+#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
+#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
 # Async runtime and utilities
 tokio = { version = "1.28.1", features = ["fs", "macros", "signal", "sync"] }
@@ -56,7 +74,7 @@ persy = { version = "1.4.4", optional = true, features = ["background_ops"] }
 # Used for the http request / response body type for Ruma endpoints used with reqwest
 bytes = "1.4.0"
-http = "1"
+http = "0.2.9"
 # Used to find data directory for default db path
 directories = "5"
 # Used for ruma wrapper
@@ -70,14 +88,8 @@ rand = "0.8.5"
 # Used to hash passwords
 rust-argon2 = "2"
 # Used to send requests
-hyper = "1.1"
-hyper-util = { version = "0.1", features = [
-    "client",
-    "client-legacy",
-    "http1",
-    "http2",
-] }
-reqwest = { version = "0.12", default-features = false, features = [
+hyper = "0.14.26"
+reqwest = { version = "0.11.18", default-features = false, features = [
     "rustls-tls-native-roots",
     "socks",
 ] }
@@ -100,13 +112,11 @@ regex = "1.8.1"
 # jwt jsonwebtokens
 jsonwebtoken = "9.2.0"
 # Performance measurements
-opentelemetry = "0.22"
-opentelemetry-jaeger-propagator = "0.1"
-opentelemetry-otlp = "0.15"
-opentelemetry_sdk = { version = "0.22", features = ["rt-tokio"] }
-tracing = "0.1.37"
+opentelemetry = { version = "0.18.0", features = ["rt-tokio"] }
+opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] }
+tracing = { version = "0.1.37", features = [] }
 tracing-flame = "0.2.0"
-tracing-opentelemetry = "0.23"
+tracing-opentelemetry = "0.18.0"
 tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
 lru-cache = "0.1.2"
@@ -147,25 +157,6 @@ tikv-jemallocator = { version = "0.5.0", features = [
 sd-notify = { version = "0.4.1", optional = true }
-# Used for matrix spec type definitions and helpers
-[dependencies.ruma]
-features = [
-    "appservice-api-c",
-    "client-api",
-    "compat",
-    "federation-api",
-    "push-gateway-api-c",
-    "rand",
-    "ring-compat",
-    "server-util",
-    "state-res",
-    "unstable-exhaustive-types",
-    "unstable-msc2448",
-    "unstable-msc3575",
-    "unstable-unspecified",
-]
-git = "https://github.com/ruma/ruma"
 [dependencies.rocksdb]
 features = ["lz4", "multi-threaded-cf", "zstd"]
 optional = true


@@ -56,13 +56,6 @@ If you have any questions, feel free to
 - Send an direct message to `@timokoesters:fachschaften.org` on Matrix
 - [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)
-#### Security
-If you believe you have found a security issue, please send a message to [Timo](https://matrix.to/#/@timo:conduit.rs)
-and/or [Matthias](https://matrix.to/#/@matthias:ahouansou.cz) on Matrix, or send an email to
-[conduit@koesters.xyz](mailto:conduit@koesters.xyz). Please do not disclose details about the issue to anyone else before
-a fix is released publically.
 #### Thanks to
 Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project.


@@ -1,4 +1,4 @@
-FROM rust:1.79.0
+FROM rust:1.78.0
 WORKDIR /workdir


@@ -1,10 +1,14 @@
-(import
 (
-  let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
+  import
+  (
+    let
+      lock = builtins.fromJSON (builtins.readFile ./flake.lock);
+    in
     fetchTarball {
       url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
       sha256 = lock.nodes.flake-compat.locked.narHash;
     }
   )
   {src = ./.;}
-).defaultNix
+)
+.defaultNix


@ -6,8 +6,6 @@
> **Note:** If you update the configuration file, you must restart Conduit for the changes to take effect > **Note:** If you update the configuration file, you must restart Conduit for the changes to take effect
> **Note:** You can also configure Conduit by using `CONDUIT_{field_name}` environment variables. To set values inside a table, use `CONDUIT_{table_name}__{field_name}`. Example: `CONDUIT_SERVER_NAME="example.org"`
Conduit's configuration file is divided into the following sections: Conduit's configuration file is divided into the following sections:
- [Global](#global) - [Global](#global)
@ -58,8 +56,7 @@ The `global` section contains the following fields:
| `turn_secret` | `string` | The TURN secret | `""` | | `turn_secret` | `string` | The TURN secret | `""` |
| `turn_ttl` | `integer` | The TURN TTL in seconds | `86400` | | `turn_ttl` | `integer` | The TURN TTL in seconds | `86400` |
| `emergency_password` | `string` | Set a password to login as the `conduit` user in case of emergency | N/A | | `emergency_password` | `string` | Set a password to login as the `conduit` user in case of emergency | N/A |
| `well_known_client` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) | | `well_known` | `table` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |
| `well_known_server` | `string` | Used for [delegation](delegation.md) | See [delegation](delegation.md) |
### TLS ### TLS
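The note deleted in this hunk documented Conduit's `CONDUIT_{field_name}` environment-variable overrides for fields in the `[global]` config table. As a rough illustration of the file-based form those overrides map onto, here is a minimal sketch using only fields mentioned on this page; values are placeholders, not a reference configuration:

```toml
# Hypothetical minimal conduit.toml; field names come from the table above,
# values are illustrative only.
[global]
server_name = "example.org"  # overridable as CONDUIT_SERVER_NAME="example.org"
turn_ttl = 86400             # overridable as CONDUIT_TURN_TTL, per the same convention
```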


@@ -16,18 +16,18 @@ are connected to the server running Conduit using something like a VPN.
 > **Note**: this will automatically allow you to use [sliding sync][0] without any extra configuration
-To configure it, use the following options:
+To configure it, use the following options in the `global.well_known` table:
 | Field | Type | Description | Default |
 | --- | --- | --- | --- |
-| `well_known_client` | `String` | The URL that clients should use to connect to Conduit | `https://<server_name>` |
-| `well_known_server` | `String` | The hostname and port servers should use to connect to Conduit | `<server_name>:443` |
+| `client` | `String` | The URL that clients should use to connect to Conduit | `https://<server_name>` |
+| `server` | `String` | The hostname and port servers should use to connect to Conduit | `<server_name>:443` |
 ### Example
 ```toml
-[global]
-well_known_client = "https://matrix.example.org"
-well_known_server = "matrix.example.org:443"
+[global.well_known]
+client = "https://matrix.example.org"
+server = "matrix.example.org:443"
 ```
 ## Manual


@@ -64,7 +64,6 @@ docker run -d -p 8448:6167 \
 -e CONDUIT_MAX_REQUEST_SIZE="20000000" \
 -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
 -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
--e CONDUIT_PORT="6167" \
 --name conduit <link>
 ```
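The `CONDUIT_*` variables in this `docker run` snippet follow the `CONDUIT_{field_name}` convention from the configuration chapter, so the same settings can also live in a mounted config file. A hedged sketch of an equivalent TOML fragment follows; the field names are inferred from the variable names shown above, not copied from a reference config:

```toml
# Illustrative conduit.toml fragment assumed to mirror the environment
# variables in the docker run example; names are inferred, values copied.
[global]
max_request_size = 20_000_000     # CONDUIT_MAX_REQUEST_SIZE="20000000"
trusted_servers = ["matrix.org"]  # CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]"
max_concurrent_requests = 100     # CONDUIT_MAX_CONCURRENT_REQUESTS="100"
```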


@@ -17,7 +17,6 @@ You may simply download the binary that fits your machine. Run `uname -m` to see
 | Target | Type | Download |
 |-|-|-|
 | `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
-| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl.deb?job=artifacts) |
 | `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/x86_64-unknown-linux-musl?job=artifacts) |
 | `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/aarch64-unknown-linux-musl?job=artifacts) |
 | `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/master/raw/oci-image-amd64.tar.gz?job=artifacts) |
@@ -31,7 +30,6 @@ If you use a system with an older glibc version (e.g. RHEL8), you might need to
 | Target | Type | Download |
 |-|-|-|
 | `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
-| `aarch64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl.deb?job=artifacts) |
 | `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts) |
 | `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=artifacts) |
 | `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=artifacts) |


@@ -35,7 +35,3 @@ Here is an example:
 Not really. You can reuse the domain of your current server with Conduit, but you will not be able to migrate accounts automatically.
 Rooms that were federated can be re-joined via the other participating servers, however media and the like may be deleted from remote servers after some time, and hence might not be recoverable.
-## How do I make someone an admin?
-Simply invite them to the admin room. Once joined, they can administer the server by interacting with the `@conduit:<server_name>` user.


@@ -2,7 +2,7 @@
 ## General instructions
-* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/element-hq/synapse/blob/develop/docs/turn-howto.md).
+* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md).
 ## Edit/Add a few settings to your existing conduit.toml


@@ -35,6 +35,11 @@ group = "versions"
 name = "lychee"
 script = "lychee --version"
+[[task]]
+group = "versions"
+name = "alejandra"
+script = "alejandra --version"
 [[task]]
 group = "lints"
 name = "cargo-fmt"
@@ -66,6 +71,11 @@ group = "lints"
 name = "lychee"
 script = "lychee --offline docs"
+[[task]]
+group = "lints"
+name = "alejandra"
+script = "alejandra --check ."
 [[task]]
 group = "tests"
 name = "cargo"


@ -19,10 +19,10 @@
attic.url = "github:zhaofengli/attic?ref=main"; attic.url = "github:zhaofengli/attic?ref=main";
}; };
outputs = inputs: outputs = inputs: let
let
# Keep sorted # Keep sorted
mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { mkScope = pkgs:
pkgs.lib.makeScope pkgs.newScope (self: {
craneLib = craneLib =
(inputs.crane.mkLib pkgs).overrideToolchain self.toolchain; (inputs.crane.mkLib pkgs).overrideToolchain self.toolchain;
@ -34,8 +34,7 @@
book = self.callPackage ./nix/pkgs/book {}; book = self.callPackage ./nix/pkgs/book {};
rocksdb = rocksdb = let
let
version = "9.1.1"; version = "9.1.1";
in in
pkgs.rocksdb.overrideAttrs (old: { pkgs.rocksdb.overrideAttrs (old: {
@ -51,7 +50,8 @@
shell = self.callPackage ./nix/shell.nix {}; shell = self.callPackage ./nix/shell.nix {};
# The Rust toolchain to use # The Rust toolchain to use
toolchain = inputs toolchain =
inputs
.fenix .fenix
.packages .packages
.${pkgs.pkgsBuildHost.system} .${pkgs.pkgsBuildHost.system}
@ -59,26 +59,27 @@
file = ./rust-toolchain.toml; file = ./rust-toolchain.toml;
# See also `rust-toolchain.toml` # See also `rust-toolchain.toml`
sha256 = "sha256-Ngiz76YP4HTY75GGdH2P+APE/DEIx2R/Dn+BwwOyzZU="; sha256 = "sha256-opUgs6ckUQCyDxcB9Wy51pqhd0MPGHUVbwRKKPGiwZU=";
}; };
}); });
in in
inputs.flake-utils.lib.eachDefaultSystem (system: inputs.flake-utils.lib.eachDefaultSystem (
let system: let
pkgs = inputs.nixpkgs.legacyPackages.${system}; pkgs = inputs.nixpkgs.legacyPackages.${system};
in in {
packages =
{ {
packages = {
default = (mkScope pkgs).default; default = (mkScope pkgs).default;
oci-image = (mkScope pkgs).oci-image; oci-image = (mkScope pkgs).oci-image;
book = (mkScope pkgs).book; book = (mkScope pkgs).book;
} }
// // builtins.listToAttrs
builtins.listToAttrs (
(builtins.concatLists builtins.concatLists
(builtins.map (
(crossSystem: builtins.map
let (
crossSystem: let
binaryName = "static-${crossSystem}"; binaryName = "static-${crossSystem}";
pkgsCrossStatic = pkgsCrossStatic =
(import inputs.nixpkgs { (import inputs.nixpkgs {
@ -86,9 +87,9 @@
crossSystem = { crossSystem = {
config = crossSystem; config = crossSystem;
}; };
}).pkgsStatic; })
in .pkgsStatic;
[ in [
# An output for a statically-linked binary # An output for a statically-linked binary
{ {
name = binaryName; name = binaryName;


@@ -1,16 +1,18 @@
 # Keep sorted
-{ default
-, inputs
-, mdbook
-, stdenv
+{
+  default,
+  inputs,
+  mdbook,
+  stdenv,
 }:
 stdenv.mkDerivation {
   pname = "${default.pname}-book";
   version = default.version;
-  src = let filter = inputs.nix-filter.lib; in filter {
+  src = let
+    filter = inputs.nix-filter.lib;
+  in
+    filter {
     root = inputs.self;
     # Keep sorted

@ -1,18 +1,18 @@
{ lib {
, pkgsBuildHost lib,
, rust pkgsBuildHost,
, stdenv rust,
stdenv,
}: }:
lib.optionalAttrs stdenv.hostPlatform.isStatic { lib.optionalAttrs stdenv.hostPlatform.isStatic {
ROCKSDB_STATIC = ""; ROCKSDB_STATIC = "";
} }
// // {
{
CARGO_BUILD_RUSTFLAGS = CARGO_BUILD_RUSTFLAGS =
lib.concatStringsSep lib.concatStringsSep
" " " "
([] (
[]
# This disables PIE for static builds, which isn't great in terms of # This disables PIE for static builds, which isn't great in terms of
# security. Unfortunately, my hand is forced because nixpkgs' # security. Unfortunately, my hand is forced because nixpkgs'
# `libstdc++.a` is built without `-fPIE`, which precludes us from # `libstdc++.a` is built without `-fPIE`, which precludes us from
@ -46,15 +46,13 @@ lib.optionalAttrs stdenv.hostPlatform.isStatic {
] ]
); );
} }
# What follows is stolen from [here][0]. Its purpose is to properly configure # What follows is stolen from [here][0]. Its purpose is to properly configure
# compilers and linkers for various stages of the build, and even covers the # compilers and linkers for various stages of the build, and even covers the
# case of build scripts that need native code compiled and run on the build # case of build scripts that need native code compiled and run on the build
# platform (I think). # platform (I think).
# #
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80 # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80
// // (
(
let let
inherit (rust.lib) envVars; inherit (rust.lib) envVars;
in in
@ -64,32 +62,27 @@ lib.optionalAttrs stdenv.hostPlatform.isStatic {
( (
let let
inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget; inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget;
in in {
{
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget; "CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget; "CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
envVars.linkerForTarget; envVars.linkerForTarget;
} }
) )
// // (
(
let let
inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget; inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
in in {
{
"CC_${cargoEnvVarTarget}" = envVars.ccForHost; "CC_${cargoEnvVarTarget}" = envVars.ccForHost;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost; "CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost; "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
CARGO_BUILD_TARGET = rustcTarget; CARGO_BUILD_TARGET = rustcTarget;
} }
) )
// // (
(
let let
inherit (stdenv.buildPlatform.rust) cargoEnvVarTarget; inherit (stdenv.buildPlatform.rust) cargoEnvVarTarget;
in in {
{
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild; "CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild; "CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild; "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;


@ -1,21 +1,18 @@
# Dependencies (keep sorted) # Dependencies (keep sorted)
{ craneLib {
, inputs craneLib,
, lib inputs,
, pkgsBuildHost lib,
, rocksdb pkgsBuildHost,
, rust rocksdb,
, stdenv rust,
stdenv,
# Options (keep sorted) # Options (keep sorted)
, default-features ? true default-features ? true,
, features ? [] features ? [],
, profile ? "release" profile ? "release",
}: }: let
buildDepsOnlyEnv = let
let
buildDepsOnlyEnv =
let
rocksdb' = rocksdb.override { rocksdb' = rocksdb.override {
enableJemalloc = builtins.elem "jemalloc" features; enableJemalloc = builtins.elem "jemalloc" features;
}; };
@ -25,19 +22,21 @@ let
ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include"; ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include";
ROCKSDB_LIB_DIR = "${rocksdb'}/lib"; ROCKSDB_LIB_DIR = "${rocksdb'}/lib";
} }
// // (import ./cross-compilation-env.nix {
(import ./cross-compilation-env.nix {
# Keep sorted # Keep sorted
inherit inherit
lib lib
pkgsBuildHost pkgsBuildHost
rust rust
stdenv; stdenv
;
}); });
buildPackageEnv = { buildPackageEnv =
{
CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev; CONDUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev;
} // buildDepsOnlyEnv; }
// buildDepsOnlyEnv;
commonAttrs = { commonAttrs = {
inherit inherit
@ -45,9 +44,13 @@ let
cargoToml = "${inputs.self}/Cargo.toml"; cargoToml = "${inputs.self}/Cargo.toml";
}) })
pname pname
version; version
;
src = let filter = inputs.nix-filter.lib; in filter { src = let
filter = inputs.nix-filter.lib;
in
filter {
root = inputs.self; root = inputs.self;
# Keep sorted # Keep sorted
@ -68,19 +71,22 @@ let
CARGO_PROFILE = profile; CARGO_PROFILE = profile;
}; };
in in
craneLib.buildPackage (commonAttrs
craneLib.buildPackage ( commonAttrs // { // {
cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // { cargoArtifacts = craneLib.buildDepsOnly (commonAttrs
// {
env = buildDepsOnlyEnv; env = buildDepsOnlyEnv;
}); });
cargoExtraArgs = "--locked " cargoExtraArgs =
"--locked "
+ lib.optionalString + lib.optionalString
(!default-features) (!default-features)
"--no-default-features " "--no-default-features "
+ lib.optionalString + lib.optionalString
(features != []) (features != [])
"--features " + (builtins.concatStringsSep "," features); "--features "
+ (builtins.concatStringsSep "," features);
# This is redundant with CI # This is redundant with CI
doCheck = false; doCheck = false;


@@ -1,10 +1,10 @@
 # Keep sorted
-{ default
-, dockerTools
-, lib
-, tini
+{
+  default,
+  dockerTools,
+  lib,
+  tini,
 }:
 dockerTools.buildImage {
   name = default.pname;
   tag = "next";


@@ -1,21 +1,24 @@
 # Keep sorted
-{ cargo-deb
-, default
-, engage
-, go
-, inputs
-, jq
-, lychee
-, mdbook
-, mkShell
-, olm
-, system
-, taplo
-, toolchain
+{
+  alejandra,
+  cargo-deb,
+  default,
+  engage,
+  go,
+  inputs,
+  jq,
+  lychee,
+  mdbook,
+  mkShell,
+  olm,
+  system,
+  taplo,
+  toolchain,
 }:
 mkShell {
-  env = default.env // {
+  env =
+    default.env
+    // {
     # Rust Analyzer needs to be able to find the path to default crate
     # sources, and it can read this environment variable to do so. The
     # `rust-src` component is required in order for this to work.
@@ -23,7 +26,9 @@ mkShell {
   };
   # Development tools
-  nativeBuildInputs = [
+  nativeBuildInputs =
+    default.nativeBuildInputs
+    ++ [
     # Always use nightly rustfmt because most of its options are unstable
     #
     # This needs to come before `toolchain` in this list, otherwise
@@ -57,5 +62,8 @@ mkShell {
     # Useful for editing the book locally
     mdbook
-  ] ++ default.nativeBuildInputs ;
+
+    # nix formatter
+    alejandra
+  ];
 }


@@ -2,6 +2,7 @@
 #
 # Other files that need upkeep when this changes:
 #
+# * `.gitlab-ci.yml`
 # * `Cargo.toml`
 # * `flake.nix`
 #
@@ -9,7 +10,7 @@
 # If you're having trouble making the relevant changes, bug a maintainer.
 [toolchain]
-channel = "1.79.0"
+channel = "1.78.0"
 components = [
 # For rust-analyzer
 "rust-src",


@@ -75,9 +75,9 @@ pub async fn get_register_available_route(
 /// - Creates a new account and populates it with default account data
 /// - If `inhibit_login` is false: Creates a device and returns device id and access_token
 pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<register::v3::Response> {
-    if !services().globals.allow_registration().await && body.appservice_info.is_none() {
+    if !services().globals.allow_registration() && body.appservice_info.is_none() {
         return Err(Error::BadRequest(
-            ErrorKind::forbidden(),
+            ErrorKind::Forbidden,
             "Registration has been disabled.",
         ));
     }
@@ -315,11 +315,7 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
 pub async fn change_password_route(
     body: Ruma<change_password::v3::Request>,
 ) -> Result<change_password::v3::Response> {
-    let sender_user = body
-        .sender_user
-        .as_ref()
-        // In the future password changes could be performed with UIA with 3PIDs, but we don't support that currently
-        .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
+    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let sender_device = body.sender_device.as_ref().expect("user is authenticated");
     let mut uiaainfo = UiaaInfo {
@@ -406,11 +402,7 @@ pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3:
 pub async fn deactivate_route(
     body: Ruma<deactivate::v3::Request>,
 ) -> Result<deactivate::v3::Response> {
-    let sender_user = body
-        .sender_user
-        .as_ref()
-        // In the future password changes could be performed with UIA with SSO, but we don't support that currently
-        .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?;
+    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let sender_device = body.sender_device.as_ref().expect("user is authenticated");
     let mut uiaainfo = UiaaInfo {
@@ -483,7 +475,7 @@ pub async fn request_3pid_management_token_via_email_route(
 ) -> Result<request_3pid_management_token_via_email::v3::Response> {
     Err(Error::BadRequest(
         ErrorKind::ThreepidDenied,
-        "Third party identifiers are currently unsupported by this server implementation",
+        "Third party identifier is not allowed",
     ))
 }
@@ -497,6 +489,6 @@ pub async fn request_3pid_management_token_via_msisdn_route(
 ) -> Result<request_3pid_management_token_via_msisdn::v3::Response> {
     Err(Error::BadRequest(
         ErrorKind::ThreepidDenied,
-        "Third party identifiers are currently unsupported by this server implementation",
+        "Third party identifier is not allowed",
     ))
 }


@@ -18,8 +18,6 @@ use ruma::{
 pub async fn create_alias_route(
     body: Ruma<create_alias::v3::Request>,
 ) -> Result<create_alias::v3::Response> {
-    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     if body.room_alias.server_name() != services().globals.server_name() {
         return Err(Error::BadRequest(
             ErrorKind::InvalidParam,
@@ -57,7 +55,7 @@ pub async fn create_alias_route(
     services()
         .rooms
         .alias
-        .set_alias(&body.room_alias, &body.room_id, sender_user)?;
+        .set_alias(&body.room_alias, &body.room_id)?;
     Ok(create_alias::v3::Response::new())
 }
@@ -66,12 +64,11 @@ pub async fn create_alias_route(
 ///
 /// Deletes a room alias from this server.
 ///
-/// - TODO: additional access control checks
 /// - TODO: Update canonical alias event
 pub async fn delete_alias_route(
     body: Ruma<delete_alias::v3::Request>,
 ) -> Result<delete_alias::v3::Response> {
-    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     if body.room_alias.server_name() != services().globals.server_name() {
         return Err(Error::BadRequest(
             ErrorKind::InvalidParam,
@@ -97,10 +94,7 @@ pub async fn delete_alias_route(
         ));
     }
-    services()
-        .rooms
-        .alias
-        .remove_alias(&body.room_alias, sender_user)?;
+    services().rooms.alias.remove_alias(&body.room_alias)?;
     // TODO: update alt_aliases?


@@ -54,7 +54,7 @@ pub async fn get_context_route(
         .user_can_see_event(sender_user, &room_id, &body.event_id)?
     {
         return Err(Error::BadRequest(
-            ErrorKind::forbidden(),
+            ErrorKind::Forbidden,
             "You don't have permission to view this event.",
         ));
     }


@ -1,24 +1,12 @@
// Unauthenticated media is deprecated
#![allow(deprecated)]
use std::time::Duration; use std::time::Duration;
use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma}; use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma};
use http::header::{CONTENT_DISPOSITION, CONTENT_TYPE}; use ruma::api::client::{
use ruma::{
api::{
client::{
authenticated_media::{
get_content, get_content_as_filename, get_content_thumbnail, get_media_config,
},
error::ErrorKind, error::ErrorKind,
media::{self, create_content}, media::{
create_content, get_content, get_content_as_filename, get_content_thumbnail,
get_media_config,
}, },
federation::authenticated_media::{self as federation_media, FileOrLocation},
},
http_headers::{ContentDisposition, ContentDispositionType},
media::Method,
ServerName, UInt,
}; };
const MXC_LENGTH: usize = 32; const MXC_LENGTH: usize = 32;
@ -27,22 +15,19 @@ const MXC_LENGTH: usize = 32;
/// ///
/// Returns max upload size. /// Returns max upload size.
pub async fn get_media_config_route( pub async fn get_media_config_route(
_body: Ruma<media::get_media_config::v3::Request>, _body: Ruma<get_media_config::v3::Request>,
) -> Result<media::get_media_config::v3::Response> { ) -> Result<get_media_config::v3::Response> {
Ok(media::get_media_config::v3::Response { Ok(get_media_config::v3::Response {
upload_size: services().globals.max_request_size().into(), upload_size: services().globals.max_request_size().into(),
}) })
} }
/// # `GET /_matrix/client/v1/media/config` fn sanitize_content_type(content_type: String) -> String {
/// if content_type == "image/jpeg" || content_type == "image/png" {
/// Returns max upload size. content_type
pub async fn get_media_config_auth_route( } else {
_body: Ruma<get_media_config::v1::Request>, "application/octet-stream".to_owned()
) -> Result<get_media_config::v1::Response> { }
Ok(get_media_config::v1::Response {
upload_size: services().globals.max_request_size().into(),
})
} }
/// # `POST /_matrix/media/r0/upload` /// # `POST /_matrix/media/r0/upload`
@ -64,10 +49,10 @@ pub async fn create_content_route(
.media .media
.create( .create(
mxc.clone(), mxc.clone(),
Some( body.filename
ContentDisposition::new(ContentDispositionType::Inline) .as_ref()
.with_filename(body.filename.clone()), .map(|filename| "inline; filename=".to_owned() + filename)
), .as_deref(),
body.content_type.as_deref(), body.content_type.as_deref(),
&body.file, &body.file,
) )
@ -81,67 +66,28 @@ pub async fn create_content_route(
pub async fn get_remote_content( pub async fn get_remote_content(
mxc: &str, mxc: &str,
server_name: &ServerName, server_name: &ruma::ServerName,
media_id: String, media_id: String,
) -> Result<get_content::v1::Response, Error> { ) -> Result<get_content::v3::Response, Error> {
let content_response = match services() let content_response = services()
.sending .sending
.send_federation_request( .send_federation_request(
server_name, server_name,
federation_media::get_content::v1::Request { get_content::v3::Request {
media_id: media_id.clone(), allow_remote: false,
timeout_ms: Duration::from_secs(20),
},
)
.await
{
Ok(federation_media::get_content::v1::Response {
metadata: _,
content: FileOrLocation::File(content),
}) => get_content::v1::Response {
file: content.file,
content_type: content.content_type,
content_disposition: content.content_disposition,
},
Ok(federation_media::get_content::v1::Response {
metadata: _,
content: FileOrLocation::Location(url),
}) => get_location_content(url).await?,
Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => {
let media::get_content::v3::Response {
file,
content_type,
content_disposition,
..
} = services()
.sending
.send_federation_request(
server_name,
media::get_content::v3::Request {
server_name: server_name.to_owned(), server_name: server_name.to_owned(),
media_id, media_id,
timeout_ms: Duration::from_secs(20), timeout_ms: Duration::from_secs(20),
allow_remote: false, allow_redirect: false,
allow_redirect: true,
}, },
) )
.await?; .await?;
get_content::v1::Response {
file,
content_type,
content_disposition,
}
}
Err(e) => return Err(e),
};
services() services()
.media .media
.create( .create(
mxc.to_owned(), mxc.to_owned(),
content_response.content_disposition.clone(), content_response.content_disposition.as_deref(),
content_response.content_type.as_deref(), content_response.content_type.as_deref(),
&content_response.file, &content_response.file,
) )
@ -156,57 +102,31 @@ pub async fn get_remote_content(
/// ///
/// - Only allows federation if `allow_remote` is true /// - Only allows federation if `allow_remote` is true
pub async fn get_content_route( pub async fn get_content_route(
body: Ruma<media::get_content::v3::Request>, body: Ruma<get_content::v3::Request>,
) -> Result<media::get_content::v3::Response> { ) -> Result<get_content::v3::Response> {
let get_content::v1::Response { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
file,
content_disposition,
content_type,
} = get_content(&body.server_name, body.media_id.clone(), body.allow_remote).await?;
Ok(media::get_content::v3::Response { if let Some(FileMeta {
content_disposition,
file, file,
content_type, ..
}) = services().media.get(mxc.clone()).await?
{
Ok(get_content::v3::Response {
file,
content_type: Some("application/octet-stream".to_owned()),
content_disposition, content_disposition,
cross_origin_resource_policy: Some("cross-origin".to_owned()), cross_origin_resource_policy: Some("cross-origin".to_owned()),
}) })
} } else if &*body.server_name != services().globals.server_name() && body.allow_remote {
/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}`
///
/// Load media from our server or over federation.
pub async fn get_content_auth_route(
body: Ruma<get_content::v1::Request>,
) -> Result<get_content::v1::Response> {
get_content(&body.server_name, body.media_id.clone(), true).await
}
async fn get_content(
server_name: &ServerName,
media_id: String,
allow_remote: bool,
) -> Result<get_content::v1::Response, Error> {
let mxc = format!("mxc://{}/{}", server_name, media_id);
if let Ok(Some(FileMeta {
content_disposition,
content_type,
file,
})) = services().media.get(mxc.clone()).await
{
Ok(get_content::v1::Response {
file,
content_type,
content_disposition: Some(content_disposition),
})
} else if server_name != services().globals.server_name() && allow_remote {
let remote_content_response = let remote_content_response =
get_remote_content(&mxc, server_name, media_id.clone()).await?; get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
Ok(get_content::v1::Response { Ok(get_content::v3::Response {
content_disposition: remote_content_response.content_disposition, content_disposition: remote_content_response.content_disposition,
content_type: remote_content_response.content_type, content_type: Some("application/octet-stream".to_owned()),
file: remote_content_response.file, file: remote_content_response.file,
cross_origin_resource_policy: Some("cross-origin".to_owned()),
}) })
} else { } else {
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
@ -219,74 +139,26 @@ async fn get_content(
/// ///
/// - Only allows federation if `allow_remote` is true /// - Only allows federation if `allow_remote` is true
pub async fn get_content_as_filename_route( pub async fn get_content_as_filename_route(
body: Ruma<media::get_content_as_filename::v3::Request>, body: Ruma<get_content_as_filename::v3::Request>,
) -> Result<media::get_content_as_filename::v3::Response> { ) -> Result<get_content_as_filename::v3::Response> {
let get_content_as_filename::v1::Response { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
file,
content_type,
content_disposition,
} = get_content_as_filename(
&body.server_name,
body.media_id.clone(),
body.filename.clone(),
body.allow_remote,
)
.await?;
Ok(media::get_content_as_filename::v3::Response { if let Some(FileMeta { file, .. }) = services().media.get(mxc.clone()).await? {
Ok(get_content_as_filename::v3::Response {
file, file,
content_type, content_type: Some("application/octet-stream".to_owned()),
content_disposition, content_disposition: Some(format!("inline; filename={}", body.filename)),
cross_origin_resource_policy: Some("cross-origin".to_owned()), cross_origin_resource_policy: Some("cross-origin".to_owned()),
}) })
} } else if &*body.server_name != services().globals.server_name() && body.allow_remote {
/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}/{fileName}`
///
/// Load media from our server or over federation, permitting desired filename.
pub async fn get_content_as_filename_auth_route(
body: Ruma<get_content_as_filename::v1::Request>,
) -> Result<get_content_as_filename::v1::Response, Error> {
get_content_as_filename(
&body.server_name,
body.media_id.clone(),
body.filename.clone(),
true,
)
.await
}
async fn get_content_as_filename(
server_name: &ServerName,
media_id: String,
filename: String,
allow_remote: bool,
) -> Result<get_content_as_filename::v1::Response, Error> {
let mxc = format!("mxc://{}/{}", server_name, media_id);
if let Ok(Some(FileMeta {
file, content_type, ..
})) = services().media.get(mxc.clone()).await
{
Ok(get_content_as_filename::v1::Response {
file,
content_type,
content_disposition: Some(
ContentDisposition::new(ContentDispositionType::Inline)
.with_filename(Some(filename.clone())),
),
})
} else if server_name != services().globals.server_name() && allow_remote {
let remote_content_response = let remote_content_response =
get_remote_content(&mxc, server_name, media_id.clone()).await?; get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
Ok(get_content_as_filename::v1::Response { Ok(get_content_as_filename::v3::Response {
content_disposition: Some( content_disposition: Some(format!("inline: filename={}", body.filename)),
ContentDisposition::new(ContentDispositionType::Inline) content_type: Some("application/octet-stream".to_owned()),
.with_filename(Some(filename.clone())),
),
content_type: remote_content_response.content_type,
file: remote_content_response.file, file: remote_content_response.file,
cross_origin_resource_policy: Some("cross-origin".to_owned()),
}) })
} else { } else {
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
@ -299,169 +171,66 @@ async fn get_content_as_filename(
/// ///
/// - Only allows federation if `allow_remote` is true /// - Only allows federation if `allow_remote` is true
pub async fn get_content_thumbnail_route( pub async fn get_content_thumbnail_route(
body: Ruma<media::get_content_thumbnail::v3::Request>, body: Ruma<get_content_thumbnail::v3::Request>,
) -> Result<media::get_content_thumbnail::v3::Response> { ) -> Result<get_content_thumbnail::v3::Response> {
let get_content_thumbnail::v1::Response { file, content_type } = get_content_thumbnail( let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
&body.server_name,
body.media_id.clone(),
body.height,
body.width,
body.method.clone(),
body.animated,
body.allow_remote,
)
.await?;
Ok(media::get_content_thumbnail::v3::Response { if let Some(FileMeta {
file,
content_type,
cross_origin_resource_policy: Some("cross-origin".to_owned()),
})
}
/// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}`
///
/// Load media thumbnail from our server or over federation.
pub async fn get_content_thumbnail_auth_route(
body: Ruma<get_content_thumbnail::v1::Request>,
) -> Result<get_content_thumbnail::v1::Response> {
get_content_thumbnail(
&body.server_name,
body.media_id.clone(),
body.height,
body.width,
body.method.clone(),
body.animated,
true,
)
.await
}
async fn get_content_thumbnail(
server_name: &ServerName,
media_id: String,
height: UInt,
width: UInt,
method: Option<Method>,
animated: Option<bool>,
allow_remote: bool,
) -> Result<get_content_thumbnail::v1::Response, Error> {
let mxc = format!("mxc://{}/{}", server_name, media_id);
if let Ok(Some(FileMeta {
file, content_type, .. file, content_type, ..
})) = services() }) = services()
.media .media
.get_thumbnail( .get_thumbnail(
mxc.clone(), mxc.clone(),
width body.width
.try_into() .try_into()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
height body.height
.try_into() .try_into()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Height is invalid."))?, .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
) )
.await .await?
{ {
Ok(get_content_thumbnail::v1::Response { file, content_type }) Ok(get_content_thumbnail::v3::Response {
} else if server_name != services().globals.server_name() && allow_remote { file,
let thumbnail_response = match services() content_type: content_type.map(sanitize_content_type),
cross_origin_resource_policy: Some("cross-origin".to_owned()),
})
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
let mut get_thumbnail_response = services()
.sending .sending
.send_federation_request( .send_federation_request(
server_name, &body.server_name,
federation_media::get_content_thumbnail::v1::Request { get_content_thumbnail::v3::Request {
height, allow_remote: false,
width, height: body.height,
method: method.clone(), width: body.width,
media_id: media_id.clone(), method: body.method.clone(),
timeout_ms: Duration::from_secs(20), server_name: body.server_name.clone(),
animated, media_id: body.media_id.clone(),
},
)
.await
{
Ok(federation_media::get_content_thumbnail::v1::Response {
metadata: _,
content: FileOrLocation::File(content),
}) => get_content_thumbnail::v1::Response {
file: content.file,
content_type: content.content_type,
},
Ok(federation_media::get_content_thumbnail::v1::Response {
metadata: _,
content: FileOrLocation::Location(url),
}) => {
let get_content::v1::Response {
file, content_type, ..
} = get_location_content(url).await?;
get_content_thumbnail::v1::Response { file, content_type }
}
Err(Error::BadRequest(ErrorKind::Unrecognized, _)) => {
let media::get_content_thumbnail::v3::Response {
file, content_type, ..
} = services()
.sending
.send_federation_request(
server_name,
media::get_content_thumbnail::v3::Request {
height,
width,
method: method.clone(),
server_name: server_name.to_owned(),
media_id: media_id.clone(),
timeout_ms: Duration::from_secs(20), timeout_ms: Duration::from_secs(20),
allow_redirect: false, allow_redirect: false,
animated,
allow_remote: false,
}, },
) )
.await?; .await?;
get_content_thumbnail::v1::Response { file, content_type }
}
Err(e) => return Err(e),
};
services() services()
.media .media
.upload_thumbnail( .upload_thumbnail(
mxc, mxc,
thumbnail_response.content_type.as_deref(), None,
width.try_into().expect("all UInts are valid u32s"), get_thumbnail_response.content_type.as_deref(),
height.try_into().expect("all UInts are valid u32s"), body.width.try_into().expect("all UInts are valid u32s"),
&thumbnail_response.file, body.height.try_into().expect("all UInts are valid u32s"),
&get_thumbnail_response.file,
) )
.await?; .await?;
Ok(thumbnail_response) get_thumbnail_response.content_type = get_thumbnail_response
.content_type
.map(sanitize_content_type);
Ok(get_thumbnail_response)
} else { } else {
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
} }
} }
async fn get_location_content(url: String) -> Result<get_content::v1::Response, Error> {
let client = services().globals.default_client();
let response = client.get(url).send().await?;
let headers = response.headers();
let content_type = headers
.get(CONTENT_TYPE)
.and_then(|header| header.to_str().ok())
.map(ToOwned::to_owned);
let content_disposition = headers
.get(CONTENT_DISPOSITION)
.map(|header| header.as_bytes())
.map(TryFrom::try_from)
.and_then(Result::ok);
let file = response.bytes().await?.to_vec();
Ok(get_content::v1::Response {
file,
content_type,
content_disposition,
})
}


@ -18,8 +18,9 @@ use ruma::{
}, },
StateEventType, TimelineEventType, StateEventType, TimelineEventType,
}, },
state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, serde::Base64,
OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId, state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId,
}; };
use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
use std::{ use std::{
@ -31,10 +32,7 @@ use tokio::sync::RwLock;
use tracing::{debug, error, info, warn}; use tracing::{debug, error, info, warn};
use crate::{ use crate::{
service::{ service::pdu::{gen_event_id_canonical_json, PduBuilder},
globals::SigningKeys,
pdu::{gen_event_id_canonical_json, PduBuilder},
},
services, utils, Error, PduEvent, Result, Ruma, services, utils, Error, PduEvent, Result, Ruma,
}; };
@ -97,7 +95,7 @@ pub async fn join_room_by_id_or_alias_route(
let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) {
Ok(room_id) => { Ok(room_id) => {
let mut servers = body.via.clone(); let mut servers = body.server_name.clone();
servers.extend( servers.extend(
services() services()
.rooms .rooms
@ -188,7 +186,15 @@ pub async fn kick_user_route(
) -> Result<kick_user::v3::Response> { ) -> Result<kick_user::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event: RoomMemberEventContent = serde_json::from_str( if let Ok(true) = services()
.rooms
.state_cache
.is_left(sender_user, &body.room_id)
{
return Ok(kick_user::v3::Response {});
}
let mut event: RoomMemberEventContent = serde_json::from_str(
services() services()
.rooms .rooms
.state_accessor .state_accessor
@ -199,26 +205,15 @@ pub async fn kick_user_route(
)? )?
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
ErrorKind::BadState, ErrorKind::BadState,
"Cannot kick a user who is not in the room.", "Cannot kick member that's not in the room.",
))? ))?
.content .content
.get(), .get(),
) )
.map_err(|_| Error::bad_database("Invalid member event in database."))?; .map_err(|_| Error::bad_database("Invalid member event in database."))?;
// If they are already kicked and the reason is unchanged, there isn't any point in sending a new event. event.membership = MembershipState::Leave;
if event.membership == MembershipState::Leave && event.reason == body.reason { event.reason.clone_from(&body.reason);
return Ok(kick_user::v3::Response {});
}
let event = RoomMemberEventContent {
is_direct: None,
membership: MembershipState::Leave,
third_party_invite: None,
reason: body.reason.clone(),
join_authorized_via_users_server: None,
..event
};
let mutex_state = Arc::clone( let mutex_state = Arc::clone(
services() services()
@ -241,7 +236,6 @@ pub async fn kick_user_route(
unsigned: None, unsigned: None,
state_key: Some(body.user_id.to_string()), state_key: Some(body.user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&body.room_id, &body.room_id,
@ -260,7 +254,17 @@ pub async fn kick_user_route(
pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_user::v3::Response> { pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_user::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event = if let Some(event) = services() if let Ok(Some(membership_event)) = services()
.rooms
.state_accessor
.get_member(&body.room_id, sender_user)
{
if membership_event.membership == MembershipState::Ban {
return Ok(ban_user::v3::Response {});
}
}
let event = services()
.rooms .rooms
.state_accessor .state_accessor
.room_state_get( .room_state_get(
@ -268,30 +272,27 @@ pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_use
&StateEventType::RoomMember, &StateEventType::RoomMember,
body.user_id.as_ref(), body.user_id.as_ref(),
)? )?
// Even when the previous member content is invalid, we should let the ban go through anyways. .map_or(
.and_then(|event| serde_json::from_str::<RoomMemberEventContent>(event.content.get()).ok()) Ok(RoomMemberEventContent {
{ membership: MembershipState::Ban,
// If they are already banned and the reason is unchanged, there isn't any point in sending a new event. displayname: services().users.displayname(&body.user_id)?,
if event.membership == MembershipState::Ban && event.reason == body.reason { avatar_url: services().users.avatar_url(&body.user_id)?,
return Ok(ban_user::v3::Response {}); is_direct: None,
} third_party_invite: None,
blurhash: services().users.blurhash(&body.user_id)?,
RoomMemberEventContent { reason: body.reason.clone(),
join_authorized_via_users_server: None,
}),
|event| {
serde_json::from_str(event.content.get())
.map(|event: RoomMemberEventContent| RoomMemberEventContent {
membership: MembershipState::Ban, membership: MembershipState::Ban,
join_authorized_via_users_server: None, join_authorized_via_users_server: None,
reason: body.reason.clone(), ..event
third_party_invite: None, })
is_direct: None, .map_err(|_| Error::bad_database("Invalid member event in database."))
avatar_url: event.avatar_url, },
displayname: event.displayname, )?;
blurhash: event.blurhash,
}
} else {
RoomMemberEventContent {
reason: body.reason.clone(),
..RoomMemberEventContent::new(MembershipState::Ban)
}
};
let mutex_state = Arc::clone( let mutex_state = Arc::clone(
services() services()
@ -314,7 +315,6 @@ pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_use
unsigned: None, unsigned: None,
state_key: Some(body.user_id.to_string()), state_key: Some(body.user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&body.room_id, &body.room_id,
@ -335,7 +335,17 @@ pub async fn unban_user_route(
) -> Result<unban_user::v3::Response> { ) -> Result<unban_user::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event: RoomMemberEventContent = serde_json::from_str( if let Ok(Some(membership_event)) = services()
.rooms
.state_accessor
.get_member(&body.room_id, sender_user)
{
if membership_event.membership != MembershipState::Ban {
return Ok(unban_user::v3::Response {});
}
}
let mut event: RoomMemberEventContent = serde_json::from_str(
services() services()
.rooms .rooms
.state_accessor .state_accessor
@ -353,19 +363,8 @@ pub async fn unban_user_route(
) )
.map_err(|_| Error::bad_database("Invalid member event in database."))?; .map_err(|_| Error::bad_database("Invalid member event in database."))?;
// If they are already unbanned and the reason is unchanged, there isn't any point in sending a new event. event.membership = MembershipState::Leave;
if event.membership == MembershipState::Leave && event.reason == body.reason { event.reason.clone_from(&body.reason);
return Ok(unban_user::v3::Response {});
}
let event = RoomMemberEventContent {
is_direct: None,
membership: MembershipState::Leave,
third_party_invite: None,
reason: body.reason.clone(),
join_authorized_via_users_server: None,
..event
};
let mutex_state = Arc::clone( let mutex_state = Arc::clone(
services() services()
@ -388,7 +387,6 @@ pub async fn unban_user_route(
unsigned: None, unsigned: None,
state_key: Some(body.user_id.to_string()), state_key: Some(body.user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&body.room_id, &body.room_id,
@ -456,7 +454,7 @@ pub async fn get_member_events_route(
.user_can_see_state_events(sender_user, &body.room_id)? .user_can_see_state_events(sender_user, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"You don't have permission to view this room.", "You don't have permission to view this room.",
)); ));
} }
@ -491,7 +489,7 @@ pub async fn joined_members_route(
.user_can_see_state_events(sender_user, &body.room_id)? .user_can_see_state_events(sender_user, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"You don't have permission to view this room.", "You don't have permission to view this room.",
)); ));
} }
@ -627,7 +625,7 @@ async fn join_room_by_id_helper(
let event_id = format!( let event_id = format!(
"${}", "${}",
ruma::signatures::reference_hash(&join_event_stub, &room_version_id) ruma::signatures::reference_hash(&join_event_stub, &room_version_id)
.expect("Event format validated when event was hashed") .expect("ruma can calculate reference hashes")
); );
let event_id = <&EventId>::try_from(event_id.as_str()) let event_id = <&EventId>::try_from(event_id.as_str())
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
@ -941,7 +939,6 @@ async fn join_room_by_id_helper(
unsigned: None, unsigned: None,
state_key: Some(sender_user.to_string()), state_key: Some(sender_user.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
room_id, room_id,
@ -987,8 +984,6 @@ async fn join_room_by_id_helper(
.as_str() .as_str()
}) })
.and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok());
let restricted_join = join_authorized_via_users_server.is_some();
// TODO: Is origin needed? // TODO: Is origin needed?
join_event_stub.insert( join_event_stub.insert(
"origin".to_owned(), "origin".to_owned(),
@ -1035,7 +1030,7 @@ async fn join_room_by_id_helper(
ruma::signatures::reference_hash(&join_event_stub, &room_version_id) ruma::signatures::reference_hash(&join_event_stub, &room_version_id)
.expect("ruma can calculate reference hashes") .expect("ruma can calculate reference hashes")
); );
let event_id = OwnedEventId::try_from(event_id) let event_id = <&EventId>::try_from(event_id.as_str())
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
// Add event_id back // Add event_id back
@ -1060,35 +1055,46 @@ async fn join_room_by_id_helper(
) )
.await?; .await?;
let pdu = if let Some(signed_raw) = send_join_response.room_state.event { if let Some(signed_raw) = send_join_response.room_state.event {
let (signed_event_id, signed_pdu) = let (signed_event_id, signed_value) =
gen_event_id_canonical_json(&signed_raw, &room_version_id)?; match gen_event_id_canonical_json(&signed_raw, &room_version_id) {
Ok(t) => t,
Err(_) => {
// Event could not be converted to canonical json
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Could not convert event to canonical json.",
));
}
};
if signed_event_id != event_id { if signed_event_id != event_id {
return Err(Error::BadServerResponse( return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Server sent event with wrong event id", "Server sent event with wrong event id",
)); ));
} }
signed_pdu
} else if restricted_join {
return Err(Error::BadServerResponse(
"No signed event was returned, despite just performing a restricted join",
));
} else {
join_event
};
drop(state_lock); drop(state_lock);
let pub_key_map = RwLock::new(BTreeMap::new()); let pub_key_map = RwLock::new(BTreeMap::new());
services() services()
.rooms .rooms
.event_handler .event_handler
.handle_incoming_pdu(&remote_server, &event_id, room_id, pdu, true, &pub_key_map) .handle_incoming_pdu(
&remote_server,
&signed_event_id,
room_id,
signed_value,
true,
&pub_key_map,
)
.await?; .await?;
} else { } else {
return Err(error); return Err(error);
} }
} else {
return Err(error);
}
} }
Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) Ok(join_room_by_id::v3::Response::new(room_id.to_owned()))
@ -1136,7 +1142,7 @@ async fn make_join_request(
async fn validate_and_add_event_id( async fn validate_and_add_event_id(
pdu: &RawJsonValue, pdu: &RawJsonValue,
room_version: &RoomVersionId, room_version: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<(OwnedEventId, CanonicalJsonObject)> { ) -> Result<(OwnedEventId, CanonicalJsonObject)> {
let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);
@ -1145,7 +1151,7 @@ async fn validate_and_add_event_id(
let event_id = EventId::parse(format!( let event_id = EventId::parse(format!(
"${}", "${}",
ruma::signatures::reference_hash(&value, room_version) ruma::signatures::reference_hash(&value, room_version)
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))? .expect("ruma can calculate reference hashes")
)) ))
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
@ -1183,35 +1189,8 @@ async fn validate_and_add_event_id(
} }
} }
let origin_server_ts = value.get("origin_server_ts").ok_or_else(|| { if let Err(e) = ruma::signatures::verify_event(&*pub_key_map.read().await, &value, room_version)
error!("Invalid PDU, no origin_server_ts field"); {
Error::BadRequest(
ErrorKind::MissingParam,
"Invalid PDU, no origin_server_ts field",
)
})?;
let origin_server_ts: MilliSecondsSinceUnixEpoch = {
let ts = origin_server_ts.as_integer().ok_or_else(|| {
Error::BadRequest(
ErrorKind::InvalidParam,
"origin_server_ts must be an integer",
)
})?;
MilliSecondsSinceUnixEpoch(i64::from(ts).try_into().map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Time must be after the unix epoch")
})?)
};
let unfiltered_keys = (*pub_key_map.read().await).clone();
let keys =
services()
.globals
.filter_keys_server_map(unfiltered_keys, origin_server_ts, room_version);
if let Err(e) = ruma::signatures::verify_event(&keys, &value, room_version) {
warn!("Event {} failed verification {:?} {}", event_id, pdu, e); warn!("Event {} failed verification {:?} {}", event_id, pdu, e);
back_off(event_id).await; back_off(event_id).await;
return Err(Error::BadServerResponse("Event failed verification.")); return Err(Error::BadServerResponse("Event failed verification."));
@ -1264,7 +1243,6 @@ pub(crate) async fn invite_helper<'a>(
unsigned: None, unsigned: None,
state_key: Some(user_id.to_string()), state_key: Some(user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
room_id, room_id,
@ -1341,14 +1319,17 @@ pub(crate) async fn invite_helper<'a>(
.filter(|server| &**server != services().globals.server_name()); .filter(|server| &**server != services().globals.server_name());
services().sending.send_pdu(servers, &pdu_id)?; services().sending.send_pdu(servers, &pdu_id)?;
} else {
return Ok(());
}
if !services() if !services()
.rooms .rooms
.state_cache .state_cache
.is_joined(sender_user, room_id)? .is_joined(sender_user, room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"You don't have permission to view this room.", "You don't have permission to view this room.",
)); ));
} }
@ -1384,7 +1365,6 @@ pub(crate) async fn invite_helper<'a>(
unsigned: None, unsigned: None,
state_key: Some(user_id.to_string()), state_key: Some(user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
room_id, room_id,
@ -1392,9 +1372,7 @@ pub(crate) async fn invite_helper<'a>(
) )
.await?; .await?;
// Critical point ends
drop(state_lock); drop(state_lock);
}
Ok(()) Ok(())
} }
@ -1492,15 +1470,12 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
Some(e) => e, Some(e) => e,
}; };
let event = RoomMemberEventContent { let mut event: RoomMemberEventContent = serde_json::from_str(member_event.content.get())
is_direct: None, .map_err(|_| Error::bad_database("Invalid member event in database."))?;
membership: MembershipState::Leave,
third_party_invite: None, event.membership = MembershipState::Leave;
reason, event.reason = reason;
join_authorized_via_users_server: None, event.join_authorized_via_users_server = None;
..serde_json::from_str(member_event.content.get())
.map_err(|_| Error::bad_database("Invalid member event in database."))?
};
services() services()
.rooms .rooms
@ -1512,7 +1487,6 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
unsigned: None, unsigned: None,
state_key: Some(user_id.to_string()), state_key: Some(user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
user_id, user_id,
room_id, room_id,
@ -1614,7 +1588,7 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> {
let event_id = EventId::parse(format!( let event_id = EventId::parse(format!(
"${}", "${}",
ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) ruma::signatures::reference_hash(&leave_event_stub, &room_version_id)
.expect("Event format validated when event was hashed") .expect("ruma can calculate reference hashes")
)) ))
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");

View file

@ -43,7 +43,7 @@ pub async fn send_message_event_route(
&& !services().globals.allow_encryption() && !services().globals.allow_encryption()
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Encryption has been disabled", "Encryption has been disabled",
)); ));
} }
@ -84,11 +84,6 @@ pub async fn send_message_event_route(
unsigned: Some(unsigned), unsigned: Some(unsigned),
state_key: None, state_key: None,
redacts: None, redacts: None,
timestamp: if body.appservice_info.is_some() {
body.timestamp
} else {
None
},
}, },
sender_user, sender_user,
&body.room_id, &body.room_id,

View file

@ -11,7 +11,6 @@ mod keys;
mod media; mod media;
mod membership; mod membership;
mod message; mod message;
mod openid;
mod presence; mod presence;
mod profile; mod profile;
mod push; mod push;
@ -48,7 +47,6 @@ pub use keys::*;
pub use media::*; pub use media::*;
pub use membership::*; pub use membership::*;
pub use message::*; pub use message::*;
pub use openid::*;
pub use presence::*; pub use presence::*;
pub use profile::*; pub use profile::*;
pub use push::*; pub use push::*;

View file

@ -1,23 +0,0 @@
use std::time::Duration;
use ruma::{api::client::account, authentication::TokenType};
use crate::{services, Result, Ruma};
/// # `POST /_matrix/client/r0/user/{userId}/openid/request_token`
///
/// Request an OpenID token to verify identity with third-party services.
///
/// - The token generated is only valid for the OpenID API.
pub async fn create_openid_token_route(
body: Ruma<account::request_openid_token::v3::Request>,
) -> Result<account::request_openid_token::v3::Response> {
let (access_token, expires_in) = services().users.create_openid_token(&body.user_id)?;
Ok(account::request_openid_token::v3::Response {
access_token,
token_type: TokenType::Bearer,
matrix_server_name: services().globals.server_name().to_owned(),
expires_in: Duration::from_secs(expires_in),
})
}
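For context, the handler above answers with the standard OpenID token object from the client-server spec. An illustrative response body with made-up values (not output captured from Conduit):

// Illustrative only: the JSON shape returned by this endpoint per the
// client-server spec; the token and server name are invented example values.
fn example_openid_response() -> serde_json::Value {
    serde_json::json!({
        "access_token": "SomeOpaqueOpenIdToken",
        "token_type": "Bearer",
        "matrix_server_name": "conduit.example",
        "expires_in": 3600
    })
}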

View file

@ -65,7 +65,6 @@ pub async fn set_displayname_route(
unsigned: None, unsigned: None,
state_key: Some(sender_user.to_string()), state_key: Some(sender_user.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
room_id, room_id,
)) ))
@ -201,7 +200,6 @@ pub async fn set_avatar_url_route(
unsigned: None, unsigned: None,
state_key: Some(sender_user.to_string()), state_key: Some(sender_user.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
room_id, room_id,
)) ))

View file

@ -44,7 +44,6 @@ pub async fn redact_event_route(
unsigned: None, unsigned: None,
state_key: None, state_key: None,
redacts: Some(body.event_id.into()), redacts: Some(body.event_id.into()),
timestamp: None,
}, },
sender_user, sender_user,
&body.room_id, &body.room_id,

View file

@ -3,7 +3,7 @@ use ruma::api::client::relations::{
get_relating_events_with_rel_type_and_event_type, get_relating_events_with_rel_type_and_event_type,
}; };
use crate::{services, Result, Ruma}; use crate::{service::rooms::timeline::PduCount, services, Result, Ruma};
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}` /// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`
pub async fn get_relating_events_with_rel_type_and_event_type_route( pub async fn get_relating_events_with_rel_type_and_event_type_route(
@ -11,6 +11,27 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
) -> Result<get_relating_events_with_rel_type_and_event_type::v1::Response> { ) -> Result<get_relating_events_with_rel_type_and_event_type::v1::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let from = match body.from.clone() {
Some(from) => PduCount::try_from_string(&from)?,
None => match ruma::api::Direction::Backward {
// TODO: fix ruma so `body.dir` exists
ruma::api::Direction::Forward => PduCount::min(),
ruma::api::Direction::Backward => PduCount::max(),
},
};
let to = body
.to
.as_ref()
.and_then(|t| PduCount::try_from_string(t).ok());
// Use limit or else 10, with maximum 100
let limit = body
.limit
.and_then(|u| u32::try_from(u).ok())
.map_or(10_usize, |u| u as usize)
.min(100);
let res = services() let res = services()
.rooms .rooms
.pdu_metadata .pdu_metadata
@ -20,11 +41,9 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
&body.event_id, &body.event_id,
Some(body.event_type.clone()), Some(body.event_type.clone()),
Some(body.rel_type.clone()), Some(body.rel_type.clone()),
body.from.clone(), from,
body.to.clone(), to,
body.limit, limit,
body.recurse,
&body.dir,
)?; )?;
Ok( Ok(
@ -32,7 +51,6 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
chunk: res.chunk, chunk: res.chunk,
next_batch: res.next_batch, next_batch: res.next_batch,
prev_batch: res.prev_batch, prev_batch: res.prev_batch,
recursion_depth: res.recursion_depth,
}, },
) )
} }
@ -43,6 +61,27 @@ pub async fn get_relating_events_with_rel_type_route(
) -> Result<get_relating_events_with_rel_type::v1::Response> { ) -> Result<get_relating_events_with_rel_type::v1::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let from = match body.from.clone() {
Some(from) => PduCount::try_from_string(&from)?,
None => match ruma::api::Direction::Backward {
// TODO: fix ruma so `body.dir` exists
ruma::api::Direction::Forward => PduCount::min(),
ruma::api::Direction::Backward => PduCount::max(),
},
};
let to = body
.to
.as_ref()
.and_then(|t| PduCount::try_from_string(t).ok());
// Use limit or else 10, with maximum 100
let limit = body
.limit
.and_then(|u| u32::try_from(u).ok())
.map_or(10_usize, |u| u as usize)
.min(100);
let res = services() let res = services()
.rooms .rooms
.pdu_metadata .pdu_metadata
@ -52,18 +91,15 @@ pub async fn get_relating_events_with_rel_type_route(
&body.event_id, &body.event_id,
None, None,
Some(body.rel_type.clone()), Some(body.rel_type.clone()),
body.from.clone(), from,
body.to.clone(), to,
body.limit, limit,
body.recurse,
&body.dir,
)?; )?;
Ok(get_relating_events_with_rel_type::v1::Response { Ok(get_relating_events_with_rel_type::v1::Response {
chunk: res.chunk, chunk: res.chunk,
next_batch: res.next_batch, next_batch: res.next_batch,
prev_batch: res.prev_batch, prev_batch: res.prev_batch,
recursion_depth: res.recursion_depth,
}) })
} }
@ -73,6 +109,27 @@ pub async fn get_relating_events_route(
) -> Result<get_relating_events::v1::Response> { ) -> Result<get_relating_events::v1::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let from = match body.from.clone() {
Some(from) => PduCount::try_from_string(&from)?,
None => match ruma::api::Direction::Backward {
// TODO: fix ruma so `body.dir` exists
ruma::api::Direction::Forward => PduCount::min(),
ruma::api::Direction::Backward => PduCount::max(),
},
};
let to = body
.to
.as_ref()
.and_then(|t| PduCount::try_from_string(t).ok());
// Use limit or else 10, with maximum 100
let limit = body
.limit
.and_then(|u| u32::try_from(u).ok())
.map_or(10_usize, |u| u as usize)
.min(100);
services() services()
.rooms .rooms
.pdu_metadata .pdu_metadata
@ -82,10 +139,8 @@ pub async fn get_relating_events_route(
&body.event_id, &body.event_id,
None, None,
None, None,
body.from.clone(), from,
body.to.clone(), to,
body.limit, limit,
body.recurse,
&body.dir,
) )
} }
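On the right-hand side, each of the three handlers above repeats the same preamble: parse the `from`/`to` pagination tokens into `PduCount`s and clamp the requested limit. The limit rule on its own, as a self-contained sketch (using a plain `u64` in place of ruma's `UInt`):

/// Sketch of the limit clamping repeated above: default to 10 results when the
/// client sends nothing, and never return more than 100.
fn effective_limit(requested: Option<u64>) -> usize {
    requested
        .and_then(|u| u32::try_from(u).ok())
        .map_or(10_usize, |u| u as usize)
        .min(100)
}

// effective_limit(None) == 10, effective_limit(Some(25)) == 25,
// effective_limit(Some(5_000)) == 100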

View file

@ -72,7 +72,7 @@ pub async fn create_room_route(
&& !services().users.is_admin(sender_user)? && !services().users.is_admin(sender_user)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Room creation has been disabled.", "Room creation has been disabled.",
)); ));
} }
@ -230,7 +230,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -259,7 +258,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some(sender_user.to_string()), state_key: Some(sender_user.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -313,7 +311,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -337,7 +334,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -364,7 +360,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -386,7 +381,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -409,7 +403,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -454,7 +447,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -477,7 +469,6 @@ pub async fn create_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&room_id, &room_id,
@ -494,10 +485,7 @@ pub async fn create_room_route(
// Homeserver specific stuff // Homeserver specific stuff
if let Some(alias) = alias { if let Some(alias) = alias {
services() services().rooms.alias.set_alias(&alias, &room_id)?;
.rooms
.alias
.set_alias(&alias, &room_id, sender_user)?;
} }
if body.visibility == room::Visibility::Public { if body.visibility == room::Visibility::Public {
@ -534,7 +522,7 @@ pub async fn get_room_event_route(
&body.event_id, &body.event_id,
)? { )? {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"You don't have permission to view this event.", "You don't have permission to view this event.",
)); ));
} }
@ -563,7 +551,7 @@ pub async fn get_room_aliases_route(
.is_joined(sender_user, &body.room_id)? .is_joined(sender_user, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"You don't have permission to view this room.", "You don't have permission to view this room.",
)); ));
} }
@ -638,7 +626,6 @@ pub async fn upgrade_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&body.room_id, &body.room_id,
@ -740,7 +727,6 @@ pub async fn upgrade_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&replacement_room, &replacement_room,
@ -769,7 +755,6 @@ pub async fn upgrade_room_route(
unsigned: None, unsigned: None,
state_key: Some(sender_user.to_string()), state_key: Some(sender_user.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&replacement_room, &replacement_room,
@ -812,7 +797,6 @@ pub async fn upgrade_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&replacement_room, &replacement_room,
@ -831,7 +815,7 @@ pub async fn upgrade_room_route(
services() services()
.rooms .rooms
.alias .alias
.set_alias(&alias, &replacement_room, sender_user)?; .set_alias(&alias, &replacement_room)?;
} }
// Get the old room power levels // Get the old room power levels
@ -863,7 +847,6 @@ pub async fn upgrade_room_route(
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
sender_user, sender_user,
&body.room_id, &body.room_id,

View file

@ -43,7 +43,7 @@ pub async fn search_events_route(
.is_joined(sender_user, &room_id)? .is_joined(sender_user, &room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"You don't have permission to view this room.", "You don't have permission to view this room.",
)); ));
} }
@ -89,8 +89,7 @@ pub async fn search_events_route(
.get_pdu_from_id(result) .get_pdu_from_id(result)
.ok()? .ok()?
.filter(|pdu| { .filter(|pdu| {
!pdu.is_redacted() services()
&& services()
.rooms .rooms
.state_accessor .state_accessor
.user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id) .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id)

View file

@ -63,7 +63,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
UserId::parse(user) UserId::parse(user)
} else { } else {
warn!("Bad login type: {:?}", &body.login_info); warn!("Bad login type: {:?}", &body.login_info);
return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type.")); return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
} }
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
@ -78,7 +78,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
.users .users
.password_hash(&user_id)? .password_hash(&user_id)?
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Wrong username or password.", "Wrong username or password.",
))?; ))?;
@ -93,7 +93,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
if !hash_matches { if !hash_matches {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Wrong username or password.", "Wrong username or password.",
)); ));
} }
@ -143,7 +143,7 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
UserId::parse(user) UserId::parse(user)
} else { } else {
warn!("Bad login type: {:?}", &body.login_info); warn!("Bad login type: {:?}", &body.login_info);
return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type.")); return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
} }
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;

View file

@ -10,7 +10,7 @@ use ruma::{
room::canonical_alias::RoomCanonicalAliasEventContent, AnyStateEventContent, StateEventType, room::canonical_alias::RoomCanonicalAliasEventContent, AnyStateEventContent, StateEventType,
}, },
serde::Raw, serde::Raw,
EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId, EventId, RoomId, UserId,
}; };
use tracing::log::warn; use tracing::log::warn;
@ -32,11 +32,6 @@ pub async fn send_state_event_for_key_route(
&body.event_type, &body.event_type,
&body.body.body, // Yes, I hate it too &body.body.body, // Yes, I hate it too
body.state_key.to_owned(), body.state_key.to_owned(),
if body.appservice_info.is_some() {
body.timestamp
} else {
None
},
) )
.await?; .await?;
@ -59,7 +54,7 @@ pub async fn send_state_event_for_empty_key_route(
// Forbid m.room.encryption if encryption is disabled // Forbid m.room.encryption if encryption is disabled
if body.event_type == StateEventType::RoomEncryption && !services().globals.allow_encryption() { if body.event_type == StateEventType::RoomEncryption && !services().globals.allow_encryption() {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Encryption has been disabled", "Encryption has been disabled",
)); ));
} }
@ -70,11 +65,6 @@ pub async fn send_state_event_for_empty_key_route(
&body.event_type.to_string().into(), &body.event_type.to_string().into(),
&body.body.body, &body.body.body,
body.state_key.to_owned(), body.state_key.to_owned(),
if body.appservice_info.is_some() {
body.timestamp
} else {
None
},
) )
.await?; .await?;
@ -98,7 +88,7 @@ pub async fn get_state_events_route(
.user_can_see_state_events(sender_user, &body.room_id)? .user_can_see_state_events(sender_user, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"You don't have permission to view the room state.", "You don't have permission to view the room state.",
)); ));
} }
@ -131,7 +121,7 @@ pub async fn get_state_events_for_key_route(
.user_can_see_state_events(sender_user, &body.room_id)? .user_can_see_state_events(sender_user, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"You don't have permission to view the room state.", "You don't have permission to view the room state.",
)); ));
} }
@ -170,7 +160,7 @@ pub async fn get_state_events_for_empty_key_route(
.user_can_see_state_events(sender_user, &body.room_id)? .user_can_see_state_events(sender_user, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"You don't have permission to view the room state.", "You don't have permission to view the room state.",
)); ));
} }
@ -200,7 +190,6 @@ async fn send_state_event_for_key_helper(
event_type: &StateEventType, event_type: &StateEventType,
json: &Raw<AnyStateEventContent>, json: &Raw<AnyStateEventContent>,
state_key: String, state_key: String,
timestamp: Option<MilliSecondsSinceUnixEpoch>,
) -> Result<Arc<EventId>> { ) -> Result<Arc<EventId>> {
let sender_user = sender; let sender_user = sender;
@ -225,7 +214,7 @@ async fn send_state_event_for_key_helper(
.is_none() .is_none()
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"You are only allowed to send canonical_alias \ "You are only allowed to send canonical_alias \
events when its aliases already exist", events when its aliases already exist",
)); ));
@ -254,7 +243,6 @@ async fn send_state_event_for_key_helper(
unsigned: None, unsigned: None,
state_key: Some(state_key), state_key: Some(state_key),
redacts: None, redacts: None,
timestamp,
}, },
sender_user, sender_user,
room_id, room_id,

View file

@ -12,7 +12,7 @@ use ruma::{
Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom,
LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice,
}, },
v4::{SlidingOp, SlidingSyncRoomHero}, v4::SlidingOp,
DeviceLists, UnreadNotificationsCount, DeviceLists, UnreadNotificationsCount,
}, },
uiaa::UiaaResponse, uiaa::UiaaResponse,
@ -716,7 +716,7 @@ async fn load_joined_room(
.state_cache .state_cache
.is_invited(&user_id, room_id)?) .is_invited(&user_id, room_id)?)
{ {
Ok::<_, Error>(Some(user_id)) Ok::<_, Error>(Some(state_key.clone()))
} else { } else {
Ok(None) Ok(None)
} }
@ -1572,7 +1572,7 @@ pub async fn sync_events_v4_route(
sender_user.clone(), sender_user.clone(),
sender_device.clone(), sender_device.clone(),
conn_id.clone(), conn_id.clone(),
body.room_subscriptions.clone(), body.room_subscriptions,
); );
} }
@ -1638,37 +1638,33 @@ pub async fn sync_events_v4_route(
.get_member(room_id, &member) .get_member(room_id, &member)
.ok() .ok()
.flatten() .flatten()
.map(|memberevent| SlidingSyncRoomHero { .map(|memberevent| {
user_id: member, (
name: memberevent.displayname, memberevent
avatar: memberevent.avatar_url, .displayname
.unwrap_or_else(|| member.to_string()),
memberevent.avatar_url,
)
}) })
}) })
.take(5) .take(5)
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let name = match &heroes[..] { let name = match &heroes[..] {
[] => None, [] => None,
[only] => Some( [only] => Some(only.0.clone()),
only.name
.clone()
.unwrap_or_else(|| only.user_id.to_string()),
),
[firsts @ .., last] => Some( [firsts @ .., last] => Some(
firsts firsts
.iter() .iter()
.map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string())) .map(|h| h.0.clone())
.collect::<Vec<_>>() .collect::<Vec<_>>()
.join(", ") .join(", ")
+ " and " + " and "
+ &last + &last.0,
.name
.clone()
.unwrap_or_else(|| last.user_id.to_string()),
), ),
}; };
let avatar = if let [only] = &heroes[..] { let avatar = if let [only] = &heroes[..] {
only.avatar.clone() only.1.clone()
} else { } else {
None None
}; };
@ -1729,16 +1725,6 @@ pub async fn sync_events_v4_route(
), ),
num_live: None, // Count events in timeline greater than global sync counter num_live: None, // Count events in timeline greater than global sync counter
timestamp: None, timestamp: None,
heroes: if body
.room_subscriptions
.get(room_id)
.map(|sub| sub.include_heroes.unwrap_or_default())
.unwrap_or_default()
{
Some(heroes)
} else {
None
},
}, },
); );
} }
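Both sides derive a fallback room name from the heroes list ("A", or "A, B and C"); the right-hand side carries (displayname-or-user-ID, avatar_url) tuples, while the left-hand side keeps `SlidingSyncRoomHero` entries and falls back to the user ID only when building the name. The naming rule itself, sketched over plain strings:

/// Sketch of the hero-based room name fallback used above: no heroes -> no name,
/// a single hero -> that name, otherwise "A, B and C".
fn room_name_from_heroes(heroes: &[String]) -> Option<String> {
    match heroes {
        [] => None,
        [only] => Some(only.clone()),
        [firsts @ .., last] => Some(format!("{} and {}", firsts.join(", "), last)),
    }
}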

View file

@ -17,7 +17,7 @@ pub async fn create_typing_event_route(
.is_joined(sender_user, &body.room_id)? .is_joined(sender_user, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"You are not in this room.", "You are not in this room.",
)); ));
} }

View file

@ -27,10 +27,7 @@ pub async fn get_supported_versions_route(
"v1.4".to_owned(), "v1.4".to_owned(),
"v1.5".to_owned(), "v1.5".to_owned(),
], ],
unstable_features: BTreeMap::from_iter([ unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
("org.matrix.e2e_cross_signing".to_owned(), true),
("org.matrix.msc3916.stable".to_owned(), true),
]),
}; };
Ok(resp) Ok(resp)

View file

@ -2,22 +2,20 @@ use std::{collections::BTreeMap, iter::FromIterator, str};
use axum::{ use axum::{
async_trait, async_trait,
body::Body, body::{Full, HttpBody},
extract::{FromRequest, Path}, extract::{rejection::TypedHeaderRejectionReason, FromRequest, Path, TypedHeader},
headers::{
authorization::{Bearer, Credentials},
Authorization,
},
response::{IntoResponse, Response}, response::{IntoResponse, Response},
RequestExt, RequestPartsExt, BoxError, RequestExt, RequestPartsExt,
}; };
use axum_extra::{ use bytes::{Buf, BufMut, Bytes, BytesMut};
headers::{authorization::Bearer, Authorization},
typed_header::TypedHeaderRejectionReason,
TypedHeader,
};
use bytes::{BufMut, BytesMut};
use http::{Request, StatusCode}; use http::{Request, StatusCode};
use ruma::{ use ruma::{
api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
server_util::authorization::XMatrix, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedUserId, UserId,
}; };
use serde::Deserialize; use serde::Deserialize;
use tracing::{debug, error, warn}; use tracing::{debug, error, warn};
@ -33,33 +31,37 @@ enum Token {
} }
#[async_trait] #[async_trait]
impl<T, S> FromRequest<S> for Ruma<T> impl<T, S, B> FromRequest<S, B> for Ruma<T>
where where
T: IncomingRequest, T: IncomingRequest,
B: HttpBody + Send + 'static,
B::Data: Send,
B::Error: Into<BoxError>,
{ {
type Rejection = Error; type Rejection = Error;
async fn from_request(req: Request<Body>, _state: &S) -> Result<Self, Self::Rejection> { async fn from_request(req: Request<B>, _state: &S) -> Result<Self, Self::Rejection> {
#[derive(Deserialize)] #[derive(Deserialize)]
struct QueryParams { struct QueryParams {
access_token: Option<String>, access_token: Option<String>,
user_id: Option<String>, user_id: Option<String>,
} }
let (mut parts, mut body) = { let (mut parts, mut body) = match req.with_limited_body() {
let limited_req = req.with_limited_body(); Ok(limited_req) => {
let (parts, body) = limited_req.into_parts(); let (parts, body) = limited_req.into_parts();
let body = axum::body::to_bytes( let body = to_bytes(body)
body,
services()
.globals
.max_request_size()
.try_into()
.unwrap_or(usize::MAX),
)
.await .await
.map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
(parts, body) (parts, body)
}
Err(original_req) => {
let (parts, body) = original_req.into_parts();
let body = to_bytes(body)
.await
.map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
(parts, body)
}
}; };
let metadata = T::METADATA; let metadata = T::METADATA;
@ -100,15 +102,10 @@ where
let (sender_user, sender_device, sender_servername, appservice_info) = let (sender_user, sender_device, sender_servername, appservice_info) =
match (metadata.authentication, token) { match (metadata.authentication, token) {
(_, Token::Invalid) => { (_, Token::Invalid) => {
// OpenID endpoint uses a query param with the same name, drop this once query params for user auth are removed from the spec
if query_params.access_token.is_some() {
(None, None, None, None)
} else {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::UnknownToken { soft_logout: false }, ErrorKind::UnknownToken { soft_logout: false },
"Unknown access token.", "Unknown access token.",
)); ))
}
} }
(AuthScheme::AccessToken, Token::Appservice(info)) => { (AuthScheme::AccessToken, Token::Appservice(info)) => {
let user_id = query_params let user_id = query_params
@ -135,7 +132,7 @@ where
if !services().users.exists(&user_id)? { if !services().users.exists(&user_id)? {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"User does not exist.", "User does not exist.",
)); ));
} }
@ -175,7 +172,7 @@ where
_ => "Unknown header-related error", _ => "Unknown header-related error",
}; };
Error::BadRequest(ErrorKind::forbidden(), msg) Error::BadRequest(ErrorKind::Forbidden, msg)
})?; })?;
if let Some(dest) = x_matrix.destination { if let Some(dest) = x_matrix.destination {
@ -189,17 +186,12 @@ where
let origin_signatures = BTreeMap::from_iter([( let origin_signatures = BTreeMap::from_iter([(
x_matrix.key.clone(), x_matrix.key.clone(),
CanonicalJsonValue::String(x_matrix.sig.to_string()), CanonicalJsonValue::String(x_matrix.sig),
)]); )]);
let signatures = BTreeMap::from_iter([( let signatures = BTreeMap::from_iter([(
x_matrix.origin.as_str().to_owned(), x_matrix.origin.as_str().to_owned(),
CanonicalJsonValue::Object( CanonicalJsonValue::Object(origin_signatures),
origin_signatures
.into_iter()
.map(|(k, v)| (k.to_string(), v))
.collect(),
),
)]); )]);
let mut request_map = BTreeMap::from_iter([ let mut request_map = BTreeMap::from_iter([
@ -234,7 +226,7 @@ where
let keys_result = services() let keys_result = services()
.rooms .rooms
.event_handler .event_handler
.fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_string()], false) .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()])
.await; .await;
let keys = match keys_result { let keys = match keys_result {
@ -242,25 +234,14 @@ where
Err(e) => { Err(e) => {
warn!("Failed to fetch signing keys: {}", e); warn!("Failed to fetch signing keys: {}", e);
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Failed to fetch signing keys.", "Failed to fetch signing keys.",
)); ));
} }
}; };
// Only verify_keys that are currently valid should be used for validating requests let pub_key_map =
// as per MSC4029 BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]);
let pub_key_map = BTreeMap::from_iter([(
x_matrix.origin.as_str().to_owned(),
if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() {
keys.verify_keys
.into_iter()
.map(|(id, key)| (id, key.key))
.collect()
} else {
BTreeMap::new()
},
)]);
match ruma::signatures::verify_json(&pub_key_map, &request_map) { match ruma::signatures::verify_json(&pub_key_map, &request_map) {
Ok(()) => (None, None, Some(x_matrix.origin), None), Ok(()) => (None, None, Some(x_matrix.origin), None),
@ -279,7 +260,7 @@ where
} }
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Failed to verify X-Matrix signatures.", "Failed to verify X-Matrix signatures.",
)); ));
} }
@ -359,11 +340,124 @@ where
} }
} }
struct XMatrix {
destination: Option<OwnedServerName>,
origin: OwnedServerName,
key: String, // KeyName?
sig: String,
}
impl Credentials for XMatrix {
const SCHEME: &'static str = "X-Matrix";
fn decode(value: &http::HeaderValue) -> Option<Self> {
debug_assert!(
value.as_bytes().starts_with(b"X-Matrix "),
"HeaderValue to decode should start with \"X-Matrix ..\", received = {value:?}",
);
let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..])
.ok()?
.trim_start();
let mut origin = None;
let mut key = None;
let mut sig = None;
let mut destination = None;
for entry in parameters.split_terminator(',') {
let (name, value) = entry.split_once('=')?;
// It's not at all clear why some fields are quoted and others not in the spec,
// let's simply accept either form for every field.
let value = value
.strip_prefix('"')
.and_then(|rest| rest.strip_suffix('"'))
.unwrap_or(value);
// FIXME: Catch multiple fields of the same name
match name {
"origin" => origin = Some(value.try_into().ok()?),
"key" => key = Some(value.to_owned()),
"sig" => sig = Some(value.to_owned()),
"destination" => destination = Some(value.try_into().ok()?),
_ => debug!(
"Unexpected field `{}` in X-Matrix Authorization header",
name
),
}
}
Some(Self {
destination,
origin: origin?,
key: key?,
sig: sig?,
})
}
fn encode(&self) -> http::HeaderValue {
todo!()
}
}
impl<T: OutgoingResponse> IntoResponse for RumaResponse<T> { impl<T: OutgoingResponse> IntoResponse for RumaResponse<T> {
fn into_response(self) -> Response { fn into_response(self) -> Response {
match self.0.try_into_http_response::<BytesMut>() { match self.0.try_into_http_response::<BytesMut>() {
Ok(res) => res.map(BytesMut::freeze).map(Body::from).into_response(), Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(),
Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(), Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
} }
} }
} }
// copied from hyper under the following license:
// Copyright (c) 2014-2021 Sean McArthur
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
pub(crate) async fn to_bytes<T>(body: T) -> Result<Bytes, T::Error>
where
T: HttpBody,
{
futures_util::pin_mut!(body);
// If there's only 1 chunk, we can just return Buf::to_bytes()
let mut first = if let Some(buf) = body.data().await {
buf?
} else {
return Ok(Bytes::new());
};
let second = if let Some(buf) = body.data().await {
buf?
} else {
return Ok(first.copy_to_bytes(first.remaining()));
};
// With more than 1 buf, we gotta flatten into a Vec first.
let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize;
let mut vec = Vec::with_capacity(cap);
vec.put(first);
vec.put(second);
while let Some(buf) = body.data().await {
vec.put(buf?);
}
Ok(vec.into())
}
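A small usage sketch of the hand-rolled `XMatrix` parser defined above on the right-hand side (the left-hand side imports an equivalent type from `ruma::server_util::authorization` instead). It assumes the struct and `Credentials` impl shown above live in the same module; all header contents are invented example values:

use axum::headers::authorization::Credentials;
use http::HeaderValue;

// Usage sketch for the XMatrix parser above; field values are made up.
fn parse_example_header() {
    let value = HeaderValue::from_static(
        "X-Matrix origin=\"other.example\",destination=\"conduit.example\",key=\"ed25519:abc\",sig=\"c2lnbmF0dXJl\"",
    );
    let parsed = XMatrix::decode(&value).expect("well-formed X-Matrix header");
    assert_eq!(parsed.origin.as_str(), "other.example");
    assert_eq!(parsed.key, "ed25519:abc");
    assert_eq!(parsed.destination.as_ref().map(|d| d.as_str()), Some("conduit.example"));
}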

View file

@ -2,25 +2,17 @@
use crate::{ use crate::{
api::client_server::{self, claim_keys_helper, get_keys_helper}, api::client_server::{self, claim_keys_helper, get_keys_helper},
service::{ service::pdu::{gen_event_id_canonical_json, PduBuilder},
globals::SigningKeys,
media::FileMeta,
pdu::{gen_event_id_canonical_json, PduBuilder},
},
services, utils, Error, PduEvent, Result, Ruma, services, utils, Error, PduEvent, Result, Ruma,
}; };
use axum::{response::IntoResponse, Json}; use axum::{response::IntoResponse, Json};
use axum_extra::headers::{CacheControl, Header};
use get_profile_information::v1::ProfileField; use get_profile_information::v1::ProfileField;
use http::header::AUTHORIZATION; use http::header::{HeaderValue, AUTHORIZATION};
use ruma::{ use ruma::{
api::{ api::{
client::error::{Error as RumaError, ErrorKind}, client::error::{Error as RumaError, ErrorKind},
federation::{ federation::{
authenticated_media::{
get_content, get_content_thumbnail, Content, ContentMetadata, FileOrLocation,
},
authorization::get_event_authorization, authorization::get_event_authorization,
backfill::get_backfill, backfill::get_backfill,
device::get_devices::{self, v1::UserDevice}, device::get_devices::{self, v1::UserDevice},
@ -32,7 +24,6 @@ use ruma::{
event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, event::{get_event, get_missing_events, get_room_state, get_room_state_ids},
keys::{claim_keys, get_keys}, keys::{claim_keys, get_keys},
membership::{create_invite, create_join_event, prepare_join_event}, membership::{create_invite, create_join_event, prepare_join_event},
openid::get_openid_userinfo,
query::{get_profile_information, get_room_information}, query::{get_profile_information, get_room_information},
transactions::{ transactions::{
edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent}, edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent},
@ -102,6 +93,13 @@ impl FedDest {
} }
} }
fn into_uri_string(self) -> String {
match self {
Self::Literal(addr) => addr.to_string(),
Self::Named(host, ref port) => host + port,
}
}
fn hostname(&self) -> String { fn hostname(&self) -> String {
match &self { match &self {
Self::Literal(addr) => addr.ip().to_string(), Self::Literal(addr) => addr.ip().to_string(),
@ -137,6 +135,8 @@ where
debug!("Preparing to send request to {destination}"); debug!("Preparing to send request to {destination}");
let mut write_destination_to_cache = false;
let cached_result = services() let cached_result = services()
.globals .globals
.actual_destination_cache .actual_destination_cache
@ -145,63 +145,14 @@ where
.get(destination) .get(destination)
.cloned(); .cloned();
let actual_destination = if let Some(DestinationResponse { let (actual_destination, host) = if let Some(result) = cached_result {
actual_destination, result
dest_type,
}) = cached_result
{
match dest_type {
DestType::IsIpOrHasPort => actual_destination,
DestType::LookupFailed {
well_known_retry,
well_known_backoff_mins,
} => {
if well_known_retry < Instant::now() {
find_actual_destination(destination, None, false, Some(well_known_backoff_mins))
.await
} else { } else {
actual_destination write_destination_to_cache = true;
}
}
DestType::WellKnown { expires } => { let result = find_actual_destination(destination).await;
if expires < Instant::now() {
find_actual_destination(destination, None, false, None).await (result.0, result.1.into_uri_string())
} else {
actual_destination
}
}
DestType::WellKnownSrv {
srv_expires,
well_known_expires,
well_known_host,
} => {
if well_known_expires < Instant::now() {
find_actual_destination(destination, None, false, None).await
} else if srv_expires < Instant::now() {
find_actual_destination(destination, Some(well_known_host), true, None).await
} else {
actual_destination
}
}
DestType::Srv {
well_known_retry,
well_known_backoff_mins,
srv_expires,
} => {
if well_known_retry < Instant::now() {
find_actual_destination(destination, None, false, Some(well_known_backoff_mins))
.await
} else if srv_expires < Instant::now() {
find_actual_destination(destination, None, true, Some(well_known_backoff_mins))
.await
} else {
actual_destination
}
}
}
} else {
find_actual_destination(destination, None, false, None).await
}; };
let actual_destination_str = actual_destination.clone().into_https_string(); let actual_destination_str = actual_destination.clone().into_https_string();
@ -210,7 +161,7 @@ where
.try_into_http_request::<Vec<u8>>( .try_into_http_request::<Vec<u8>>(
&actual_destination_str, &actual_destination_str,
SendAccessToken::IfRequired(""), SendAccessToken::IfRequired(""),
&[MatrixVersion::V1_11], &[MatrixVersion::V1_4],
) )
.map_err(|e| { .map_err(|e| {
warn!( warn!(
@ -274,14 +225,13 @@ where
for s in signature_server { for s in signature_server {
http_request.headers_mut().insert( http_request.headers_mut().insert(
AUTHORIZATION, AUTHORIZATION,
format!( HeaderValue::from_str(&format!(
"X-Matrix origin=\"{}\",destination=\"{}\",key=\"{}\",sig=\"{}\"", "X-Matrix origin=\"{}\",destination=\"{}\",key=\"{}\",sig=\"{}\"",
services().globals.server_name(), services().globals.server_name(),
destination, destination,
s.0, s.0,
s.1 s.1
) ))
.try_into()
.unwrap(), .unwrap(),
); );
} }
@ -339,10 +289,21 @@ where
if status == 200 { if status == 200 {
debug!("Parsing response bytes from {destination}"); debug!("Parsing response bytes from {destination}");
let response = T::IncomingResponse::try_from_http_response(http_response); let response = T::IncomingResponse::try_from_http_response(http_response);
if response.is_ok() && write_destination_to_cache {
services()
.globals
.actual_destination_cache
.write()
.await
.insert(
OwnedServerName::from(destination),
(actual_destination, host),
);
}
response.map_err(|e| { response.map_err(|e| {
warn!( warn!(
"Invalid 200 response from {} on: {} {:?}", "Invalid 200 response from {} on: {} {}",
&destination, url, e &destination, url, e
); );
Error::BadServerResponse("Server returned bad 200 response.") Error::BadServerResponse("Server returned bad 200 response.")
@ -383,175 +344,42 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest {
FedDest::Named(host.to_owned(), port.to_owned()) FedDest::Named(host.to_owned(), port.to_owned())
} }
#[derive(Clone)] /// Returns: actual_destination, host header
pub struct DestinationResponse { /// Implemented according to the specification at <https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names>
pub actual_destination: FedDest,
pub dest_type: DestType,
}
#[derive(Clone)]
pub enum DestType {
WellKnownSrv {
srv_expires: Instant,
well_known_expires: Instant,
well_known_host: String,
},
WellKnown {
expires: Instant,
},
Srv {
srv_expires: Instant,
well_known_retry: Instant,
well_known_backoff_mins: u16,
},
IsIpOrHasPort,
LookupFailed {
well_known_retry: Instant,
well_known_backoff_mins: u16,
},
}
/// Implemented according to the specification at <https://spec.matrix.org/v1.11/server-server-api/#resolving-server-names>
/// Numbers in comments below refer to bullet points in linked section of specification /// Numbers in comments below refer to bullet points in linked section of specification
async fn find_actual_destination( async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) {
destination: &'_ ServerName,
// The host used to potentially lookup SRV records against, only used when only_request_srv is true
well_known_dest: Option<String>,
// Should be used when only the SRV lookup has expired
only_request_srv: bool,
// The backoff time for the last well known failure, if any
well_known_backoff_mins: Option<u16>,
) -> FedDest {
debug!("Finding actual destination for {destination}"); debug!("Finding actual destination for {destination}");
let destination_str = destination.to_string(); let destination_str = destination.as_str().to_owned();
let next_backoff_mins = well_known_backoff_mins let mut hostname = destination_str.clone();
// Errors are recommended to be cached for up to an hour let actual_destination = match get_ip_with_port(&destination_str) {
.map(|mins| (mins * 2).min(60))
.unwrap_or(1);
let (actual_destination, dest_type) = if only_request_srv {
let destination_str = well_known_dest.unwrap_or(destination_str);
let (dest, expires) = get_srv_destination(destination_str).await;
let well_known_retry =
Instant::now() + Duration::from_secs((60 * next_backoff_mins).into());
(
dest,
if let Some(expires) = expires {
DestType::Srv {
well_known_backoff_mins: next_backoff_mins,
srv_expires: expires,
well_known_retry,
}
} else {
DestType::LookupFailed {
well_known_retry,
well_known_backoff_mins: next_backoff_mins,
}
},
)
} else {
match get_ip_with_port(&destination_str) {
Some(host_port) => { Some(host_port) => {
debug!("1: IP literal with provided or default port"); debug!("1: IP literal with provided or default port");
(host_port, DestType::IsIpOrHasPort) host_port
} }
None => { None => {
if let Some(pos) = destination_str.find(':') { if let Some(pos) = destination_str.find(':') {
debug!("2: Hostname with included port"); debug!("2: Hostname with included port");
let (host, port) = destination_str.split_at(pos); let (host, port) = destination_str.split_at(pos);
( FedDest::Named(host.to_owned(), port.to_owned())
FedDest::Named(host.to_owned(), port.to_owned()),
DestType::IsIpOrHasPort,
)
} else { } else {
debug!("Requesting well known for {destination_str}"); debug!("Requesting well known for {destination}");
match request_well_known(destination_str.as_str()).await { match request_well_known(destination.as_str()).await {
Some((delegated_hostname, timestamp)) => { Some(delegated_hostname) => {
debug!("3: A .well-known file is available"); debug!("3: A .well-known file is available");
hostname = add_port_to_hostname(&delegated_hostname).into_uri_string();
match get_ip_with_port(&delegated_hostname) { match get_ip_with_port(&delegated_hostname) {
// 3.1: IP literal in .well-known file Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file
Some(host_and_port) => {
(host_and_port, DestType::WellKnown { expires: timestamp })
}
None => { None => {
if let Some(pos) = delegated_hostname.find(':') { if let Some(pos) = delegated_hostname.find(':') {
debug!("3.2: Hostname with port in .well-known file"); debug!("3.2: Hostname with port in .well-known file");
let (host, port) = delegated_hostname.split_at(pos); let (host, port) = delegated_hostname.split_at(pos);
( FedDest::Named(host.to_owned(), port.to_owned())
FedDest::Named(host.to_owned(), port.to_owned()),
DestType::WellKnown { expires: timestamp },
)
} else { } else {
debug!("Delegated hostname has no port in this branch"); debug!("Delegated hostname has no port in this branch");
let (dest, srv_expires) = if let Some(hostname_override) =
get_srv_destination(delegated_hostname.clone()).await; query_srv_record(&delegated_hostname).await
( {
dest, debug!("3.3: SRV lookup successful");
if let Some(srv_expires) = srv_expires {
DestType::WellKnownSrv {
srv_expires,
well_known_expires: timestamp,
well_known_host: delegated_hostname,
}
} else {
DestType::WellKnown { expires: timestamp }
},
)
}
}
}
}
None => {
debug!("4: No .well-known or an error occured");
let (dest, expires) = get_srv_destination(destination_str).await;
let well_known_retry = Instant::now()
+ Duration::from_secs((60 * next_backoff_mins).into());
(
dest,
if let Some(expires) = expires {
DestType::Srv {
srv_expires: expires,
well_known_retry,
well_known_backoff_mins: next_backoff_mins,
}
} else {
DestType::LookupFailed {
well_known_retry,
well_known_backoff_mins: next_backoff_mins,
}
},
)
}
}
}
}
}
};
debug!("Actual destination: {actual_destination:?}");
let response = DestinationResponse {
actual_destination,
dest_type,
};
services()
.globals
.actual_destination_cache
.write()
.await
.insert(destination.to_owned(), response.clone());
response.actual_destination
}
/// Looks up the SRV records for federation usage
///
/// If no timestamp is returned, that means no SRV record was found
async fn get_srv_destination(delegated_hostname: String) -> (FedDest, Option<Instant>) {
if let Some((hostname_override, timestamp)) = query_srv_record(&delegated_hostname).await {
debug!("SRV lookup successful");
let force_port = hostname_override.port(); let force_port = hostname_override.port();
if let Ok(override_ip) = services() if let Ok(override_ip) = services()
@ -567,41 +395,91 @@ async fn get_srv_destination(delegated_hostname: String) -> (FedDest, Option<Ins
.unwrap() .unwrap()
.insert( .insert(
delegated_hostname.clone(), delegated_hostname.clone(),
(override_ip.iter().collect(), force_port.unwrap_or(8448)), (
override_ip.iter().collect(),
force_port.unwrap_or(8448),
),
); );
} else { } else {
// Removing in case there was previously a SRV record
services()
.globals
.tls_name_override
.write()
.unwrap()
.remove(&delegated_hostname);
warn!("Using SRV record, but could not resolve to IP"); warn!("Using SRV record, but could not resolve to IP");
} }
if let Some(port) = force_port { if let Some(port) = force_port {
( FedDest::Named(delegated_hostname, format!(":{port}"))
FedDest::Named(delegated_hostname, format!(":{port}")),
Some(timestamp),
)
} else { } else {
(add_port_to_hostname(&delegated_hostname), Some(timestamp)) add_port_to_hostname(&delegated_hostname)
} }
} else { } else {
// Removing in case there was previously a SRV record debug!("3.4: No SRV records, just use the hostname from .well-known");
add_port_to_hostname(&delegated_hostname)
}
}
}
}
}
None => {
debug!("4: No .well-known or an error occured");
match query_srv_record(&destination_str).await {
Some(hostname_override) => {
debug!("4: SRV record found");
let force_port = hostname_override.port();
if let Ok(override_ip) = services()
.globals
.dns_resolver()
.lookup_ip(hostname_override.hostname())
.await
{
services() services()
.globals .globals
.tls_name_override .tls_name_override
.write() .write()
.unwrap() .unwrap()
.remove(&delegated_hostname); .insert(
debug!("No SRV records found"); hostname.clone(),
(add_port_to_hostname(&delegated_hostname), None) (
} override_ip.iter().collect(),
force_port.unwrap_or(8448),
),
);
} else {
warn!("Using SRV record, but could not resolve to IP");
} }
async fn query_given_srv_record(record: &str) -> Option<(FedDest, Instant)> { if let Some(port) = force_port {
FedDest::Named(hostname.clone(), format!(":{port}"))
} else {
add_port_to_hostname(&hostname)
}
}
None => {
debug!("5: No SRV record found");
add_port_to_hostname(&destination_str)
}
}
}
}
}
}
};
debug!("Actual destination: {actual_destination:?}");
// Can't use get_ip_with_port here because we don't want to add a port
// to an IP address if it wasn't specified
let hostname = if let Ok(addr) = hostname.parse::<SocketAddr>() {
FedDest::Literal(addr)
} else if let Ok(addr) = hostname.parse::<IpAddr>() {
FedDest::Named(addr.to_string(), ":8448".to_owned())
} else if let Some(pos) = hostname.find(':') {
let (host, port) = hostname.split_at(pos);
FedDest::Named(host.to_owned(), port.to_owned())
} else {
FedDest::Named(hostname, ":8448".to_owned())
};
(actual_destination, hostname)
}
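The fallback chain just above rebuilds the Host value without blindly appending a port to an IP literal that already has one. A minimal, self-contained sketch of the same rules, using plain strings in place of the `FedDest` enum (the `normalize_host` name and the tuple return type are illustrative only):

```rust
use std::net::{IpAddr, SocketAddr};

// Mirrors the fallback chain above, with plain strings standing in for FedDest.
fn normalize_host(hostname: &str) -> (String, String) {
    if let Ok(addr) = hostname.parse::<SocketAddr>() {
        // IP literal with an explicit port: keep both parts as given.
        (addr.ip().to_string(), format!(":{}", addr.port()))
    } else if let Ok(addr) = hostname.parse::<IpAddr>() {
        // Bare IP literal: append the default federation port.
        (addr.to_string(), ":8448".to_owned())
    } else if let Some(pos) = hostname.find(':') {
        // Hostname with an explicit port: split host and port.
        let (host, port) = hostname.split_at(pos);
        (host.to_owned(), port.to_owned())
    } else {
        // Bare hostname: append the default federation port.
        (hostname.to_owned(), ":8448".to_owned())
    }
}

fn main() {
    assert_eq!(
        normalize_host("1.2.3.4"),
        ("1.2.3.4".to_string(), ":8448".to_string())
    );
    assert_eq!(
        normalize_host("matrix.example.org:443"),
        ("matrix.example.org".to_string(), ":443".to_string())
    );
    assert_eq!(
        normalize_host("matrix.example.org"),
        ("matrix.example.org".to_string(), ":8448".to_string())
    );
}
```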
async fn query_given_srv_record(record: &str) -> Option<FedDest> {
services() services()
.globals .globals
.dns_resolver() .dns_resolver()
@ -609,19 +487,16 @@ async fn query_given_srv_record(record: &str) -> Option<(FedDest, Instant)> {
        .await
        .map(|srv| {
            srv.iter().next().map(|result| {
-                (
-                    FedDest::Named(
-                        result.target().to_string().trim_end_matches('.').to_owned(),
-                        format!(":{}", result.port()),
-                    ),
-                    srv.as_lookup().valid_until(),
-                )
+                FedDest::Named(
+                    result.target().to_string().trim_end_matches('.').to_owned(),
+                    format!(":{}", result.port()),
+                )
            })
        })
        .unwrap_or(None)
 }

-async fn query_srv_record(hostname: &'_ str) -> Option<(FedDest, Instant)> {
+async fn query_srv_record(hostname: &'_ str) -> Option<FedDest> {
    let hostname = hostname.trim_end_matches('.');

    if let Some(host_port) = query_given_srv_record(&format!("_matrix-fed._tcp.{hostname}.")).await
@ -632,7 +507,7 @@ async fn query_srv_record(hostname: &'_ str) -> Option<(FedDest, Instant)> {
    }
 }

-async fn request_well_known(destination: &str) -> Option<(String, Instant)> {
+async fn request_well_known(destination: &str) -> Option<String> {
    let response = services()
        .globals
        .default_client()
@ -640,40 +515,14 @@ async fn request_well_known(destination: &str) -> Option<(String, Instant)> {
        .send()
        .await;
    debug!("Got well known response");
-    let response = match response {
-        Err(e) => {
-            debug!("Well known error: {e:?}");
-            return None;
-        }
-        Ok(r) => r,
-    };
-
-    let mut headers = response.headers().values();
-    let cache_for = CacheControl::decode(&mut headers)
-        .ok()
-        .and_then(|cc| {
-            // Servers should respect the cache control headers present on the response, or use a sensible default when headers are not present.
-            if cc.no_store() || cc.no_cache() {
-                Some(Duration::ZERO)
-            } else {
-                cc.max_age()
-                    // Servers should additionally impose a maximum cache time for responses: 48 hours is recommended.
-                    .map(|age| age.min(Duration::from_secs(60 * 60 * 48)))
-            }
-        })
-        // The recommended sensible default is 24 hours.
-        .unwrap_or_else(|| Duration::from_secs(60 * 60 * 24));
-
-    let text = response.text().await;
-    debug!("Got well known response text");
-
-    let host = || {
-        let body: serde_json::Value = serde_json::from_str(&text.ok()?).ok()?;
-        body.get("m.server")?.as_str().map(ToOwned::to_owned)
-    };
-
-    host().map(|host| (host, Instant::now() + cache_for))
+    if let Err(e) = &response {
+        debug!("Well known error: {e:?}");
+        return None;
+    }
+    let text = response.ok()?.text().await;
+    debug!("Got well known response text");
+    let body: serde_json::Value = serde_json::from_str(&text.ok()?).ok()?;
+    Some(body.get("m.server")?.as_str()?.to_owned())
 }
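The removed branch above spells out the caching policy for `.well-known` lookups: honour Cache-Control, never cache `no-store`/`no-cache` responses, cap the lifetime at the recommended 48 hours, and fall back to the recommended 24-hour default. A minimal sketch of that policy, with the header values assumed to be pre-parsed (in the real code they come from the response via `CacheControl::decode`; the function name here is illustrative):

```rust
use std::time::{Duration, Instant};

// Computes when a cached .well-known result should expire, following the
// policy described in the comments above.
fn well_known_expiry(no_store: bool, no_cache: bool, max_age: Option<Duration>) -> Instant {
    let cache_for = if no_store || no_cache {
        // Never cache responses the server asked us not to cache.
        Duration::ZERO
    } else {
        max_age
            // 48 hours is the recommended upper bound on the cache time.
            .map(|age| age.min(Duration::from_secs(60 * 60 * 48)))
            // 24 hours is the recommended default when no max-age is given.
            .unwrap_or_else(|| Duration::from_secs(60 * 60 * 24))
    };
    Instant::now() + cache_for
}

fn main() {
    // A response advertising `max-age=300` would be cached for 5 minutes.
    let expiry = well_known_expiry(false, false, Some(Duration::from_secs(300)));
    assert!(expiry > Instant::now());
}
```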
/// # `GET /_matrix/federation/v1/version` /// # `GET /_matrix/federation/v1/version`
@ -813,78 +662,17 @@ pub fn parse_incoming_pdu(
    let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) {
        Ok(t) => t,
-        Err(e) => {
-            // Event could not be converted to canonical json
-            return Err(e);
-        }
+        Err(_) => {
+            // Event could not be converted to canonical json
+            return Err(Error::BadRequest(
+                ErrorKind::InvalidParam,
+                "Could not convert event to canonical json.",
+            ));
+        }
    };

    Ok((event_id, value, room_id))
 }
/// Attempts to parse a PDU and append it to the timeline.
/// If no event ID is returned, the PDU could not be parsed.
/// If Ok(()) is returned, the PDU was successfully appended to the timeline.
async fn handle_pdu_in_transaction(
origin: &ServerName,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
pdu: &RawJsonValue,
) -> (Option<OwnedEventId>, Result<()>) {
let (event_id, value, room_id) = match parse_incoming_pdu(pdu) {
Ok(t) => t,
Err(e) => {
warn!("Could not parse PDU: {e}");
warn!("Full PDU: {:?}", &pdu);
return (None, Err(Error::BadServerResponse("Could not parse PDU")));
}
};
// Makes use of the m.room.create event. If we cannot fetch this event,
// we must have never been in that room.
if services().rooms.state.get_room_version(&room_id).is_err() {
debug!("Room {room_id} is not known to this server");
return (
Some(event_id),
Err(Error::BadServerResponse("Room is not known to this server")),
);
}
// We do not add the event_id field to the pdu here because of signature and hashes checks
let mutex = Arc::clone(
services()
.globals
.roomid_mutex_federation
.write()
.await
.entry(room_id.to_owned())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let start_time = Instant::now();
if let Err(e) = services()
.rooms
.event_handler
.handle_incoming_pdu(origin, &event_id, &room_id, value, true, pub_key_map)
.await
{
warn!("Error appending PDU to timeline: {}: {:?}", e, pdu);
return (Some(event_id), Err(e));
}
drop(mutex_lock);
let elapsed = start_time.elapsed();
debug!(
"Handling transaction of event {} took {}m{}s",
event_id,
elapsed.as_secs() / 60,
elapsed.as_secs() % 60
);
(Some(event_id), Ok(()))
}
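The doc comment above describes the per-PDU calling convention used by the transaction handler: an optional event ID plus a result, where only PDUs whose event ID could be parsed end up in the transaction's response map. A simplified sketch of folding such results into a map, using plain strings instead of `OwnedEventId` and Conduit's error type (names illustrative):

```rust
use std::collections::BTreeMap;

// Collects per-PDU outcomes, skipping PDUs whose event ID could not be parsed,
// mirroring the loop in send_transaction_message_route.
fn collect_pdu_results(
    results: Vec<(Option<String>, Result<(), String>)>,
) -> BTreeMap<String, Result<(), String>> {
    let mut resolved_map = BTreeMap::new();
    for (event_id, result) in results {
        if let Some(event_id) = event_id {
            resolved_map.insert(event_id, result);
        }
    }
    resolved_map
}

fn main() {
    let map = collect_pdu_results(vec![
        (Some("$good".into()), Ok(())),
        (None, Err("could not parse PDU".into())),
    ]);
    // The unparsable PDU is not reported back to the sending server.
    assert_eq!(map.len(), 1);
}
```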
/// # `PUT /_matrix/federation/v1/send/{txnId}` /// # `PUT /_matrix/federation/v1/send/{txnId}`
/// ///
/// Push EDUs and PDUs to this server. /// Push EDUs and PDUs to this server.
@ -909,11 +697,77 @@ pub async fn send_transaction_message_route(
// let mut auth_cache = EventMap::new(); // let mut auth_cache = EventMap::new();
for pdu in &body.pdus { for pdu in &body.pdus {
let (event_id, result) = let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
handle_pdu_in_transaction(sender_servername, &pub_key_map, pdu).await; warn!("Error parsing incoming event {:?}: {:?}", pdu, e);
Error::BadServerResponse("Invalid PDU in server response")
})?;
let room_id: OwnedRoomId = value
.get("room_id")
.and_then(|id| RoomId::parse(id.as_str()?).ok())
.ok_or(Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid room id in pdu",
))?;
if let Some(event_id) = event_id { if services().rooms.state.get_room_version(&room_id).is_err() {
resolved_map.insert(event_id.clone(), result.map_err(|e| e.sanitized_error())); debug!("Server is not in room {room_id}");
continue;
}
let r = parse_incoming_pdu(pdu);
let (event_id, value, room_id) = match r {
Ok(t) => t,
Err(e) => {
warn!("Could not parse PDU: {e}");
warn!("Full PDU: {:?}", &pdu);
continue;
}
};
// We do not add the event_id field to the pdu here because of signature and hashes checks
let mutex = Arc::clone(
services()
.globals
.roomid_mutex_federation
.write()
.await
.entry(room_id.to_owned())
.or_default(),
);
let mutex_lock = mutex.lock().await;
let start_time = Instant::now();
resolved_map.insert(
event_id.clone(),
services()
.rooms
.event_handler
.handle_incoming_pdu(
sender_servername,
&event_id,
&room_id,
value,
true,
&pub_key_map,
)
.await
.map(|_| ()),
);
drop(mutex_lock);
let elapsed = start_time.elapsed();
debug!(
"Handling transaction of event {} took {}m{}s",
event_id,
elapsed.as_secs() / 60,
elapsed.as_secs() % 60
);
}
for pdu in &resolved_map {
if let Err(e) = pdu.1 {
if matches!(e, Error::BadRequest(ErrorKind::NotFound, _)) {
warn!("Incoming PDU failed {:?}", pdu);
}
} }
} }
@ -927,7 +781,6 @@ pub async fn send_transaction_message_route(
Edu::Receipt(receipt) => { Edu::Receipt(receipt) => {
for (room_id, room_updates) in receipt.receipts { for (room_id, room_updates) in receipt.receipts {
for (user_id, user_updates) in room_updates.read { for (user_id, user_updates) in room_updates.read {
if user_id.server_name() == sender_servername {
if let Some((event_id, _)) = user_updates if let Some((event_id, _)) = user_updates
.event_ids .event_ids
.iter() .iter()
@ -967,10 +820,8 @@ pub async fn send_transaction_message_route(
} }
} }
} }
}
Edu::Typing(typing) => { Edu::Typing(typing) => {
if typing.user_id.server_name() == sender_servername if services()
&& services()
.rooms .rooms
.state_cache .state_cache
.is_joined(&typing.user_id, &typing.room_id)? .is_joined(&typing.user_id, &typing.room_id)?
@ -997,23 +848,23 @@ pub async fn send_transaction_message_route(
} }
} }
Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => { Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => {
if user_id.server_name() == sender_servername {
services().users.mark_device_key_update(&user_id)?; services().users.mark_device_key_update(&user_id)?;
} }
}
Edu::DirectToDevice(DirectDeviceContent { Edu::DirectToDevice(DirectDeviceContent {
sender, sender,
ev_type, ev_type,
message_id, message_id,
messages, messages,
}) => { }) => {
if sender.server_name() == sender_servername
// Check if this is a new transaction id // Check if this is a new transaction id
&& services() if services()
.transaction_ids .transaction_ids
.existing_txnid(&sender, None, &message_id)? .existing_txnid(&sender, None, &message_id)?
.is_none() .is_some()
{ {
continue;
}
for (target_user_id, map) in &messages { for (target_user_id, map) in &messages {
for (target_device_id_maybe, event) in map { for (target_device_id_maybe, event) in map {
match target_device_id_maybe { match target_device_id_maybe {
@ -1060,13 +911,14 @@ pub async fn send_transaction_message_route(
.transaction_ids .transaction_ids
.add_txnid(&sender, None, &message_id, &[])?; .add_txnid(&sender, None, &message_id, &[])?;
} }
}
Edu::SigningKeyUpdate(SigningKeyUpdateContent { Edu::SigningKeyUpdate(SigningKeyUpdateContent {
user_id, user_id,
master_key, master_key,
self_signing_key, self_signing_key,
}) => { }) => {
if user_id.server_name() == sender_servername { if user_id.server_name() != sender_servername {
continue;
}
if let Some(master_key) = master_key { if let Some(master_key) = master_key {
services().users.add_cross_signing_keys( services().users.add_cross_signing_keys(
&user_id, &user_id,
@ -1077,12 +929,16 @@ pub async fn send_transaction_message_route(
)?; )?;
} }
} }
}
Edu::_Custom(_) => {} Edu::_Custom(_) => {}
} }
} }
-    Ok(send_transaction_message::v1::Response { pdus: resolved_map })
+    Ok(send_transaction_message::v1::Response {
+        pdus: resolved_map
+            .into_iter()
+            .map(|(e, r)| (e, r.map_err(|e| e.sanitized_error())))
+            .collect(),
+    })
} }
/// # `GET /_matrix/federation/v1/event/{eventId}` /// # `GET /_matrix/federation/v1/event/{eventId}`
@ -1121,7 +977,7 @@ pub async fn get_event_route(
.server_in_room(sender_servername, room_id)? .server_in_room(sender_servername, room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Server is not in room", "Server is not in room",
)); ));
} }
@ -1132,7 +988,7 @@ pub async fn get_event_route(
&body.event_id, &body.event_id,
)? { )? {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Server is not allowed to see event.", "Server is not allowed to see event.",
)); ));
} }
@ -1164,7 +1020,7 @@ pub async fn get_backfill_route(
.server_in_room(sender_servername, &body.room_id)? .server_in_room(sender_servername, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Server is not in room.", "Server is not in room.",
)); ));
} }
@ -1234,7 +1090,7 @@ pub async fn get_missing_events_route(
.server_in_room(sender_servername, &body.room_id)? .server_in_room(sender_servername, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Server is not in room", "Server is not in room",
)); ));
} }
@ -1319,7 +1175,7 @@ pub async fn get_event_authorization_route(
.server_in_room(sender_servername, &body.room_id)? .server_in_room(sender_servername, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Server is not in room.", "Server is not in room.",
)); ));
} }
@ -1377,7 +1233,7 @@ pub async fn get_room_state_route(
.server_in_room(sender_servername, &body.room_id)? .server_in_room(sender_servername, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Server is not in room.", "Server is not in room.",
)); ));
} }
@ -1453,7 +1309,7 @@ pub async fn get_room_state_ids_route(
.server_in_room(sender_servername, &body.room_id)? .server_in_room(sender_servername, &body.room_id)?
{ {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Server is not in room.", "Server is not in room.",
)); ));
} }
@ -1585,7 +1441,6 @@ pub async fn create_join_event_template_route(
unsigned: None, unsigned: None,
state_key: Some(body.user_id.to_string()), state_key: Some(body.user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
&body.user_id, &body.user_id,
&body.room_id, &body.room_id,
@ -1825,7 +1680,7 @@ pub async fn create_invite_route(
let event_id = EventId::parse(format!( let event_id = EventId::parse(format!(
"${}", "${}",
ruma::signatures::reference_hash(&signed_event, &body.room_version) ruma::signatures::reference_hash(&signed_event, &body.room_version)
.expect("Event format validated when event was hashed") .expect("ruma can calculate reference hashes")
)) ))
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
@ -1894,90 +1749,6 @@ pub async fn create_invite_route(
}) })
} }
/// # `GET /_matrix/federation/v1/media/download/{serverName}/{mediaId}`
///
/// Load media from our server.
pub async fn get_content_route(
body: Ruma<get_content::v1::Request>,
) -> Result<get_content::v1::Response> {
let mxc = format!(
"mxc://{}/{}",
services().globals.server_name(),
body.media_id
);
if let Some(FileMeta {
content_disposition,
content_type,
file,
}) = services().media.get(mxc.clone()).await?
{
Ok(get_content::v1::Response::new(
ContentMetadata::new(),
FileOrLocation::File(Content {
file,
content_type,
content_disposition: Some(content_disposition),
}),
))
} else {
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
}
}
/// # `GET /_matrix/federation/v1/media/thumbnail/{serverName}/{mediaId}`
///
/// Load media thumbnail from our server or over federation.
pub async fn get_content_thumbnail_route(
body: Ruma<get_content_thumbnail::v1::Request>,
) -> Result<get_content_thumbnail::v1::Response> {
let mxc = format!(
"mxc://{}/{}",
services().globals.server_name(),
body.media_id
);
let Some(FileMeta {
file,
content_type,
content_disposition,
}) = services()
.media
.get_thumbnail(
mxc.clone(),
body.width
.try_into()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
body.height
.try_into()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
)
.await?
else {
return Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."));
};
services()
.media
.upload_thumbnail(
mxc,
content_type.as_deref(),
body.width.try_into().expect("all UInts are valid u32s"),
body.height.try_into().expect("all UInts are valid u32s"),
&file,
)
.await?;
Ok(get_content_thumbnail::v1::Response::new(
ContentMetadata::new(),
FileOrLocation::File(Content {
file,
content_type,
content_disposition: Some(content_disposition),
}),
))
}
/// # `GET /_matrix/federation/v1/user/devices/{userId}` /// # `GET /_matrix/federation/v1/user/devices/{userId}`
/// ///
/// Gets information on all devices of the user. /// Gets information on all devices of the user.
@ -2143,25 +1914,6 @@ pub async fn claim_keys_route(
}) })
} }
/// # `GET /_matrix/federation/v1/openid/userinfo`
///
/// Get information about the user that generated the OpenID token.
pub async fn get_openid_userinfo_route(
body: Ruma<get_openid_userinfo::v1::Request>,
) -> Result<get_openid_userinfo::v1::Response> {
Ok(get_openid_userinfo::v1::Response::new(
services()
.users
.find_from_openid_token(&body.access_token)?
.ok_or_else(|| {
Error::BadRequest(
ErrorKind::Unauthorized,
"OpenID token has expired or does not exist.",
)
})?,
))
}
/// # `GET /.well-known/matrix/server` /// # `GET /.well-known/matrix/server`
/// ///
/// Returns the federation server discovery information. /// Returns the federation server discovery information.


@ -47,8 +47,6 @@ pub struct Config {
#[serde(default = "false_fn")] #[serde(default = "false_fn")]
pub allow_registration: bool, pub allow_registration: bool,
pub registration_token: Option<String>, pub registration_token: Option<String>,
#[serde(default = "default_openid_token_ttl")]
pub openid_token_ttl: u64,
#[serde(default = "true_fn")] #[serde(default = "true_fn")]
pub allow_encryption: bool, pub allow_encryption: bool,
#[serde(default = "false_fn")] #[serde(default = "false_fn")]
@ -59,7 +57,7 @@ pub struct Config {
pub allow_unstable_room_versions: bool, pub allow_unstable_room_versions: bool,
#[serde(default = "default_default_room_version")] #[serde(default = "default_default_room_version")]
pub default_room_version: RoomVersionId, pub default_room_version: RoomVersionId,
#[serde(default, flatten)] #[serde(default)]
pub well_known: WellKnownConfig, pub well_known: WellKnownConfig,
#[serde(default = "false_fn")] #[serde(default = "false_fn")]
pub allow_jaeger: bool, pub allow_jaeger: bool,
@ -97,9 +95,7 @@ pub struct TlsConfig {
#[derive(Clone, Debug, Deserialize, Default)] #[derive(Clone, Debug, Deserialize, Default)]
pub struct WellKnownConfig { pub struct WellKnownConfig {
#[serde(rename = "well_known_client")]
pub client: Option<Url>, pub client: Option<Url>,
#[serde(rename = "well_known_server")]
pub server: Option<OwnedServerName>, pub server: Option<OwnedServerName>,
} }
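The left-hand variant flattens `WellKnownConfig` into the top-level config and renames its fields, so the URLs are read from plain `well_known_client` / `well_known_server` keys rather than from a nested `well_known` section. A rough sketch of that effect with simplified types (`String` instead of `Url`/`OwnedServerName`, JSON instead of the real config format; assumes `serde` with derive and `serde_json` are available):

```rust
use serde::Deserialize;

#[derive(Debug, Default, Deserialize)]
struct WellKnownConfig {
    #[serde(rename = "well_known_client")]
    client: Option<String>,
    #[serde(rename = "well_known_server")]
    server: Option<String>,
}

#[derive(Debug, Deserialize)]
struct Config {
    server_name: String,
    // `flatten` lifts the renamed fields to the top level of the config.
    #[serde(default, flatten)]
    well_known: WellKnownConfig,
}

fn main() {
    let config: Config = serde_json::from_str(
        r#"{ "server_name": "example.org", "well_known_client": "https://example.org" }"#,
    )
    .unwrap();
    assert_eq!(config.server_name, "example.org");
    assert_eq!(config.well_known.client.as_deref(), Some("https://example.org"));
    assert!(config.well_known.server.is_none());
}
```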
@ -306,10 +302,6 @@ fn default_turn_ttl() -> u64 {
60 * 60 * 24 60 * 60 * 24
} }
fn default_openid_token_ttl() -> u64 {
60 * 60
}
// I know, it's a great name // I know, it's a great name
pub fn default_default_room_version() -> RoomVersionId { pub fn default_default_room_version() -> RoomVersionId {
RoomVersionId::V10 RoomVersionId::V10


@ -1,19 +1,15 @@
-use std::collections::HashMap;
+use std::collections::{BTreeMap, HashMap};

 use async_trait::async_trait;
 use futures_util::{stream::FuturesUnordered, StreamExt};
 use lru_cache::LruCache;
 use ruma::{
-    api::federation::discovery::{OldVerifyKey, ServerSigningKeys},
+    api::federation::discovery::{ServerSigningKeys, VerifyKey},
     signatures::Ed25519KeyPair,
-    DeviceId, ServerName, UserId,
+    DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId,
 };

-use crate::{
-    database::KeyValueDatabase,
-    service::{self, globals::SigningKeys},
-    services, utils, Error, Result,
-};
+use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};

 pub const COUNTER: &[u8] = b"c";
 pub const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u";
@ -241,97 +237,64 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
self.global.remove(b"keypair") self.global.remove(b"keypair")
} }
fn add_signing_key_from_trusted_server( fn add_signing_key(
&self, &self,
origin: &ServerName, origin: &ServerName,
new_keys: ServerSigningKeys, new_keys: ServerSigningKeys,
) -> Result<SigningKeys> { ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
let prev_keys = self.server_signingkeys.get(origin.as_bytes())?; // Not atomic, but this is not critical
let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;
let mut keys = signingkeys
.and_then(|keys| serde_json::from_slice(&keys).ok())
.unwrap_or_else(|| {
// Just insert "now", it doesn't matter
ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now())
});
Ok(
if let Some(mut prev_keys) =
prev_keys.and_then(|keys| serde_json::from_slice::<ServerSigningKeys>(&keys).ok())
{
let ServerSigningKeys { let ServerSigningKeys {
verify_keys, verify_keys,
old_verify_keys, old_verify_keys,
.. ..
} = new_keys; } = new_keys;
prev_keys.verify_keys.extend(verify_keys); keys.verify_keys.extend(verify_keys);
prev_keys.old_verify_keys.extend(old_verify_keys); keys.old_verify_keys.extend(old_verify_keys);
prev_keys.valid_until_ts = new_keys.valid_until_ts;
self.server_signingkeys.insert( self.server_signingkeys.insert(
origin.as_bytes(), origin.as_bytes(),
&serde_json::to_vec(&prev_keys).expect("serversigningkeys can be serialized"), &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"),
)?; )?;
prev_keys.into() let mut tree = keys.verify_keys;
} else { tree.extend(
self.server_signingkeys.insert( keys.old_verify_keys
origin.as_bytes(), .into_iter()
&serde_json::to_vec(&new_keys).expect("serversigningkeys can be serialized"), .map(|old| (old.0, VerifyKey::new(old.1.key))),
)?; );
new_keys.into() Ok(tree)
},
)
}
fn add_signing_key_from_origin(
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
) -> Result<SigningKeys> {
let prev_keys = self.server_signingkeys.get(origin.as_bytes())?;
Ok(
if let Some(mut prev_keys) =
prev_keys.and_then(|keys| serde_json::from_slice::<ServerSigningKeys>(&keys).ok())
{
let ServerSigningKeys {
verify_keys,
old_verify_keys,
..
} = new_keys;
// Moving `verify_keys` no longer present to `old_verify_keys`
for (key_id, key) in prev_keys.verify_keys {
if !verify_keys.contains_key(&key_id) {
prev_keys
.old_verify_keys
.insert(key_id, OldVerifyKey::new(prev_keys.valid_until_ts, key.key));
}
}
prev_keys.verify_keys = verify_keys;
prev_keys.old_verify_keys.extend(old_verify_keys);
prev_keys.valid_until_ts = new_keys.valid_until_ts;
self.server_signingkeys.insert(
origin.as_bytes(),
&serde_json::to_vec(&prev_keys).expect("serversigningkeys can be serialized"),
)?;
prev_keys.into()
} else {
self.server_signingkeys.insert(
origin.as_bytes(),
&serde_json::to_vec(&new_keys).expect("serversigningkeys can be serialized"),
)?;
new_keys.into()
},
)
} }
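The comment in `add_signing_key_from_origin` above describes the rotation rule: verify keys the origin no longer advertises are demoted into `old_verify_keys`, stamped with the previous `valid_until_ts`, while the freshly fetched keys become the current set. A simplified sketch of that merge with plain maps standing in for the ruma key types (struct and function names illustrative):

```rust
use std::collections::BTreeMap;

// Plain stand-ins for ServerSigningKeys / VerifyKey / OldVerifyKey.
struct Keys {
    verify_keys: BTreeMap<String, String>,
    old_verify_keys: BTreeMap<String, (u64, String)>,
    valid_until_ts: u64,
}

fn merge_from_origin(mut prev: Keys, new: Keys) -> Keys {
    // Demote keys that disappeared from the freshly fetched set, keeping the
    // timestamp up to which they were known to be valid.
    for (key_id, key) in std::mem::take(&mut prev.verify_keys) {
        if !new.verify_keys.contains_key(&key_id) {
            prev.old_verify_keys.insert(key_id, (prev.valid_until_ts, key));
        }
    }
    // Adopt the new current keys, keep any old keys the origin reports, and
    // advance the expiry to the newly advertised one.
    prev.verify_keys = new.verify_keys;
    prev.old_verify_keys.extend(new.old_verify_keys);
    prev.valid_until_ts = new.valid_until_ts;
    prev
}

fn main() {
    let prev = Keys {
        verify_keys: BTreeMap::from([("ed25519:a".to_string(), "OLD".to_string())]),
        old_verify_keys: BTreeMap::new(),
        valid_until_ts: 1,
    };
    let new = Keys {
        verify_keys: BTreeMap::from([("ed25519:b".to_string(), "NEW".to_string())]),
        old_verify_keys: BTreeMap::new(),
        valid_until_ts: 2,
    };
    let merged = merge_from_origin(prev, new);
    assert!(merged.old_verify_keys.contains_key("ed25519:a"));
    assert_eq!(merged.valid_until_ts, 2);
}
```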
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>> { fn signing_keys_for(
&self,
origin: &ServerName,
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
let signingkeys = self let signingkeys = self
.server_signingkeys .server_signingkeys
.get(origin.as_bytes())? .get(origin.as_bytes())?
.and_then(|bytes| serde_json::from_slice::<SigningKeys>(&bytes).ok()); .and_then(|bytes| serde_json::from_slice(&bytes).ok())
.map(|keys: ServerSigningKeys| {
let mut tree = keys.verify_keys;
tree.extend(
keys.old_verify_keys
.into_iter()
.map(|old| (old.0, VerifyKey::new(old.1.key))),
);
tree
})
.unwrap_or_else(BTreeMap::new);
Ok(signingkeys) Ok(signingkeys)
} }


@ -1,4 +1,4 @@
use ruma::{api::client::error::ErrorKind, http_headers::ContentDisposition}; use ruma::api::client::error::ErrorKind;
use crate::{database::KeyValueDatabase, service, utils, Error, Result}; use crate::{database::KeyValueDatabase, service, utils, Error, Result};
@ -8,7 +8,7 @@ impl service::media::Data for KeyValueDatabase {
mxc: String, mxc: String,
width: u32, width: u32,
height: u32, height: u32,
content_disposition: &ContentDisposition, content_disposition: Option<&str>,
content_type: Option<&str>, content_type: Option<&str>,
) -> Result<Vec<u8>> { ) -> Result<Vec<u8>> {
let mut key = mxc.as_bytes().to_vec(); let mut key = mxc.as_bytes().to_vec();
@ -16,7 +16,12 @@ impl service::media::Data for KeyValueDatabase {
key.extend_from_slice(&width.to_be_bytes()); key.extend_from_slice(&width.to_be_bytes());
key.extend_from_slice(&height.to_be_bytes()); key.extend_from_slice(&height.to_be_bytes());
key.push(0xff); key.push(0xff);
key.extend_from_slice(content_disposition.to_string().as_bytes()); key.extend_from_slice(
content_disposition
.as_ref()
.map(|f| f.as_bytes())
.unwrap_or_default(),
);
key.push(0xff); key.push(0xff);
key.extend_from_slice( key.extend_from_slice(
content_type content_type
@ -35,7 +40,7 @@ impl service::media::Data for KeyValueDatabase {
mxc: String, mxc: String,
width: u32, width: u32,
height: u32, height: u32,
) -> Result<(ContentDisposition, Option<String>, Vec<u8>)> { ) -> Result<(Option<String>, Option<String>, Vec<u8>)> {
let mut prefix = mxc.as_bytes().to_vec(); let mut prefix = mxc.as_bytes().to_vec();
prefix.push(0xff); prefix.push(0xff);
prefix.extend_from_slice(&width.to_be_bytes()); prefix.extend_from_slice(&width.to_be_bytes());
@ -63,9 +68,15 @@ impl service::media::Data for KeyValueDatabase {
.next() .next()
.ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;
let content_disposition = content_disposition_bytes.try_into().unwrap_or_else(|_| { let content_disposition = if content_disposition_bytes.is_empty() {
ContentDisposition::new(ruma::http_headers::ContentDispositionType::Inline) None
}); } else {
Some(
utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
Error::bad_database("Content Disposition in mediaid_file is invalid unicode.")
})?,
)
};
Ok((content_disposition, content_type, key)) Ok((content_disposition, content_type, key))
} }
} }
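Both variants above build the same overall `mediaid_file` key shape for thumbnails: the MXC URI, the requested dimensions as big-endian `u32`s, the content disposition and the content type, separated by `0xff` bytes. A standalone sketch of that layout (the helper name and example values are made up):

```rust
// Builds a thumbnail key in the byte layout used above:
// mxc | 0xff | width (BE) | height (BE) | 0xff | disposition | 0xff | content type
fn thumbnail_key(
    mxc: &str,
    width: u32,
    height: u32,
    disposition: &str,
    content_type: Option<&str>,
) -> Vec<u8> {
    let mut key = mxc.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(&width.to_be_bytes());
    key.extend_from_slice(&height.to_be_bytes());
    key.push(0xff);
    key.extend_from_slice(disposition.as_bytes());
    key.push(0xff);
    key.extend_from_slice(content_type.map(|t| t.as_bytes()).unwrap_or_default());
    key
}

fn main() {
    let key = thumbnail_key("mxc://example.org/abc", 96, 96, "inline", Some("image/png"));
    // The two big-endian u32 dimensions occupy 8 bytes between the separators.
    assert!(key.len() > "mxc://example.org/abc".len() + 8);
}
```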


@ -1,15 +1,9 @@
use ruma::{ use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId,
UserId,
};
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
impl service::rooms::alias::Data for KeyValueDatabase { impl service::rooms::alias::Data for KeyValueDatabase {
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> { fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> {
// Comes first as we don't want a stuck alias
self.alias_userid
.insert(alias.alias().as_bytes(), user_id.as_bytes())?;
self.alias_roomid self.alias_roomid
.insert(alias.alias().as_bytes(), room_id.as_bytes())?; .insert(alias.alias().as_bytes(), room_id.as_bytes())?;
let mut aliasid = room_id.as_bytes().to_vec(); let mut aliasid = room_id.as_bytes().to_vec();
@ -28,13 +22,13 @@ impl service::rooms::alias::Data for KeyValueDatabase {
self.aliasid_alias.remove(&key)?; self.aliasid_alias.remove(&key)?;
} }
self.alias_roomid.remove(alias.alias().as_bytes())?; self.alias_roomid.remove(alias.alias().as_bytes())?;
self.alias_userid.remove(alias.alias().as_bytes())
} else { } else {
Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::NotFound, ErrorKind::NotFound,
"Alias does not exist.", "Alias does not exist.",
)) ));
} }
Ok(())
} }
fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedRoomId>> { fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedRoomId>> {
@ -63,16 +57,4 @@ impl service::rooms::alias::Data for KeyValueDatabase {
.map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias."))
})) }))
} }
fn who_created_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedUserId>> {
self.alias_userid
.get(alias.alias().as_bytes())?
.map(|bytes| {
UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("User ID in alias_userid is invalid unicode.")
})?)
.map_err(|_| Error::bad_database("User ID in alias_roomid is invalid."))
})
.transpose()
}
} }


@ -2,20 +2,14 @@ use ruma::RoomId;
use crate::{database::KeyValueDatabase, service, services, utils, Result}; use crate::{database::KeyValueDatabase, service, services, utils, Result};
/// Splits a string into tokens used as keys in the search inverted index impl service::rooms::search::Data for KeyValueDatabase {
/// fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
/// This may be used to tokenize both message bodies (for indexing) or search let mut batch = message_body
/// queries (for querying). .split_terminator(|c: char| !c.is_alphanumeric())
fn tokenize(body: &str) -> impl Iterator<Item = String> + '_ {
body.split_terminator(|c: char| !c.is_alphanumeric())
.filter(|s| !s.is_empty()) .filter(|s| !s.is_empty())
.filter(|word| word.len() <= 50) .filter(|word| word.len() <= 50)
.map(str::to_lowercase) .map(str::to_lowercase)
} .map(|word| {
impl service::rooms::search::Data for KeyValueDatabase {
fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
let mut batch = tokenize(message_body).map(|word| {
let mut key = shortroomid.to_be_bytes().to_vec(); let mut key = shortroomid.to_be_bytes().to_vec();
key.extend_from_slice(word.as_bytes()); key.extend_from_slice(word.as_bytes());
key.push(0xff); key.push(0xff);
@ -26,22 +20,6 @@ impl service::rooms::search::Data for KeyValueDatabase {
self.tokenids.insert_batch(&mut batch) self.tokenids.insert_batch(&mut batch)
} }
fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
let batch = tokenize(message_body).map(|word| {
let mut key = shortroomid.to_be_bytes().to_vec();
key.extend_from_slice(word.as_bytes());
key.push(0xFF);
key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here
key
});
for token in batch {
self.tokenids.remove(&token)?;
}
Ok(())
}
fn search_pdus<'a>( fn search_pdus<'a>(
&'a self, &'a self,
room_id: &RoomId, room_id: &RoomId,
@ -55,7 +33,11 @@ impl service::rooms::search::Data for KeyValueDatabase {
.to_be_bytes() .to_be_bytes()
.to_vec(); .to_vec();
let words: Vec<_> = tokenize(search_string).collect(); let words: Vec<_> = search_string
.split_terminator(|c: char| !c.is_alphanumeric())
.filter(|s| !s.is_empty())
.map(str::to_lowercase)
.collect();
let iterators = words.clone().into_iter().map(move |word| { let iterators = words.clone().into_iter().map(move |word| {
let mut prefix2 = prefix.clone(); let mut prefix2 = prefix.clone();
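The left-hand side factors the tokenizer into a shared `tokenize` helper (used for both indexing message bodies and parsing search queries), while the right-hand side inlines the same steps. The helper in runnable form:

```rust
// Split on non-alphanumeric characters, drop empty and overly long tokens,
// and lowercase everything so that indexing and querying agree.
fn tokenize(body: &str) -> impl Iterator<Item = String> + '_ {
    body.split_terminator(|c: char| !c.is_alphanumeric())
        .filter(|s| !s.is_empty())
        .filter(|word| word.len() <= 50)
        .map(str::to_lowercase)
}

fn main() {
    let tokens: Vec<_> = tokenize("Hello, Matrix federation!").collect();
    assert_eq!(tokens, vec!["hello", "matrix", "federation"]);
}
```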


@ -80,7 +80,7 @@ impl service::uiaa::Data for KeyValueDatabase {
.userdevicesessionid_uiaainfo .userdevicesessionid_uiaainfo
.get(&userdevicesessionid)? .get(&userdevicesessionid)?
.ok_or(Error::BadRequest( .ok_or(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"UIAA session does not exist.", "UIAA session does not exist.",
))?, ))?,
) )


@ -11,7 +11,6 @@ use ruma::{
use tracing::warn; use tracing::warn;
use crate::{ use crate::{
api::client_server::TOKEN_LENGTH,
database::KeyValueDatabase, database::KeyValueDatabase,
service::{self, users::clean_signatures}, service::{self, users::clean_signatures},
services, utils, Error, Result, services, utils, Error, Result,
@ -944,52 +943,6 @@ impl service::users::Data for KeyValueDatabase {
Ok(None) Ok(None)
} }
} }
// Creates an OpenID token, which can be used to prove that a user has access to an account (primarily for integrations)
fn create_openid_token(&self, user_id: &UserId) -> Result<(String, u64)> {
let token = utils::random_string(TOKEN_LENGTH);
let expires_in = services().globals.config.openid_token_ttl;
let expires_at = utils::millis_since_unix_epoch()
.checked_add(expires_in * 1000)
.expect("time is valid");
let mut value = expires_at.to_be_bytes().to_vec();
value.extend_from_slice(user_id.as_bytes());
self.openidtoken_expiresatuserid
.insert(token.as_bytes(), value.as_slice())?;
Ok((token, expires_in))
}
/// Find out which user an OpenID access token belongs to.
fn find_from_openid_token(&self, token: &str) -> Result<Option<OwnedUserId>> {
let Some(value) = self.openidtoken_expiresatuserid.get(token.as_bytes())? else {
return Ok(None);
};
let (expires_at_bytes, user_bytes) = value.split_at(0u64.to_be_bytes().len());
let expires_at = u64::from_be_bytes(
expires_at_bytes
.try_into()
.map_err(|_| Error::bad_database("expires_at in openid_userid is invalid u64."))?,
);
if expires_at < utils::millis_since_unix_epoch() {
self.openidtoken_expiresatuserid.remove(token.as_bytes())?;
return Ok(None);
}
Some(
UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| {
Error::bad_database("User ID in openid_userid is invalid unicode.")
})?)
.map_err(|_| Error::bad_database("User ID in openid_userid is invalid.")),
)
.transpose()
}
} }
impl KeyValueDatabase {} impl KeyValueDatabase {}
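The removed OpenID token storage above packs the database value as an 8-byte big-endian expiry (milliseconds since the Unix epoch) followed by the raw user ID bytes, and decodes it again with `split_at`. A self-contained sketch of that encoding (helper names are illustrative):

```rust
use std::time::{SystemTime, UNIX_EPOCH};

// Value layout of openidtoken_expiresatuserid: expires_at (u64, BE) + user id bytes.
fn encode(expires_at: u64, user_id: &str) -> Vec<u8> {
    let mut value = expires_at.to_be_bytes().to_vec();
    value.extend_from_slice(user_id.as_bytes());
    value
}

fn decode(value: &[u8]) -> Option<(u64, String)> {
    // The expiry always occupies the first size_of::<u64>() bytes.
    let (expires_at_bytes, user_bytes) = value.split_at(0u64.to_be_bytes().len());
    let expires_at = u64::from_be_bytes(expires_at_bytes.try_into().ok()?);
    Some((expires_at, String::from_utf8(user_bytes.to_vec()).ok()?))
}

fn main() {
    let now_ms = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_millis() as u64;
    let value = encode(now_ms + 3_600_000, "@alice:example.org");
    let (expires_at, user) = decode(&value).unwrap();
    assert!(expires_at > now_ms);
    assert_eq!(user, "@alice:example.org");
}
```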


@ -6,7 +6,6 @@ use crate::{
SERVICES, SERVICES,
}; };
use abstraction::{KeyValueDatabaseEngine, KvTree}; use abstraction::{KeyValueDatabaseEngine, KvTree};
use base64::{engine::general_purpose, Engine};
use directories::ProjectDirs; use directories::ProjectDirs;
use lru_cache::LruCache; use lru_cache::LruCache;
@ -58,7 +57,6 @@ pub struct KeyValueDatabase {
pub(super) userid_masterkeyid: Arc<dyn KvTree>, pub(super) userid_masterkeyid: Arc<dyn KvTree>,
pub(super) userid_selfsigningkeyid: Arc<dyn KvTree>, pub(super) userid_selfsigningkeyid: Arc<dyn KvTree>,
pub(super) userid_usersigningkeyid: Arc<dyn KvTree>, pub(super) userid_usersigningkeyid: Arc<dyn KvTree>,
pub(super) openidtoken_expiresatuserid: Arc<dyn KvTree>, // expiresatuserid = expiresat + userid
pub(super) userfilterid_filter: Arc<dyn KvTree>, // UserFilterId = UserId + FilterId pub(super) userfilterid_filter: Arc<dyn KvTree>, // UserFilterId = UserId + FilterId
@ -102,8 +100,6 @@ pub struct KeyValueDatabase {
pub(super) userroomid_leftstate: Arc<dyn KvTree>, pub(super) userroomid_leftstate: Arc<dyn KvTree>,
pub(super) roomuserid_leftcount: Arc<dyn KvTree>, pub(super) roomuserid_leftcount: Arc<dyn KvTree>,
pub(super) alias_userid: Arc<dyn KvTree>, // User who created the alias
pub(super) disabledroomids: Arc<dyn KvTree>, // Rooms where incoming federation handling is disabled pub(super) disabledroomids: Arc<dyn KvTree>, // Rooms where incoming federation handling is disabled
pub(super) lazyloadedids: Arc<dyn KvTree>, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId pub(super) lazyloadedids: Arc<dyn KvTree>, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId
@ -294,7 +290,6 @@ impl KeyValueDatabase {
userid_masterkeyid: builder.open_tree("userid_masterkeyid")?, userid_masterkeyid: builder.open_tree("userid_masterkeyid")?,
userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?, userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?,
userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?, userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?,
openidtoken_expiresatuserid: builder.open_tree("openidtoken_expiresatuserid")?,
userfilterid_filter: builder.open_tree("userfilterid_filter")?, userfilterid_filter: builder.open_tree("userfilterid_filter")?,
todeviceid_events: builder.open_tree("todeviceid_events")?, todeviceid_events: builder.open_tree("todeviceid_events")?,
@ -330,8 +325,6 @@ impl KeyValueDatabase {
userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, userroomid_leftstate: builder.open_tree("userroomid_leftstate")?,
roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?,
alias_userid: builder.open_tree("alias_userid")?,
disabledroomids: builder.open_tree("disabledroomids")?, disabledroomids: builder.open_tree("disabledroomids")?,
lazyloadedids: builder.open_tree("lazyloadedids")?, lazyloadedids: builder.open_tree("lazyloadedids")?,
@ -411,9 +404,11 @@ impl KeyValueDatabase {
// Matrix resource ownership is based on the server name; changing it // Matrix resource ownership is based on the server name; changing it
// requires recreating the database from scratch. // requires recreating the database from scratch.
if services().users.count()? > 0 { if services().users.count()? > 0 {
-        let conduit_user = services().globals.server_user();
+        let conduit_user =
+            UserId::parse_with_server_name("conduit", services().globals.server_name())
+                .expect("@conduit:server_name is valid");

-        if !services().users.exists(conduit_user)? {
+        if !services().users.exists(&conduit_user)? {
error!( error!(
"The {} server user does not exist, and the database is not new.", "The {} server user does not exist, and the database is not new.",
conduit_user conduit_user
@ -425,7 +420,7 @@ impl KeyValueDatabase {
} }
// If the database has any data, perform data migrations before starting // If the database has any data, perform data migrations before starting
-        let latest_database_version = 16;
+        let latest_database_version = 13;
if services().users.count()? > 0 { if services().users.count()? > 0 {
// MIGRATIONS // MIGRATIONS
@ -942,86 +937,6 @@ impl KeyValueDatabase {
warn!("Migration: 12 -> 13 finished"); warn!("Migration: 12 -> 13 finished");
} }
if services().globals.database_version()? < 16 {
// Reconstruct all media using the filesystem
db.mediaid_file.clear().unwrap();
for file in fs::read_dir(services().globals.get_media_folder()).unwrap() {
let file = file.unwrap();
let mediaid = general_purpose::URL_SAFE_NO_PAD
.decode(file.file_name().into_string().unwrap())
.unwrap();
let mut parts = mediaid.rsplit(|&b| b == 0xff);
let mut removed_bytes = 0;
let content_type_bytes = parts.next().unwrap();
removed_bytes += content_type_bytes.len() + 1;
let content_disposition_bytes = parts
.next()
.ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;
removed_bytes += content_disposition_bytes.len();
let mut content_disposition =
utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
Error::bad_database("Content Disposition in mediaid_file is invalid.")
})?;
if content_disposition.contains("filename=")
&& !content_disposition.contains("filename=\"")
{
println!("{}", &content_disposition);
content_disposition =
content_disposition.replacen("filename=", "filename=\"", 1);
content_disposition.push('"');
println!("{}", &content_disposition);
let mut new_key = mediaid[..(mediaid.len() - removed_bytes)].to_vec();
assert!(*new_key.last().unwrap() == 0xff);
let mut shorter_key = new_key.clone();
shorter_key.extend(
ruma::http_headers::ContentDisposition::new(
ruma::http_headers::ContentDispositionType::Inline,
)
.to_string()
.as_bytes(),
);
shorter_key.push(0xff);
shorter_key.extend_from_slice(content_type_bytes);
new_key.extend_from_slice(content_disposition.to_string().as_bytes());
new_key.push(0xff);
new_key.extend_from_slice(content_type_bytes);
// Some file names are too long. Ignore those.
match fs::rename(
services().globals.get_media_file(&mediaid),
services().globals.get_media_file(&new_key),
) {
Ok(_) => {
db.mediaid_file.insert(&new_key, &[])?;
}
Err(_) => {
fs::rename(
services().globals.get_media_file(&mediaid),
services().globals.get_media_file(&shorter_key),
)
.unwrap();
db.mediaid_file.insert(&shorter_key, &[])?;
}
}
} else {
db.mediaid_file.insert(&mediaid, &[])?;
}
}
services().globals.bump_database_version(16)?;
warn!("Migration: 13 -> 16 finished");
}
assert_eq!( assert_eq!(
services().globals.database_version().unwrap(), services().globals.database_version().unwrap(),
latest_database_version latest_database_version
@ -1187,21 +1102,22 @@ impl KeyValueDatabase {
/// Sets the emergency password and push rules for the @conduit account in case emergency password is set /// Sets the emergency password and push rules for the @conduit account in case emergency password is set
fn set_emergency_access() -> Result<bool> { fn set_emergency_access() -> Result<bool> {
let conduit_user = services().globals.server_user(); let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("@conduit:server_name is a valid UserId");
services().users.set_password( services().users.set_password(
conduit_user, &conduit_user,
services().globals.emergency_password().as_deref(), services().globals.emergency_password().as_deref(),
)?; )?;
let (ruleset, res) = match services().globals.emergency_password() { let (ruleset, res) = match services().globals.emergency_password() {
Some(_) => (Ruleset::server_default(conduit_user), Ok(true)), Some(_) => (Ruleset::server_default(&conduit_user), Ok(true)),
None => (Ruleset::new(), Ok(false)), None => (Ruleset::new(), Ok(false)),
}; };
services().account_data.update( services().account_data.update(
None, None,
conduit_user, &conduit_user,
GlobalAccountDataEventType::PushRules.to_string().into(), GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(&GlobalAccountDataEvent { &serde_json::to_value(&GlobalAccountDataEvent {
content: PushRulesEventContent { global: ruleset }, content: PushRulesEventContent { global: ruleset },


@ -1,10 +1,8 @@
use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration}; use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration};
use axum::{ use axum::{
body::Body,
extract::{DefaultBodyLimit, FromRequestParts, MatchedPath}, extract::{DefaultBodyLimit, FromRequestParts, MatchedPath},
middleware::map_response, response::IntoResponse,
response::{IntoResponse, Response},
routing::{any, get, on, MethodFilter}, routing::{any, get, on, MethodFilter},
Router, Router,
}; };
@ -15,7 +13,7 @@ use figment::{
Figment, Figment,
}; };
use http::{ use http::{
header::{self, HeaderName, CONTENT_SECURITY_POLICY}, header::{self, HeaderName},
Method, StatusCode, Uri, Method, StatusCode, Uri,
}; };
use ruma::api::{ use ruma::api::{
@ -57,7 +55,7 @@ async fn main() {
)) ))
.nested(), .nested(),
) )
.merge(Env::prefixed("CONDUIT_").global().split("__")); .merge(Env::prefixed("CONDUIT_").global());
let config = match raw_config.extract::<Config>() { let config = match raw_config.extract::<Config>() {
Ok(s) => s, Ok(s) => s,
@ -70,13 +68,11 @@ async fn main() {
config.warn_deprecated(); config.warn_deprecated();
if config.allow_jaeger { if config.allow_jaeger {
opentelemetry::global::set_text_map_propagator( opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new());
opentelemetry_jaeger_propagator::Propagator::new(), let tracer = opentelemetry_jaeger::new_agent_pipeline()
); .with_auto_split_batch(true)
let tracer = opentelemetry_otlp::new_pipeline() .with_service_name("conduit")
.tracing() .install_batch(opentelemetry::runtime::Tokio)
.with_exporter(opentelemetry_otlp::new_exporter().tonic())
.install_batch(opentelemetry_sdk::runtime::Tokio)
.unwrap(); .unwrap();
let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
@ -145,13 +141,6 @@ async fn main() {
} }
} }
/// Adds additional headers to prevent any potential XSS attacks via the media repo
async fn set_csp_header(response: Response) -> impl IntoResponse {
(
[(CONTENT_SECURITY_POLICY, "sandbox; default-src 'none'; script-src 'none'; plugin-types application/pdf; style-src 'unsafe-inline'; object-src 'self';")], response
)
}
async fn run_server() -> io::Result<()> { async fn run_server() -> io::Result<()> {
let config = &services().globals.config; let config = &services().globals.config;
let addr = SocketAddr::from((config.address, config.port)); let addr = SocketAddr::from((config.address, config.port));
@ -192,7 +181,6 @@ async fn run_server() -> io::Result<()> {
]) ])
.max_age(Duration::from_secs(86400)), .max_age(Duration::from_secs(86400)),
) )
.layer(map_response(set_csp_header))
.layer(DefaultBodyLimit::max( .layer(DefaultBodyLimit::max(
config config
.max_request_size .max_request_size
@ -228,10 +216,10 @@ async fn run_server() -> io::Result<()> {
Ok(()) Ok(())
} }
async fn spawn_task( async fn spawn_task<B: Send + 'static>(
req: http::Request<Body>, req: http::Request<B>,
next: axum::middleware::Next, next: axum::middleware::Next<B>,
) -> std::result::Result<Response, StatusCode> { ) -> std::result::Result<axum::response::Response, StatusCode> {
if services().globals.shutdown.load(atomic::Ordering::Relaxed) { if services().globals.shutdown.load(atomic::Ordering::Relaxed) {
return Err(StatusCode::SERVICE_UNAVAILABLE); return Err(StatusCode::SERVICE_UNAVAILABLE);
} }
@ -240,10 +228,10 @@ async fn spawn_task(
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
} }
async fn unrecognized_method( async fn unrecognized_method<B: Send>(
req: http::Request<Body>, req: http::Request<B>,
next: axum::middleware::Next, next: axum::middleware::Next<B>,
) -> std::result::Result<Response, StatusCode> { ) -> std::result::Result<axum::response::Response, StatusCode> {
let method = req.method().clone(); let method = req.method().clone();
let uri = req.uri().clone(); let uri = req.uri().clone();
let inner = next.run(req).await; let inner = next.run(req).await;
@ -289,7 +277,6 @@ fn routes(config: &Config) -> Router {
.ruma_route(client_server::get_room_aliases_route) .ruma_route(client_server::get_room_aliases_route)
.ruma_route(client_server::get_filter_route) .ruma_route(client_server::get_filter_route)
.ruma_route(client_server::create_filter_route) .ruma_route(client_server::create_filter_route)
.ruma_route(client_server::create_openid_token_route)
.ruma_route(client_server::set_global_account_data_route) .ruma_route(client_server::set_global_account_data_route)
.ruma_route(client_server::set_room_account_data_route) .ruma_route(client_server::set_room_account_data_route)
.ruma_route(client_server::get_global_account_data_route) .ruma_route(client_server::get_global_account_data_route)
@ -379,14 +366,10 @@ fn routes(config: &Config) -> Router {
.ruma_route(client_server::turn_server_route) .ruma_route(client_server::turn_server_route)
.ruma_route(client_server::send_event_to_device_route) .ruma_route(client_server::send_event_to_device_route)
.ruma_route(client_server::get_media_config_route) .ruma_route(client_server::get_media_config_route)
.ruma_route(client_server::get_media_config_auth_route)
.ruma_route(client_server::create_content_route) .ruma_route(client_server::create_content_route)
.ruma_route(client_server::get_content_route) .ruma_route(client_server::get_content_route)
.ruma_route(client_server::get_content_auth_route)
.ruma_route(client_server::get_content_as_filename_route) .ruma_route(client_server::get_content_as_filename_route)
.ruma_route(client_server::get_content_as_filename_auth_route)
.ruma_route(client_server::get_content_thumbnail_route) .ruma_route(client_server::get_content_thumbnail_route)
.ruma_route(client_server::get_content_thumbnail_auth_route)
.ruma_route(client_server::get_devices_route) .ruma_route(client_server::get_devices_route)
.ruma_route(client_server::get_device_route) .ruma_route(client_server::get_device_route)
.ruma_route(client_server::update_device_route) .ruma_route(client_server::update_device_route)
@ -444,13 +427,10 @@ fn routes(config: &Config) -> Router {
.ruma_route(server_server::create_join_event_v2_route) .ruma_route(server_server::create_join_event_v2_route)
.ruma_route(server_server::create_invite_route) .ruma_route(server_server::create_invite_route)
.ruma_route(server_server::get_devices_route) .ruma_route(server_server::get_devices_route)
.ruma_route(server_server::get_content_route)
.ruma_route(server_server::get_content_thumbnail_route)
.ruma_route(server_server::get_room_information_route) .ruma_route(server_server::get_room_information_route)
.ruma_route(server_server::get_profile_information_route) .ruma_route(server_server::get_profile_information_route)
.ruma_route(server_server::get_keys_route) .ruma_route(server_server::get_keys_route)
.ruma_route(server_server::claim_keys_route) .ruma_route(server_server::claim_keys_route)
.ruma_route(server_server::get_openid_userinfo_route)
.ruma_route(server_server::well_known_server) .ruma_route(server_server::well_known_server)
} else { } else {
router router


@ -1,4 +1,9 @@
use std::{collections::BTreeMap, convert::TryFrom, sync::Arc, time::Instant}; use std::{
collections::BTreeMap,
convert::{TryFrom, TryInto},
sync::Arc,
time::Instant,
};
use clap::Parser; use clap::Parser;
use regex::Regex; use regex::Regex;
@ -19,8 +24,7 @@ use ruma::{
}, },
TimelineEventType, TimelineEventType,
}, },
EventId, MilliSecondsSinceUnixEpoch, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, EventId, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
RoomVersionId, ServerName, UserId,
}; };
use serde_json::value::to_raw_value; use serde_json::value::to_raw_value;
use tokio::sync::{mpsc, Mutex, RwLock}; use tokio::sync::{mpsc, Mutex, RwLock};
@ -73,12 +77,6 @@ enum AdminCommand {
/// List all rooms we are currently handling an incoming pdu from /// List all rooms we are currently handling an incoming pdu from
IncomingFederation, IncomingFederation,
/// Removes an alias from the server
RemoveAlias {
/// The alias to be removed
alias: Box<RoomAliasId>,
},
/// Deactivate a user /// Deactivate a user
/// ///
/// User will not be removed from all rooms by default. /// User will not be removed from all rooms by default.
@ -162,23 +160,24 @@ enum AdminCommand {
password: Option<String>, password: Option<String>,
}, },
/// Temporarily toggle user registration by passing either true or false as an argument, does not persist between restarts
AllowRegistration { status: Option<bool> },
/// Disables incoming federation handling for a room. /// Disables incoming federation handling for a room.
DisableRoom { room_id: Box<RoomId> }, DisableRoom { room_id: Box<RoomId> },
/// Enables incoming federation handling for a room again. /// Enables incoming federation handling for a room again.
EnableRoom { room_id: Box<RoomId> }, EnableRoom { room_id: Box<RoomId> },
/// Sign a json object using Conduit's signing keys, putting the json in a codeblock /// Verify json signatures
/// [commandbody]()
/// # ```
/// # json here
/// # ```
SignJson, SignJson,
/// Verify json signatures, putting the json in a codeblock /// Verify json signatures
/// [commandbody]()
/// # ```
/// # json here
/// # ```
VerifyJson, VerifyJson,
/// Parses a JSON object as an event, then creates a hash and signs it. Takes a room
/// version as an argument; the JSON goes in a code block.
HashAndSignEvent { room_version_id: RoomVersionId },
} }
#[derive(Debug)] #[derive(Debug)]
@ -213,7 +212,8 @@ impl Service {
// TODO: Use futures when we have long admin commands // TODO: Use futures when we have long admin commands
//let mut futures = FuturesUnordered::new(); //let mut futures = FuturesUnordered::new();
let conduit_user = services().globals.server_user(); let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name()))
.expect("@conduit:server_name is valid");
if let Ok(Some(conduit_room)) = services().admin.get_admin_room() { if let Ok(Some(conduit_room)) = services().admin.get_admin_room() {
loop { loop {
@ -246,9 +246,8 @@ impl Service {
unsigned: None, unsigned: None,
state_key: None, state_key: None,
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, &conduit_user,
&conduit_room, &conduit_room,
&state_lock, &state_lock,
) )
@ -661,24 +660,6 @@ impl Service {
"Created user with user_id: {user_id} and password: {password}" "Created user with user_id: {user_id} and password: {password}"
)) ))
} }
AdminCommand::AllowRegistration { status } => {
if let Some(status) = status {
services().globals.set_registration(status).await;
RoomMessageEventContent::text_plain(if status {
"Registration is now enabled"
} else {
"Registration is now disabled"
})
} else {
RoomMessageEventContent::text_plain(
if services().globals.allow_registration().await {
"Registration is currently enabled"
} else {
"Registration is currently disabled"
},
)
}
}
AdminCommand::DisableRoom { room_id } => { AdminCommand::DisableRoom { room_id } => {
services().rooms.metadata.disable_room(&room_id, true)?; services().rooms.metadata.disable_room(&room_id, true)?;
RoomMessageEventContent::text_plain("Room disabled.") RoomMessageEventContent::text_plain("Room disabled.")
@ -860,46 +841,15 @@ impl Service {
services() services()
.rooms .rooms
.event_handler .event_handler
// Generally we shouldn't be checking against expired keys unless required, so in the admin
// room it might be best to not allow expired keys
.fetch_required_signing_keys(&value, &pub_key_map) .fetch_required_signing_keys(&value, &pub_key_map)
.await?; .await?;
let mut expired_key_map = BTreeMap::new(); let pub_key_map = pub_key_map.read().await;
let mut valid_key_map = BTreeMap::new(); match ruma::signatures::verify_json(&pub_key_map, &value) {
Ok(_) => RoomMessageEventContent::text_plain("Signature correct"),
for (server, keys) in pub_key_map.into_inner().into_iter() { Err(e) => RoomMessageEventContent::text_plain(format!(
if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() {
valid_key_map.insert(
server,
keys.verify_keys
.into_iter()
.map(|(id, key)| (id, key.key))
.collect(),
);
} else {
expired_key_map.insert(
server,
keys.verify_keys
.into_iter()
.map(|(id, key)| (id, key.key))
.collect(),
);
}
}
if ruma::signatures::verify_json(&valid_key_map, &value).is_ok() {
RoomMessageEventContent::text_plain("Signature correct")
} else if let Err(e) =
ruma::signatures::verify_json(&expired_key_map, &value)
{
RoomMessageEventContent::text_plain(format!(
"Signature verification failed: {e}" "Signature verification failed: {e}"
)) )),
} else {
RoomMessageEventContent::text_plain(
"Signature correct (with expired keys)",
)
} }
} }
Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")), Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")),
@ -910,61 +860,6 @@ impl Service {
) )
} }
} }
AdminCommand::HashAndSignEvent { room_version_id } => {
if body.len() > 2
// Language may be specified as part of the codeblock (e.g. "```json")
&& body[0].trim().starts_with("```")
&& body.last().unwrap().trim() == "```"
{
let string = body[1..body.len() - 1].join("\n");
match serde_json::from_str(&string) {
Ok(mut value) => {
if let Err(e) = ruma::signatures::hash_and_sign_event(
services().globals.server_name().as_str(),
services().globals.keypair(),
&mut value,
&room_version_id,
) {
RoomMessageEventContent::text_plain(format!("Invalid event: {e}"))
} else {
let json_text = serde_json::to_string_pretty(&value)
.expect("canonical json is valid json");
RoomMessageEventContent::text_plain(json_text)
}
}
Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")),
}
} else {
RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
)
}
}
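The `SignJson`, `VerifyJson` and `HashAndSignEvent` commands above all expect their JSON payload wrapped in a Markdown code block: the first body line must start with a fence (optionally carrying a language tag), the last line must be a bare fence, and everything in between is the payload. A small sketch of that extraction (function name illustrative):

````rust
// Returns the text between the opening and closing fences, if the body is a
// well-formed code block as described above.
fn extract_code_block(body: &[&str]) -> Option<String> {
    let fence = "`".repeat(3);
    if body.len() > 2 && body[0].trim().starts_with(&fence) && body.last()?.trim() == fence {
        Some(body[1..body.len() - 1].join("\n"))
    } else {
        None
    }
}

fn main() {
    let body = ["```json", r#"{"event_id": "$abc"}"#, "```"];
    assert_eq!(
        extract_code_block(&body).as_deref(),
        Some(r#"{"event_id": "$abc"}"#)
    );
}
````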
AdminCommand::RemoveAlias { alias } => {
if alias.server_name() != services().globals.server_name() {
RoomMessageEventContent::text_plain(
"Cannot remove alias which is not from this server",
)
} else if services()
.rooms
.alias
.resolve_local_alias(&alias)?
.is_none()
{
RoomMessageEventContent::text_plain("No such alias exists")
} else {
// We execute this as the server user for two reasons
// 1. If the user can execute commands in the admin room, they can always remove the alias.
// 2. In the future, we are likely going to be able to allow users to execute commands via
// other methods, such as IPC, which would lead to us not knowing their user id
services()
.rooms
.alias
.remove_alias(&alias, services().globals.server_user())?;
RoomMessageEventContent::text_plain("Alias removed sucessfully")
}
}
}; };
Ok(reply_message_content) Ok(reply_message_content)
@ -1072,9 +967,11 @@ impl Service {
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
// Create a user for the server // Create a user for the server
let conduit_user = services().globals.server_user(); let conduit_user =
UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("@conduit:server_name is valid");
services().users.create(conduit_user, None)?; services().users.create(&conduit_user, None)?;
let room_version = services().globals.default_room_version(); let room_version = services().globals.default_room_version();
let mut content = match room_version { let mut content = match room_version {
@ -1087,7 +984,7 @@ impl Service {
| RoomVersionId::V7 | RoomVersionId::V7
| RoomVersionId::V8 | RoomVersionId::V8
| RoomVersionId::V9 | RoomVersionId::V9
| RoomVersionId::V10 => RoomCreateEventContent::new_v1(conduit_user.to_owned()), | RoomVersionId::V10 => RoomCreateEventContent::new_v1(conduit_user.clone()),
RoomVersionId::V11 => RoomCreateEventContent::new_v11(), RoomVersionId::V11 => RoomCreateEventContent::new_v11(),
_ => unreachable!("Validity of room version already checked"), _ => unreachable!("Validity of room version already checked"),
}; };
@ -1106,9 +1003,8 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, &conduit_user,
&room_id, &room_id,
&state_lock, &state_lock,
) )
@ -1135,9 +1031,8 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some(conduit_user.to_string()), state_key: Some(conduit_user.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, &conduit_user,
&room_id, &room_id,
&state_lock, &state_lock,
) )
@ -1145,7 +1040,7 @@ impl Service {
// 3. Power levels // 3. Power levels
let mut users = BTreeMap::new(); let mut users = BTreeMap::new();
users.insert(conduit_user.to_owned(), 100.into()); users.insert(conduit_user.clone(), 100.into());
services() services()
.rooms .rooms
@ -1161,9 +1056,8 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, &conduit_user,
&room_id, &room_id,
&state_lock, &state_lock,
) )
@ -1181,9 +1075,8 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, &conduit_user,
&room_id, &room_id,
&state_lock, &state_lock,
) )
@ -1203,9 +1096,8 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, &conduit_user,
&room_id, &room_id,
&state_lock, &state_lock,
) )
@ -1225,9 +1117,8 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, &conduit_user,
&room_id, &room_id,
&state_lock, &state_lock,
) )
@ -1246,9 +1137,8 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, &conduit_user,
&room_id, &room_id,
&state_lock, &state_lock,
) )
@ -1267,16 +1157,17 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, &conduit_user,
&room_id, &room_id,
&state_lock, &state_lock,
) )
.await?; .await?;
// 6. Room alias // 6. Room alias
let alias: OwnedRoomAliasId = services().globals.admin_alias().to_owned(); let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");
services() services()
.rooms .rooms
@ -1292,18 +1183,14 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, &conduit_user,
&room_id, &room_id,
&state_lock, &state_lock,
) )
.await?; .await?;
services() services().rooms.alias.set_alias(&alias, &room_id)?;
.rooms
.alias
.set_alias(&alias, &room_id, conduit_user)?;
Ok(()) Ok(())
} }
@ -1312,10 +1199,15 @@ impl Service {
/// ///
/// Errors are propagated from the database, and will have None if there is no admin room /// Errors are propagated from the database, and will have None if there is no admin room
pub(crate) fn get_admin_room(&self) -> Result<Option<OwnedRoomId>> { pub(crate) fn get_admin_room(&self) -> Result<Option<OwnedRoomId>> {
let admin_room_alias: Box<RoomAliasId> =
format!("#admins:{}", services().globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");
services() services()
.rooms .rooms
.alias .alias
.resolve_local_alias(services().globals.admin_alias()) .resolve_local_alias(&admin_room_alias)
} }
/// Invite the user to the conduit admin room. /// Invite the user to the conduit admin room.
@ -1339,7 +1231,9 @@ impl Service {
let state_lock = mutex_state.lock().await; let state_lock = mutex_state.lock().await;
// Use the server user to grant the new admin's power level // Use the server user to grant the new admin's power level
let conduit_user = services().globals.server_user(); let conduit_user =
UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("@conduit:server_name is valid");
// Invite and join the real user // Invite and join the real user
services() services()
@ -1362,9 +1256,8 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some(user_id.to_string()), state_key: Some(user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, &conduit_user,
&room_id, &room_id,
&state_lock, &state_lock,
) )
@ -1389,7 +1282,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some(user_id.to_string()), state_key: Some(user_id.to_string()),
redacts: None, redacts: None,
timestamp: None,
}, },
user_id, user_id,
&room_id, &room_id,
@ -1416,9 +1308,8 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some("".to_owned()), state_key: Some("".to_owned()),
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, &conduit_user,
&room_id, &room_id,
&state_lock, &state_lock,
) )
@ -1436,24 +1327,14 @@ impl Service {
unsigned: None, unsigned: None,
state_key: None, state_key: None,
redacts: None, redacts: None,
timestamp: None,
}, },
conduit_user, &conduit_user,
&room_id, &room_id,
&state_lock, &state_lock,
).await?; ).await?;
} }
Ok(()) Ok(())
} }
/// Checks whether a given user is an admin of this server
pub fn user_is_admin(&self, user_id: &UserId) -> Result<bool> {
let Some(admin_room) = self.get_admin_room()? else {
return Ok(false);
};
services().rooms.state_cache.is_joined(user_id, &admin_room)
}
} }
#[cfg(test)] #[cfg(test)]
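
For context on the admin `verify-json` change above, here is a minimal, self-contained sketch of the valid-vs-expired key split and the fallback order of the replies. The `ServerKeys` struct and the injected `verify` closure are simplified stand-ins for ruma's types and `verify_json`, not the real API.

```rust
use std::collections::BTreeMap;

/// Simplified stand-in for a server's signing keys: key id -> base64 public key,
/// plus the time (ms since the unix epoch) until which they are considered valid.
struct ServerKeys {
    verify_keys: BTreeMap<String, String>,
    valid_until_ts: u64,
}

/// Split fetched keys into "still valid" and "expired" maps, mirroring the
/// admin-room flow above: verification is first attempted with valid keys and
/// only falls back to expired ones, so the reply can say which case was hit.
fn verify_with_fallback(
    now_ms: u64,
    keys: BTreeMap<String, ServerKeys>,
    verify: impl Fn(&BTreeMap<String, BTreeMap<String, String>>) -> Result<(), String>,
) -> String {
    let mut valid = BTreeMap::new();
    let mut expired = BTreeMap::new();
    for (server, k) in keys {
        let target = if k.valid_until_ts > now_ms { &mut valid } else { &mut expired };
        target.insert(server, k.verify_keys);
    }

    if verify(&valid).is_ok() {
        "Signature correct".to_owned()
    } else if let Err(e) = verify(&expired) {
        format!("Signature verification failed: {e}")
    } else {
        "Signature correct (with expired keys)".to_owned()
    }
}

fn main() {
    let mut keys = BTreeMap::new();
    keys.insert(
        "example.org".to_owned(),
        ServerKeys {
            verify_keys: BTreeMap::from([("ed25519:abc".to_owned(), "base64".to_owned())]),
            valid_until_ts: 2_000,
        },
    );
    // Pretend verification succeeds whenever the key map is non-empty.
    let verdict = verify_with_fallback(1_000, keys, |m| {
        if m.is_empty() { Err("no keys".to_owned()) } else { Ok(()) }
    });
    println!("{verdict}");
}
```
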

View file

@ -1,71 +1,13 @@
use std::{ use std::collections::BTreeMap;
collections::BTreeMap,
time::{Duration, SystemTime},
};
use crate::{services, Result};
use async_trait::async_trait; use async_trait::async_trait;
use ruma::{ use ruma::{
api::federation::discovery::{OldVerifyKey, ServerSigningKeys, VerifyKey}, api::federation::discovery::{ServerSigningKeys, VerifyKey},
serde::Base64,
signatures::Ed25519KeyPair, signatures::Ed25519KeyPair,
DeviceId, MilliSecondsSinceUnixEpoch, ServerName, UserId, DeviceId, OwnedServerSigningKeyId, ServerName, UserId,
};
use serde::Deserialize;
/// Similar to ServerSigningKeys, but drops a few unnecessary fields we don't require post-validation
#[derive(Deserialize, Debug, Clone)]
pub struct SigningKeys {
pub verify_keys: BTreeMap<String, VerifyKey>,
pub old_verify_keys: BTreeMap<String, OldVerifyKey>,
pub valid_until_ts: MilliSecondsSinceUnixEpoch,
}
impl SigningKeys {
/// Creates the SigningKeys struct, using the keys of the current server
pub fn load_own_keys() -> Self {
let mut keys = Self {
verify_keys: BTreeMap::new(),
old_verify_keys: BTreeMap::new(),
valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(7 * 86400),
)
.expect("Should be valid until year 500,000,000"),
}; };
keys.verify_keys.insert( use crate::Result;
format!("ed25519:{}", services().globals.keypair().version()),
VerifyKey {
key: Base64::new(services().globals.keypair.public_key().to_vec()),
},
);
keys
}
}
impl From<ServerSigningKeys> for SigningKeys {
fn from(value: ServerSigningKeys) -> Self {
let ServerSigningKeys {
verify_keys,
old_verify_keys,
valid_until_ts,
..
} = value;
Self {
verify_keys: verify_keys
.into_iter()
.map(|(id, key)| (id.to_string(), key))
.collect(),
old_verify_keys: old_verify_keys
.into_iter()
.map(|(id, key)| (id.to_string(), key))
.collect(),
valid_until_ts,
}
}
}
#[async_trait] #[async_trait]
pub trait Data: Send + Sync { pub trait Data: Send + Sync {
@ -79,23 +21,17 @@ pub trait Data: Send + Sync {
fn clear_caches(&self, amount: u32); fn clear_caches(&self, amount: u32);
fn load_keypair(&self) -> Result<Ed25519KeyPair>; fn load_keypair(&self) -> Result<Ed25519KeyPair>;
fn remove_keypair(&self) -> Result<()>; fn remove_keypair(&self) -> Result<()>;
/// Only extends the cached keys, not moving any verify_keys to old_verify_keys, as if we suddenly fn add_signing_key(
/// receive requests from the origin server, we want to be able to accept requests from them
fn add_signing_key_from_trusted_server(
&self, &self,
origin: &ServerName, origin: &ServerName,
new_keys: ServerSigningKeys, new_keys: ServerSigningKeys,
) -> Result<SigningKeys>; ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>>;
/// Extends cached keys, as well as moving verify_keys that are not present in these new keys to
/// old_verify_keys, so that potentially compromised keys cannot be used to make requests
fn add_signing_key_from_origin(
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
) -> Result<SigningKeys>;
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>>; fn signing_keys_for(
&self,
origin: &ServerName,
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>>;
fn database_version(&self) -> Result<u64>; fn database_version(&self) -> Result<u64>;
fn bump_database_version(&self, new_version: u64) -> Result<()>; fn bump_database_version(&self, new_version: u64) -> Result<()>;
} }

View file

@ -1,19 +1,25 @@
mod data; mod data;
pub use data::{Data, SigningKeys}; pub use data::Data;
use ruma::{ use ruma::{
serde::Base64, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedEventId, OwnedRoomAliasId, serde::Base64, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName,
OwnedRoomId, OwnedServerName, OwnedUserId, RoomAliasId, OwnedServerSigningKeyId, OwnedUserId,
}; };
use crate::api::server_server::DestinationResponse; use crate::api::server_server::FedDest;
use crate::{services, Config, Error, Result}; use crate::{services, Config, Error, Result};
use futures_util::FutureExt; use futures_util::FutureExt;
use hickory_resolver::TokioAsyncResolver; use hickory_resolver::TokioAsyncResolver;
use hyper_util::client::legacy::connect::dns::{GaiResolver, Name as HyperName}; use hyper::{
use reqwest::dns::{Addrs, Name, Resolve, Resolving}; client::connect::dns::{GaiResolver, Name},
service::Service as HyperService,
};
use reqwest::dns::{Addrs, Resolve, Resolving};
use ruma::{ use ruma::{
api::{client::sync::sync_events, federation::discovery::ServerSigningKeys}, api::{
client::sync::sync_events,
federation::discovery::{ServerSigningKeys, VerifyKey},
},
DeviceId, RoomVersionId, ServerName, UserId, DeviceId, RoomVersionId, ServerName, UserId,
}; };
use std::{ use std::{
@ -24,7 +30,6 @@ use std::{
iter, iter,
net::{IpAddr, SocketAddr}, net::{IpAddr, SocketAddr},
path::PathBuf, path::PathBuf,
str::FromStr,
sync::{ sync::{
atomic::{self, AtomicBool}, atomic::{self, AtomicBool},
Arc, RwLock as StdRwLock, Arc, RwLock as StdRwLock,
@ -32,12 +37,11 @@ use std::{
time::{Duration, Instant}, time::{Duration, Instant},
}; };
use tokio::sync::{broadcast, watch::Receiver, Mutex, RwLock, Semaphore}; use tokio::sync::{broadcast, watch::Receiver, Mutex, RwLock, Semaphore};
use tower_service::Service as TowerService;
use tracing::{error, info}; use tracing::{error, info};
use base64::{engine::general_purpose, Engine as _}; use base64::{engine::general_purpose, Engine as _};
type WellKnownMap = HashMap<OwnedServerName, DestinationResponse>; type WellKnownMap = HashMap<OwnedServerName, (FedDest, String)>;
type TlsNameMap = HashMap<String, (Vec<IpAddr>, u16)>; type TlsNameMap = HashMap<String, (Vec<IpAddr>, u16)>;
type RateLimitState = (Instant, u32); // Time of last failed try, number of failed tries type RateLimitState = (Instant, u32); // Time of last failed try, number of failed tries
type SyncHandle = ( type SyncHandle = (
@ -51,7 +55,6 @@ pub struct Service {
pub actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host pub actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host
pub tls_name_override: Arc<StdRwLock<TlsNameMap>>, pub tls_name_override: Arc<StdRwLock<TlsNameMap>>,
pub config: Config, pub config: Config,
allow_registration: RwLock<bool>,
keypair: Arc<ruma::signatures::Ed25519KeyPair>, keypair: Arc<ruma::signatures::Ed25519KeyPair>,
dns_resolver: TokioAsyncResolver, dns_resolver: TokioAsyncResolver,
jwt_decoding_key: Option<jsonwebtoken::DecodingKey>, jwt_decoding_key: Option<jsonwebtoken::DecodingKey>,
@ -68,8 +71,6 @@ pub struct Service {
pub roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>, pub roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>, // this lock will be held longer pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>, // this lock will be held longer
pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>, pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
server_user: OwnedUserId,
admin_alias: OwnedRoomAliasId,
pub stateres_mutex: Arc<Mutex<()>>, pub stateres_mutex: Arc<Mutex<()>>,
pub rotate: RotationHandler, pub rotate: RotationHandler,
@ -136,19 +137,11 @@ impl Resolve for Resolver {
}) })
.unwrap_or_else(|| { .unwrap_or_else(|| {
let this = &mut self.inner.clone(); let this = &mut self.inner.clone();
Box::pin( Box::pin(HyperService::<Name>::call(this, name).map(|result| {
TowerService::<HyperName>::call(
this,
// Beautiful hack, please remove this in the future.
HyperName::from_str(name.as_str())
.expect("reqwest Name is just wrapper for hyper-util Name"),
)
.map(|result| {
result result
.map(|addrs| -> Addrs { Box::new(addrs) }) .map(|addrs| -> Addrs { Box::new(addrs) })
.map_err(|err| -> Box<dyn StdError + Send + Sync> { Box::new(err) }) .map_err(|err| -> Box<dyn StdError + Send + Sync> { Box::new(err) })
}), }))
)
}) })
} }
} }
@ -191,11 +184,6 @@ impl Service {
let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5];
let mut s = Self { let mut s = Self {
allow_registration: RwLock::new(config.allow_registration),
admin_alias: RoomAliasId::parse(format!("#admins:{}", &config.server_name))
.expect("#admins:server_name is a valid alias name"),
server_user: UserId::parse(format!("@conduit:{}", &config.server_name))
.expect("@conduit:server_name is valid"),
db, db,
config, config,
keypair: Arc::new(keypair), keypair: Arc::new(keypair),
@ -289,14 +277,6 @@ impl Service {
self.config.server_name.as_ref() self.config.server_name.as_ref()
} }
pub fn server_user(&self) -> &UserId {
self.server_user.as_ref()
}
pub fn admin_alias(&self) -> &RoomAliasId {
self.admin_alias.as_ref()
}
pub fn max_request_size(&self) -> u32 { pub fn max_request_size(&self) -> u32 {
self.config.max_request_size self.config.max_request_size
} }
@ -305,15 +285,8 @@ impl Service {
self.config.max_fetch_prev_events self.config.max_fetch_prev_events
} }
/// Allows for the temporary (non-persistent) toggling of registration pub fn allow_registration(&self) -> bool {
pub async fn set_registration(&self, status: bool) { self.config.allow_registration
let mut lock = self.allow_registration.write().await;
*lock = status;
}
/// Checks whether user registration is allowed
pub async fn allow_registration(&self) -> bool {
*self.allow_registration.read().await
} }
pub fn allow_encryption(&self) -> bool { pub fn allow_encryption(&self) -> bool {
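
A minimal sketch of the runtime registration switch shown above, using std's blocking `RwLock` in place of tokio's async one; the field and method names simply mirror the diff and the flag is never persisted.

```rust
use std::sync::RwLock;

/// The config value seeds the flag, and admin commands can flip it at runtime
/// without writing anything to disk (the real service uses tokio::sync::RwLock).
struct Globals {
    allow_registration: RwLock<bool>,
}

impl Globals {
    fn new(config_allow_registration: bool) -> Self {
        Self { allow_registration: RwLock::new(config_allow_registration) }
    }

    /// Temporarily (non-persistently) toggle registration.
    fn set_registration(&self, status: bool) {
        *self.allow_registration.write().unwrap() = status;
    }

    /// Checks whether user registration is currently allowed.
    fn allow_registration(&self) -> bool {
        *self.allow_registration.read().unwrap()
    }
}

fn main() {
    let globals = Globals::new(true);
    globals.set_registration(false);
    assert!(!globals.allow_registration()); // the change lasts until restart only
}
```
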
@ -389,89 +362,36 @@ impl Service {
room_versions room_versions
} }
/// TODO: the key valid until timestamp is only honored in room version > 4
/// Remove the outdated keys and insert the new ones.
///
/// This doesn't actually check that the keys provided are newer than the old set. /// This doesn't actually check that the keys provided are newer than the old set.
pub fn add_signing_key_from_trusted_server( pub fn add_signing_key(
&self, &self,
origin: &ServerName, origin: &ServerName,
new_keys: ServerSigningKeys, new_keys: ServerSigningKeys,
) -> Result<SigningKeys> { ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
self.db self.db.add_signing_key(origin, new_keys)
.add_signing_key_from_trusted_server(origin, new_keys)
} }
/// Same as from_trusted_server, except it will move active keys not present in `new_keys` to old_signing_keys /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
pub fn add_signing_key_from_origin( pub fn signing_keys_for(
&self, &self,
origin: &ServerName, origin: &ServerName,
new_keys: ServerSigningKeys, ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
) -> Result<SigningKeys> { let mut keys = self.db.signing_keys_for(origin)?;
self.db.add_signing_key_from_origin(origin, new_keys)
}
/// This returns Ok(None) when there are no keys found for the server.
pub fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<SigningKeys>> {
Ok(self.db.signing_keys_for(origin)?.or_else(|| {
if origin == self.server_name() { if origin == self.server_name() {
Some(SigningKeys::load_own_keys()) keys.insert(
} else { format!("ed25519:{}", services().globals.keypair().version())
None .try_into()
} .expect("found invalid server signing keys in DB"),
})) VerifyKey {
key: Base64::new(self.keypair.public_key().to_vec()),
},
);
} }
/// Filters the key map of multiple servers down to keys that should be accepted given the expiry time, Ok(keys)
/// room version, and timestamp of the parameters
pub fn filter_keys_server_map(
&self,
keys: BTreeMap<String, SigningKeys>,
timestamp: MilliSecondsSinceUnixEpoch,
room_version_id: &RoomVersionId,
) -> BTreeMap<String, BTreeMap<String, Base64>> {
keys.into_iter()
.filter_map(|(server, keys)| {
self.filter_keys_single_server(keys, timestamp, room_version_id)
.map(|keys| (server, keys))
})
.collect()
}
/// Filters the keys of a single server down to keys that should be accepted given the expiry time,
/// room version, and timestamp of the parameters
pub fn filter_keys_single_server(
&self,
keys: SigningKeys,
timestamp: MilliSecondsSinceUnixEpoch,
room_version_id: &RoomVersionId,
) -> Option<BTreeMap<String, Base64>> {
if keys.valid_until_ts > timestamp
// valid_until_ts MUST be ignored in room versions 1, 2, 3, and 4.
// https://spec.matrix.org/v1.10/server-server-api/#get_matrixkeyv2server
|| matches!(room_version_id, RoomVersionId::V1
| RoomVersionId::V2
| RoomVersionId::V4
| RoomVersionId::V3)
{
// Given that either the room version allows stale keys, or the valid_until_ts is
// in the future, all verify_keys are valid
let mut map: BTreeMap<_, _> = keys
.verify_keys
.into_iter()
.map(|(id, key)| (id, key.key))
.collect();
map.extend(keys.old_verify_keys.into_iter().filter_map(|(id, key)| {
// Even on old room versions, we don't allow old keys if they are expired
if key.expired_ts > timestamp {
Some((id, key.key))
} else {
None
}
}));
Some(map)
} else {
None
}
} }
pub fn database_version(&self) -> Result<u64> { pub fn database_version(&self) -> Result<u64> {
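
A rough sketch of the key-filtering rule from `filter_keys_single_server` above, with plain millisecond timestamps and string keys standing in for the ruma types; the room-version check (expiry is ignored in versions 1-4) is collapsed into a boolean flag.

```rust
use std::collections::BTreeMap;

/// Simplified stand-ins for the signing-key types above.
struct OldKey { key: String, expired_ts: u64 }
struct SigningKeys {
    verify_keys: BTreeMap<String, String>,
    old_verify_keys: BTreeMap<String, OldKey>,
    valid_until_ts: u64,
}

/// Keep a server's keys if `valid_until_ts` is still in the future *or* the
/// room version ignores key expiry, and even then only keep old keys whose
/// own `expired_ts` is after the event's timestamp.
fn filter_keys_single_server(
    keys: SigningKeys,
    event_ts: u64,
    room_version_ignores_expiry: bool,
) -> Option<BTreeMap<String, String>> {
    if keys.valid_until_ts > event_ts || room_version_ignores_expiry {
        let mut map: BTreeMap<_, _> = keys.verify_keys.into_iter().collect();
        map.extend(
            keys.old_verify_keys
                .into_iter()
                // Old keys are never accepted past their own expiry.
                .filter(|(_, old)| old.expired_ts > event_ts)
                .map(|(id, old)| (id, old.key)),
        );
        Some(map)
    } else {
        None
    }
}

fn main() {
    let keys = SigningKeys {
        verify_keys: BTreeMap::from([("ed25519:a".to_owned(), "pub-a".to_owned())]),
        old_verify_keys: BTreeMap::from([(
            "ed25519:old".to_owned(),
            OldKey { key: "pub-old".to_owned(), expired_ts: 500 },
        )]),
        valid_until_ts: 2_000,
    };
    // Event at t=1000: the current key is valid, the old key expired at t=500 and is dropped.
    let filtered = filter_keys_single_server(keys, 1_000, false).unwrap();
    assert!(filtered.contains_key("ed25519:a"));
    assert!(!filtered.contains_key("ed25519:old"));
}
```
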

View file

@ -1,5 +1,3 @@
use ruma::http_headers::ContentDisposition;
use crate::Result; use crate::Result;
pub trait Data: Send + Sync { pub trait Data: Send + Sync {
@ -8,7 +6,7 @@ pub trait Data: Send + Sync {
mxc: String, mxc: String,
width: u32, width: u32,
height: u32, height: u32,
content_disposition: &ContentDisposition, content_disposition: Option<&str>,
content_type: Option<&str>, content_type: Option<&str>,
) -> Result<Vec<u8>>; ) -> Result<Vec<u8>>;
@ -18,5 +16,5 @@ pub trait Data: Send + Sync {
mxc: String, mxc: String,
width: u32, width: u32,
height: u32, height: u32,
) -> Result<(ContentDisposition, Option<String>, Vec<u8>)>; ) -> Result<(Option<String>, Option<String>, Vec<u8>)>;
} }

View file

@ -2,7 +2,6 @@ mod data;
use std::io::Cursor; use std::io::Cursor;
pub use data::Data; pub use data::Data;
use ruma::http_headers::{ContentDisposition, ContentDispositionType};
use crate::{services, Result}; use crate::{services, Result};
use image::imageops::FilterType; use image::imageops::FilterType;
@ -13,7 +12,7 @@ use tokio::{
}; };
pub struct FileMeta { pub struct FileMeta {
pub content_disposition: ContentDisposition, pub content_disposition: Option<String>,
pub content_type: Option<String>, pub content_type: Option<String>,
pub file: Vec<u8>, pub file: Vec<u8>,
} }
@ -27,17 +26,14 @@ impl Service {
pub async fn create( pub async fn create(
&self, &self,
mxc: String, mxc: String,
content_disposition: Option<ContentDisposition>, content_disposition: Option<&str>,
content_type: Option<&str>, content_type: Option<&str>,
file: &[u8], file: &[u8],
) -> Result<()> { ) -> Result<()> {
let content_disposition =
content_disposition.unwrap_or(ContentDisposition::new(ContentDispositionType::Inline));
// Width, Height = 0 if it's not a thumbnail // Width, Height = 0 if it's not a thumbnail
let key = self let key = self
.db .db
.create_file_metadata(mxc, 0, 0, &content_disposition, content_type)?; .create_file_metadata(mxc, 0, 0, content_disposition, content_type)?;
let path = services().globals.get_media_file(&key); let path = services().globals.get_media_file(&key);
let mut f = File::create(path).await?; let mut f = File::create(path).await?;
@ -50,18 +46,15 @@ impl Service {
pub async fn upload_thumbnail( pub async fn upload_thumbnail(
&self, &self,
mxc: String, mxc: String,
content_disposition: Option<&str>,
content_type: Option<&str>, content_type: Option<&str>,
width: u32, width: u32,
height: u32, height: u32,
file: &[u8], file: &[u8],
) -> Result<()> { ) -> Result<()> {
let key = self.db.create_file_metadata( let key =
mxc, self.db
width, .create_file_metadata(mxc, width, height, content_disposition, content_type)?;
height,
&ContentDisposition::new(ContentDispositionType::Inline),
content_type,
)?;
let path = services().globals.get_media_file(&key); let path = services().globals.get_media_file(&key);
let mut f = File::create(path).await?; let mut f = File::create(path).await?;
@ -173,20 +166,22 @@ impl Service {
/ u64::from(original_height) / u64::from(original_height)
}; };
if use_width { if use_width {
if intermediate <= u64::from(u32::MAX) { if intermediate <= u64::from(::std::u32::MAX) {
(width, intermediate as u32) (width, intermediate as u32)
} else { } else {
( (
(u64::from(width) * u64::from(u32::MAX) / intermediate) as u32, (u64::from(width) * u64::from(::std::u32::MAX) / intermediate)
u32::MAX, as u32,
::std::u32::MAX,
) )
} }
} else if intermediate <= u64::from(u32::MAX) { } else if intermediate <= u64::from(::std::u32::MAX) {
(intermediate as u32, height) (intermediate as u32, height)
} else { } else {
( (
u32::MAX, ::std::u32::MAX,
(u64::from(height) * u64::from(u32::MAX) / intermediate) as u32, (u64::from(height) * u64::from(::std::u32::MAX) / intermediate)
as u32,
) )
} }
}; };
@ -205,7 +200,7 @@ impl Service {
mxc, mxc,
width, width,
height, height,
&content_disposition, content_disposition.as_deref(),
content_type.as_deref(), content_type.as_deref(),
)?; )?;
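
A worked sketch of the thumbnail-scaling arithmetic in the hunk above: the limiting edge is chosen from the aspect ratio, the intermediate dimension is computed in u64, and the result is clamped to `u32::MAX` before casting back so the conversion can never overflow. Names such as `fit_thumbnail` are illustrative only.

```rust
/// Scale (orig_w, orig_h) to fit inside (req_w, req_h) while keeping the
/// aspect ratio, clamping oversized intermediates to u32::MAX.
fn fit_thumbnail((orig_w, orig_h): (u32, u32), (req_w, req_h): (u32, u32)) -> (u32, u32) {
    // Decide which requested edge is the limiting one for this aspect ratio.
    let use_width =
        u64::from(req_w) * u64::from(orig_h) <= u64::from(orig_w) * u64::from(req_h);

    let intermediate = if use_width {
        // Height that matches req_w at the original aspect ratio.
        u64::from(orig_h) * u64::from(req_w) / u64::from(orig_w)
    } else {
        // Width that matches req_h at the original aspect ratio.
        u64::from(orig_w) * u64::from(req_h) / u64::from(orig_h)
    };

    if use_width {
        if intermediate <= u64::from(u32::MAX) {
            (req_w, intermediate as u32)
        } else {
            ((u64::from(req_w) * u64::from(u32::MAX) / intermediate) as u32, u32::MAX)
        }
    } else if intermediate <= u64::from(u32::MAX) {
        (intermediate as u32, req_h)
    } else {
        (u32::MAX, (u64::from(req_h) * u64::from(u32::MAX) / intermediate) as u32)
    }
}

fn main() {
    // A 4000x3000 original requested at 800x600 keeps the 4:3 ratio.
    assert_eq!(fit_thumbnail((4000, 3000), (800, 600)), (800, 600));
    // A 2:1 original requested as a 600x600 box is limited by width.
    assert_eq!(fit_thumbnail((2000, 1000), (600, 600)), (600, 300));
}
```
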

View file

@ -1,6 +1,5 @@
use crate::Error; use crate::Error;
use ruma::{ use ruma::{
api::client::error::ErrorKind,
canonical_json::redact_content_in_place, canonical_json::redact_content_in_place,
events::{ events::{
room::{member::RoomMemberEventContent, redaction::RoomRedactionEventContent}, room::{member::RoomMemberEventContent, redaction::RoomRedactionEventContent},
@ -73,23 +72,6 @@ impl PduEvent {
Ok(()) Ok(())
} }
pub fn is_redacted(&self) -> bool {
#[derive(Deserialize)]
struct ExtractRedactedBecause {
redacted_because: Option<serde::de::IgnoredAny>,
}
let Some(unsigned) = &self.unsigned else {
return false;
};
let Ok(unsigned) = ExtractRedactedBecause::deserialize(&**unsigned) else {
return false;
};
unsigned.redacted_because.is_some()
}
pub fn remove_transaction_id(&mut self) -> crate::Result<()> { pub fn remove_transaction_id(&mut self) -> crate::Result<()> {
if let Some(unsigned) = &self.unsigned { if let Some(unsigned) = &self.unsigned {
let mut unsigned: BTreeMap<String, Box<RawJsonValue>> = let mut unsigned: BTreeMap<String, Box<RawJsonValue>> =
@ -444,7 +426,7 @@ pub(crate) fn gen_event_id_canonical_json(
"${}", "${}",
// Anything higher than version3 behaves the same // Anything higher than version3 behaves the same
ruma::signatures::reference_hash(&value, room_version_id) ruma::signatures::reference_hash(&value, room_version_id)
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))? .expect("ruma can calculate reference hashes")
) )
.try_into() .try_into()
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
@ -461,8 +443,4 @@ pub struct PduBuilder {
pub unsigned: Option<BTreeMap<String, serde_json::Value>>, pub unsigned: Option<BTreeMap<String, serde_json::Value>>,
pub state_key: Option<String>, pub state_key: Option<String>,
pub redacts: Option<Arc<EventId>>, pub redacts: Option<Arc<EventId>>,
/// For timestamped messaging, should only be used for appservices
///
/// Will be set to current time if None
pub timestamp: Option<MilliSecondsSinceUnixEpoch>,
} }
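
A small sketch of the `is_redacted` check introduced above: an event counts as redacted when its `unsigned` object carries a `redacted_because` entry. A `serde_json::Value` stands in for the PDU's `unsigned` field and the `IgnoredAny` deserializer; the serde_json crate is assumed.

```rust
/// Returns true when the unsigned object contains `redacted_because`.
fn is_redacted(unsigned: Option<&serde_json::Value>) -> bool {
    unsigned
        .and_then(|u| u.as_object())
        .map(|u| u.contains_key("redacted_because"))
        .unwrap_or(false)
}

fn main() {
    let unsigned: serde_json::Value =
        serde_json::json!({ "redacted_because": { "type": "m.room.redaction" } });
    assert!(is_redacted(Some(&unsigned)));
    assert!(!is_redacted(None));
}
```
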

View file

@ -1,12 +1,9 @@
use crate::Result; use crate::Result;
use ruma::{OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, UserId}; use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
pub trait Data: Send + Sync { pub trait Data: Send + Sync {
/// Creates or updates the alias to the given room id. /// Creates or updates the alias to the given room id.
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()>; fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()>;
/// Finds the user who assigned the given alias to a room
fn who_created_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedUserId>>;
/// Forgets about an alias. Returns an error if the alias did not exist. /// Forgets about an alias. Returns an error if the alias did not exist.
fn remove_alias(&self, alias: &RoomAliasId) -> Result<()>; fn remove_alias(&self, alias: &RoomAliasId) -> Result<()>;

View file

@ -1,17 +1,9 @@
mod data; mod data;
pub use data::Data; pub use data::Data;
use tracing::error;
use crate::{services, Error, Result}; use crate::Result;
use ruma::{ use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
api::client::error::ErrorKind,
events::{
room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
StateEventType,
},
OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, UserId,
};
pub struct Service { pub struct Service {
pub db: &'static dyn Data, pub db: &'static dyn Data,
@ -19,71 +11,13 @@ pub struct Service {
impl Service { impl Service {
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> { pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> {
if alias == services().globals.admin_alias() && user_id != services().globals.server_user() self.db.set_alias(alias, room_id)
{
Err(Error::BadRequest(
ErrorKind::forbidden(),
"Only the server user can set this alias",
))
} else {
self.db.set_alias(alias, room_id, user_id)
}
} }
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
fn user_can_remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result<bool> { pub fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> {
let Some(room_id) = self.resolve_local_alias(alias)? else {
return Err(Error::BadRequest(ErrorKind::NotFound, "Alias not found."));
};
// The creator of an alias can remove it
if self
.db
.who_created_alias(alias)?
.map(|user| user == user_id)
.unwrap_or_default()
// Server admins can remove any local alias
|| services().admin.user_is_admin(user_id)?
// Always allow the Conduit user to remove the alias, since there may not be an admin room
|| services().globals.server_user() == user_id
{
Ok(true)
// Checking whether the user is able to change canonical aliases of the room
} else if let Some(event) = services().rooms.state_accessor.room_state_get(
&room_id,
&StateEventType::RoomPowerLevels,
"",
)? {
serde_json::from_str(event.content.get())
.map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels"))
.map(|content: RoomPowerLevelsEventContent| {
RoomPowerLevels::from(content)
.user_can_send_state(user_id, StateEventType::RoomCanonicalAlias)
})
// If there is no power levels event, only the room creator can change canonical aliases
} else if let Some(event) = services().rooms.state_accessor.room_state_get(
&room_id,
&StateEventType::RoomCreate,
"",
)? {
Ok(event.sender == user_id)
} else {
error!("Room {} has no m.room.create event (VERY BAD)!", room_id);
Err(Error::bad_database("Room has no m.room.create event"))
}
}
#[tracing::instrument(skip(self))]
pub fn remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result<()> {
if self.user_can_remove_alias(alias, user_id)? {
self.db.remove_alias(alias) self.db.remove_alias(alias)
} else {
Err(Error::BadRequest(
ErrorKind::forbidden(),
"User is not permitted to remove this alias.",
))
}
} }
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
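
A simplified sketch of the permission chain in `user_can_remove_alias` above: the alias creator, a server admin, or the Conduit server user may always remove a local alias, and anyone else needs the room's power levels to allow sending `m.room.canonical_alias`. The booleans here stand in for the alias-table, admin-room, and power-level lookups and are assumptions for illustration only.

```rust
/// Stand-in inputs; the real implementation resolves these from the alias
/// table and from the room's state events.
struct AliasCtx<'a> {
    creator: Option<&'a str>,
    requester_is_admin: bool,
    requester_is_server_user: bool,
    can_send_canonical_alias: bool,
}

/// Mirrors the decision order in the diff above, flattened into one expression.
fn user_can_remove_alias(user_id: &str, ctx: &AliasCtx) -> bool {
    ctx.creator.map(|c| c == user_id).unwrap_or(false)
        || ctx.requester_is_admin
        || ctx.requester_is_server_user
        || ctx.can_send_canonical_alias
}

fn main() {
    let ctx = AliasCtx {
        creator: Some("@alice:example.org"),
        requester_is_admin: false,
        requester_is_server_user: false,
        can_send_canonical_alias: false,
    };
    assert!(user_can_remove_alias("@alice:example.org", &ctx));
    assert!(!user_can_remove_alias("@bob:example.org", &ctx));
}
```
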

View file

@ -133,10 +133,7 @@ impl Service {
match services().rooms.timeline.get_pdu(&event_id) { match services().rooms.timeline.get_pdu(&event_id) {
Ok(Some(pdu)) => { Ok(Some(pdu)) => {
if pdu.room_id != room_id { if pdu.room_id != room_id {
return Err(Error::BadRequest( return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db"));
ErrorKind::forbidden(),
"Evil event in db",
));
} }
for auth_event in &pdu.auth_events { for auth_event in &pdu.auth_events {
let sauthevent = services() let sauthevent = services()

View file

@ -9,7 +9,6 @@ use std::{
}; };
use futures_util::{stream::FuturesUnordered, Future, StreamExt}; use futures_util::{stream::FuturesUnordered, Future, StreamExt};
use globals::SigningKeys;
use ruma::{ use ruma::{
api::{ api::{
client::error::ErrorKind, client::error::ErrorKind,
@ -31,6 +30,7 @@ use ruma::{
StateEventType, TimelineEventType, StateEventType, TimelineEventType,
}, },
int, int,
serde::Base64,
state_res::{self, RoomVersion, StateMap}, state_res::{self, RoomVersion, StateMap},
uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch,
OwnedServerName, OwnedServerSigningKeyId, RoomId, RoomVersionId, ServerName, OwnedServerName, OwnedServerSigningKeyId, RoomId, RoomVersionId, ServerName,
@ -78,7 +78,7 @@ impl Service {
room_id: &'a RoomId, room_id: &'a RoomId,
value: BTreeMap<String, CanonicalJsonValue>, value: BTreeMap<String, CanonicalJsonValue>,
is_timeline_event: bool, is_timeline_event: bool,
pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<Option<Vec<u8>>> { ) -> Result<Option<Vec<u8>>> {
// 0. Check the server is in the room // 0. Check the server is in the room
if !services().rooms.metadata.exists(room_id)? { if !services().rooms.metadata.exists(room_id)? {
@ -90,7 +90,7 @@ impl Service {
if services().rooms.metadata.is_disabled(room_id)? { if services().rooms.metadata.is_disabled(room_id)? {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Federation of this room is currently disabled on this server.", "Federation of this room is currently disabled on this server.",
)); ));
} }
@ -162,7 +162,7 @@ impl Service {
// Check for disabled again because it might have changed // Check for disabled again because it might have changed
if services().rooms.metadata.is_disabled(room_id)? { if services().rooms.metadata.is_disabled(room_id)? {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Federation of this room is currently disabled on this server.", "Federation of this room is currently disabled on this server.",
)); ));
} }
@ -304,12 +304,19 @@ impl Service {
room_id: &'a RoomId, room_id: &'a RoomId,
mut value: BTreeMap<String, CanonicalJsonValue>, mut value: BTreeMap<String, CanonicalJsonValue>,
auth_events_known: bool, auth_events_known: bool,
pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> AsyncRecursiveType<'a, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>> { ) -> AsyncRecursiveType<'a, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>> {
Box::pin(async move { Box::pin(async move {
// 1.1. Remove unsigned field // 1.1. Remove unsigned field
value.remove("unsigned"); value.remove("unsigned");
// TODO: For RoomVersion6 we must check that Raw<..> is canonical. Do we do that anywhere? https://matrix.org/docs/spec/rooms/v6#canonical-json
// We go through all the signatures we see on the value and fetch the corresponding signing
// keys
self.fetch_required_signing_keys(&value, pub_key_map)
.await?;
// 2. Check signatures, otherwise drop // 2. Check signatures, otherwise drop
// 3. check content hash, redact if doesn't match // 3. check content hash, redact if doesn't match
let create_event_content: RoomCreateEventContent = let create_event_content: RoomCreateEventContent =
@ -322,47 +329,8 @@ impl Service {
let room_version = let room_version =
RoomVersion::new(room_version_id).expect("room version is supported"); RoomVersion::new(room_version_id).expect("room version is supported");
// TODO: For RoomVersion6 we must check that Raw<..> is canonical. Do we do that anywhere? https://matrix.org/docs/spec/rooms/v6#canonical-json
// We go through all the signatures we see on the value and fetch the corresponding signing
// keys
self.fetch_required_signing_keys(&value, pub_key_map)
.await?;
let origin_server_ts = value.get("origin_server_ts").ok_or_else(|| {
error!("Invalid PDU, no origin_server_ts field");
Error::BadRequest(
ErrorKind::MissingParam,
"Invalid PDU, no origin_server_ts field",
)
})?;
let origin_server_ts: MilliSecondsSinceUnixEpoch = {
let ts = origin_server_ts.as_integer().ok_or_else(|| {
Error::BadRequest(
ErrorKind::InvalidParam,
"origin_server_ts must be an integer",
)
})?;
MilliSecondsSinceUnixEpoch(i64::from(ts).try_into().map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Time must be after the unix epoch")
})?)
};
let guard = pub_key_map.read().await; let guard = pub_key_map.read().await;
let mut val = match ruma::signatures::verify_event(&guard, &value, room_version_id) {
let pkey_map = (*guard).clone();
// Removing all the expired keys, unless the room version allows stale keys
let filtered_keys = services().globals.filter_keys_server_map(
pkey_map,
origin_server_ts,
room_version_id,
);
let mut val =
match ruma::signatures::verify_event(&filtered_keys, &value, room_version_id) {
Err(e) => { Err(e) => {
// Drop // Drop
warn!("Dropping bad event {}: {}", event_id, e,); warn!("Dropping bad event {}: {}", event_id, e,);
@ -519,7 +487,7 @@ impl Service {
create_event: &PduEvent, create_event: &PduEvent,
origin: &ServerName, origin: &ServerName,
room_id: &RoomId, room_id: &RoomId,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<Option<Vec<u8>>> { ) -> Result<Option<Vec<u8>>> {
// Skip the PDU if we already have it as a timeline event // Skip the PDU if we already have it as a timeline event
if let Ok(Some(pduid)) = services().rooms.timeline.get_pdu_id(&incoming_pdu.event_id) { if let Ok(Some(pduid)) = services().rooms.timeline.get_pdu_id(&incoming_pdu.event_id) {
@ -1129,7 +1097,7 @@ impl Service {
create_event: &'a PduEvent, create_event: &'a PduEvent,
room_id: &'a RoomId, room_id: &'a RoomId,
room_version_id: &'a RoomVersionId, room_version_id: &'a RoomVersionId,
pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>> ) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>>
{ {
Box::pin(async move { Box::pin(async move {
@ -1312,7 +1280,7 @@ impl Service {
create_event: &PduEvent, create_event: &PduEvent,
room_id: &RoomId, room_id: &RoomId,
room_version_id: &RoomVersionId, room_version_id: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
initial_set: Vec<Arc<EventId>>, initial_set: Vec<Arc<EventId>>,
) -> Result<( ) -> Result<(
Vec<Arc<EventId>>, Vec<Arc<EventId>>,
@ -1410,7 +1378,7 @@ impl Service {
pub(crate) async fn fetch_required_signing_keys( pub(crate) async fn fetch_required_signing_keys(
&self, &self,
event: &BTreeMap<String, CanonicalJsonValue>, event: &BTreeMap<String, CanonicalJsonValue>,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<()> { ) -> Result<()> {
let signatures = event let signatures = event
.get("signatures") .get("signatures")
@ -1439,7 +1407,6 @@ impl Service {
) )
})?, })?,
signature_ids, signature_ids,
true,
) )
.await; .await;
@ -1467,7 +1434,7 @@ impl Service {
pdu: &RawJsonValue, pdu: &RawJsonValue,
servers: &mut BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>, servers: &mut BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>,
room_version: &RoomVersionId, room_version: &RoomVersionId,
pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, SigningKeys>>, pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<()> { ) -> Result<()> {
let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); error!("Invalid PDU in server response: {:?}: {:?}", pdu, e);
@ -1477,7 +1444,7 @@ impl Service {
let event_id = format!( let event_id = format!(
"${}", "${}",
ruma::signatures::reference_hash(&value, room_version) ruma::signatures::reference_hash(&value, room_version)
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid PDU format"))? .expect("ruma can calculate reference hashes")
); );
let event_id = <&EventId>::try_from(event_id.as_str()) let event_id = <&EventId>::try_from(event_id.as_str())
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
@ -1518,18 +1485,8 @@ impl Service {
let signature_ids = signature_object.keys().cloned().collect::<Vec<_>>(); let signature_ids = signature_object.keys().cloned().collect::<Vec<_>>();
let contains_all_ids = |keys: &SigningKeys| { let contains_all_ids = |keys: &BTreeMap<String, Base64>| {
signature_ids.iter().all(|id| { signature_ids.iter().all(|id| keys.contains_key(id))
keys.verify_keys
.keys()
.map(ToString::to_string)
.any(|key_id| id == &key_id)
|| keys
.old_verify_keys
.keys()
.map(ToString::to_string)
.any(|key_id| id == &key_id)
})
}; };
let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| {
@ -1542,7 +1499,13 @@ impl Service {
trace!("Loading signing keys for {}", origin); trace!("Loading signing keys for {}", origin);
if let Some(result) = services().globals.signing_keys_for(origin)? { let result: BTreeMap<_, _> = services()
.globals
.signing_keys_for(origin)?
.into_iter()
.map(|(k, v)| (k.to_string(), v.key))
.collect();
if !contains_all_ids(&result) { if !contains_all_ids(&result) {
trace!("Signing key not loaded for {}", origin); trace!("Signing key not loaded for {}", origin);
servers.insert(origin.to_owned(), BTreeMap::new()); servers.insert(origin.to_owned(), BTreeMap::new());
@ -1550,7 +1513,6 @@ impl Service {
pub_key_map.insert(origin.to_string(), result); pub_key_map.insert(origin.to_string(), result);
} }
}
Ok(()) Ok(())
} }
@ -1559,7 +1521,7 @@ impl Service {
&self, &self,
event: &create_join_event::v2::Response, event: &create_join_event::v2::Response,
room_version: &RoomVersionId, room_version: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<()> { ) -> Result<()> {
let mut servers: BTreeMap< let mut servers: BTreeMap<
OwnedServerName, OwnedServerName,
@ -1622,7 +1584,10 @@ impl Service {
let result = services() let result = services()
.globals .globals
.add_signing_key_from_trusted_server(&k.server_name, k.clone())?; .add_signing_key(&k.server_name, k.clone())?
.into_iter()
.map(|(k, v)| (k.to_string(), v.key))
.collect::<BTreeMap<_, _>>();
pkm.insert(k.server_name.to_string(), result); pkm.insert(k.server_name.to_string(), result);
} }
@ -1653,9 +1618,12 @@ impl Service {
if let (Ok(get_keys_response), origin) = result { if let (Ok(get_keys_response), origin) = result {
info!("Result is from {origin}"); info!("Result is from {origin}");
if let Ok(key) = get_keys_response.server_key.deserialize() { if let Ok(key) = get_keys_response.server_key.deserialize() {
let result = services() let result: BTreeMap<_, _> = services()
.globals .globals
.add_signing_key_from_origin(&origin, key)?; .add_signing_key(&origin, key)?
.into_iter()
.map(|(k, v)| (k.to_string(), v.key))
.collect();
pub_key_map.write().await.insert(origin.to_string(), result); pub_key_map.write().await.insert(origin.to_string(), result);
} }
} }
@ -1687,6 +1655,11 @@ impl Service {
} }
}; };
if acl_event_content.allow.is_empty() {
// Ignore broken acl events
return Ok(());
}
if acl_event_content.is_allowed(server_name) { if acl_event_content.is_allowed(server_name) {
Ok(()) Ok(())
} else { } else {
@ -1695,7 +1668,7 @@ impl Service {
server_name, room_id server_name, room_id
); );
Err(Error::BadRequest( Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Server was denied by room ACL", "Server was denied by room ACL",
)) ))
} }
@ -1708,23 +1681,9 @@ impl Service {
&self, &self,
origin: &ServerName, origin: &ServerName,
signature_ids: Vec<String>, signature_ids: Vec<String>,
// Whether to ask for keys from trusted servers. Should be false when getting ) -> Result<BTreeMap<String, Base64>> {
// keys for validating requests, as per MSC4029 let contains_all_ids =
query_via_trusted_servers: bool, |keys: &BTreeMap<String, Base64>| signature_ids.iter().all(|id| keys.contains_key(id));
) -> Result<SigningKeys> {
let contains_all_ids = |keys: &SigningKeys| {
signature_ids.iter().all(|id| {
keys.verify_keys
.keys()
.map(ToString::to_string)
.any(|key_id| id == &key_id)
|| keys
.old_verify_keys
.keys()
.map(ToString::to_string)
.any(|key_id| id == &key_id)
})
};
let permit = services() let permit = services()
.globals .globals
@ -1785,91 +1744,48 @@ impl Service {
trace!("Loading signing keys for {}", origin); trace!("Loading signing keys for {}", origin);
let result = services().globals.signing_keys_for(origin)?; let mut result: BTreeMap<_, _> = services()
.globals
let mut expires_soon_or_has_expired = false; .signing_keys_for(origin)?
.into_iter()
if let Some(result) = result.clone() { .map(|(k, v)| (k.to_string(), v.key))
let ts_threshold = MilliSecondsSinceUnixEpoch::from_system_time( .collect();
SystemTime::now() + Duration::from_secs(30 * 60),
)
.expect("Should be valid until year 500,000,000");
debug!(
"The treshhold is {:?}, found time is {:?} for server {}",
ts_threshold, result.valid_until_ts, origin
);
if contains_all_ids(&result) { if contains_all_ids(&result) {
// We want to ensure that the keys remain valid by the time the other functions that handle signatures reach them
if result.valid_until_ts > ts_threshold {
debug!(
"Keys for {} are deemed as valid, as they expire at {:?}",
&origin, &result.valid_until_ts
);
return Ok(result); return Ok(result);
} }
expires_soon_or_has_expired = true;
}
}
let mut keys = result.unwrap_or_else(|| SigningKeys {
verify_keys: BTreeMap::new(),
old_verify_keys: BTreeMap::new(),
valid_until_ts: MilliSecondsSinceUnixEpoch::now(),
});
// We want to set this to the max, and then lower it whenever we see older keys
keys.valid_until_ts = MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(7 * 86400),
)
.expect("Should be valid until year 500,000,000");
debug!("Fetching signing keys for {} over federation", origin); debug!("Fetching signing keys for {} over federation", origin);
if let Some(mut server_key) = services() if let Some(server_key) = services()
.sending .sending
.send_federation_request(origin, get_server_keys::v2::Request::new()) .send_federation_request(origin, get_server_keys::v2::Request::new())
.await .await
.ok() .ok()
.and_then(|resp| resp.server_key.deserialize().ok()) .and_then(|resp| resp.server_key.deserialize().ok())
{ {
// Keys should only be valid for a maximum of seven days
server_key.valid_until_ts = server_key.valid_until_ts.min(
MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(7 * 86400),
)
.expect("Should be valid until year 500,000,000"),
);
services() services()
.globals .globals
.add_signing_key_from_origin(origin, server_key.clone())?; .add_signing_key(origin, server_key.clone())?;
if keys.valid_until_ts > server_key.valid_until_ts { result.extend(
keys.valid_until_ts = server_key.valid_until_ts;
}
keys.verify_keys.extend(
server_key server_key
.verify_keys .verify_keys
.into_iter() .into_iter()
.map(|(id, key)| (id.to_string(), key)), .map(|(k, v)| (k.to_string(), v.key)),
); );
keys.old_verify_keys.extend( result.extend(
server_key server_key
.old_verify_keys .old_verify_keys
.into_iter() .into_iter()
.map(|(id, key)| (id.to_string(), key)), .map(|(k, v)| (k.to_string(), v.key)),
); );
if contains_all_ids(&keys) { if contains_all_ids(&result) {
return Ok(keys); return Ok(result);
} }
} }
if query_via_trusted_servers {
for server in services().globals.trusted_servers() { for server in services().globals.trusted_servers() {
debug!("Asking {} for {}'s signing key", server, origin); debug!("Asking {} for {}'s signing key", server, origin);
if let Some(server_keys) = services() if let Some(server_keys) = services()
@ -1896,60 +1812,25 @@ impl Service {
}) })
{ {
trace!("Got signing keys: {:?}", server_keys); trace!("Got signing keys: {:?}", server_keys);
for mut k in server_keys { for k in server_keys {
if k.valid_until_ts services().globals.add_signing_key(origin, k.clone())?;
// Half an hour should give plenty of time for the server to respond with keys that are still result.extend(
// valid, given we requested keys which are valid at least an hour from now
< MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(30 * 60),
)
.expect("Should be valid until year 500,000,000")
{
// Keys should only be valid for a maximum of seven days
k.valid_until_ts = k.valid_until_ts.min(
MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(7 * 86400),
)
.expect("Should be valid until year 500,000,000"),
);
if keys.valid_until_ts > k.valid_until_ts {
keys.valid_until_ts = k.valid_until_ts;
}
services()
.globals
.add_signing_key_from_trusted_server(origin, k.clone())?;
keys.verify_keys.extend(
k.verify_keys k.verify_keys
.into_iter() .into_iter()
.map(|(id, key)| (id.to_string(), key)), .map(|(k, v)| (k.to_string(), v.key)),
); );
keys.old_verify_keys.extend( result.extend(
k.old_verify_keys k.old_verify_keys
.into_iter() .into_iter()
.map(|(id, key)| (id.to_string(), key)), .map(|(k, v)| (k.to_string(), v.key)),
);
} else {
warn!(
"Server {} gave us keys older than we requested, valid until: {:?}",
origin, k.valid_until_ts
); );
} }
if contains_all_ids(&keys) { if contains_all_ids(&result) {
return Ok(keys); return Ok(result);
} }
} }
} }
}
}
// We should return these keys if fresher keys were not found
if expires_soon_or_has_expired {
info!("Returning stale keys for {}", origin);
return Ok(keys);
}
drop(permit); drop(permit);
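
A minimal sketch of the `contains_all_ids` checks that appear twice above: a cached key bundle only satisfies a request when every key id referenced in the event's signatures is present, either among the current verify keys or among the rotated-out old keys. Key ids are plain strings here rather than `OwnedServerSigningKeyId`, and the key values are elided.

```rust
use std::collections::BTreeMap;

/// Simplified signing-key bundle: current keys plus rotated-out old keys.
struct SigningKeys {
    verify_keys: BTreeMap<String, ()>,
    old_verify_keys: BTreeMap<String, ()>,
}

/// True only when every requested signature key id is known, new or old.
fn contains_all_ids(keys: &SigningKeys, signature_ids: &[String]) -> bool {
    signature_ids.iter().all(|id| {
        keys.verify_keys.contains_key(id) || keys.old_verify_keys.contains_key(id)
    })
}

fn main() {
    let keys = SigningKeys {
        verify_keys: BTreeMap::from([("ed25519:new".to_owned(), ())]),
        old_verify_keys: BTreeMap::from([("ed25519:old".to_owned(), ())]),
    };
    let wanted = vec!["ed25519:new".to_owned(), "ed25519:old".to_owned()];
    assert!(contains_all_ids(&keys, &wanted));
    // A signature made with a key we have never seen forces a fresh fetch.
    assert!(!contains_all_ids(&keys, &["ed25519:unknown".to_owned()]));
}
```
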

View file

@ -3,9 +3,9 @@ use std::sync::Arc;
pub use data::Data; pub use data::Data;
use ruma::{ use ruma::{
api::{client::relations::get_relating_events, Direction}, api::client::relations::get_relating_events,
events::{relation::RelationType, TimelineEventType}, events::{relation::RelationType, TimelineEventType},
EventId, RoomId, UInt, UserId, EventId, RoomId, UserId,
}; };
use serde::Deserialize; use serde::Deserialize;
@ -48,48 +48,26 @@ impl Service {
target: &EventId, target: &EventId,
filter_event_type: Option<TimelineEventType>, filter_event_type: Option<TimelineEventType>,
filter_rel_type: Option<RelationType>, filter_rel_type: Option<RelationType>,
from: Option<String>, from: PduCount,
to: Option<String>, to: Option<PduCount>,
limit: Option<UInt>, limit: usize,
recurse: bool,
dir: &Direction,
) -> Result<get_relating_events::v1::Response> { ) -> Result<get_relating_events::v1::Response> {
let from = match from {
Some(from) => PduCount::try_from_string(&from)?,
None => match dir {
Direction::Forward => PduCount::min(),
Direction::Backward => PduCount::max(),
},
};
let to = to.as_ref().and_then(|t| PduCount::try_from_string(t).ok());
// Use limit or else 10, with maximum 100
let limit = limit
.and_then(|u| u32::try_from(u).ok())
.map_or(10_usize, |u| u as usize)
.min(100);
let next_token; let next_token;
// Spec (v1.10) recommends depth of at least 3 //TODO: Fix ruma: match body.dir {
let depth: u8 = if recurse { 3 } else { 1 }; match ruma::api::Direction::Backward {
ruma::api::Direction::Forward => {
match dir { let events_after: Vec<_> = services()
Direction::Forward => { .rooms
let relations_until = &services().rooms.pdu_metadata.relations_until( .pdu_metadata
sender_user, .relations_until(sender_user, room_id, target, from)? // TODO: should be relations_after
room_id, .filter(|r| {
target, r.as_ref().map_or(true, |(_, pdu)| {
from,
depth,
)?;
let events_after: Vec<_> = relations_until // TODO: should be relations_after
.iter()
.filter(|(_, pdu)| {
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
&& if let Ok(content) = && if let Ok(content) =
serde_json::from_str::<ExtractRelatesToEventId>(pdu.content.get()) serde_json::from_str::<ExtractRelatesToEventId>(
pdu.content.get(),
)
{ {
filter_rel_type filter_rel_type
.as_ref() .as_ref()
@ -98,7 +76,9 @@ impl Service {
false false
} }
}) })
})
.take(limit) .take(limit)
.filter_map(|r| r.ok()) // Filter out buggy events
.filter(|(_, pdu)| { .filter(|(_, pdu)| {
services() services()
.rooms .rooms
@ -106,7 +86,7 @@ impl Service {
.user_can_see_event(sender_user, room_id, &pdu.event_id) .user_can_see_event(sender_user, room_id, &pdu.event_id)
.unwrap_or(false) .unwrap_or(false)
}) })
.take_while(|(k, _)| Some(k) != to.as_ref()) // Stop at `to` .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
.collect(); .collect();
next_token = events_after.last().map(|(count, _)| count).copied(); next_token = events_after.last().map(|(count, _)| count).copied();
@ -121,23 +101,20 @@ impl Service {
chunk: events_after, chunk: events_after,
next_batch: next_token.map(|t| t.stringify()), next_batch: next_token.map(|t| t.stringify()),
prev_batch: Some(from.stringify()), prev_batch: Some(from.stringify()),
recursion_depth: if recurse { Some(depth.into()) } else { None },
}) })
} }
Direction::Backward => { ruma::api::Direction::Backward => {
let relations_until = &services().rooms.pdu_metadata.relations_until( let events_before: Vec<_> = services()
sender_user, .rooms
room_id, .pdu_metadata
target, .relations_until(sender_user, room_id, target, from)?
from, .filter(|r| {
depth, r.as_ref().map_or(true, |(_, pdu)| {
)?;
let events_before: Vec<_> = relations_until
.iter()
.filter(|(_, pdu)| {
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
&& if let Ok(content) = && if let Ok(content) =
serde_json::from_str::<ExtractRelatesToEventId>(pdu.content.get()) serde_json::from_str::<ExtractRelatesToEventId>(
pdu.content.get(),
)
{ {
filter_rel_type filter_rel_type
.as_ref() .as_ref()
@ -146,7 +123,9 @@ impl Service {
false false
} }
}) })
})
.take(limit) .take(limit)
.filter_map(|r| r.ok()) // Filter out buggy events
.filter(|(_, pdu)| { .filter(|(_, pdu)| {
services() services()
.rooms .rooms
@ -154,7 +133,7 @@ impl Service {
.user_can_see_event(sender_user, room_id, &pdu.event_id) .user_can_see_event(sender_user, room_id, &pdu.event_id)
.unwrap_or(false) .unwrap_or(false)
}) })
.take_while(|&(k, _)| Some(k) != to.as_ref()) // Stop at `to` .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
.collect(); .collect();
next_token = events_before.last().map(|(count, _)| count).copied(); next_token = events_before.last().map(|(count, _)| count).copied();
@ -168,7 +147,6 @@ impl Service {
chunk: events_before, chunk: events_before,
next_batch: next_token.map(|t| t.stringify()), next_batch: next_token.map(|t| t.stringify()),
prev_batch: Some(from.stringify()), prev_batch: Some(from.stringify()),
recursion_depth: if recurse { Some(depth.into()) } else { None },
}) })
} }
} }
@ -180,44 +158,14 @@ impl Service {
room_id: &'a RoomId, room_id: &'a RoomId,
target: &'a EventId, target: &'a EventId,
until: PduCount, until: PduCount,
max_depth: u8, ) -> Result<impl Iterator<Item = Result<(PduCount, PduEvent)>> + 'a> {
) -> Result<Vec<(PduCount, PduEvent)>> {
let room_id = services().rooms.short.get_or_create_shortroomid(room_id)?; let room_id = services().rooms.short.get_or_create_shortroomid(room_id)?;
let target = match services().rooms.timeline.get_pdu_count(target)? { let target = match services().rooms.timeline.get_pdu_count(target)? {
Some(PduCount::Normal(c)) => c, Some(PduCount::Normal(c)) => c,
// TODO: Support backfilled relations // TODO: Support backfilled relations
_ => 0, // This will result in an empty iterator _ => 0, // This will result in an empty iterator
}; };
self.db.relations_until(user_id, room_id, target, until)
self.db
.relations_until(user_id, room_id, target, until)
.map(|mut relations| {
let mut pdus: Vec<_> = (*relations).into_iter().filter_map(Result::ok).collect();
let mut stack: Vec<_> =
pdus.clone().iter().map(|pdu| (pdu.to_owned(), 1)).collect();
while let Some(stack_pdu) = stack.pop() {
let target = match stack_pdu.0 .0 {
PduCount::Normal(c) => c,
// TODO: Support backfilled relations
PduCount::Backfilled(_) => 0, // This will result in an empty iterator
};
if let Ok(relations) = self.db.relations_until(user_id, room_id, target, until)
{
for relation in relations.flatten() {
if stack_pdu.1 < max_depth {
stack.push((relation.clone(), stack_pdu.1 + 1));
}
pdus.push(relation);
}
}
}
pdus.sort_by(|a, b| a.0.cmp(&b.0));
pdus
})
} }
#[tracing::instrument(skip(self, room_id, event_ids))] #[tracing::instrument(skip(self, room_id, event_ids))]
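
A simplified sketch of the depth-limited relation traversal added above, where `recurse` maps to a depth of 3 (the spec-recommended minimum) and otherwise 1. A toy in-memory index replaces the database-backed `relations_until`, and unlike the real code the stack is seeded from the target itself; the real implementation also sorts by `PduCount` so pagination tokens stay stable.

```rust
use std::collections::BTreeMap;

/// Toy event graph: each event id maps to the ids of events that relate to it.
type Relations = BTreeMap<u64, Vec<u64>>;

/// Collect every relation reachable from `target` within `max_depth` hops,
/// using an explicit stack instead of recursion.
fn collect_relations(index: &Relations, target: u64, max_depth: u8) -> Vec<u64> {
    let mut found = Vec::new();
    // Each stack entry remembers how many hops away from the target it is.
    let mut stack: Vec<(u64, u8)> = vec![(target, 0)];

    while let Some((id, depth)) = stack.pop() {
        if depth == max_depth {
            continue;
        }
        for &child in index.get(&id).into_iter().flatten() {
            found.push(child);
            stack.push((child, depth + 1));
        }
    }

    // Sort so the result (and any pagination token derived from it) is stable.
    found.sort_unstable();
    found
}

fn main() {
    // 1 <- 2 <- 3 <- 4 (each event relates to the previous one).
    let index = Relations::from([(1, vec![2]), (2, vec![3]), (3, vec![4])]);
    assert_eq!(collect_relations(&index, 1, 1), vec![2]); // no recursion requested
    assert_eq!(collect_relations(&index, 1, 3), vec![2, 3, 4]); // recursion at depth 3
}
```
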

View file

@ -4,8 +4,6 @@ use ruma::RoomId;
pub trait Data: Send + Sync { pub trait Data: Send + Sync {
fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>; fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;
fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
fn search_pdus<'a>( fn search_pdus<'a>(
&'a self, &'a self,

View file

@ -15,16 +15,6 @@ impl Service {
self.db.index_pdu(shortroomid, pdu_id, message_body) self.db.index_pdu(shortroomid, pdu_id, message_body)
} }
#[tracing::instrument(skip(self))]
pub fn deindex_pdu<'a>(
&self,
shortroomid: u64,
pdu_id: &[u8],
message_body: &str,
) -> Result<()> {
self.db.deindex_pdu(shortroomid, pdu_id, message_body)
}
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn search_pdus<'a>( pub fn search_pdus<'a>(
&'a self, &'a self,
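Editor's note: both search hunks drop `deindex_pdu`, the inverse of `index_pdu` that the removed code calls when a message is redacted. Assuming a word-level inverted index keyed by (word, pdu id) — a common layout and only a guess at Conduit's actual key format — the two operations are symmetric: deindexing replays the same tokenization over the original body and removes exactly the keys that indexing wrote:

```rust
use std::collections::BTreeSet;

/// Toy inverted index: one entry per (lowercased word, pdu id).
#[derive(Default)]
struct SearchIndex {
    entries: BTreeSet<(String, Vec<u8>)>,
}

impl SearchIndex {
    fn tokens(body: &str) -> impl Iterator<Item = String> + '_ {
        body.split_whitespace().map(|w| w.to_lowercase())
    }

    fn index_pdu(&mut self, pdu_id: &[u8], body: &str) {
        for word in Self::tokens(body) {
            self.entries.insert((word, pdu_id.to_vec()));
        }
    }

    /// Called with the *original* body when the event is redacted,
    /// so exactly the keys written by `index_pdu` are removed again.
    fn deindex_pdu(&mut self, pdu_id: &[u8], body: &str) {
        for word in Self::tokens(body) {
            self.entries.remove(&(word, pdu_id.to_vec()));
        }
    }
}

fn main() {
    let mut index = SearchIndex::default();
    index.index_pdu(b"pdu-1", "Hello redacted world");
    assert!(index.entries.contains(&("hello".to_owned(), b"pdu-1".to_vec())));
    index.deindex_pdu(b"pdu-1", "Hello redacted world");
    assert!(index.entries.is_empty());
}
```

Without the deindex step, redacted message bodies keep matching full-text searches even though the stored event no longer contains them.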

View file

@ -408,7 +408,7 @@ impl Service {
debug!("User is not allowed to see room {room_id}"); debug!("User is not allowed to see room {room_id}");
// This error will be caught later // This error will be caught later
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"User is not allowed to see the room", "User is not allowed to see the room",
)); ));
} }

View file

@ -321,7 +321,6 @@ impl Service {
unsigned: None, unsigned: None,
state_key: Some(target_user.into()), state_key: Some(target_user.into()),
redacts: None, redacts: None,
timestamp: None,
}; };
Ok(services() Ok(services()

View file

@ -21,9 +21,10 @@ use ruma::{
GlobalAccountDataEventType, StateEventType, TimelineEventType, GlobalAccountDataEventType, StateEventType, TimelineEventType,
}, },
push::{Action, Ruleset, Tweak}, push::{Action, Ruleset, Tweak},
serde::Base64,
state_res::{self, Event, RoomVersion}, state_res::{self, Event, RoomVersion},
uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, OwnedServerName, RoomId, RoomVersionId, ServerName, UserId,
}; };
use serde::Deserialize; use serde::Deserialize;
use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
@ -32,10 +33,7 @@ use tracing::{error, info, warn};
use crate::{ use crate::{
api::server_server, api::server_server,
service::{ service::pdu::{EventHash, PduBuilder},
globals::SigningKeys,
pdu::{EventHash, PduBuilder},
},
services, utils, Error, PduEvent, Result, services, utils, Error, PduEvent, Result,
}; };
@ -401,7 +399,7 @@ impl Service {
&pdu.room_id, &pdu.room_id,
false, false,
)? { )? {
self.redact_pdu(redact_id, pdu, shortroomid)?; self.redact_pdu(redact_id, pdu)?;
} }
} }
} }
@ -418,7 +416,7 @@ impl Service {
&pdu.room_id, &pdu.room_id,
false, false,
)? { )? {
self.redact_pdu(redact_id, pdu, shortroomid)?; self.redact_pdu(redact_id, pdu)?;
} }
} }
} }
@ -485,27 +483,20 @@ impl Service {
.search .search
.index_pdu(shortroomid, &pdu_id, &body)?; .index_pdu(shortroomid, &pdu_id, &body)?;
let server_user = services().globals.server_user(); let server_user = format!("@conduit:{}", services().globals.server_name());
let to_conduit = body.starts_with(&format!("{server_user}: ")) let to_conduit = body.starts_with(&format!("{server_user}: "))
|| body.starts_with(&format!("{server_user} ")) || body.starts_with(&format!("{server_user} "))
|| body == format!("{server_user}:") || body == format!("{server_user}:")
|| body == server_user.as_str(); || body == server_user;
// This will evaluate to false if the emergency password is set up so that // This will evaluate to false if the emergency password is set up so that
// the administrator can execute commands as conduit // the administrator can execute commands as conduit
let from_conduit = pdu.sender == *server_user let from_conduit = pdu.sender == server_user
&& services().globals.emergency_password().is_none(); && services().globals.emergency_password().is_none();
if let Some(admin_room) = services().admin.get_admin_room()? { if let Some(admin_room) = services().admin.get_admin_room()? {
if to_conduit if to_conduit && !from_conduit && admin_room == pdu.room_id {
&& !from_conduit
&& admin_room == pdu.room_id
&& services()
.rooms
.state_cache
.is_joined(server_user, &admin_room)?
{
services().admin.process_message(body); services().admin.process_message(body);
} }
} }
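Editor's note: this hunk is where the timeline service decides whether a message is an admin command: the body must be addressed to the server user, the sender must not be the server user itself (unless the emergency password is set), the room must be the admin room, and — only on the removed side — the server user must still be joined there. A condensed sketch of that gate, with the surrounding services collapsed into plain parameters:

```rust
/// Returns true when `body` should be forwarded to the admin command handler.
/// `server_user` is e.g. "@conduit:example.org"; `server_user_joined` is the
/// extra membership condition the removed code adds.
fn is_admin_command(
    body: &str,
    sender: &str,
    room_id: &str,
    server_user: &str,
    admin_room_id: Option<&str>,
    emergency_password_set: bool,
    server_user_joined: bool,
) -> bool {
    let to_conduit = body.starts_with(&format!("{server_user}: "))
        || body.starts_with(&format!("{server_user} "))
        || body == format!("{server_user}:")
        || body == server_user;

    // If the emergency password is set, the admin may be speaking *as* conduit,
    // so messages from the server user are still treated as commands.
    let from_conduit = sender == server_user && !emergency_password_set;

    match admin_room_id {
        Some(admin_room) => {
            to_conduit && !from_conduit && room_id == admin_room && server_user_joined
        }
        None => false,
    }
}

fn main() {
    assert!(is_admin_command(
        "@conduit:example.org: help",
        "@alice:example.org",
        "!admins:example.org",
        "@conduit:example.org",
        Some("!admins:example.org"),
        false,
        true,
    ));
}
```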
@ -665,7 +656,6 @@ impl Service {
unsigned, unsigned,
state_key, state_key,
redacts, redacts,
timestamp,
} = pdu_builder; } = pdu_builder;
let prev_events: Vec<_> = services() let prev_events: Vec<_> = services()
@ -735,9 +725,9 @@ impl Service {
event_id: ruma::event_id!("$thiswillbefilledinlater").into(), event_id: ruma::event_id!("$thiswillbefilledinlater").into(),
room_id: room_id.to_owned(), room_id: room_id.to_owned(),
sender: sender.to_owned(), sender: sender.to_owned(),
origin_server_ts: timestamp origin_server_ts: utils::millis_since_unix_epoch()
.map(|ts| ts.get()) .try_into()
.unwrap_or_else(|| MilliSecondsSinceUnixEpoch::now().get()), .expect("time is valid"),
kind: event_type, kind: event_type,
content, content,
state_key, state_key,
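Editor's note: the interesting difference in this hunk is `origin_server_ts`. On the removed side the `PduBuilder` carries an optional `timestamp` (typically appservice timestamp massaging) and the builder falls back to the current time only when it is `None`; the other side always stamps events with "now". A small sketch of that fallback, with `PduBuilder` shrunk to the one relevant field:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

/// Only the field this hunk is about; the real builder also carries the event
/// type, content, state_key, redacts, and so on.
struct PduBuilder {
    timestamp: Option<u64>, // milliseconds since the Unix epoch, if massaged
}

fn millis_since_unix_epoch() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is after the Unix epoch")
        .as_millis() as u64
}

fn origin_server_ts(builder: &PduBuilder) -> u64 {
    // Fall back to "now" when no explicit timestamp was requested.
    builder.timestamp.unwrap_or_else(millis_since_unix_epoch)
}

fn main() {
    let massaged = PduBuilder { timestamp: Some(1_700_000_000_000) };
    let normal = PduBuilder { timestamp: None };
    println!("{} {}", origin_server_ts(&massaged), origin_server_ts(&normal));
}
```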
@ -772,7 +762,7 @@ impl Service {
if !auth_check { if !auth_check {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Event is not authorized.", "Event is not authorized.",
)); ));
} }
@ -815,7 +805,7 @@ impl Service {
pdu.event_id = EventId::parse_arc(format!( pdu.event_id = EventId::parse_arc(format!(
"${}", "${}",
ruma::signatures::reference_hash(&pdu_json, &room_version_id) ruma::signatures::reference_hash(&pdu_json, &room_version_id)
.expect("Event format validated when event was hashed") .expect("ruma can calculate reference hashes")
)) ))
.expect("ruma's reference hashes are valid event ids"); .expect("ruma's reference hashes are valid event ids");
@ -852,7 +842,7 @@ impl Service {
TimelineEventType::RoomEncryption => { TimelineEventType::RoomEncryption => {
warn!("Encryption is not allowed in the admins room"); warn!("Encryption is not allowed in the admins room");
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Encryption is not allowed in the admins room.", "Encryption is not allowed in the admins room.",
)); ));
} }
@ -867,7 +857,7 @@ impl Service {
.filter(|v| v.starts_with('@')) .filter(|v| v.starts_with('@'))
.unwrap_or(sender.as_str()); .unwrap_or(sender.as_str());
let server_name = services().globals.server_name(); let server_name = services().globals.server_name();
let server_user = services().globals.server_user().as_str(); let server_user = format!("@conduit:{}", server_name);
let content = serde_json::from_str::<ExtractMembership>(pdu.content.get()) let content = serde_json::from_str::<ExtractMembership>(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid content in pdu."))?; .map_err(|_| Error::bad_database("Invalid content in pdu."))?;
@ -875,7 +865,7 @@ impl Service {
if target == server_user { if target == server_user {
warn!("Conduit user cannot leave from admins room"); warn!("Conduit user cannot leave from admins room");
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Conduit user cannot leave from admins room.", "Conduit user cannot leave from admins room.",
)); ));
} }
@ -891,7 +881,7 @@ impl Service {
if count < 2 { if count < 2 {
warn!("Last admin cannot leave from admins room"); warn!("Last admin cannot leave from admins room");
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Last admin cannot leave from admins room.", "Last admin cannot leave from admins room.",
)); ));
} }
@ -901,7 +891,7 @@ impl Service {
if target == server_user { if target == server_user {
warn!("Conduit user cannot be banned in admins room"); warn!("Conduit user cannot be banned in admins room");
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Conduit user cannot be banned in admins room.", "Conduit user cannot be banned in admins room.",
)); ));
} }
@ -917,7 +907,7 @@ impl Service {
if count < 2 { if count < 2 {
warn!("Last admin cannot be banned in admins room"); warn!("Last admin cannot be banned in admins room");
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"Last admin cannot be banned in admins room.", "Last admin cannot be banned in admins room.",
)); ));
} }
@ -949,7 +939,7 @@ impl Service {
false, false,
)? { )? {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"User cannot redact this event.", "User cannot redact this event.",
)); ));
} }
@ -970,7 +960,7 @@ impl Service {
false, false,
)? { )? {
return Err(Error::BadRequest( return Err(Error::BadRequest(
ErrorKind::forbidden(), ErrorKind::Forbidden,
"User cannot redact this event.", "User cannot redact this event.",
)); ));
} }
@ -1110,33 +1100,14 @@ impl Service {
/// Replace a PDU with the redacted form. /// Replace a PDU with the redacted form.
#[tracing::instrument(skip(self, reason))] #[tracing::instrument(skip(self, reason))]
pub fn redact_pdu( pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> {
&self,
event_id: &EventId,
reason: &PduEvent,
shortroomid: u64,
) -> Result<()> {
// TODO: Don't reserialize, keep original json // TODO: Don't reserialize, keep original json
if let Some(pdu_id) = self.get_pdu_id(event_id)? { if let Some(pdu_id) = self.get_pdu_id(event_id)? {
let mut pdu = self let mut pdu = self
.get_pdu_from_id(&pdu_id)? .get_pdu_from_id(&pdu_id)?
.ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?;
#[derive(Deserialize)]
struct ExtractBody {
body: String,
}
if let Ok(content) = serde_json::from_str::<ExtractBody>(pdu.content.get()) {
services()
.rooms
.search
.deindex_pdu(shortroomid, &pdu_id, &content.body)?;
}
let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?; let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?;
pdu.redact(room_version_id, reason)?; pdu.redact(room_version_id, reason)?;
self.replace_pdu( self.replace_pdu(
&pdu_id, &pdu_id,
&utils::to_canonical_object(&pdu).expect("PDU is an object"), &utils::to_canonical_object(&pdu).expect("PDU is an object"),
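Editor's note: on the removed side, `redact_pdu` extracts the original `body` from the event content before redacting, so the search entries written at insert time can be removed; the other side skips that and only swaps in the redacted PDU. A compressed model of that ordering, with the search and storage calls abstracted into closures (the `ExtractBody` helper mirrors the one in the hunk):

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct ExtractBody {
    body: String,
}

/// Order matters: read the body *before* redaction strips the content,
/// deindex it, then persist the redacted form.
fn redact_and_deindex(
    content_json: &str,
    mut deindex: impl FnMut(&str),
    redact_and_store: impl FnOnce(),
) {
    if let Ok(content) = serde_json::from_str::<ExtractBody>(content_json) {
        deindex(&content.body);
    }
    redact_and_store();
}

fn main() {
    let mut removed = Vec::new();
    redact_and_deindex(
        r#"{"msgtype":"m.text","body":"hello world"}"#,
        |body| removed.push(body.to_owned()),
        || { /* replace_pdu(...) would run here */ },
    );
    assert_eq!(removed, vec!["hello world".to_owned()]);
}
```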
@ -1217,7 +1188,7 @@ impl Service {
&self, &self,
origin: &ServerName, origin: &ServerName,
pdu: Box<RawJsonValue>, pdu: Box<RawJsonValue>,
pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<()> { ) -> Result<()> {
let (event_id, value, room_id) = server_server::parse_incoming_pdu(&pdu)?; let (event_id, value, room_id) = server_server::parse_incoming_pdu(&pdu)?;
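Editor's note: the `pub_key_map` parameter changes type here — instead of a bare map from key ID to base64 key material, the removed side groups a remote server's keys behind a `globals::SigningKeys` type whose definition is not part of this diff. The struct below is therefore only a guess at why such a wrapper helps (keeping validity metadata next to the keys), not a description of Conduit's actual fields:

```rust
use std::collections::BTreeMap;

/// Hypothetical wrapper: keeping expiry next to the keys lets callers decide
/// whether a cached key is still trustworthy instead of passing raw base64 around.
struct SigningKeys {
    verify_keys: BTreeMap<String, String>, // key id -> unpadded base64 public key
    valid_until_ts: u64,                   // milliseconds since the Unix epoch
}

fn usable_key<'a>(keys: &'a SigningKeys, key_id: &str, now_ms: u64) -> Option<&'a str> {
    if now_ms <= keys.valid_until_ts {
        keys.verify_keys.get(key_id).map(String::as_str)
    } else {
        None
    }
}

fn main() {
    let keys = SigningKeys {
        verify_keys: BTreeMap::from([("ed25519:abc".to_owned(), "base64publickey".to_owned())]),
        valid_until_ts: 2_000,
    };
    assert_eq!(usable_key(&keys, "ed25519:abc", 1_000), Some("base64publickey"));
    assert_eq!(usable_key(&keys, "ed25519:abc", 3_000), None);
}
```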

View file

@ -86,7 +86,7 @@ impl Service {
if !hash_matches { if !hash_matches {
uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody {
kind: ErrorKind::forbidden(), kind: ErrorKind::Forbidden,
message: "Invalid username or password.".to_owned(), message: "Invalid username or password.".to_owned(),
}); });
return Ok((false, uiaainfo)); return Ok((false, uiaainfo));
@ -101,7 +101,7 @@ impl Service {
uiaainfo.completed.push(AuthType::RegistrationToken); uiaainfo.completed.push(AuthType::RegistrationToken);
} else { } else {
uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody {
kind: ErrorKind::forbidden(), kind: ErrorKind::Forbidden,
message: "Invalid registration token.".to_owned(), message: "Invalid registration token.".to_owned(),
}); });
return Ok((false, uiaainfo)); return Ok((false, uiaainfo));
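Editor's note: both hunks in this file follow the same UIAA pattern — each stage (password, registration token, …) either pushes its `AuthType` onto `uiaainfo.completed` or records an `auth_error` and reports the flow as not yet finished. A toy version of that per-stage bookkeeping, with plain strings in place of ruma's `AuthType` and error types:

```rust
/// Minimal model of one UIAA stage check: on success the stage is recorded as
/// completed, on failure an error is attached and the flow does not advance.
struct UiaaInfo {
    completed: Vec<String>,
    auth_error: Option<String>,
}

fn check_stage(uiaainfo: &mut UiaaInfo, stage: &str, ok: bool, failure_message: &str) -> bool {
    if ok {
        uiaainfo.completed.push(stage.to_owned());
        true
    } else {
        uiaainfo.auth_error = Some(failure_message.to_owned());
        false
    }
}

fn main() {
    let mut info = UiaaInfo { completed: Vec::new(), auth_error: None };
    // e.g. a password stage whose hash comparison failed:
    let advanced = check_stage(&mut info, "m.login.password", false, "Invalid username or password.");
    assert!(!advanced);
    assert!(info.completed.is_empty());
    assert_eq!(info.auth_error.as_deref(), Some("Invalid username or password."));
}
```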

View file

@ -211,10 +211,4 @@ pub trait Data: Send + Sync {
fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result<String>; fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result<String>;
fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result<Option<FilterDefinition>>; fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result<Option<FilterDefinition>>;
// Creates an OpenID token, which can be used to prove that a user has access to an account (primarily for integrations)
fn create_openid_token(&self, user_id: &UserId) -> Result<(String, u64)>;
/// Find out which user an OpenID access token belongs to.
fn find_from_openid_token(&self, token: &str) -> Result<Option<OwnedUserId>>;
} }

View file

@ -9,6 +9,7 @@ pub use data::Data;
use ruma::{ use ruma::{
api::client::{ api::client::{
device::Device, device::Device,
error::ErrorKind,
filter::FilterDefinition, filter::FilterDefinition,
sync::sync_events::{ sync::sync_events::{
self, self,
@ -19,7 +20,7 @@ use ruma::{
events::AnyToDeviceEvent, events::AnyToDeviceEvent,
serde::Raw, serde::Raw,
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri,
OwnedRoomId, OwnedUserId, UInt, UserId, OwnedRoomId, OwnedUserId, RoomAliasId, UInt, UserId,
}; };
use crate::{services, Error, Result}; use crate::{services, Error, Result};
@ -261,14 +262,19 @@ impl Service {
/// Check if a user is an admin /// Check if a user is an admin
pub fn is_admin(&self, user_id: &UserId) -> Result<bool> { pub fn is_admin(&self, user_id: &UserId) -> Result<bool> {
if let Some(admin_room_id) = services().admin.get_admin_room()? { let admin_room_alias_id =
RoomAliasId::parse(format!("#admins:{}", services().globals.server_name()))
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
let admin_room_id = services()
.rooms
.alias
.resolve_local_alias(&admin_room_alias_id)?
.unwrap();
services() services()
.rooms .rooms
.state_cache .state_cache
.is_joined(user_id, &admin_room_id) .is_joined(user_id, &admin_room_id)
} else {
Ok(false)
}
} }
/// Create a new user account on this homeserver. /// Create a new user account on this homeserver.
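Editor's note: the two sides of this hunk answer "is this user an admin?" differently — one resolves the `#admins:<server>` alias on every call and `unwrap()`s the result, the other asks the admin service for the admin room and treats "no admin room" as "nobody is an admin". A sketch of the latter, more defensive shape, with the room lookup and membership check reduced to parameters:

```rust
/// Hypothetical shape of the check: if no admin room exists (e.g. it was never
/// created or was deleted), nobody is an admin, instead of panicking on unwrap.
fn is_admin(admin_room: Option<&str>, is_joined: impl Fn(&str, &str) -> bool, user_id: &str) -> bool {
    match admin_room {
        Some(room_id) => is_joined(user_id, room_id),
        None => false,
    }
}

fn main() {
    let joined = |user: &str, room: &str| user == "@alice:example.org" && room == "!admins:example.org";
    assert!(is_admin(Some("!admins:example.org"), joined, "@alice:example.org"));
    assert!(!is_admin(None, joined, "@alice:example.org"));
}
```

Treating a missing admin room as `false` avoids a panic on servers where the admin room was never created or has since been deleted.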
@ -592,16 +598,6 @@ impl Service {
) -> Result<Option<FilterDefinition>> { ) -> Result<Option<FilterDefinition>> {
self.db.get_filter(user_id, filter_id) self.db.get_filter(user_id, filter_id)
} }
// Creates an OpenID token, which can be used to prove that a user has access to an account (primarily for integrations)
pub fn create_openid_token(&self, user_id: &UserId) -> Result<(String, u64)> {
self.db.create_openid_token(user_id)
}
/// Find out which user an OpenID access token belongs to.
pub fn find_from_openid_token(&self, token: &str) -> Result<Option<OwnedUserId>> {
self.db.find_from_openid_token(token)
}
} }
/// Ensure that a user only sees signatures from themselves and the target user /// Ensure that a user only sees signatures from themselves and the target user
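Editor's note: the trait methods and service wrappers removed in the last two hunks back the OpenID part of the client API — the homeserver mints a short-lived token bound to a user and can later resolve the token back to that user when a third party (for example an integration manager) presents it. A toy in-memory version of that pair of operations; the one-hour lifetime and the token format are assumptions, not values taken from Conduit:

```rust
use std::collections::HashMap;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

#[derive(Default)]
struct OpenIdTokens {
    // token -> (user id, expiry in seconds since the Unix epoch)
    tokens: HashMap<String, (String, u64)>,
}

impl OpenIdTokens {
    /// Returns the token and its remaining lifetime in seconds.
    fn create_openid_token(&mut self, user_id: &str) -> (String, u64) {
        let expires_in = Duration::from_secs(3600); // assumed one-hour lifetime
        let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
        let token = format!("tok_{now}_{}", self.tokens.len()); // stand-in for a random token
        self.tokens
            .insert(token.clone(), (user_id.to_owned(), now + expires_in.as_secs()));
        (token, expires_in.as_secs())
    }

    /// Find out which user an OpenID access token belongs to, if it is still valid.
    fn find_from_openid_token(&self, token: &str) -> Option<String> {
        let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
        self.tokens
            .get(token)
            .filter(|(_, expires_at)| *expires_at > now)
            .map(|(user, _)| user.clone())
    }
}

fn main() {
    let mut store = OpenIdTokens::default();
    let (token, expires_in) = store.create_openid_token("@alice:example.org");
    assert_eq!(expires_in, 3600);
    assert_eq!(
        store.find_from_openid_token(&token).as_deref(),
        Some("@alice:example.org")
    );
}
```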

View file

@ -128,7 +128,7 @@ impl Error {
kind.clone(), kind.clone(),
match kind { match kind {
WrongRoomKeysVersion { .. } WrongRoomKeysVersion { .. }
| Forbidden { .. } | Forbidden
| GuestAccessForbidden | GuestAccessForbidden
| ThreepidAuthFailed | ThreepidAuthFailed
| ThreepidDenied => StatusCode::FORBIDDEN, | ThreepidDenied => StatusCode::FORBIDDEN,
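Editor's note: the `Forbidden { .. }` vs `Forbidden` patterns in this last hunk (and the many `ErrorKind::forbidden()` → `ErrorKind::Forbidden` swaps earlier) reflect a ruma API difference — on one side `Forbidden` is a plain unit variant, on the other it is a non-exhaustive struct variant built through a constructor. The snippet below models that evolution with local enums; the field name is invented for illustration and is not ruma's:

```rust
#[allow(dead_code)]
enum OldErrorKind {
    Forbidden, // unit variant: constructed and matched directly
}

#[allow(dead_code)]
#[non_exhaustive]
enum NewErrorKind {
    // Struct variant: adding fields later would break literal construction at call
    // sites, so a helper constructor is provided and matches use `Forbidden { .. }`.
    Forbidden { authenticated: bool }, // field name is a made-up placeholder
}

impl NewErrorKind {
    fn forbidden() -> Self {
        NewErrorKind::Forbidden { authenticated: false }
    }
}

fn status_code(kind: &NewErrorKind) -> u16 {
    match kind {
        NewErrorKind::Forbidden { .. } => 403,
    }
}

fn main() {
    let _old = OldErrorKind::Forbidden;
    assert_eq!(status_code(&NewErrorKind::forbidden()), 403);
}
```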